@@ -67,6 +67,56 @@ list_allocate_items(size_t capacity)
67
67
PyObject * * items = PyMem_Malloc (capacity * sizeof (PyObject * ));
68
68
return items ;
69
69
}
70

/* Grow self->ob_item so that it can hold at least (base + extra) elements.
 *
 * Returns 0 on success, -1 (with MemoryError set) on overflow or allocation
 * failure.  On success self->allocated is at least base + extra; the list's
 * size is NOT changed, and any newly allocated slots beyond the old
 * allocation contain whatever list_allocate_items left there — callers must
 * overwrite them before exposing the list.  Note that self->ob_item may be
 * replaced by this call.
 *
 * Free-threaded build only: the array is swapped in with a release store so
 * that lock-free readers that load ob_item observe fully copied contents
 * (NOTE(review): assumes readers pair this with an acquire load — confirm
 * against the lock-free read paths).
 */
static int
list_ensure_capacity_slow(PyListObject *self, Py_ssize_t base, Py_ssize_t extra)
{
    /* Reject base + extra if it would overflow, or if the resulting byte
     * count (reqsize * sizeof(PyObject *)) could not be represented. */
    if (base > PY_SSIZE_T_MAX/(Py_ssize_t)sizeof(PyObject *) - extra) {
        PyErr_NoMemory();
        return -1;
    }

    Py_ssize_t reqsize = base + extra;
    Py_ssize_t allocated = self->allocated;
    if (allocated >= reqsize) {
        /* Already big enough; a non-zero allocation implies a live array. */
        assert(self->ob_item != NULL || reqsize == 0);
        return 0;
    }

    /* The array is being replaced while other threads may access the list:
     * if the list is not owned by this thread, mark it as shared so freeing
     * of the old array below goes through the delayed path. */
    if (!_Py_IsOwnedByCurrentThread((PyObject *)self)) {
        _PyObject_GC_SET_SHARED(self);
    }

    /* Over-allocate (list_good_size) to amortize repeated growth. */
    size_t capacity = list_good_size(reqsize);
    PyObject **items = list_allocate_items(capacity);
    if (items == NULL) {
        /* list_allocate_items is a raw PyMem_Malloc wrapper and does not
         * set an exception itself. */
        PyErr_NoMemory();
        return -1;
    }
    PyObject **old = self->ob_item;
    if (self->ob_item) {
        /* Copy the whole old allocation (not just ob_size entries); the
         * slack slots are trash either way. */
        memcpy(items, self->ob_item, allocated * sizeof(PyObject *));
    }
    /* Publish the new array with release ordering before updating the
     * bookkeeping, so concurrent readers never see a partially copied
     * array through ob_item. */
    _Py_atomic_store_ptr_release(&self->ob_item, items);
    self->allocated = capacity;
    if (old) {
        if (_PyObject_GC_IS_SHARED(self)) {
            /* Other threads may still be reading through the old pointer;
             * defer the free (QSBR-style) instead of freeing immediately. */
            _PyMem_FreeDelayed(old);
        }
        else {
            PyMem_Free(old);
        }
    }
    return 0;
}
70
120
#endif
71
121
72
122
static PyListObject *
@@ -184,8 +234,14 @@ list_resize(PyListObject *self, Py_ssize_t newsize)
184
234
}
185
235
186
236
static int
187
- list_preallocate_exact (PyListObject * self , Py_ssize_t size )
237
+ list_ensure_capacity (PyListObject * self , Py_ssize_t base , Py_ssize_t extra )
188
238
{
239
+ #ifdef Py_GIL_DISABLED
240
+ if (base > self -> allocated - extra ) {
241
+ return list_ensure_capacity_slow (self , base , extra );
242
+ }
243
+ #else
244
+ Py_ssize_t size = extra ;
189
245
assert (self -> ob_item == NULL );
190
246
assert (size > 0 );
191
247
@@ -202,6 +258,7 @@ list_preallocate_exact(PyListObject *self, Py_ssize_t size)
202
258
}
203
259
self -> ob_item = items ;
204
260
self -> allocated = size ;
261
+ #endif
205
262
return 0 ;
206
263
}
207
264
@@ -389,7 +446,7 @@ PyList_Insert(PyObject *op, Py_ssize_t where, PyObject *newitem)
389
446
int
390
447
_PyList_AppendTakeRefListResize (PyListObject * self , PyObject * newitem )
391
448
{
392
- Py_ssize_t len = Py_SIZE (self );
449
+ Py_ssize_t len = PyList_GET_SIZE (self );
393
450
assert (self -> allocated == -1 || self -> allocated == len );
394
451
if (list_resize (self , len + 1 ) < 0 ) {
395
452
Py_DECREF (newitem );
@@ -1027,7 +1084,7 @@ list_extend_fast(PyListObject *self, PyObject *iterable)
1027
1084
// an overflow on any relevant platform.
1028
1085
assert (m < PY_SSIZE_T_MAX - n );
1029
1086
if (self -> ob_item == NULL ) {
1030
- if (list_preallocate_exact (self , n ) < 0 ) {
1087
+ if (list_ensure_capacity (self , m , n ) < 0 ) {
1031
1088
return -1 ;
1032
1089
}
1033
1090
Py_SET_SIZE (self , n );
@@ -1075,7 +1132,7 @@ list_extend_iter(PyListObject *self, PyObject *iterable)
1075
1132
*/
1076
1133
}
1077
1134
else if (self -> ob_item == NULL ) {
1078
- if (n && list_preallocate_exact (self , n ) < 0 )
1135
+ if (n && list_ensure_capacity (self , m , n ) < 0 )
1079
1136
goto error ;
1080
1137
}
1081
1138
else {
0 commit comments