From 84f429d7e30eb3661390aa33787c4a9eca9a1932 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 7 Feb 2021 13:24:29 -0700 Subject: [PATCH 01/31] REL: prepare 1.20.x for further development --- doc/source/release.rst | 1 + doc/source/release/1.20.2-notes.rst | 45 +++++++++++++++++++++++++++++ pavement.py | 2 +- setup.py | 4 +-- 4 files changed, 49 insertions(+), 3 deletions(-) create mode 100644 doc/source/release/1.20.2-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index 3e975b6cb335..1edd693b9e0b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release Notes .. toctree:: :maxdepth: 3 + 1.20.2 1.20.1 1.20.0 1.19.5 diff --git a/doc/source/release/1.20.2-notes.rst b/doc/source/release/1.20.2-notes.rst new file mode 100644 index 000000000000..e863de56f214 --- /dev/null +++ b/doc/source/release/1.20.2-notes.rst @@ -0,0 +1,45 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.20.2 Release Notes +========================== + + +Highlights +========== + + +New functions +============= + + +Deprecations +============ + + +Future Changes +============== + + +Expired deprecations +==================== + + +Compatibility notes +=================== + + +C API changes +============= + + +New Features +============ + + +Improvements +============ + + +Changes +======= diff --git a/pavement.py b/pavement.py index 2d989ae72bae..a5c021f513b8 100644 --- a/pavement.py +++ b/pavement.py @@ -37,7 +37,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/1.20.1-notes.rst' +RELEASE_NOTES = 'doc/source/release/1.20.2-notes.rst' #------------------------------------------------------- diff --git a/setup.py b/setup.py index 8019536365ff..77a9b82bb68f 100755 --- a/setup.py +++ b/setup.py @@ -56,8 +56,8 @@ MAJOR = 1 MINOR = 20 -MICRO = 1 -ISRELEASED = True +MICRO = 2 +ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) # The first version not in the `Programming Language :: Python :: ...` classifiers above From b6a8fb3dccf84faca9f991727123560134c05e80 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 9 Feb 2021 17:13:03 -0700 Subject: [PATCH 02/31] MAINT: Update f2py from master. This will make it easier to backport the f2py fixes currently going into master. --- numpy/f2py/__version__.py | 9 +-- numpy/f2py/capi_maps.py | 4 +- numpy/f2py/cb_rules.py | 8 +- numpy/f2py/common_rules.py | 2 - numpy/f2py/crackfortran.py | 32 ++++---- numpy/f2py/f2py2e.py | 30 +++---- numpy/f2py/rules.py | 17 ++-- numpy/f2py/src/fortranobject.c | 4 +- numpy/f2py/tests/test_array_from_pyobj.py | 97 +++++++++++++---------- numpy/f2py/tests/test_callback.py | 48 +++++++++++ numpy/f2py/tests/test_crackfortran.py | 29 +++++++ numpy/tests/test_scripts.py | 4 +- 12 files changed, 183 insertions(+), 101 deletions(-) diff --git a/numpy/f2py/__version__.py b/numpy/f2py/__version__.py index 104c2e1a899e..e20d7c1dbb38 100644 --- a/numpy/f2py/__version__.py +++ b/numpy/f2py/__version__.py @@ -1,8 +1 @@ -major = 2 - -try: - from __svn_version__ import version - version_info = (major, version) - version = '%s_%s' % version_info -except (ImportError, ValueError): - version = str(major) +from numpy.version import version diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index fabbfc4c24ac..fe0d4a52bd16 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -11,8 +11,6 @@ Pearu Peterson """ -__version__ = "$Revision: 1.60 $"[10:-1] - from . 
import __version__ f2py_version = __version__.version @@ -309,7 +307,7 @@ def getstrlength(var): len = a['*'] elif 'len' in a: len = a['len'] - if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len): + if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): if isintent_hide(var): errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( repr(var))) diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index 60bc1ad1142c..62aa2fca9e56 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -70,7 +70,8 @@ /*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ #static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { - #name#_t *cb; + #name#_t cb_local = { NULL, NULL, 0 }; + #name#_t *cb = NULL; PyTupleObject *capi_arglist = NULL; PyObject *capi_return = NULL; PyObject *capi_tmp = NULL; @@ -82,12 +83,17 @@ f2py_cb_start_clock(); #endif cb = get_active_#name#(); + if (cb == NULL) { + capi_longjmp_ok = 0; + cb = &cb_local; + } capi_arglist = cb->args_capi; CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); if (cb->capi==NULL) { capi_longjmp_ok = 0; cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); } if (cb->capi==NULL) { PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py index 90483e55be83..937d8bc723bd 100644 --- a/numpy/f2py/common_rules.py +++ b/numpy/f2py/common_rules.py @@ -13,8 +13,6 @@ Pearu Peterson """ -__version__ = "$Revision: 1.19 $"[10:-1] - from . 
import __version__ f2py_version = __version__.version diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 8640ce7a71df..1149633c0dfa 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -294,10 +294,10 @@ def getextension(name): return '' return name[i + 1:] -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search +is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search _free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match @@ -868,7 +868,7 @@ def appenddecl(decl, decl2, force=1): return decl selectpattern = re.compile( - r'\s*(?P(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) + r'\s*(?P(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) nameargspattern = re.compile( r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P.*)\s*@\)@))*\s*\Z', re.I) callnameargspattern = re.compile( @@ -1389,7 +1389,7 @@ def analyzeline(m, case, line): previous_context = ('common', bn, groupcounter) elif case == 'use': m1 = re.match( - r'\A\s*(?P\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) + r'\A\s*(?P\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) if m1: mm = m1.groupdict() if 'use' not in groupcache[groupcounter]: @@ -1406,7 +1406,7 @@ def analyzeline(m, case, line): for l in ll: if '=' in l: m2 = re.match( - r'\A\s*(?P\b[\w]+\b)\s*=\s*>\s*(?P\b[\w]+\b)\s*\Z', l, re.I) + r'\A\s*(?P\b\w+\b)\s*=\s*>\s*(?P\b\w+\b)\s*\Z', l, re.I) if m2: rl[m2.group('local').strip()] = m2.group( 'use').strip() @@ -1482,15 +1482,15 @@ def cracktypespec0(typespec, ll): ll = ll[i + 2:] return typespec, selector, attr, ll ##### -namepattern = re.compile(r'\s*(?P\b[\w]+\b)\s*(?P.*)\s*\Z', re.I) +namepattern = re.compile(r'\s*(?P\b\w+\b)\s*(?P.*)\s*\Z', re.I) kindselector = re.compile( - r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|[*]\s*(?P.*?))\s*\Z', re.I) + r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|\*\s*(?P.*?))\s*\Z', re.I) charselector = re.compile( - r'\s*(\((?P.*)\)|[*]\s*(?P.*))\s*\Z', re.I) + r'\s*(\((?P.*)\)|\*\s*(?P.*))\s*\Z', re.I) lenkindpattern = re.compile( r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)|))\s*\Z', re.I) lenarraypattern = re.compile( - r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*[*]\s*(?P.*?)|([*]\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) + r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*\*\s*(?P.*?)|(\*\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) def removespaces(expr): @@ -1611,6 +1611,10 @@ def updatevars(typespec, selector, attrspec, entitydecl): edecl['charselector'] = copy.copy(charselect) edecl['typename'] = typename edecl['attrspec'] = copy.copy(attrspec) + if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']: + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(e) if m.group('after'): m1 = 
lenarraypattern.match(markouterparen(m.group('after'))) if m1: @@ -2988,10 +2992,10 @@ def analyzeargs(block): block['vars'][block['result']] = {} return block -determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I) -determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P[\w]+)|)\Z', re.I) +determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P\w+)|)\Z', re.I) determineexprtype_re_3 = re.compile( - r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P[\w]+)|)\Z', re.I) + r'\A[+-]?[\d.]+[-\d+de.]*(_(?P\w+)|)\Z', re.I) determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z', re.I) diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index be2c345d1a99..a14f068f15dd 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -29,18 +29,14 @@ from . import capi_maps f2py_version = __version__.version +numpy_version = __version__.version errmess = sys.stderr.write # outmess=sys.stdout.write show = pprint.pprint outmess = auxfuncs.outmess -try: - from numpy import __version__ as numpy_version -except ImportError: - numpy_version = 'N/A' - -__usage__ = """\ -Usage: +__usage__ =\ +f"""Usage: 1) To construct extension module sources: @@ -97,8 +93,8 @@ --[no-]latex-doc Create (or not) module.tex. Default is --no-latex-doc. --short-latex Create 'incomplete' LaTeX document (without commands - \\documentclass, \\tableofcontents, and \\begin{document}, - \\end{document}). + \\documentclass, \\tableofcontents, and \\begin{{document}}, + \\end{{document}}). --[no-]rest-doc Create (or not) module.rst. Default is --no-rest-doc. @@ -167,12 +163,12 @@ array. Integer sets the threshold for array sizes when a message should be shown. -Version: %s -numpy Version: %s +Version: {f2py_version} +numpy Version: {numpy_version} Requires: Python 3.5 or higher. License: NumPy license (see LICENSE.txt in the NumPy source code) Copyright 1999 - 2011 Pearu Peterson all rights reserved. 
-http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version) +http://cens.ioc.ee/projects/f2py2e/""" def scaninputline(inputline): @@ -515,14 +511,14 @@ def run_compile(): remove_build_dir = 1 build_dir = tempfile.mkdtemp() - _reg1 = re.compile(r'[-][-]link[-]') + _reg1 = re.compile(r'--link-') sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] if sysinfo_flags: sysinfo_flags = [f[7:] for f in sysinfo_flags] _reg2 = re.compile( - r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include') + r'--((no-|)(wrap-functions|lower)|debug-capi|quiet)|-include') f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] f2py_flags2 = [] @@ -540,11 +536,11 @@ def run_compile(): sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] _reg3 = re.compile( - r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)') + r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in flib_flags] _reg4 = re.compile( - r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))') + r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in fc_flags] @@ -573,7 +569,7 @@ def run_compile(): del flib_flags[i] assert len(flib_flags) <= 2, repr(flib_flags) - _reg5 = re.compile(r'[-][-](verbose)') + _reg5 = re.compile(r'--(verbose)') setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in setup_flags] diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 0311e4f7b021..b9cbc5487278 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -50,18 +50,15 @@ Pearu Peterson """ -__version__ = "$Revision: 1.129 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -from .. import version as _numpy_version -numpy_version = _numpy_version.version - import os import time import copy +# __version__.version is now the same as the NumPy version +from . 
import __version__ +f2py_version = __version__.version +numpy_version = __version__.version + from .auxfuncs import ( applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote, @@ -202,7 +199,7 @@ \tif (PyErr_Occurred()) \t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;} \td = PyModule_GetDict(m); -\ts = PyUnicode_FromString(\"$R""" + """evision: $\"); +\ts = PyUnicode_FromString(\"#f2py_version#\"); \tPyDict_SetItemString(d, \"__version__\", s); \tPy_DECREF(s); \ts = PyUnicode_FromString( @@ -812,7 +809,7 @@ """, {debugcapi: ["""\ fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs); - CFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""", + CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""", {l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, """\ CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\"); diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 3275f90ad2cb..b9ef18701ce3 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -799,7 +799,7 @@ PyArrayObject* array_from_pyobj(const int type_num, && ARRAY_ISCOMPATIBLE(arr,type_num) && F2PY_CHECK_ALIGNMENT(arr, intent) ) { - if ((intent & F2PY_INTENT_C)?PyArray_ISCARRAY(arr):PyArray_ISFARRAY(arr)) { + if ((intent & F2PY_INTENT_C)?PyArray_ISCARRAY_RO(arr):PyArray_ISFARRAY_RO(arr)) { if ((intent & F2PY_INTENT_OUT)) { Py_INCREF(arr); } @@ -807,9 +807,9 @@ PyArrayObject* array_from_pyobj(const int type_num, return arr; } } - if (intent & F2PY_INTENT_INOUT) { strcpy(mess, "failed to initialize intent(inout) array"); + /* Must use PyArray_IS*ARRAY because intent(inout) requires writable input */ if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr)) strcat(mess, " -- input not contiguous"); if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr)) diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index b719f2495ed4..77149e4e7a0b 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -3,9 +3,8 @@ import copy import pytest -from numpy import ( - array, alltrue, ndarray, zeros, dtype, intp, clongdouble - ) +import numpy as np + from numpy.testing import assert_, assert_equal from numpy.core.multiarray import typeinfo from . 
import util @@ -119,7 +118,7 @@ def is_intent_exact(self, *names): # 16 byte long double types this means the inout intent cannot be satisfied # and several tests fail as the alignment flag can be randomly true or fals # when numpy gains an aligned allocator the tests could be enabled again -if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and +if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) and sys.platform != 'win32'): _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ @@ -133,7 +132,7 @@ class Type: _type_cache = {} def __new__(cls, name): - if isinstance(name, dtype): + if isinstance(name, np.dtype): dtype0 = name name = None for n, i in typeinfo.items(): @@ -153,7 +152,8 @@ def _init(self, name): info = typeinfo[self.NAME] self.type_num = getattr(wrap, 'NPY_' + self.NAME) assert_equal(self.type_num, info.num) - self.dtype = info.type + self.dtype = np.dtype(info.type) + self.type = info.type self.elsize = info.bits / 8 self.dtypechar = info.char @@ -202,7 +202,7 @@ def __init__(self, typ, dims, intent, obj): # arr.dtypechar may be different from typ.dtypechar self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) - assert_(isinstance(self.arr, ndarray), repr(type(self.arr))) + assert_(isinstance(self.arr, np.ndarray), repr(type(self.arr))) self.arr_attr = wrap.array_attrs(self.arr) @@ -225,13 +225,15 @@ def __init__(self, typ, dims, intent, obj): return if intent.is_intent('cache'): - assert_(isinstance(obj, ndarray), repr(type(obj))) - self.pyarr = array(obj).reshape(*dims).copy() + assert_(isinstance(obj, np.ndarray), repr(type(obj))) + self.pyarr = np.array(obj).reshape(*dims).copy() else: - self.pyarr = array(array(obj, dtype=typ.dtypechar).reshape(*dims), - order=self.intent.is_intent('c') and 'C' or 'F') + self.pyarr = np.array( + np.array(obj, dtype=typ.dtypechar).reshape(*dims), + order=self.intent.is_intent('c') and 'C' or 'F') assert_(self.pyarr.dtype == typ, repr((self.pyarr.dtype, typ))) + self.pyarr.setflags(write=self.arr.flags['WRITEABLE']) assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) self.pyarr_attr = wrap.array_attrs(self.pyarr) @@ -266,7 +268,7 @@ def __init__(self, typ, dims, intent, obj): repr((self.arr_attr[5][3], self.type.elsize))) assert_(self.arr_equal(self.pyarr, self.arr)) - if isinstance(self.obj, ndarray): + if isinstance(self.obj, np.ndarray): if typ.elsize == Type(obj.dtype).elsize: if not intent.is_intent('copy') and self.arr_attr[1] <= 1: assert_(self.has_shared_memory()) @@ -274,8 +276,7 @@ def __init__(self, typ, dims, intent, obj): def arr_equal(self, arr1, arr2): if arr1.shape != arr2.shape: return False - s = arr1 == arr2 - return alltrue(s.flatten()) + return (arr1 == arr2).all() def __str__(self): return str(self.arr) @@ -285,7 +286,7 @@ def has_shared_memory(self): """ if self.obj is self.arr: return True - if not isinstance(self.obj, ndarray): + if not isinstance(self.obj, np.ndarray): return False obj_attr = wrap.array_attrs(self.obj) return obj_attr[0] == self.arr_attr[0] @@ -318,7 +319,7 @@ def test_in_from_2seq(self): def test_in_from_2casttype(self): for t in self.type.cast_types(): - obj = array(self.num2seq, dtype=t.dtype) + obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_, obj) if t.elsize == self.type.elsize: assert_( @@ -326,8 +327,20 @@ def test_in_from_2casttype(self): else: assert_(not a.has_shared_memory(), repr(t.dtype)) + @pytest.mark.parametrize('write', ['w', 
'ro']) + @pytest.mark.parametrize('order', ['C', 'F']) + @pytest.mark.parametrize('inp', ['2seq', '23seq']) + def test_in_nocopy(self, write, order, inp): + """Test if intent(in) array can be passed without copies + """ + seq = getattr(self, 'num' + inp) + obj = np.array(seq, dtype=self.type.dtype, order=order) + obj.setflags(write=(write == 'w')) + a = self.array(obj.shape, ((order=='C' and intent.in_.c) or intent.in_), obj) + assert a.has_shared_memory() + def test_inout_2seq(self): - obj = array(self.num2seq, dtype=self.type.dtype) + obj = np.array(self.num2seq, dtype=self.type.dtype) a = self.array([len(self.num2seq)], intent.inout, obj) assert_(a.has_shared_memory()) @@ -341,12 +354,12 @@ def test_inout_2seq(self): raise SystemError('intent(inout) should have failed on sequence') def test_f_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype, order='F') + obj = np.array(self.num23seq, dtype=self.type.dtype, order='F') shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.inout, obj) assert_(a.has_shared_memory()) - obj = array(self.num23seq, dtype=self.type.dtype, order='C') + obj = np.array(self.num23seq, dtype=self.type.dtype, order='C') shape = (len(self.num23seq), len(self.num23seq[0])) try: a = self.array(shape, intent.in_.inout, obj) @@ -359,14 +372,14 @@ def test_f_inout_23seq(self): 'intent(inout) should have failed on improper array') def test_c_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype) + obj = np.array(self.num23seq, dtype=self.type.dtype) shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.c.inout, obj) assert_(a.has_shared_memory()) def test_in_copy_from_2casttype(self): for t in self.type.cast_types(): - obj = array(self.num2seq, dtype=t.dtype) + obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_.copy, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) @@ -377,14 +390,14 @@ def test_c_in_from_23seq(self): def test_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) + obj = np.array(self.num23seq, dtype=t.dtype) a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) def test_f_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') + obj = np.array(self.num23seq, dtype=t.dtype, order='F') a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) if t.elsize == self.type.elsize: @@ -394,7 +407,7 @@ def test_f_in_from_23casttype(self): def test_c_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) + obj = np.array(self.num23seq, dtype=t.dtype) a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) if t.elsize == self.type.elsize: @@ -404,14 +417,14 @@ def test_c_in_from_23casttype(self): def test_f_copy_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') + obj = np.array(self.num23seq, dtype=t.dtype, order='F') a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) def test_c_copy_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) + obj = np.array(self.num23seq, dtype=t.dtype) a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, obj) 
assert_(not a.has_shared_memory(), repr(t.dtype)) @@ -420,7 +433,7 @@ def test_in_cache_from_2casttype(self): for t in self.type.all_types(): if t.elsize != self.type.elsize: continue - obj = array(self.num2seq, dtype=t.dtype) + obj = np.array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq),) a = self.array(shape, intent.in_.c.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) @@ -428,7 +441,7 @@ def test_in_cache_from_2casttype(self): a = self.array(shape, intent.in_.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) - obj = array(self.num2seq, dtype=t.dtype, order='F') + obj = np.array(self.num2seq, dtype=t.dtype, order='F') a = self.array(shape, intent.in_.c.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) @@ -449,7 +462,7 @@ def test_in_cache_from_2casttype_failure(self): for t in self.type.all_types(): if t.elsize >= self.type.elsize: continue - obj = array(self.num2seq, dtype=t.dtype) + obj = np.array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq),) try: self.array(shape, intent.in_.cache, obj) # Should succeed @@ -485,18 +498,18 @@ def test_hidden(self): shape = (2,) a = self.array(shape, intent.hide, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) shape = (2, 3) a = self.array(shape, intent.hide, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) shape = (2, 3) a = self.array(shape, intent.c.hide, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) shape = (-1, 3) @@ -514,18 +527,18 @@ def test_optional_none(self): shape = (2,) a = self.array(shape, intent.optional, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) shape = (2, 3) a = self.array(shape, intent.optional, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) shape = (2, 3) a = self.array(shape, intent.c.optional, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) def test_optional_from_2seq(self): @@ -547,14 +560,14 @@ def test_optional_from_23seq(self): assert_(not a.has_shared_memory()) def test_inplace(self): - obj = array(self.num23seq, dtype=self.type.dtype) + obj = np.array(self.num23seq, dtype=self.type.dtype) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape a = self.array(shape, intent.inplace, obj) assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) a.arr[1][2] = 54 assert_(obj[1][2] == a.arr[1][2] == - array(54, dtype=self.type.dtype), repr((obj, a.arr))) + np.array(54, dtype=self.type.dtype), repr((obj, a.arr))) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! 
assert_(not obj.flags['CONTIGUOUS']) @@ -563,17 +576,17 @@ def test_inplace_from_casttype(self): for t in self.type.cast_types(): if t is self.type: continue - obj = array(self.num23seq, dtype=t.dtype) - assert_(obj.dtype.type == t.dtype) - assert_(obj.dtype.type is not self.type.dtype) + obj = np.array(self.num23seq, dtype=t.dtype) + assert_(obj.dtype.type == t.type) + assert_(obj.dtype.type is not self.type.type) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape a = self.array(shape, intent.inplace, obj) assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) a.arr[1][2] = 54 assert_(obj[1][2] == a.arr[1][2] == - array(54, dtype=self.type.dtype), repr((obj, a.arr))) + np.array(54, dtype=self.type.dtype), repr((obj, a.arr))) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes changed inplace! assert_(not obj.flags['CONTIGUOUS']) - assert_(obj.dtype.type is self.type.dtype) # obj changed inplace! + assert_(obj.dtype.type is self.type.type) # obj changed inplace! diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 4d4f2b443a99..2cb429ec21d5 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -61,6 +61,21 @@ class TestF77Callback(util.F2PyTest): a = callback(cu, lencu) end + + subroutine hidden_callback(a, r) + external global_f +cf2py intent(callback, hide) global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end + + subroutine hidden_callback2(a, r) + external global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end """ @pytest.mark.parametrize('name', 't,t2'.split(',')) @@ -204,6 +219,39 @@ def runner(name): if errors: raise AssertionError(errors) + def test_hidden_callback(self): + try: + self.module.hidden_callback(2) + except Exception as msg: + assert_(str(msg).startswith('Callback global_f not defined')) + + try: + self.module.hidden_callback2(2) + except Exception as msg: + assert_(str(msg).startswith('cb: Callback global_f not defined')) + + self.module.global_f = lambda x: x + 1 + r = self.module.hidden_callback(2) + assert_(r == 3) + + self.module.global_f = lambda x: x + 2 + r = self.module.hidden_callback(2) + assert_(r == 4) + + del self.module.global_f + try: + self.module.hidden_callback(2) + except Exception as msg: + assert_(str(msg).startswith('Callback global_f not defined')) + + self.module.global_f = lambda x=0: x + 3 + r = self.module.hidden_callback(2) + assert_(r == 5) + + # reproducer of gh18341 + r = self.module.hidden_callback2(2) + assert_(r == 3) + class TestF77CallbackPythonTLS(TestF77Callback): """ diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index 735804024c9e..827c71ae9965 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -86,3 +86,32 @@ def test_defaultPublic(self, tmp_path): assert 'public' not in mod['vars']['a']['attrspec'] assert 'private' not in mod['vars']['seta']['attrspec'] assert 'public' in mod['vars']['seta']['attrspec'] + +class TestExternal(util.F2PyTest): + # issue gh-17859: add external attribute support + code = """ + integer(8) function external_as_statement(fcn) + implicit none + external fcn + integer(8) :: fcn + external_as_statement = fcn(0) + end + + integer(8) function external_as_attribute(fcn) + implicit none + integer(8), external :: fcn + external_as_attribute = fcn(0) + end + """ + + def test_external_as_statement(self): + def incr(x): + return x + 123 + r = 
self.module.external_as_statement(incr) + assert r == 123 + + def test_external_as_attribute(self): + def incr(x): + return x + 123 + r = self.module.external_as_attribute(incr) + assert r == 123 diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index a0f2ba70a4a1..e67a829471dc 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -38,9 +38,9 @@ def find_f2py_commands(): def test_f2py(f2py_cmd): # test that we can run f2py script stdout = subprocess.check_output([f2py_cmd, '-v']) - assert_equal(stdout.strip(), b'2') + assert_equal(stdout.strip(), np.__version__.encode('ascii')) def test_pep338(): stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v']) - assert_equal(stdout.strip(), b'2') + assert_equal(stdout.strip(), np.__version__.encode('ascii')) From a3582d86a59e76b6e891a088110dc6febfc9158b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 21 Feb 2021 16:11:52 +0100 Subject: [PATCH 03/31] BUG: Fixed an issue where `diagflat` could overflow on windows and 32-bit platforms --- numpy/lib/twodim_base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 2b4cbdfbbc11..22a8849c0e37 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -5,8 +5,8 @@ from numpy.core.numeric import ( asanyarray, arange, zeros, greater_equal, multiply, ones, - asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal, - nonzero + asarray, where, int8, int16, int32, int64, intp, empty, promote_types, + diagonal, nonzero, indices ) from numpy.core.overrides import set_array_function_like_doc, set_module from numpy.core import overrides @@ -347,10 +347,10 @@ def diagflat(v, k=0): n = s + abs(k) res = zeros((n, n), v.dtype) if (k >= 0): - i = arange(0, n-k) + i = arange(0, n-k, dtype=intp) fi = i+k+i*n else: - i = arange(0, n+k) + i = arange(0, n+k, dtype=intp) fi = i+(i-k)*n res.flat[fi] = v if not wrap: From 45f4ca46d3f745d6b6fb6e123ef11a53c486ddfd Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 16 Feb 2021 23:17:51 -0600 Subject: [PATCH 04/31] BUG: Fix refcount leak in f2py `complex_double_from_pyobj` --- numpy/f2py/cfuncs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 40496ccf10cd..974062f2617a 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -1023,6 +1023,7 @@ } (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; + Py_DECREF(arr); return 1; } /* Python does not provide PyNumber_Complex function :-( */ From 8563a3ce4ac4daaeeab4741e0582cdae51c63a02 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 15 Feb 2021 17:02:34 -0600 Subject: [PATCH 05/31] BUG: Fix tiny memory leaks when `like=` overrides are used I thought I had fixed these leaks, but it appears I missed some. We probably should backport this to 1.20.x (its simple), but the leaks are also pretty harmless unless someone uses `like=` hundrets of thousands of times in a running program (and its a new fetaure). 
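For illustration only (not part of the patch): a minimal sketch of the kind of `like=` call that exercises the creation-function dispatch path this commit fixes. `LikeTemplate` is a hypothetical stand-in for any object opting into the `__array_function__` protocol; it is not a NumPy API.

    import numpy as np

    class LikeTemplate:
        # Hypothetical minimal object implementing the __array_function__
        # protocol, used only to route array-creation calls through the
        # `like=` dispatch branch patched below.
        def __array_function__(self, func, types, args, kwargs):
            kwargs.pop("like", None)   # drop like= if it is forwarded
            return func(*args, **kwargs)

    template = LikeTemplate()
    for _ in range(100_000):
        # Each call takes the array_implement_c_array_function_creation
        # early return; before this patch the parsed dtype and shape
        # objects were leaked on that path.
        np.zeros(4, dtype="float64", like=template)
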
--- numpy/core/src/multiarray/multiarraymodule.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index cc747d8623bf..66f71de5179e 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1829,6 +1829,8 @@ array_empty(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) array_function_result = array_implement_c_array_function_creation( "empty", args, kwds); if (array_function_result != Py_NotImplemented) { + Py_XDECREF(typecode); + npy_free_cache_dim_obj(shape); return array_function_result; } @@ -2026,6 +2028,8 @@ array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) array_function_result = array_implement_c_array_function_creation( "zeros", args, kwds); if (array_function_result != Py_NotImplemented) { + Py_XDECREF(typecode); + npy_free_cache_dim_obj(shape); return array_function_result; } @@ -2139,11 +2143,13 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) array_function_result = array_implement_c_array_function_creation( "fromfile", args, keywds); if (array_function_result != Py_NotImplemented) { + Py_XDECREF(type); return array_function_result; } file = NpyPath_PathlikeToFspath(file); if (file == NULL) { + Py_XDECREF(type); return NULL; } @@ -2250,6 +2256,7 @@ array_frombuffer(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds array_function_result = array_implement_c_array_function_creation( "frombuffer", args, keywds); if (array_function_result != Py_NotImplemented) { + Py_XDECREF(type); return array_function_result; } From f2b40c6354efbe102588ba30fcbd79cdc6f93563 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 22 Jan 2020 14:44:33 -0800 Subject: [PATCH 06/31] BUG: Remove temporary change of descr/flags in VOID functions This is done using a hack around stack allocated arrays, which requires some dark magic with respect to the setting the base object (we skip normal arrays and INCREF/DECREF during base setting, which _does_ work for such static arrays, but only if we give them a (not quite) fake refcount of 1 and set the base and their type correctly to PyArrayType. Closes gh-15387 --- numpy/core/src/multiarray/arraytypes.c.src | 189 ++++++++++++--------- 1 file changed, 108 insertions(+), 81 deletions(-) diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index ecaca72a1848..ad74612272b2 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -42,6 +42,32 @@ #include "npy_cblas.h" #include "npy_buffer.h" + +/* + * Define a stack allocated dummy array with only the minimum information set: + * 1. The descr, the main field interesting here. + * 2. The flags, which are needed for alignment;. + * 3. The type is set to NULL and the base is the original array, if this + * is used within a subarray getitem to create a new view, the base + * must be walked until the type is not NULL. + * + * The following should create errors in debug mode (if deallocated + * incorrectly), since base would be incorrectly decref'd as well. + * This is especially important for nonzero and copyswap, which may run with + * the GIL released. 
+ */ +static NPY_INLINE PyArrayObject_fields +get_dummy_stack_array(PyArrayObject *orig) +{ + PyArrayObject_fields new_fields; + new_fields.flags = PyArray_FLAGS(orig); + /* Set to NULL so the dummy object can be distinguished from the real one */ + Py_TYPE(&new_fields) = NULL; + new_fields.base = (PyObject *)orig; + return new_fields; +} + + /* check for sequences, but ignore the types numpy considers scalars */ static NPY_INLINE npy_bool PySequence_NoString_Check(PyObject *op) { @@ -674,6 +700,7 @@ OBJECT_setitem(PyObject *op, void *ov, void *NPY_UNUSED(ap)) return PyErr_Occurred() ? -1 : 0; } + /* VOID */ static PyObject * @@ -681,22 +708,21 @@ VOID_getitem(void *input, void *vap) { PyArrayObject *ap = vap; char *ip = input; - PyArray_Descr* descr; + PyArray_Descr* descr = PyArray_DESCR(vap); - descr = PyArray_DESCR(ap); if (PyDataType_HASFIELDS(descr)) { PyObject *key; PyObject *names; int i, n; PyObject *ret; PyObject *tup; - int savedflags; + PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); + PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; /* get the names from the fields dictionary*/ names = descr->names; n = PyTuple_GET_SIZE(names); ret = PyTuple_New(n); - savedflags = PyArray_FLAGS(ap); for (i = 0; i < n; i++) { npy_intp offset; PyArray_Descr *new; @@ -704,26 +730,19 @@ VOID_getitem(void *input, void *vap) tup = PyDict_GetItem(descr->fields, key); if (_unpack_field(tup, &new, &offset) < 0) { Py_DECREF(ret); - ((PyArrayObject_fields *)ap)->descr = descr; return NULL; } - /* - * TODO: temporarily modifying the array like this - * is bad coding style, should be changed. - */ - ((PyArrayObject_fields *)ap)->descr = new; + dummy_fields.descr = new; /* update alignment based on offset */ if ((new->alignment > 1) && ((((npy_intp)(ip+offset)) % new->alignment) != 0)) { - PyArray_CLEARFLAGS(ap, NPY_ARRAY_ALIGNED); + PyArray_CLEARFLAGS(dummy_arr, NPY_ARRAY_ALIGNED); } else { - PyArray_ENABLEFLAGS(ap, NPY_ARRAY_ALIGNED); + PyArray_ENABLEFLAGS(dummy_arr, NPY_ARRAY_ALIGNED); } - PyTuple_SET_ITEM(ret, i, PyArray_GETITEM(ap, ip+offset)); - ((PyArrayObject_fields *)ap)->flags = savedflags; + PyTuple_SET_ITEM(ret, i, PyArray_GETITEM(dummy_arr, ip+offset)); } - ((PyArrayObject_fields *)ap)->descr = descr; return ret; } @@ -739,11 +758,28 @@ VOID_getitem(void *input, void *vap) return NULL; } Py_INCREF(descr->subarray->base); + + /* + * NOTE: There is the possibility of recursive calls from the above + * field branch. These calls use a dummy arr for thread + * (and general) safety. However, we must set the base array, + * so if such a dummy array was passed (its type is NULL), + * we have walk its base until the initial array is found. + * + * TODO: This should be fixed, the next "generation" of GETITEM will + * probably need to pass in the original array (in addition + * to the dtype as a method). Alternatively, VOID dtypes + * could have special handling. + */ + PyObject *base = (PyObject *)ap; + while (Py_TYPE(base) == NULL) { + base = PyArray_BASE((PyArrayObject *)base); + } ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, descr->subarray->base, shape.len, shape.ptr, NULL, ip, PyArray_FLAGS(ap) & ~NPY_ARRAY_F_CONTIGUOUS, - NULL, (PyObject *)ap); + NULL, base); npy_free_cache_dim_obj(shape); return (PyObject *)ret; } @@ -761,7 +797,8 @@ NPY_NO_EXPORT int PyArray_CopyObject(PyArrayObject *, PyObject *); * individual fields of a numpy structure, in VOID_setitem. Compare to inner * loops in VOID_getitem and VOID_nonzero. 
* - * WARNING: Clobbers arr's dtype and alignment flag. + * WARNING: Clobbers arr's dtype and alignment flag, should not be used + * on the original array! */ NPY_NO_EXPORT int _setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr, @@ -798,7 +835,7 @@ static int _copy_and_return_void_setitem(PyArray_Descr *dstdescr, char *dstdata, PyArray_Descr *srcdescr, char *srcdata){ PyArrayObject_fields dummy_struct; - PyArrayObject *dummy = (PyArrayObject *)&dummy_struct; + PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_struct; npy_int names_size = PyTuple_GET_SIZE(dstdescr->names); npy_intp offset; npy_int i; @@ -808,11 +845,11 @@ _copy_and_return_void_setitem(PyArray_Descr *dstdescr, char *dstdata, if (PyArray_EquivTypes(srcdescr, dstdescr)) { for (i = 0; i < names_size; i++) { /* neither line can ever fail, in principle */ - if (_setup_field(i, dstdescr, dummy, &offset, dstdata)) { + if (_setup_field(i, dstdescr, dummy_arr, &offset, dstdata)) { return -1; } - PyArray_DESCR(dummy)->f->copyswap(dstdata + offset, - srcdata + offset, 0, dummy); + PyArray_DESCR(dummy_arr)->f->copyswap(dstdata + offset, + srcdata + offset, 0, dummy_arr); } return 0; } @@ -831,13 +868,10 @@ VOID_setitem(PyObject *op, void *input, void *vap) { char *ip = input; PyArrayObject *ap = vap; - PyArray_Descr *descr; - int flags; - int itemsize=PyArray_DESCR(ap)->elsize; + int itemsize = PyArray_DESCR(ap)->elsize; int res; + PyArray_Descr *descr = PyArray_DESCR(ap); - descr = PyArray_DESCR(ap); - flags = PyArray_FLAGS(ap); if (PyDataType_HASFIELDS(descr)) { PyObject *errmsg; npy_int i; @@ -874,11 +908,13 @@ VOID_setitem(PyObject *op, void *input, void *vap) return -1; } + PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); + PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; + for (i = 0; i < names_size; i++) { PyObject *item; - /* temporarily make ap have only this field */ - if (_setup_field(i, descr, ap, &offset, ip) == -1) { + if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { failed = 1; break; } @@ -888,7 +924,7 @@ VOID_setitem(PyObject *op, void *input, void *vap) break; } /* use setitem to set this field */ - if (PyArray_SETITEM(ap, ip + offset, item) < 0) { + if (PyArray_SETITEM(dummy_arr, ip + offset, item) < 0) { failed = 1; break; } @@ -898,24 +934,23 @@ VOID_setitem(PyObject *op, void *input, void *vap) /* Otherwise must be non-void scalar. 
Try to assign to each field */ npy_intp names_size = PyTuple_GET_SIZE(descr->names); + PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); + PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; + for (i = 0; i < names_size; i++) { /* temporarily make ap have only this field */ - if (_setup_field(i, descr, ap, &offset, ip) == -1) { + if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { failed = 1; break; } /* use setitem to set this field */ - if (PyArray_SETITEM(ap, ip + offset, op) < 0) { + if (PyArray_SETITEM(dummy_arr, ip + offset, op) < 0) { failed = 1; break; } } } - /* reset clobbered attributes */ - ((PyArrayObject_fields *)(ap))->descr = descr; - ((PyArrayObject_fields *)(ap))->flags = flags; - if (failed) { return -1; } @@ -924,7 +959,6 @@ VOID_setitem(PyObject *op, void *input, void *vap) else if (PyDataType_HASSUBARRAY(descr)) { /* copy into an array of the same basic type */ PyArray_Dims shape = {NULL, -1}; - PyArrayObject *ret; if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) { npy_free_cache_dim_obj(shape); PyErr_SetString(PyExc_ValueError, @@ -932,10 +966,15 @@ VOID_setitem(PyObject *op, void *input, void *vap) return -1; } Py_INCREF(descr->subarray->base); - ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( + /* + * Note we set no base object here, as to not rely on the input + * being a valid object for base setting. `ret` nevertheless does + * does not own its data, this is generally not good, but localized. + */ + PyArrayObject *ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, descr->subarray->base, shape.len, shape.ptr, NULL, ip, - PyArray_FLAGS(ap), NULL, (PyObject *)ap); + PyArray_FLAGS(ap), NULL, NULL); npy_free_cache_dim_obj(shape); if (!ret) { return -1; @@ -2287,6 +2326,7 @@ STRING_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, return; } + /* */ static void VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, @@ -2303,29 +2343,26 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, if (PyArray_HASFIELDS(arr)) { PyObject *key, *value; - Py_ssize_t pos = 0; + PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); + PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; + while (PyDict_Next(descr->fields, &pos, &key, &value)) { npy_intp offset; - PyArray_Descr * new; + PyArray_Descr *new; if (NPY_TITLE_KEY(key, value)) { continue; } if (_unpack_field(value, &new, &offset) < 0) { - ((PyArrayObject_fields *)arr)->descr = descr; return; } - /* - * TODO: temporarily modifying the array like this - * is bad coding style, should be changed. - */ - ((PyArrayObject_fields *)arr)->descr = new; + + dummy_fields.descr = new; new->f->copyswapn(dst+offset, dstride, (src != NULL ? src+offset : NULL), - sstride, n, swap, arr); + sstride, n, swap, dummy_arr); } - ((PyArrayObject_fields *)arr)->descr = descr; return; } if (PyDataType_HASSUBARRAY(descr)) { @@ -2351,11 +2388,6 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, } new = descr->subarray->base; - /* - * TODO: temporarily modifying the array like this - * is bad coding style, should be changed. 
- */ - ((PyArrayObject_fields *)arr)->descr = new; dstptr = dst; srcptr = src; subitemsize = new->elsize; @@ -2363,16 +2395,20 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, /* There cannot be any elements, so return */ return; } + + PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); + PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; + ((PyArrayObject_fields *)dummy_arr)->descr = new; + num = descr->elsize / subitemsize; for (i = 0; i < n; i++) { new->f->copyswapn(dstptr, subitemsize, srcptr, - subitemsize, num, swap, arr); + subitemsize, num, swap, dummy_arr); dstptr += dstride; if (srcptr) { srcptr += sstride; } } - ((PyArrayObject_fields *)arr)->descr = descr; return; } /* Must be a naive Void type (e.g. a "V8") so simple copy is sufficient. */ @@ -2396,26 +2432,24 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) PyObject *key, *value; Py_ssize_t pos = 0; + PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); + PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; + while (PyDict_Next(descr->fields, &pos, &key, &value)) { npy_intp offset; + PyArray_Descr * new; if (NPY_TITLE_KEY(key, value)) { continue; } if (_unpack_field(value, &new, &offset) < 0) { - ((PyArrayObject_fields *)arr)->descr = descr; return; } - /* - * TODO: temporarily modifying the array like this - * is bad coding style, should be changed. - */ - ((PyArrayObject_fields *)arr)->descr = new; + dummy_fields.descr = new; new->f->copyswap(dst+offset, (src != NULL ? src+offset : NULL), - swap, arr); + swap, dummy_arr); } - ((PyArrayObject_fields *)arr)->descr = descr; return; } if (PyDataType_HASSUBARRAY(descr)) { @@ -2439,20 +2473,19 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) } new = descr->subarray->base; - /* - * TODO: temporarily modifying the array like this - * is bad coding style, should be changed. - */ - ((PyArrayObject_fields *)arr)->descr = new; subitemsize = new->elsize; if (subitemsize == 0) { /* There cannot be any elements, so return */ return; } + + PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); + PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; + dummy_fields.descr = new; + num = descr->elsize / subitemsize; new->f->copyswapn(dst, subitemsize, src, - subitemsize, num, swap, arr); - ((PyArrayObject_fields *)arr)->descr = descr; + subitemsize, num, swap, dummy_arr); return; } /* Must be a naive Void type (e.g. a "V8") so simple copy is sufficient. */ @@ -2707,11 +2740,11 @@ VOID_nonzero (char *ip, PyArrayObject *ap) if (PyArray_HASFIELDS(ap)) { PyArray_Descr *descr; PyObject *key, *value; - int savedflags; Py_ssize_t pos = 0; + PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); + PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; descr = PyArray_DESCR(ap); - savedflags = PyArray_FLAGS(ap); while (PyDict_Next(descr->fields, &pos, &key, &value)) { PyArray_Descr * new; npy_intp offset; @@ -2722,12 +2755,8 @@ VOID_nonzero (char *ip, PyArrayObject *ap) PyErr_Clear(); continue; } - /* - * TODO: temporarily modifying the array like this - * is bad coding style, should be changed. 
- */ - ((PyArrayObject_fields *)ap)->descr = new; - ((PyArrayObject_fields *)ap)->flags = savedflags; + + dummy_fields.descr = new; if ((new->alignment > 1) && !__ALIGNED(ip + offset, new->alignment)) { PyArray_CLEARFLAGS(ap, NPY_ARRAY_ALIGNED); @@ -2735,13 +2764,11 @@ VOID_nonzero (char *ip, PyArrayObject *ap) else { PyArray_ENABLEFLAGS(ap, NPY_ARRAY_ALIGNED); } - if (new->f->nonzero(ip+offset, ap)) { + if (new->f->nonzero(ip+offset, dummy_arr)) { nonz = NPY_TRUE; break; } } - ((PyArrayObject_fields *)ap)->descr = descr; - ((PyArrayObject_fields *)ap)->flags = savedflags; return nonz; } len = PyArray_DESCR(ap)->elsize; From 317acdb6795dc575e949bab944fb81f70711dbcc Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 2 Mar 2020 09:45:59 -0800 Subject: [PATCH 07/31] TST: Add test for nonzero and copyswapn (through advanced indexing) --- numpy/core/tests/test_indexing.py | 24 ++++++++++++++++++++++++ numpy/core/tests/test_numeric.py | 21 +++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py index 73dbc429c89c..57b1f38272ed 100644 --- a/numpy/core/tests/test_indexing.py +++ b/numpy/core/tests/test_indexing.py @@ -563,6 +563,30 @@ def test_too_many_advanced_indices(self, index, num, original_ndim): with pytest.raises(IndexError): arr[(index,) * num] = 1. + def test_structured_advanced_indexing(self): + # Test that copyswap(n) used by integer array indexing is threadsafe + # for structured datatypes, see gh-15387. This test can behave randomly. + from concurrent.futures import ThreadPoolExecutor + + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)] * 2) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0] + + rng = np.random.default_rng() + def func(arr): + indx = rng.integers(0, len(arr), size=6000, dtype=np.intp) + arr[indx] + + tpe = ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + class TestFieldIndexing: def test_scalar_return_type(self): diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index a6c20abd1c34..91aafe4d769f 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -1517,6 +1517,27 @@ def __bool__(self): a = np.array([[ThrowsAfter(15)]]*10) assert_raises(ValueError, np.nonzero, a) + def test_structured_threadsafety(self): + # Nonzero (and some other functions) should be threadsafe for + # structured datatypes, see gh-15387. This test can behave randomly. 
+ from concurrent.futures import ThreadPoolExecutor + + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)]) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0] + def func(arr): + arr.nonzero() + + tpe = ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + class TestIndex: def test_boolean(self): From b946f0995a183273df5eec7efbb6c2a74bc058f5 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Sat, 20 Feb 2021 12:53:47 -0500 Subject: [PATCH 08/31] BUG: Segfault in nditer buffer dealloc for Object arrays --- numpy/core/src/multiarray/einsum.c.src | 4 +++- numpy/core/src/multiarray/nditer_api.c | 4 +++- numpy/core/src/umath/ufunc_object.c | 22 ++++++++++++++++------ numpy/core/tests/test_nditer.py | 13 +++++++++++++ 4 files changed, 35 insertions(+), 8 deletions(-) diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src index 6ad375f670a5..85806fab3612 100644 --- a/numpy/core/src/multiarray/einsum.c.src +++ b/numpy/core/src/multiarray/einsum.c.src @@ -1100,6 +1100,7 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, char **dataptr; npy_intp *stride; npy_intp *countptr; + int needs_api; NPY_BEGIN_THREADS_DEF; iternext = NpyIter_GetIterNext(iter, NULL); @@ -1110,12 +1111,13 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, dataptr = NpyIter_GetDataPtrArray(iter); stride = NpyIter_GetInnerStrideArray(iter); countptr = NpyIter_GetInnerLoopSizePtr(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); NPY_BEGIN_THREADS_NDITER(iter); NPY_EINSUM_DBG_PRINT("Einsum loop\n"); do { sop(nop, dataptr, stride, *countptr); - } while(iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; /* If the API was needed, it may have thrown an error */ diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index 059f2c437b15..cf28b8a8a68f 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -2640,6 +2640,7 @@ npyiter_clear_buffers(NpyIter *iter) /* Cleanup any buffers with references */ char **buffers = NBF_BUFFERS(bufferdata); PyArray_Descr **dtypes = NIT_DTYPES(iter); + npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter); for (int iop = 0; iop < nop; ++iop, ++buffers) { /* * We may want to find a better way to do this, on the other hand, @@ -2648,7 +2649,8 @@ npyiter_clear_buffers(NpyIter *iter) * a well defined state (either NULL or owning the reference). 
* Only we implement cleanup */ - if (!PyDataType_REFCHK(dtypes[iop])) { + if (!PyDataType_REFCHK(dtypes[iop]) || + !(op_itflags[iop]&NPY_OP_ITFLAG_USINGBUFFER)) { continue; } if (*buffers == 0) { diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 1a035eb619e2..cd6e27a35c6b 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -1431,6 +1431,7 @@ iterator_loop(PyUFuncObject *ufunc, char **dataptr; npy_intp *stride; npy_intp *count_ptr; + int needs_api; PyArrayObject **op_it; npy_uint32 iter_flags; @@ -1525,6 +1526,7 @@ iterator_loop(PyUFuncObject *ufunc, dataptr = NpyIter_GetDataPtrArray(iter); stride = NpyIter_GetInnerStrideArray(iter); count_ptr = NpyIter_GetInnerLoopSizePtr(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); NPY_BEGIN_THREADS_NDITER(iter); @@ -1532,7 +1534,7 @@ iterator_loop(PyUFuncObject *ufunc, do { NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)*count_ptr); innerloop(dataptr, count_ptr, stride, innerloopdata); - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; } @@ -1859,6 +1861,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc, dataptr = NpyIter_GetDataPtrArray(iter); strides = NpyIter_GetInnerStrideArray(iter); countptr = NpyIter_GetInnerLoopSizePtr(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); NPY_BEGIN_THREADS_NDITER(iter); @@ -1869,7 +1872,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc, innerloop(dataptr, strides, dataptr[nop], strides[nop], *countptr, innerloopdata); - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; @@ -2973,6 +2976,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, } dataptr = NpyIter_GetDataPtrArray(iter); count_ptr = NpyIter_GetInnerLoopSizePtr(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); if (!needs_api && !NpyIter_IterationNeedsAPI(iter)) { NPY_BEGIN_THREADS_THRESHOLDED(total_problem_size); @@ -2980,7 +2984,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, do { inner_dimensions[0] = *count_ptr; innerloop(dataptr, inner_dimensions, inner_strides, innerloopdata); - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); if (!needs_api && !NpyIter_IterationNeedsAPI(iter)) { NPY_END_THREADS; @@ -3520,6 +3524,10 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, innerloop(dataptrs_copy, &count, strides_copy, innerloopdata); + if (needs_api && PyErr_Occurred()) { + break; + } + /* Jump to the faster loop when skipping is done */ if (skip_first_count == 0) { if (iternext(iter)) { @@ -3569,7 +3577,7 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, n = 1; } } - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); finish_loop: NPY_END_THREADS; @@ -3882,6 +3890,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, goto fail; } dataptr = NpyIter_GetDataPtrArray(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); /* Execute the loop with just the outer iterator */ @@ -3932,7 +3941,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, innerloop(dataptr_copy, &count_m1, stride_copy, innerloopdata); } - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; } @@ -4263,6 +4272,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, npy_intp stride0_ind = 
PyArray_STRIDE(op[0], axis); int itemsize = op_dtypes[0]->elsize; + int needs_api = NpyIter_IterationNeedsAPI(iter); /* Get the variables needed for the loop */ iternext = NpyIter_GetIterNext(iter, NULL); @@ -4327,7 +4337,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, stride_copy, innerloopdata); } } - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; } diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index e10c7ad92db3..7e0c39966f35 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2724,6 +2724,19 @@ def test_0d_iter(): assert_equal(vals['c'], [[(0.5)]*3]*2) assert_equal(vals['d'], 0.5) +def test_object_iter_cleanup(): + # see gh-18450 + # object arrays can raise a python exception in ufunc inner loops using + # nditer, which should cause iteration to stop & cleanup. There were bugs + # in the nditer cleanup when decref'ing object arrays. + # This test would trigger valgrind "uninitialized read" before the bugfix. + assert_raises(TypeError, lambda: np.zeros((17000, 2), dtype='f4') * None) + + # this more explicit code also triggers the invalid access + arr = np.arange(np.BUFSIZE * 10).reshape(10, -1).astype(str) + oarr = arr.astype(object) + oarr[:, -1] = None + assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1])) def test_iter_too_large(): # The total size of the iterator must not exceed the maximum intp due From c69bef642bcba6d184fe6e81bfb7c6d05d3b87c7 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 19:20:26 -0800 Subject: [PATCH 09/31] MAINT: Remove suspicious type casting --- numpy/core/src/multiarray/dtypemeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index 2931977c204e..c2c9f040f46f 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -162,7 +162,7 @@ void_discover_descr_from_pyobject( } if (PyBytes_Check(obj)) { PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_VOID); - Py_ssize_t itemsize = (int)PyBytes_Size(obj); + Py_ssize_t itemsize = PyBytes_Size(obj); if (itemsize > NPY_MAX_INT) { PyErr_SetString(PyExc_TypeError, "byte-like to large to store inside array."); From 0511a93cb6db4861ce7f324ab5785848e66273ab Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 20:45:16 -0800 Subject: [PATCH 10/31] MAINT: cast Py_ssize_t to int --- numpy/core/src/multiarray/dtypemeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index c2c9f040f46f..9e023bfb90bf 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -167,7 +167,7 @@ void_discover_descr_from_pyobject( PyErr_SetString(PyExc_TypeError, "byte-like to large to store inside array."); } - descr->elsize = itemsize; + descr->elsize = (int)itemsize; return descr; } PyErr_Format(PyExc_TypeError, From 242a0642d92212a3eccfe7f783c3a65fc2840f38 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 19:37:14 -0800 Subject: [PATCH 11/31] MAINT: remove nonsensical comparison of pointer < 0 --- numpy/core/src/multiarray/ctors.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index ef105ff2d3f8..08c285708fe1 100644 --- 
a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -552,7 +552,7 @@ PyArray_AssignFromCache_Recursive( else { PyArrayObject *view; view = (PyArrayObject *)array_item_asarray(self, i); - if (view < 0) { + if (view == NULL) { goto fail; } if (PyArray_AssignFromCache_Recursive(view, ndim, cache) < 0) { From ede709c94eab0b706643d426447fd6c7c1be537a Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 19:44:59 -0800 Subject: [PATCH 12/31] MAINT: verify pointer against NULL before using it --- numpy/core/src/multiarray/dtype_transfer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index 630bd76f3955..9b93ecde43a2 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -1138,10 +1138,10 @@ get_datetime_to_unicode_transfer_function(int aligned, /* Get an ASCII string data type, adapted to match the UNICODE one */ str_dtype = PyArray_DescrNewFromType(NPY_STRING); - str_dtype->elsize = dst_dtype->elsize / 4; if (str_dtype == NULL) { return NPY_FAIL; } + str_dtype->elsize = dst_dtype->elsize / 4; /* Get the copy/swap operation to dst */ if (PyArray_GetDTypeCopySwapFn(aligned, From 2837ac3ef10f2c5839c6d5e5c6c6be52b6cf4552 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 20:08:28 -0800 Subject: [PATCH 13/31] BUG: check if PyArray_malloc succeeded --- numpy/core/src/umath/ufunc_object.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index cd6e27a35c6b..269b2e81ade5 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5219,7 +5219,11 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, if (cmp == 0 && current != NULL && current->arg_dtypes == NULL) { current->arg_dtypes = PyArray_malloc(ufunc->nargs * sizeof(PyArray_Descr*)); - if (arg_dtypes != NULL) { + if (current->arg_dtypes == NULL) { + PyErr_NoMemory(); + result = -1; + } + else if (arg_dtypes != NULL) { for (i = 0; i < ufunc->nargs; i++) { current->arg_dtypes[i] = arg_dtypes[i]; Py_INCREF(current->arg_dtypes[i]); From dfef2629722c5fd2b118b0d28cc6a3ca20c309f9 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Wed, 24 Feb 2021 06:57:11 -0800 Subject: [PATCH 14/31] Goto done when an error condition is reached --- numpy/core/src/umath/ufunc_object.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 269b2e81ade5..f30f31a2ee2f 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5222,6 +5222,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, if (current->arg_dtypes == NULL) { PyErr_NoMemory(); result = -1; + goto done; } else if (arg_dtypes != NULL) { for (i = 0; i < ufunc->nargs; i++) { From 33398cac9db2b5abade3beb8354ad606026f7b4d Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Tue, 2 Mar 2021 13:28:24 -0500 Subject: [PATCH 15/31] BUG: incorrect error fallthrough in nditer Fixup to gh-18450 --- numpy/core/src/umath/ufunc_object.c | 7 ++++++- numpy/core/tests/test_nditer.py | 8 ++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index f30f31a2ee2f..653e0b5befd7 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -3525,7 +3525,7 
@@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, strides_copy, innerloopdata); if (needs_api && PyErr_Occurred()) { - break; + goto finish_loop; } /* Jump to the faster loop when skipping is done */ @@ -3539,6 +3539,11 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, } } while (iternext(iter)); } + + if (needs_api && PyErr_Occurred()) { + goto finish_loop; + } + do { /* Turn the two items into three for the inner loop */ dataptrs_copy[0] = dataptrs[0]; diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 7e0c39966f35..cb6d77bd7226 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2738,6 +2738,14 @@ def test_object_iter_cleanup(): oarr[:, -1] = None assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1])) + # followup: this tests for a bug introduced in the first pass of gh-18450, + # caused by an incorrect fallthrough of the TypeError + class T: + def __bool__(self): + raise TypeError("Ambiguous") + assert_raises(TypeError, np.logical_or.reduce, + np.array([T(), T()], dtype='O')) + def test_iter_too_large(): # The total size of the iterator must not exceed the maximum intp due # to broadcasting. Dividing by 1024 will keep it small enough to From 89d254101afcee01f1cc4f3cc86dad0ae3039336 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 5 Mar 2021 08:26:30 -0700 Subject: [PATCH 16/31] CI: Pin docker image. The azure-pipeline test Linux_Python_38_32bit_full_with_asserts has been failing after the release of the latest manylinux2010. Pin the docker image to an earlier version to fix this. Closes #18553. --- azure-pipelines.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f8773dc36abc..45004f26c124 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -39,12 +39,11 @@ stages: - job: Linux_Python_38_32bit_full_with_asserts pool: - vmImage: 'ubuntu-18.04' + vmImage: 'ubuntu-20.04' steps: - script: | - docker pull quay.io/pypa/manylinux2010_i686 docker run -v $(pwd):/numpy -e CFLAGS="-msse2 -std=c99 -UNDEBUG" \ - -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2010_i686 \ + -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2010_i686:2021-02-28-1f32361 \ /bin/bash -xc "cd numpy && \ /opt/python/cp38-cp38/bin/python -mvenv venv &&\ source venv/bin/activate && \ From 57cf41afaa961b8ce4eef678aae49f605022f7ac Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 2 Mar 2021 15:51:24 -0700 Subject: [PATCH 17/31] CI: Use Ubuntu 18.04 to run "full" test. NumPy does not build using the `--coverage` flag on Ubuntu 20.04, the problem seems to be gcc 9.3.0-17. Work around that by running on Ubuntu 18.04 instead. 
--- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 1363d93276db..d6e81026838c 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -80,7 +80,7 @@ jobs: full: needs: smoke_test - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 env: USE_WHEEL: 1 RUN_FULL_TESTS: 1 From a21cbb12bebc287019e24aa53b1a8523d2cf197a Mon Sep 17 00:00:00 2001 From: Bas van Beek <43369155+BvB93@users.noreply.github.com> Date: Sat, 13 Mar 2021 10:03:28 +0100 Subject: [PATCH 18/31] MAINT: Add annotations for `dtype.__getitem__`, `__mul__` and `names` (#18599) --- numpy/__init__.pyi | 20 ++++++++++++++++++++ numpy/typing/tests/data/pass/dtype.py | 16 ++++++++++++++++ numpy/typing/tests/data/reveal/dtype.py | 19 +++++++++++++++++++ 3 files changed, 55 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index bf91aafdd3ed..d275ad27a45e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -862,6 +862,24 @@ class dtype(Generic[_DTypeScalar]): align: bool = ..., copy: bool = ..., ) -> dtype[void]: ... + + @overload + def __getitem__(self: dtype[void], key: List[str]) -> dtype[void]: ... + @overload + def __getitem__(self: dtype[void], key: Union[str, int]) -> dtype[Any]: ... + + # NOTE: In the future 1-based multiplications will also yield `void` dtypes + @overload + def __mul__(self, value: Literal[0]) -> None: ... # type: ignore[misc] + @overload + def __mul__(self, value: Literal[1]) -> dtype[_DTypeScalar]: ... + @overload + def __mul__(self, value: int) -> dtype[void]: ... + + # NOTE: `__rmul__` seems to be broken when used in combination with + # literals as of mypy 0.800. Set the return-type to `Any` for now. + def __rmul__(self, value: int) -> Any: ... + def __eq__(self, other: DTypeLike) -> bool: ... def __ne__(self, other: DTypeLike) -> bool: ... def __gt__(self, other: DTypeLike) -> bool: ... @@ -901,6 +919,8 @@ class dtype(Generic[_DTypeScalar]): @property def name(self) -> str: ... @property + def names(self) -> Optional[Tuple[str, ...]]: ... + @property def num(self) -> int: ... @property def shape(self) -> _Shape: ... 
diff --git a/numpy/typing/tests/data/pass/dtype.py b/numpy/typing/tests/data/pass/dtype.py index cbae8c078551..90715209eac2 100644 --- a/numpy/typing/tests/data/pass/dtype.py +++ b/numpy/typing/tests/data/pass/dtype.py @@ -1,5 +1,8 @@ import numpy as np +dtype_obj = np.dtype(np.str_) +void_dtype_obj = np.dtype([("f0", np.float64), ("f1", np.float32)]) + np.dtype(dtype=np.int64) np.dtype(int) np.dtype("int") @@ -33,3 +36,16 @@ class Test: np.dtype(Test()) + +dtype_obj.names + +dtype_obj * 0 +dtype_obj * 2 + +0 * dtype_obj +2 * dtype_obj + +void_dtype_obj["f0"] +void_dtype_obj[0] +void_dtype_obj[["f0", "f1"]] +void_dtype_obj[["f0"]] diff --git a/numpy/typing/tests/data/reveal/dtype.py b/numpy/typing/tests/data/reveal/dtype.py index d414f2c4934f..83a7695b9bb5 100644 --- a/numpy/typing/tests/data/reveal/dtype.py +++ b/numpy/typing/tests/data/reveal/dtype.py @@ -1,5 +1,8 @@ import numpy as np +dtype_obj: np.dtype[np.str_] +void_dtype_obj: np.dtype[np.void] + reveal_type(np.dtype(np.float64)) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(np.dtype(np.int64)) # E: numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] @@ -31,3 +34,19 @@ # Void reveal_type(np.dtype(("U", 10))) # E: numpy.dtype[numpy.void] + +reveal_type(dtype_obj.name) # E: str +reveal_type(dtype_obj.names) # E: Union[builtins.tuple[builtins.str], None] + +reveal_type(dtype_obj * 0) # E: None +reveal_type(dtype_obj * 1) # E: numpy.dtype[numpy.str_] +reveal_type(dtype_obj * 2) # E: numpy.dtype[numpy.void] + +reveal_type(0 * dtype_obj) # E: Any +reveal_type(1 * dtype_obj) # E: Any +reveal_type(2 * dtype_obj) # E: Any + +reveal_type(void_dtype_obj["f0"]) # E: numpy.dtype[Any] +reveal_type(void_dtype_obj[0]) # E: numpy.dtype[Any] +reveal_type(void_dtype_obj[["f0", "f1"]]) # E: numpy.dtype[numpy.void] +reveal_type(void_dtype_obj[["f0"]]) # E: numpy.dtype[numpy.void] From 72b00877a6ce1eb35dea04f923d24dbec1f02046 Mon Sep 17 00:00:00 2001 From: Michael Lamparski Date: Wed, 3 Mar 2021 18:46:17 -0500 Subject: [PATCH 19/31] BUG: NameError in numpy.distutils.fcompiler.compaq Fix a simple mistake in commit da0497fdf35 which can produce a NameError when installing numpy in MinGW/MSYS2. 
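
For illustration, a minimal, self-contained Python sketch of the failure mode
(the function and message below are invented for this example; this is not the
distutils code itself): the ``except`` clause binds the exception to ``e``, but
the buggy handler body referred to the undefined name ``msg``, so instead of
ignoring the known msvccompiler.py quirk the build died with a NameError.

    def probe_compiler():
        # Stand-in for the distutils call that raises on such setups.
        raise AttributeError("class has no attribute '_MSVCCompiler__root'")

    try:
        probe_compiler()
    except AttributeError as e:
        # Buggy form: ``str(msg)`` raised NameError because ``msg`` is undefined.
        # Fixed form, as in the diff below: use the bound exception ``e``.
        if '_MSVCCompiler__root' in str(e):
            print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e,))
        else:
            raise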
--- numpy/distutils/fcompiler/compaq.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py index 6ce590c7c821..1a356866a283 100644 --- a/numpy/distutils/fcompiler/compaq.py +++ b/numpy/distutils/fcompiler/compaq.py @@ -80,8 +80,8 @@ class CompaqVisualFCompiler(FCompiler): except DistutilsPlatformError: pass except AttributeError as e: - if '_MSVCCompiler__root' in str(msg): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg)) + if '_MSVCCompiler__root' in str(e): + print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) else: raise except IOError as e: From f7c52ab544fac0190c6444c7295dd31ca1057841 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com> Date: Thu, 11 Mar 2021 23:59:16 +0100 Subject: [PATCH 20/31] BUG: Fixed ``where`` keyword for ``np.mean`` & ``np.var`` methods (gh-18560) * Fixed keyword bug * Added test case * Reverted to original notation * Added tests for var and std Closes gh-18552 --- numpy/core/_methods.py | 4 ++-- numpy/core/tests/test_multiarray.py | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index c730e2035f36..fc118326a572 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -164,7 +164,7 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): is_float16_result = False rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) - if rcount == 0 if where is True else umr_any(rcount == 0): + if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) # Cast bool, unsigned int, and int to float64 by default @@ -197,7 +197,7 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) # Make this warning show up on top. 
- if ddof >= rcount if where is True else umr_any(ddof >= rcount): + if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None): warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index bd8c51ab78fd..3ce46c43f472 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -5713,6 +5713,15 @@ def test_mean_where(self): np.array(_res)) assert_allclose(np.mean(a, axis=_ax, where=_wh), np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[1.5, 5.5], [9.5, 13.5]] + assert_allclose(a3d.mean(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.mean(a3d, axis=2, where=_wh_partial), + np.array(_res)) + with pytest.warns(RuntimeWarning) as w: assert_allclose(a.mean(axis=1, where=wh_partial), np.array([np.nan, 5.5, 9.5, np.nan])) @@ -5788,6 +5797,15 @@ def test_var_where(self): np.array(_res)) assert_allclose(np.var(a, axis=_ax, where=_wh), np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.25, 0.25], [0.25, 0.25]] + assert_allclose(a3d.var(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.var(a3d, axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.var(a, axis=1, where=wh_full), np.var(a[wh_full].reshape((5, 3)), axis=1)) assert_allclose(np.var(a, axis=0, where=wh_partial), @@ -5827,6 +5845,14 @@ def test_std_where(self): assert_allclose(a.std(axis=_ax, where=_wh), _res) assert_allclose(np.std(a, axis=_ax, where=_wh), _res) + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.5, 0.5], [0.5, 0.5]] + assert_allclose(a3d.std(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.std(a3d, axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(a.std(axis=1, where=whf), np.std(a[whf].reshape((5,3)), axis=1)) assert_allclose(np.std(a, axis=1, where=whf), From 2f5e65cc3fd07d60afc05913029f8286bbff7847 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 15 Mar 2021 07:44:40 -0600 Subject: [PATCH 21/31] CI: Update apt package list before Python install Closes #18613. --- azure-pipelines.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 45004f26c124..93e7071964f9 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -237,6 +237,7 @@ stages: vmImage: 'ubuntu-18.04' steps: - script: | + sudo apt update sudo apt install python3.7 sudo apt install python3.7-dev if ! `gcc-4.8 2>/dev/null`; then From 376ec468c46ac8f55e361e9b06a1a80c29f338c5 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 15 Mar 2021 14:50:43 +0100 Subject: [PATCH 22/31] API: Formally classify `np.lib.stride_tricks` as part of the public API With `as_strided`, and the newly introduced `sliding_window_view` function, there are currently 2 public objects that can: a. Only be imported from a private module b. Are publicly documented to-be imported from aforementioned module Both observations are problematic and in need of rectification. This commit therefore moves `np.lib.stride_tricks` to the `PUBLIC_MODULES` list. 
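
For reference, the import path this change makes formally public looks like
the following short sketch (``sliding_window_view`` is new in NumPy 1.20;
``as_strided`` lives in the same module):

    import numpy as np
    from numpy.lib.stride_tricks import sliding_window_view

    x = np.arange(6)
    print(sliding_window_view(x, window_shape=3))
    # [[0 1 2]
    #  [1 2 3]
    #  [2 3 4]
    #  [3 4 5]]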
--- numpy/tests/test_public_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 1382e1c4b52a..55f548451443 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -154,6 +154,7 @@ def test_NPY_NO_EXPORT(): "lib.mixins", "lib.recfunctions", "lib.scimath", + "lib.stride_tricks", "linalg", "ma", "ma.extras", @@ -279,7 +280,6 @@ def test_NPY_NO_EXPORT(): "lib.npyio", "lib.polynomial", "lib.shape_base", - "lib.stride_tricks", "lib.twodim_base", "lib.type_check", "lib.ufunclike", From 9602fb2d4835541491ff041f786249150c6a5c10 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 15 Mar 2021 14:56:31 +0100 Subject: [PATCH 23/31] MAINT: Re-export a number of sub-modules Ensures that type checkers will allow the likes of: >>> import numpy as np >>> out = np.lib.stride_tricks.sliding_window_view(...) --- numpy/lib/__init__.pyi | 7 +++++++ numpy/ma/__init__.pyi | 2 ++ numpy/polynomial/__init__.pyi | 10 ++++++++++ numpy/typing/tests/data/fail/modules.py | 5 +++++ numpy/typing/tests/data/pass/modules.py | 13 +++++++++++++ numpy/typing/tests/data/reveal/modules.py | 13 +++++++++++++ 6 files changed, 50 insertions(+) diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index a8eb242074b6..984423984d13 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,5 +1,12 @@ from typing import Any, List +from numpy.lib import ( + format as format, + mixins as mixins, + scimath as scimath, + stride_tricks as stride_stricks, +) + __all__: List[str] emath: Any diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi index 66dfe40de6a5..16e026272605 100644 --- a/numpy/ma/__init__.pyi +++ b/numpy/ma/__init__.pyi @@ -1,5 +1,7 @@ from typing import Any, List +from numpy.ma import extras as extras + __all__: List[str] core: Any diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 817ba22ac83f..5f4d11e9097a 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -1,5 +1,15 @@ from typing import Any +from numpy.polynomial import ( + chebyshev as chebyshev, + hermite as hermite, + hermite_e as hermite_e, + laguerre as laguerre, + legendre as legendre, + polynomial as polynomial, + polyutils as polyutils, +) + Polynomial: Any Chebyshev: Any Legendre: Any diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.py index 5e2d820abc85..5770666bebc7 100644 --- a/numpy/typing/tests/data/fail/modules.py +++ b/numpy/typing/tests/data/fail/modules.py @@ -8,3 +8,8 @@ np.sys # E: Module has no attribute np.os # E: Module has no attribute np.math # E: Module has no attribute + +# Public sub-modules that are not imported to their parent module by default; +# e.g. 
one must first execute `import numpy.lib.recfunctions` +np.lib.recfunctions # E: Module has no attribute +np.ma.mrecords # E: Module has no attribute diff --git a/numpy/typing/tests/data/pass/modules.py b/numpy/typing/tests/data/pass/modules.py index 8ae78e841fcb..238d209ca372 100644 --- a/numpy/typing/tests/data/pass/modules.py +++ b/numpy/typing/tests/data/pass/modules.py @@ -15,6 +15,19 @@ np.testing np.version +np.lib.format +np.lib.mixins +np.lib.scimath +np.lib.stride_tricks +np.ma.extras +np.polynomial.chebyshev +np.polynomial.hermite +np.polynomial.hermite_e +np.polynomial.laguerre +np.polynomial.legendre +np.polynomial.polynomial +np.polynomial.polyutils + np.__path__ np.__version__ diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py index bb7fb9ad4742..03b5f810c8b6 100644 --- a/numpy/typing/tests/data/reveal/modules.py +++ b/numpy/typing/tests/data/reveal/modules.py @@ -17,6 +17,19 @@ reveal_type(np.testing) # E: ModuleType reveal_type(np.version) # E: ModuleType +reveal_type(np.lib.format) # E: ModuleType +reveal_type(np.lib.mixins) # E: ModuleType +reveal_type(np.lib.scimath) # E: ModuleType +reveal_type(np.lib.stride_tricks) # E: ModuleType +reveal_type(np.ma.extras) # E: ModuleType +reveal_type(np.polynomial.chebyshev) # E: ModuleType +reveal_type(np.polynomial.hermite) # E: ModuleType +reveal_type(np.polynomial.hermite_e) # E: ModuleType +reveal_type(np.polynomial.laguerre) # E: ModuleType +reveal_type(np.polynomial.legendre) # E: ModuleType +reveal_type(np.polynomial.polynomial) # E: ModuleType +reveal_type(np.polynomial.polyutils) # E: ModuleType + # TODO: Remove when annotations have been added to `np.testing.assert_equal` reveal_type(np.testing.assert_equal) # E: Any From 6b0490ce71d438d91303931f4d4a0a607d0bcd76 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 16 Mar 2021 17:14:39 +0100 Subject: [PATCH 24/31] API: Move `polynomial.polyutils` to the `PRIVATE_BUT_PRESENT_MODULES` list Aforementioned module was accidently marked as public --- numpy/polynomial/__init__.pyi | 1 - numpy/tests/test_public_api.py | 2 +- numpy/typing/tests/data/pass/modules.py | 1 - numpy/typing/tests/data/reveal/modules.py | 1 - 4 files changed, 1 insertion(+), 4 deletions(-) diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 5f4d11e9097a..755f7521bfb2 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -7,7 +7,6 @@ from numpy.polynomial import ( laguerre as laguerre, legendre as legendre, polynomial as polynomial, - polyutils as polyutils, ) Polynomial: Any diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 55f548451443..4df57ce98fc0 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -167,7 +167,6 @@ def test_NPY_NO_EXPORT(): "polynomial.laguerre", "polynomial.legendre", "polynomial.polynomial", - "polynomial.polyutils", "random", "testing", "typing", @@ -293,6 +292,7 @@ def test_NPY_NO_EXPORT(): "ma.timer_comparison", "matrixlib", "matrixlib.defmatrix", + "polynomial.polyutils", "random.mtrand", "random.bit_generator", "testing.print_coercion_tables", diff --git a/numpy/typing/tests/data/pass/modules.py b/numpy/typing/tests/data/pass/modules.py index 238d209ca372..f2d779e20e63 100644 --- a/numpy/typing/tests/data/pass/modules.py +++ b/numpy/typing/tests/data/pass/modules.py @@ -26,7 +26,6 @@ np.polynomial.laguerre np.polynomial.legendre np.polynomial.polynomial -np.polynomial.polyutils np.__path__ np.__version__ diff 
--git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py index 03b5f810c8b6..00fca3eec580 100644 --- a/numpy/typing/tests/data/reveal/modules.py +++ b/numpy/typing/tests/data/reveal/modules.py @@ -28,7 +28,6 @@ reveal_type(np.polynomial.laguerre) # E: ModuleType reveal_type(np.polynomial.legendre) # E: ModuleType reveal_type(np.polynomial.polynomial) # E: ModuleType -reveal_type(np.polynomial.polyutils) # E: ModuleType # TODO: Remove when annotations have been added to `np.testing.assert_equal` reveal_type(np.testing.assert_equal) # E: Any From 75e13a693ca86efe3e00708d82eae060d5f2953f Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 12 Mar 2021 11:16:19 -0600 Subject: [PATCH 25/31] BUG: Fix ma coercion list-of-ma-arrays if they do not cast to bool There was a regression here due to force casting to bool, but if that happens to fail (it does, but should not for strings). The mask would just be dropped. Of course masked arrays are held together by faith here, but its a regression. Closes gh-18551 --- numpy/ma/core.py | 5 +++-- numpy/ma/tests/test_core.py | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index bdf7f0d8c26b..492070b62f4d 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -2859,8 +2859,9 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, elif isinstance(data, (tuple, list)): try: # If data is a sequence of masked array - mask = np.array([getmaskarray(np.asanyarray(m, dtype=mdtype)) - for m in data], dtype=mdtype) + mask = np.array( + [getmaskarray(np.asanyarray(m, dtype=_data.dtype)) + for m in data], dtype=mdtype) except ValueError: # If data is nested mask = nomask diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index f4078062521f..e1cc47063261 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -238,6 +238,25 @@ def test_creation_with_list_of_maskedarrays(self): assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) assert_(data.mask is nomask) + def test_creation_with_list_of_maskedarrays_no_bool_cast(self): + # Tests the regression in gh-18551 + masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False]) + normal_int = np.arange(2) + res = np.ma.asarray([masked_str, normal_int]) + assert_array_equal(res.mask, [[True, False], [False, False]]) + # Te above only failed due a long chain of oddity, try also with + # an object array that cannot be converted to bool always: + class NotBool(): + def __bool__(self): + raise ValueError("not a bool!") + masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) + # Check that the NotBool actually fails like we would expect: + with pytest.raises(ValueError, match="not a bool!"): + np.asarray([masked_obj], dtype=bool) + + res = np.ma.asarray([masked_obj, normal_int]) + assert_array_equal(res.mask, [[True, False], [False, False]]) + def test_creation_from_ndarray_with_padding(self): x = np.array([('A', 0)], dtype={'names':['f0','f1'], 'formats':['S4','i8'], From 9719ac578c26c00f1305fd781a000cbf915da9c2 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 12 Mar 2021 11:46:05 -0600 Subject: [PATCH 26/31] Apply suggestions from code review --- numpy/ma/tests/test_core.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index e1cc47063261..ad395f169dd3 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -244,7 +244,8 @@ def 
test_creation_with_list_of_maskedarrays_no_bool_cast(self): normal_int = np.arange(2) res = np.ma.asarray([masked_str, normal_int]) assert_array_equal(res.mask, [[True, False], [False, False]]) - # Te above only failed due a long chain of oddity, try also with + + # The above only failed due a long chain of oddity, try also with # an object array that cannot be converted to bool always: class NotBool(): def __bool__(self): From fb44ee2a4bf4914a235df3600186c8610d267fcc Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 13 Mar 2021 14:39:45 -0600 Subject: [PATCH 27/31] Update numpy/ma/tests/test_core.py --- numpy/ma/tests/test_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index ad395f169dd3..9bfb82d1ff1d 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -242,7 +242,7 @@ def test_creation_with_list_of_maskedarrays_no_bool_cast(self): # Tests the regression in gh-18551 masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False]) normal_int = np.arange(2) - res = np.ma.asarray([masked_str, normal_int]) + res = np.ma.asarray([masked_str, normal_int], dtype="U21") assert_array_equal(res.mask, [[True, False], [False, False]]) # The above only failed due a long chain of oddity, try also with From b9edc252eb52cbb26f65ab67b4dbca3eccc1db65 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 19 Mar 2021 23:20:57 -0500 Subject: [PATCH 28/31] BUG: Fix small valgrind-found issues (#18651) * BUG: Fix small valgrind-found issues This should be backportable. There was at least one that I could not reproduce when running the tests again. And the new random-shuffle tests give false-positives (which is just slightly annoying, considering that we are very close to almost only "longdouble" related false-positives) * BUG: Add missing decref in user-dtype fallback paths The missing decref here only leaks references and can never leak actual memory fortunately. * MAINT,TST: Simplify the "refcount logic" in the dispatch tests again Using SETREF can be nice, but was just overcomplicating thing here... 
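
As a generic illustration (this is not NumPy's own test machinery, just a
sketch of the kind of check that valgrind and pytest-leaks automate): a
reference leak of this sort shows up as a refcount that keeps growing when an
operation is repeated.

    import sys
    import numpy as np

    dt = np.dtype([("a", "f8")])   # a non-cached descriptor instance
    baseline = sys.getrefcount(dt)
    for _ in range(1000):
        np.zeros(4, dtype=dt)      # exercise paths that take and release refs on dt
    growth = sys.getrefcount(dt) - baseline
    assert growth < 10, f"possible reference leak: refcount grew by {growth}"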
--- numpy/core/src/multiarray/array_coercion.c | 1 + numpy/core/src/multiarray/buffer.c | 1 + numpy/core/src/multiarray/legacy_dtype_implementation.c | 2 ++ numpy/core/src/umath/_umath_tests.c.src | 1 + numpy/core/tests/test_multiarray.py | 2 +- 5 files changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index 1eac401bc2f3..7aa1288c77ee 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -745,6 +745,7 @@ find_descriptor_from_array( NULL, DType, &flags, item_DType) < 0) { Py_DECREF(iter); Py_DECREF(elem); + Py_XDECREF(*out_descr); Py_XDECREF(item_DType); return -1; } diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c index 813850224714..5458c81cccec 100644 --- a/numpy/core/src/multiarray/buffer.c +++ b/numpy/core/src/multiarray/buffer.c @@ -878,6 +878,7 @@ void_getbuffer(PyObject *self, Py_buffer *view, int flags) */ _buffer_info_t *info = _buffer_get_info(&scalar->_buffer_info, self, flags); if (info == NULL) { + Py_DECREF(self); return -1; } view->format = info->format; diff --git a/numpy/core/src/multiarray/legacy_dtype_implementation.c b/numpy/core/src/multiarray/legacy_dtype_implementation.c index 3ce4710fddb4..d2e95348dd70 100644 --- a/numpy/core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/core/src/multiarray/legacy_dtype_implementation.c @@ -161,10 +161,12 @@ PyArray_LegacyCanCastSafely(int fromtype, int totype) while (*curtype != NPY_NOTYPE) { if (*curtype++ == totype) { + Py_DECREF(from); return 1; } } } + Py_DECREF(from); return 0; } diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src index 750fbeb92a7b..4e250e43b650 100644 --- a/numpy/core/src/umath/_umath_tests.c.src +++ b/numpy/core/src/umath/_umath_tests.c.src @@ -614,6 +614,7 @@ UMath_Tests_test_dispatch(PyObject *NPY_UNUSED(dummy), PyObject *NPY_UNUSED(dumm if (item == NULL || PyDict_SetItemString(dict, "@str@", item) < 0) { goto err; } + Py_DECREF(item); /**end repeat**/ item = PyList_New(0); if (item == NULL || PyDict_SetItemString(dict, "all", item) < 0) { diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 3ce46c43f472..6335a471c8ff 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -7479,7 +7479,7 @@ def test_out_of_order_fields(self): memoryview(arr) def test_max_dims(self): - a = np.empty((1,) * 32) + a = np.ones((1,) * 32) self._check_roundtrip(a) @pytest.mark.slow From afc861e4f3fbc653a3094fb50d6309b7816c7adf Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 23 Mar 2021 14:11:52 -0500 Subject: [PATCH 29/31] BUG: Fix small issues found with pytest-leaks None of these are particularly worrying as they either usually only leak reference (and not memory) or appear in rare or almost impossible error-paths, or are limited to the tests. Unfortunately, this PR will not apply to 1.20.x, due to small changes in the overrides. 
Backport of gh-18670 (with two small differences) --- .../src/multiarray/arrayfunction_override.c | 31 ++++++++++++------- numpy/core/src/multiarray/convert_datatype.c | 3 +- numpy/core/src/multiarray/multiarraymodule.c | 1 + numpy/core/src/umath/_umath_tests.c.src | 1 + numpy/core/src/umath/_umath_tests.dispatch.c | 1 + numpy/core/src/umath/ufunc_object.c | 4 ++- numpy/core/tests/test_overrides.py | 26 +++++++++------- 7 files changed, 41 insertions(+), 26 deletions(-) diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c index 2c07cdebc625..a7aa3c49449e 100644 --- a/numpy/core/src/multiarray/arrayfunction_override.c +++ b/numpy/core/src/multiarray/arrayfunction_override.c @@ -341,18 +341,23 @@ array_implement_array_function( return NULL; } - /* Remove `like=` kwarg, which is NumPy-exclusive and thus not present + /* + * Remove `like=` kwarg, which is NumPy-exclusive and thus not present * in downstream libraries. If `like=` is specified but doesn't * implement `__array_function__`, raise a `TypeError`. */ if (kwargs != NULL && PyDict_Contains(kwargs, npy_ma_str_like)) { PyObject *like_arg = PyDict_GetItem(kwargs, npy_ma_str_like); - if (like_arg && !get_array_function(like_arg)) { - return PyErr_Format(PyExc_TypeError, - "The `like` argument must be an array-like that implements " - "the `__array_function__` protocol."); + if (like_arg != NULL) { + PyObject *tmp_has_override = get_array_function(like_arg); + if (tmp_has_override == NULL) { + return PyErr_Format(PyExc_TypeError, + "The `like` argument must be an array-like that " + "implements the `__array_function__` protocol."); + } + Py_DECREF(tmp_has_override); + PyDict_DelItem(kwargs, npy_ma_str_like); } - PyDict_DelItem(kwargs, npy_ma_str_like); } PyObject *res = array_implement_array_function_internal( @@ -387,14 +392,18 @@ array_implement_c_array_function_creation( return Py_NotImplemented; } - PyObject *like_arg = PyDict_GetItem(kwargs, npy_ma_str_like); + PyObject *like_arg = PyDict_GetItemWithError(kwargs, npy_ma_str_like); if (like_arg == NULL) { return NULL; } - else if (!get_array_function(like_arg)) { - return PyErr_Format(PyExc_TypeError, - "The `like` argument must be an array-like that implements " - "the `__array_function__` protocol."); + else { + PyObject *tmp_has_override = get_array_function(like_arg); + if (tmp_has_override == NULL) { + return PyErr_Format(PyExc_TypeError, + "The `like` argument must be an array-like that " + "implements the `__array_function__` protocol."); + } + Py_DECREF(tmp_has_override); } PyObject *relevant_args = PyTuple_Pack(1, like_arg); PyDict_DelItem(kwargs, npy_ma_str_like); diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 5d5b69bd5c5b..77657b260c33 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -902,8 +902,7 @@ PyArray_FindConcatenationDescriptor( "The dtype `%R` is not a valid dtype for concatenation " "since it is a subarray dtype (the subarray dimensions " "would be added as array dimensions).", result); - Py_DECREF(result); - return NULL; + Py_SETREF(result, NULL); } goto finish; } diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 66f71de5179e..f88a41749817 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -2100,6 +2100,7 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), 
PyObject *args, PyObject *keywds array_function_result = array_implement_c_array_function_creation( "fromstring", args, keywds); if (array_function_result != Py_NotImplemented) { + Py_XDECREF(descr); return array_function_result; } diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src index 4e250e43b650..7cc74a4f35ed 100644 --- a/numpy/core/src/umath/_umath_tests.c.src +++ b/numpy/core/src/umath/_umath_tests.c.src @@ -621,6 +621,7 @@ UMath_Tests_test_dispatch(PyObject *NPY_UNUSED(dummy), PyObject *NPY_UNUSED(dumm goto err; } NPY_CPU_DISPATCH_CALL_ALL(_umath_tests_dispatch_attach, (item)); + Py_SETREF(item, NULL); if (PyErr_Occurred()) { goto err; } diff --git a/numpy/core/src/umath/_umath_tests.dispatch.c b/numpy/core/src/umath/_umath_tests.dispatch.c index d86a54411367..85f3650106ea 100644 --- a/numpy/core/src/umath/_umath_tests.dispatch.c +++ b/numpy/core/src/umath/_umath_tests.dispatch.c @@ -29,5 +29,6 @@ void NPY_CPU_DISPATCH_CURFX(_umath_tests_dispatch_attach)(PyObject *list) PyObject *item = PyUnicode_FromString(NPY_TOSTRING(NPY_CPU_DISPATCH_CURFX(func))); if (item) { PyList_Append(list, item); + Py_DECREF(item); } } diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 653e0b5befd7..d70d15c50301 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5495,6 +5495,7 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { + Py_DECREF(tmp); return NULL; } ap1 = (PyArrayObject *) PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0); @@ -5514,6 +5515,7 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { + Py_DECREF(tmp); Py_DECREF(ap1); return NULL; } @@ -5538,7 +5540,7 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) "maximum supported dimension for an ndarray is %d, but " "`%s.outer()` result would have %d.", NPY_MAXDIMS, ufunc->name, newdims.len); - return NPY_FAIL; + goto fail; } if (newdims.ptr == NULL) { goto fail; diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py index 6862fca03826..94e39f98c716 100644 --- a/numpy/core/tests/test_overrides.py +++ b/numpy/core/tests/test_overrides.py @@ -1,5 +1,6 @@ import inspect import sys +import os import tempfile from io import StringIO from unittest import mock @@ -558,18 +559,19 @@ def test_array_like_fromfile(self, numpy_ref): data = np.random.random(5) - fname = tempfile.mkstemp()[1] - data.tofile(fname) - - array_like = np.fromfile(fname, like=ref) - if numpy_ref is True: - assert type(array_like) is np.ndarray - np_res = np.fromfile(fname, like=ref) - assert_equal(np_res, data) - assert_equal(array_like, np_res) - else: - assert type(array_like) is self.MyArray - assert array_like.function is self.MyArray.fromfile + with tempfile.TemporaryDirectory() as tmpdir: + fname = os.path.join(tmpdir, "testfile") + data.tofile(fname) + + array_like = np.fromfile(fname, like=ref) + if numpy_ref is True: + assert type(array_like) is np.ndarray + np_res = np.fromfile(fname, like=ref) + assert_equal(np_res, data) + assert_equal(array_like, np_res) + else: + assert type(array_like) is self.MyArray + assert array_like.function is self.MyArray.fromfile @requires_array_function def 
test_exception_handling(self):
From 5dc057cffd38cff1b2a53e3a1081ebd4e49a6f81 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Sun, 21 Mar 2021 16:15:31 -0600
Subject: [PATCH 30/31] REL: Prepare for the NumPy 1.20.2 release.

- Create the 1.20.2-changelog
- Update the 1.20.2-notes
---
 doc/changelog/1.20.2-changelog.rst | 40 +++++++++++++++++
 doc/source/release/1.20.2-notes.rst | 66 +++++++++++++++--------------
 2 files changed, 75 insertions(+), 31 deletions(-)
 create mode 100644 doc/changelog/1.20.2-changelog.rst

diff --git a/doc/changelog/1.20.2-changelog.rst b/doc/changelog/1.20.2-changelog.rst
new file mode 100644
index 000000000000..831cf03324de
--- /dev/null
+++ b/doc/changelog/1.20.2-changelog.rst
@@ -0,0 +1,40 @@
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Bas van Beek
+* Charles Harris
+* Christoph Gohlke
+* Mateusz Sokół +
+* Michael Lamparski
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 20 pull requests were merged for this release.
+
+* `#18382 `__: MAINT: Update f2py from master.
+* `#18459 `__: BUG: ``diagflat`` could overflow on windows or 32-bit platforms
+* `#18460 `__: BUG: Fix refcount leak in f2py ``complex_double_from_pyobj``.
+* `#18461 `__: BUG: Fix tiny memory leaks when ``like=`` overrides are used
+* `#18462 `__: BUG: Remove temporary change of descr/flags in VOID functions
+* `#18469 `__: BUG: Segfault in nditer buffer dealloc for Object arrays
+* `#18485 `__: BUG: Remove suspicious type casting
+* `#18486 `__: BUG: remove nonsensical comparison of pointer < 0
+* `#18487 `__: BUG: verify pointer against NULL before using it
+* `#18488 `__: BUG: check if PyArray_malloc succeeded
+* `#18546 `__: BUG: incorrect error fallthrough in nditer
+* `#18559 `__: CI: Backport CI fixes from main.
+* `#18599 `__: MAINT: Add annotations for ``dtype.__getitem__``, ``__mul__`` and...
+* `#18611 `__: BUG: NameError in numpy.distutils.fcompiler.compaq
+* `#18612 `__: BUG: Fixed ``where`` keyword for ``np.mean`` & ``np.var`` methods
+* `#18617 `__: CI: Update apt package list before Python install
+* `#18636 `__: MAINT: Ensure that re-exported sub-modules are properly annotated
+* `#18638 `__: BUG: Fix ma coercion list-of-ma-arrays if they do not cast to...
+* `#18661 `__: BUG: Fix small valgrind-found issues
+* `#18671 `__: BUG: Fix small issues found with pytest-leaks
diff --git a/doc/source/release/1.20.2-notes.rst b/doc/source/release/1.20.2-notes.rst
index e863de56f214..cdf45b65ea26 100644
--- a/doc/source/release/1.20.2-notes.rst
+++ b/doc/source/release/1.20.2-notes.rst
@@ -4,42 +4,46 @@ NumPy 1.20.2 Release Notes
 ==========================
 
+NumPy 1.20.2 is a bugfix release containing several fixes merged to the main
+branch after the NumPy 1.20.1 release.
 
-Highlights
-==========
-
-New functions
-=============
-
-
-Deprecations
+Contributors
 ============
 
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
-Future Changes -============== - +* Allan Haldane +* Bas van Beek +* Charles Harris +* Christoph Gohlke +* Mateusz Sokół + +* Michael Lamparski +* Sebastian Berg -Expired deprecations +Pull requests merged ==================== - -Compatibility notes -=================== - - -C API changes -============= - - -New Features -============ - - -Improvements -============ - - -Changes -======= +A total of 20 pull requests were merged for this release. + +* `#18382 `__: MAINT: Update f2py from master. +* `#18459 `__: BUG: ``diagflat`` could overflow on windows or 32-bit platforms +* `#18460 `__: BUG: Fix refcount leak in f2py ``complex_double_from_pyobj``. +* `#18461 `__: BUG: Fix tiny memory leaks when ``like=`` overrides are used +* `#18462 `__: BUG: Remove temporary change of descr/flags in VOID functions +* `#18469 `__: BUG: Segfault in nditer buffer dealloc for Object arrays +* `#18485 `__: BUG: Remove suspicious type casting +* `#18486 `__: BUG: remove nonsensical comparison of pointer < 0 +* `#18487 `__: BUG: verify pointer against NULL before using it +* `#18488 `__: BUG: check if PyArray_malloc succeeded +* `#18546 `__: BUG: incorrect error fallthrough in nditer +* `#18559 `__: CI: Backport CI fixes from main. +* `#18599 `__: MAINT: Add annotations for `dtype.__getitem__`, `__mul__` and... +* `#18611 `__: BUG: NameError in numpy.distutils.fcompiler.compaq +* `#18612 `__: BUG: Fixed ``where`` keyword for ``np.mean`` & ``np.var`` methods +* `#18617 `__: CI: Update apt package list before Python install +* `#18636 `__: MAINT: Ensure that re-exported sub-modules are properly annotated +* `#18638 `__: BUG: Fix ma coercion list-of-ma-arrays if they do not cast to... +* `#18661 `__: BUG: Fix small valgrind-found issues +* `#18671 `__: BUG: Fix small issues found with pytest-leaks From b19ad5bfa396a4600a52a598a30a65d4e993f831 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 27 Mar 2021 12:45:51 -0600 Subject: [PATCH 31/31] REL: NumPy 1.20.2 release. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 77a9b82bb68f..1206bcc6a995 100755 --- a/setup.py +++ b/setup.py @@ -57,7 +57,7 @@ MAJOR = 1 MINOR = 20 MICRO = 2 -ISRELEASED = False +ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) # The first version not in the `Programming Language :: Python :: ...` classifiers above