From 1cfefdf05c5f4b47cc101df46690e5841d0a3c78 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 22 Jun 2021 07:37:35 -0600 Subject: [PATCH 01/40] REL: prepare 1.21.x for further development --- doc/source/release/1.21.1-notes.rst | 45 +++++++++++++++++++++++++++++ pavement.py | 2 +- 2 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 doc/source/release/1.21.1-notes.rst diff --git a/doc/source/release/1.21.1-notes.rst b/doc/source/release/1.21.1-notes.rst new file mode 100644 index 000000000000..43940c950a54 --- /dev/null +++ b/doc/source/release/1.21.1-notes.rst @@ -0,0 +1,45 @@ +:orphan: + +========================== +NumPy 1.21.1 Release Notes +========================== + + +Highlights +========== + + +New functions +============= + + +Deprecations +============ + + +Future Changes +============== + + +Expired deprecations +==================== + + +Compatibility notes +=================== + + +C API changes +============= + + +New Features +============ + + +Improvements +============ + + +Changes +======= diff --git a/pavement.py b/pavement.py index 66c2cf953eaf..c7ee86839e4f 100644 --- a/pavement.py +++ b/pavement.py @@ -38,7 +38,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/1.21.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/1.21.1-notes.rst' #------------------------------------------------------- From 4ebcc2bd4834b9fbb1ecb5b4fbdaa50f09af2620 Mon Sep 17 00:00:00 2001 From: Bas van Beek <43369155+BvB93@users.noreply.github.com> Date: Wed, 23 Jun 2021 15:11:52 +0200 Subject: [PATCH 02/40] REV,BUG: Replace `NotImplemented` with `typing.Any` --- numpy/typing/__init__.py | 14 ++++++------- numpy/typing/_extended_precision.py | 26 ++++++++++++------------ numpy/typing/_shape.py | 4 ++-- numpy/typing/tests/test_generic_alias.py | 4 ++-- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 1bfdf07ae74e..8e758b26cb6f 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -161,7 +161,7 @@ # NOTE: The API section will be appended with additional entries # further down in this file -from typing import TYPE_CHECKING, List +from typing import TYPE_CHECKING, List, Any if TYPE_CHECKING: import sys @@ -364,14 +364,14 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _GUFunc_Nin2_Nout1, ) else: - _UFunc_Nin1_Nout1 = NotImplemented - _UFunc_Nin2_Nout1 = NotImplemented - _UFunc_Nin1_Nout2 = NotImplemented - _UFunc_Nin2_Nout2 = NotImplemented - _GUFunc_Nin2_Nout1 = NotImplemented + _UFunc_Nin1_Nout1 = Any + _UFunc_Nin2_Nout1 = Any + _UFunc_Nin1_Nout2 = Any + _UFunc_Nin2_Nout2 = Any + _GUFunc_Nin2_Nout1 = Any # Clean up the namespace -del TYPE_CHECKING, final, List +del TYPE_CHECKING, final, List, Any if __doc__ is not None: from ._add_docstring import _docstrings diff --git a/numpy/typing/_extended_precision.py b/numpy/typing/_extended_precision.py index 3f1ce2038282..bad20b048e9f 100644 --- a/numpy/typing/_extended_precision.py +++ b/numpy/typing/_extended_precision.py @@ -4,7 +4,7 @@ that they can be imported conditionally via the numpy's mypy plugin. """ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any import numpy as np from . 
import ( @@ -28,15 +28,15 @@ complex256 = np.complexfloating[_128Bit, _128Bit] complex512 = np.complexfloating[_256Bit, _256Bit] else: - uint128 = NotImplemented - uint256 = NotImplemented - int128 = NotImplemented - int256 = NotImplemented - float80 = NotImplemented - float96 = NotImplemented - float128 = NotImplemented - float256 = NotImplemented - complex160 = NotImplemented - complex192 = NotImplemented - complex256 = NotImplemented - complex512 = NotImplemented + uint128 = Any + uint256 = Any + int128 = Any + int256 = Any + float80 = Any + float96 = Any + float128 = Any + float256 = Any + complex160 = Any + complex192 = Any + complex256 = Any + complex512 = Any diff --git a/numpy/typing/_shape.py b/numpy/typing/_shape.py index b720c3ffc192..cac121026407 100644 --- a/numpy/typing/_shape.py +++ b/numpy/typing/_shape.py @@ -1,5 +1,5 @@ import sys -from typing import Sequence, Tuple, Union +from typing import Sequence, Tuple, Union, Any if sys.version_info >= (3, 8): from typing import SupportsIndex @@ -7,7 +7,7 @@ try: from typing_extensions import SupportsIndex except ImportError: - SupportsIndex = NotImplemented + SupportsIndex = Any _Shape = Tuple[int, ...] diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py index 0b99174392f0..8cbdd2e6e073 100644 --- a/numpy/typing/tests/test_generic_alias.py +++ b/numpy/typing/tests/test_generic_alias.py @@ -21,8 +21,8 @@ NDArray_ref = types.GenericAlias(np.ndarray, (Any, DType_ref)) FuncType = Callable[[Union[_GenericAlias, types.GenericAlias]], Any] else: - DType_ref = NotImplemented - NDArray_ref = NotImplemented + DType_ref = Any + NDArray_ref = Any FuncType = Callable[[_GenericAlias], Any] GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS) From cd3391295dd7ea5073291c0a4e365c46a0511b3c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 19 Jun 2021 10:57:18 +0200 Subject: [PATCH 03/40] MAINT: Fixed an issue with the return-dtype of `ndarray.real` and `imag The latter two would previously return complex arrays if the initial array was also complex --- numpy/__init__.pyi | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 4ec46aea01a4..49c600d198fa 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1645,6 +1645,14 @@ _ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]] class _SupportsItem(Protocol[_T_co]): def item(self, __args: Any) -> _T_co: ... +class _SupportsReal(Protocol[_T_co]): + @property + def real(self) -> _T_co: ... + +class _SupportsImag(Protocol[_T_co]): + @property + def imag(self) -> _T_co: ... + class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property def base(self) -> Optional[ndarray]: ... @@ -1653,11 +1661,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property def size(self) -> int: ... @property - def real(self: _ArraySelf) -> _ArraySelf: ... + def real( + self: NDArray[_SupportsReal[_ScalarType]], # type: ignore[type-var] + ) -> ndarray[_ShapeType, dtype[_ScalarType]]: ... @real.setter def real(self, value: ArrayLike) -> None: ... @property - def imag(self: _ArraySelf) -> _ArraySelf: ... + def imag( + self: NDArray[_SupportsImag[_ScalarType]], # type: ignore[type-var] + ) -> ndarray[_ShapeType, dtype[_ScalarType]]: ... @imag.setter def imag(self, value: ArrayLike) -> None: ... 
def __new__( From 7d397c28608e411a5ebbdf9327d08202079b342b Mon Sep 17 00:00:00 2001 From: Bas van Beek <43369155+BvB93@users.noreply.github.com> Date: Thu, 24 Jun 2021 17:47:52 +0200 Subject: [PATCH 04/40] MAINT: Replace `"dtype[Any]"` with `dtype` in the definiton of `npt.ArrayLike` Strings and types that are not subscriptable during runtime can cause issues with runtime Introspection helpers such as `typing.get_type_hints`. While this is very much an upstream issue, the particular case of `npt.ArrayLike` can be quite easily resolved in numpy. --- numpy/typing/_array_like.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index 9f57b22956cc..cfd9aacb4927 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -77,7 +77,7 @@ def __array__(self) -> ndarray[Any, _DType_co]: ... ArrayLike = Union[ _RecursiveSequence, _ArrayLike[ - "dtype[Any]", + dtype, Union[bool, int, float, complex, str, bytes] ], ] From 90e7ff12d24f7b46f9047e6e0527d56643af2c96 Mon Sep 17 00:00:00 2001 From: Bas van Beek <43369155+BvB93@users.noreply.github.com> Date: Thu, 24 Jun 2021 17:49:15 +0200 Subject: [PATCH 05/40] TST: Test that the `numpy.typing` public API works in combination with runtime Introspection helpers * `typing.get_type_hints` * `typing.get_args` * `typing.get_origin` --- numpy/typing/tests/test_runtime.py | 90 ++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 numpy/typing/tests/test_runtime.py diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py new file mode 100644 index 000000000000..e82b08ac26a0 --- /dev/null +++ b/numpy/typing/tests/test_runtime.py @@ -0,0 +1,90 @@ +"""Test the runtime usage of `numpy.typing`.""" + +from __future__ import annotations + +import sys +from typing import get_type_hints, Union, Tuple, NamedTuple + +import pytest +import numpy as np +import numpy.typing as npt + +try: + from typing_extensions import get_args, get_origin + SKIP = False +except ImportError: + SKIP = True + + +class TypeTup(NamedTuple): + typ: type + args: Tuple[type, ...] 
+ origin: None | type + + +if sys.version_info >= (3, 9): + NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) +else: + NDArrayTup = TypeTup(npt.NDArray, (), None) + +TYPES = { + "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union), + "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union), + "NBitBase": TypeTup(npt.NBitBase, (), None), + "NDArray": NDArrayTup, +} + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +@pytest.mark.skipif(SKIP, reason="requires typing-extensions") +def test_get_args(name: type, tup: TypeTup) -> None: + """Test `typing.get_args`.""" + typ, ref = tup.typ, tup.args + out = get_args(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +@pytest.mark.skipif(SKIP, reason="requires typing-extensions") +def test_get_origin(name: type, tup: TypeTup) -> None: + """Test `typing.get_origin`.""" + typ, ref = tup.typ, tup.origin + out = get_origin(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints`.""" + typ = tup.typ + + # Explicitly set `__annotations__` in order to circumvent the + # stringification performed by `from __future__ import annotations` + def func(a): pass + func.__annotations__ = {"a": typ, "return": None} + + out = get_type_hints(func) + ref = {"a": typ, "return": type(None)} + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints_str(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints` with string-representation of types.""" + typ_str, typ = f"npt.{name}", tup.typ + + # Explicitly set `__annotations__` in order to circumvent the + # stringification performed by `from __future__ import annotations` + def func(a): pass + func.__annotations__ = {"a": typ_str, "return": None} + + out = get_type_hints(func) + ref = {"a": typ, "return": type(None)} + assert out == ref + + +def test_keys() -> None: + """Test that ``TYPES.keys()`` and ``numpy.typing.__all__`` are synced.""" + keys = TYPES.keys() + ref = set(npt.__all__) + assert keys == ref From c433e54668d2cfc0b4076b3e049cf7badb83704d Mon Sep 17 00:00:00 2001 From: Bas van Beek <43369155+BvB93@users.noreply.github.com> Date: Thu, 24 Jun 2021 22:40:37 +0200 Subject: [PATCH 06/40] MAINT: Add a subscriptable runtime-only placeholder for `_SupportsDType` --- numpy/typing/_dtype_like.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py index a41e2f358d97..636e2209b45f 100644 --- a/numpy/typing/_dtype_like.py +++ b/numpy/typing/_dtype_like.py @@ -1,5 +1,15 @@ import sys -from typing import Any, List, Sequence, Tuple, Union, Type, TypeVar, TYPE_CHECKING +from typing import ( + Any, + List, + Sequence, + Tuple, + Union, + Type, + TypeVar, + Generic, + TYPE_CHECKING, +) import numpy as np from ._shape import _ShapeLike @@ -81,7 +91,9 @@ def dtype(self) -> _DType_co: ... else: _DTypeDict = Any - _SupportsDType = Any + + class _SupportsDType(Generic[_DType_co]): + pass # Would create a dtype[np.void] @@ -112,7 +124,7 @@ def dtype(self) -> _DType_co: ... 
# array-scalar types and generic types type, # TODO: enumerate these when we add type hints for numpy scalars # anything with a dtype attribute - "_SupportsDType[np.dtype[Any]]", + _SupportsDType[np.dtype], # character codes, type strings or comma-separated fields, e.g., 'float64' str, _VoidDTypeLike, From 937c4943f14954f7a9c260df1011f7773a56b905 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 22 Jun 2021 18:55:20 -0600 Subject: [PATCH 07/40] DOC: Fix some docstrings crash pdf generation. The byte string output of numpy.Generator.bytes must have the `\` properly escaped. Also make a small fix to the legacy docstring. --- numpy/random/_generator.pyx | 2 +- numpy/random/mtrand.pyx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index cd0b248723d2..e2430d139a42 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -585,7 +585,7 @@ cdef class Generator: Examples -------- >>> np.random.default_rng().bytes(10) - b'\xfeC\x9b\x86\x17\xf2\xa1\xafcp' # random + b'\\xfeC\\x9b\\x86\\x17\\xf2\\xa1\\xafcp' # random """ cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1) diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 863879a0465f..4f5862faa1d9 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -795,7 +795,7 @@ cdef class RandomState: Examples -------- >>> np.random.bytes(10) - ' eh\\x85\\x022SZ\\xbf\\xa4' #random + b' eh\\x85\\x022SZ\\xbf\\xa4' #random """ cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1) # Interpret the uint32s as little-endian to convert them to bytes From a30410141f5392802711d504ddfe7fd73b5e340e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 25 Jun 2021 10:19:52 -0600 Subject: [PATCH 08/40] MAINT: bump scipy-mathjax Backport of #19314. Bump scipy-mathjax submodule. This is actually a no-op (npm is used only if you upgrade the mathjax stored in scipy-mathjax to a newer version manually, last done in 2017), but silences some code quality scanners. The package.json, rebuild.sh could also be removed from the shipped documentation as they do nothing there, but not sure if that's worth the hassle. 
--- doc/source/_static/scipy-mathjax | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/_static/scipy-mathjax b/doc/source/_static/scipy-mathjax index 3d21c58225c0..36f4c898f225 160000 --- a/doc/source/_static/scipy-mathjax +++ b/doc/source/_static/scipy-mathjax @@ -1 +1 @@ -Subproject commit 3d21c58225c09243d5a088b1557654d280925e02 +Subproject commit 36f4c898f2255e0c98eb6949cd67381552d5ffea From 34c10e84631805d891bc4d78443e1eac7dd7f5b0 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 18 Jun 2021 13:59:18 -0500 Subject: [PATCH 09/40] MAINT: Introduce `PyArray_PyIntFromIntp` to remove ifdef's --- numpy/core/src/multiarray/conversion_utils.c | 6 +---- numpy/core/src/multiarray/conversion_utils.h | 11 +++++++++ numpy/core/src/multiarray/getset.c | 24 ++------------------ numpy/core/src/multiarray/iterators.c | 23 +++---------------- 4 files changed, 17 insertions(+), 47 deletions(-) diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c index 3c4c21dedd23..adfff11292a3 100644 --- a/numpy/core/src/multiarray/conversion_utils.c +++ b/numpy/core/src/multiarray/conversion_utils.c @@ -1222,11 +1222,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals) goto fail; } for (i = 0; i < len; i++) { -#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG - PyObject *o = PyLong_FromLong((long) vals[i]); -#else - PyObject *o = PyLong_FromLongLong((npy_longlong) vals[i]); -#endif + PyObject *o = PyArray_PyIntFromIntp(vals[i]); if (!o) { Py_DECREF(intTuple); intTuple = NULL; diff --git a/numpy/core/src/multiarray/conversion_utils.h b/numpy/core/src/multiarray/conversion_utils.h index 7d1871c43ddb..55c0cdd3578f 100644 --- a/numpy/core/src/multiarray/conversion_utils.h +++ b/numpy/core/src/multiarray/conversion_utils.h @@ -39,6 +39,17 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals); NPY_NO_EXPORT int PyArray_TypestrConvert(int itemsize, int gentype); + +static NPY_INLINE PyObject * +PyArray_PyIntFromIntp(npy_intp const value) +{ +#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG + return PyLong_FromLong((long)value); +#else + return PyLong_FromLongLong((npy_longlong)value); +#endif +} + NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp(int len, npy_intp const *vals); diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index 3575d6fad54e..bccbb7b0c54a 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -419,33 +419,13 @@ array_itemsize_get(PyArrayObject *self) static PyObject * array_size_get(PyArrayObject *self) { - npy_intp size=PyArray_SIZE(self); -#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG - return PyLong_FromLong((long) size); -#else - if (size > NPY_MAX_LONG || size < NPY_MIN_LONG) { - return PyLong_FromLongLong(size); - } - else { - return PyLong_FromLong((long) size); - } -#endif + return PyArray_PyIntFromIntp(PyArray_SIZE(self)); } static PyObject * array_nbytes_get(PyArrayObject *self) { - npy_intp nbytes = PyArray_NBYTES(self); -#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG - return PyLong_FromLong((long) nbytes); -#else - if (nbytes > NPY_MAX_LONG || nbytes < NPY_MIN_LONG) { - return PyLong_FromLongLong(nbytes); - } - else { - return PyLong_FromLong((long) nbytes); - } -#endif + return PyArray_PyIntFromIntp(PyArray_NBYTES(self)); } diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 3ebd4c858974..640896f2a937 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ 
-15,6 +15,7 @@ #include "iterators.h" #include "ctors.h" #include "common.h" +#include "conversion_utils.h" #include "array_coercion.h" #define NEWAXIS_INDEX -1 @@ -1410,31 +1411,13 @@ arraymultiter_dealloc(PyArrayMultiIterObject *multi) static PyObject * arraymultiter_size_get(PyArrayMultiIterObject *self) { -#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG - return PyLong_FromLong((long) self->size); -#else - if (self->size < NPY_MAX_LONG) { - return PyLong_FromLong((long) self->size); - } - else { - return PyLong_FromLongLong((npy_longlong) self->size); - } -#endif + return PyArray_PyIntFromIntp(self->size); } static PyObject * arraymultiter_index_get(PyArrayMultiIterObject *self) { -#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG - return PyLong_FromLong((long) self->index); -#else - if (self->size < NPY_MAX_LONG) { - return PyLong_FromLong((long) self->index); - } - else { - return PyLong_FromLongLong((npy_longlong) self->index); - } -#endif + return PyArray_PyIntFromIntp(self->index); } static PyObject * From dc5ec9818e140ed7c432501230c4318a2ffa3fa6 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 18 Jun 2021 14:01:18 -0500 Subject: [PATCH 10/40] BUG: Fix `arr.flat.index` for large or big-endian machines The type read when exposing was previously int, but has to be intp. this would only be visible for >2**31 elements, but is also visible on big-endian machines. Closes gh-19153 --- numpy/core/src/multiarray/iterators.c | 16 ++++++++++------ numpy/core/tests/test_multiarray.py | 11 +++++++++++ 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 640896f2a937..576ea89b32fc 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -1063,13 +1063,15 @@ static PyMemberDef iter_members[] = { T_OBJECT, offsetof(PyArrayIterObject, ao), READONLY, NULL}, - {"index", - T_INT, - offsetof(PyArrayIterObject, index), - READONLY, NULL}, {NULL, 0, 0, 0, NULL}, }; +static PyObject * +iter_index_get(PyArrayIterObject *self) +{ + return PyArray_PyIntFromIntp(self->index); +} + static PyObject * iter_coords_get(PyArrayIterObject *self) { @@ -1096,10 +1098,12 @@ iter_coords_get(PyArrayIterObject *self) } static PyGetSetDef iter_getsets[] = { + {"index", + (getter)iter_index_get, + NULL, NULL, NULL}, {"coords", (getter)iter_coords_get, - NULL, - NULL, NULL}, + NULL, NULL, NULL}, {NULL, NULL, NULL, NULL, NULL}, }; diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index d567653f5a4a..f807b90a3654 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -5364,6 +5364,17 @@ def test_refcount(self): assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50) + def test_index_getset(self): + it = np.arange(10).reshape(2, 1, 5).flat + with pytest.raises(AttributeError): + it.index = 10 + + for _ in it: + pass + # Check the value of `.index` is updated correctly (see also gh-19153) + # If the type was incorrect, this would show up on big-endian machines + assert it.index == it.base.size + class TestResize: From c52aeca30fd4c8fcd1981ae22babaa7c252c2efc Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 19 Jun 2021 18:03:04 +0200 Subject: [PATCH 11/40] ENH: add `numpy.f2py.get_include` function This is useful for similar reasons as `numpy.get_include`, see https://github.com/numpy/numpy/issues/14960#issuecomment-846460159 --- numpy/f2py/__init__.py | 49 
++++++++++++++++++++++++++++- numpy/f2py/tests/test_regression.py | 20 ++++++++---- 2 files changed, 62 insertions(+), 7 deletions(-) diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index 07ab6cd7da96..4f6938ed2b4e 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -2,7 +2,7 @@ """Fortran to Python Interface Generator. """ -__all__ = ['run_main', 'compile', 'f2py_testing'] +__all__ = ['run_main', 'compile', 'get_include', 'f2py_testing'] import sys import subprocess @@ -122,6 +122,53 @@ def compile(source, return cp.returncode +def get_include(): + """ + Return the directory that contains the fortranobject.c and .h files. + + .. note:: + + This function is not needed when building an extension with + `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files + in one go. + + Python extension modules built with f2py-generated code need to use + ``fortranobject.c`` as a source file, and include the ``fortranobject.h`` + header. This function can be used to obtain the directory containing + both of these files. + + Returns + ------- + include_path : str + Absolute path to the directory containing ``fortranobject.c`` and + ``fortranobject.h``. + + Notes + ----- + .. versionadded:: 1.22.0 + + Unless the build system you are using has specific support for f2py, + building a Python extension using a ``.pyf`` signature file is a two-step + process. For a module ``mymod``: + + - Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This + generates ``_mymodmodule.c`` and (if needed) + ``_fblas-f2pywrappers.f`` files next to ``mymod.pyf``. + - Step 2: build your Python extension module. This requires the + following source files: + + - ``_mymodmodule.c`` + - ``_mymod-f2pywrappers.f`` (if it was generated in step 1) + - ``fortranobject.c`` + + See Also + -------- + numpy.get_include : function that returns the numpy include directory + + """ + return os.path.join(os.path.dirname(__file__), 'src') + + if sys.version_info[:2] >= (3, 7): # module level getattr is only supported in 3.7 onwards # https://www.python.org/dev/peps/pep-0562/ diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index a1b772069a0b..b91499e4adb3 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -25,23 +25,31 @@ def test_inout(self): x = np.arange(3, dtype=np.float32) self.module.foo(x) assert_equal(x, [3, 1, 2]) - + class TestNumpyVersionAttribute(util.F2PyTest): # Check that th attribute __f2py_numpy_version__ is present # in the compiled module and that has the value np.__version__. 
sources = [_path('src', 'regression', 'inout.f90')] - + @pytest.mark.slow def test_numpy_version_attribute(self): - + # Check that self.module has an attribute named "__f2py_numpy_version__" - assert_(hasattr(self.module, "__f2py_numpy_version__"), + assert_(hasattr(self.module, "__f2py_numpy_version__"), msg="Fortran module does not have __f2py_numpy_version__") - + # Check that the attribute __f2py_numpy_version__ is a string assert_(isinstance(self.module.__f2py_numpy_version__, str), msg="__f2py_numpy_version__ is not a string") - + # Check that __f2py_numpy_version__ has the value numpy.__version__ assert_string_equal(np.__version__, self.module.__f2py_numpy_version__) + + +def test_include_path(): + incdir = np.f2py.get_include() + fnames_in_dir = os.listdir(incdir) + for fname in ('fortranobject.c', 'fortranobject.h'): + assert fname in fnames_in_dir + From 3b2473599bf1aea6f1da3c6f7d9049e871f22bdd Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 21 Jun 2021 10:14:33 -0500 Subject: [PATCH 12/40] BUG: Fix reference count leak This adds a missing decref to the signature/dtype keyword argument logic in reductions. (The code will change quite a bit after 1.21, but this avoids a reference count leak on 1.21.) --- numpy/core/src/umath/ufunc_object.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 0644a28c011b..b448505df986 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -4159,6 +4159,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, } Py_INCREF(dtype->singleton); otype = dtype->singleton; + Py_DECREF(dtype); } if (out_obj && !PyArray_OutputConverter(out_obj, &out)) { goto fail; From 70ac04ce1ae76a1a6d1a997b541932de04ea964e Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 21 Jun 2021 11:55:43 -0500 Subject: [PATCH 13/40] BUG: Remove unnecessary incref in type resolution `ensure_dtype_nbo()` already increments the reference count, so the INCREF is not necessary here. 
--- numpy/core/src/umath/ufunc_object.c | 2 +- numpy/core/src/umath/ufunc_type_resolution.c | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index b448505df986..9e73dfd94a9c 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -4157,8 +4157,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, if (dtype == NULL) { goto fail; } - Py_INCREF(dtype->singleton); otype = dtype->singleton; + Py_INCREF(otype); Py_DECREF(dtype); } if (out_obj && !PyArray_OutputConverter(out_obj, &out)) { diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 2834235e409f..88aa9ed6c112 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -390,7 +390,6 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc, operands, type_tup, out_dtypes); } - Py_INCREF(descr); out_dtypes[0] = ensure_dtype_nbo(descr); if (out_dtypes[0] == NULL) { return -1; From 4061a2e911f3b1cb03aa73ce5a184a8e2e3e0192 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 25 Jun 2021 18:12:45 +0200 Subject: [PATCH 14/40] MAINT: Annotate missing attributes of `np.number` subclasses * `integer.numerator` & `denominator` * `integer.__round__` & `floating.__round__` * `floating.as_integer_ratio` * `float64.__getnewargs__` / `complex128.__getnewargs__` * `float64.is_integer`, `hex`, `fromhex`, `__trunc__` & `__getformat__` * `float64.__ceil__` & `float64.__floor__` (python >= 3.9 only) --- numpy/__init__.pyi | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 49c600d198fa..e427f7311a3a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3072,6 +3072,15 @@ else: ] class integer(number[_NBit1]): # type: ignore + @property + def numerator(self: _ScalarType) -> _ScalarType: ... + @property + def denominator(self) -> L[1]: ... + @overload + def __round__(self, ndigits: None = ...) -> int: ... + @overload + def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + # NOTE: `__index__` is technically defined in the bottom-most # sub-classes (`int64`, `uint32`, etc) def item( @@ -3145,6 +3154,10 @@ class timedelta64(generic): __value: Union[None, int, _CharLike_co, dt.timedelta, timedelta64] = ..., __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ..., ) -> None: ... + @property + def numerator(self: _ScalarType) -> _ScalarType: ... + @property + def denominator(self) -> L[1]: ... # NOTE: Only a limited number of units support conversion # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` @@ -3214,7 +3227,8 @@ uint0 = unsignedinteger[_NBitIntP] uint = unsignedinteger[_NBitInt] ulonglong = unsignedinteger[_NBitLongLong] -class inexact(number[_NBit1]): ... # type: ignore +class inexact(number[_NBit1]): # type: ignore + def __getnewargs__(self: inexact[_64Bit]) -> Tuple[float, ...]: ... _IntType = TypeVar("_IntType", bound=integer) _FloatType = TypeVar('_FloatType', bound=floating) @@ -3226,6 +3240,21 @@ class floating(inexact[_NBit1]): __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., ) -> float: ... def tolist(self) -> float: ... + def is_integer(self: float64) -> bool: ... + def hex(self: float64) -> str: ... + @classmethod + def fromhex(cls: Type[float64], __string: str) -> float64: ... + def as_integer_ratio(self) -> Tuple[int, int]: ... 
+ if sys.version_info >= (3, 9): + def __ceil__(self: float64) -> int: ... + def __floor__(self: float64) -> int: ... + def __trunc__(self: float64) -> int: ... + def __getnewargs__(self: float64) -> Tuple[float]: ... + def __getformat__(self: float64, __typestr: L["double", "float"]) -> str: ... + @overload + def __round__(self, ndigits: None = ...) -> int: ... + @overload + def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... __add__: _FloatOp[_NBit1] __radd__: _FloatOp[_NBit1] __sub__: _FloatOp[_NBit1] @@ -3270,6 +3299,9 @@ class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): @property def imag(self) -> floating[_NBit2]: ... # type: ignore[override] def __abs__(self) -> floating[_NBit1]: ... # type: ignore[override] + def __getnewargs__(self: complex128) -> Tuple[float, float]: ... + # NOTE: Deprecated + # def __round__(self, ndigits=...): ... __add__: _ComplexOp[_NBit1] __radd__: _ComplexOp[_NBit1] __sub__: _ComplexOp[_NBit1] From fc4bc18d7510ae68c74e361da7118c743f249d9d Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 25 Jun 2021 18:22:26 +0200 Subject: [PATCH 15/40] TST: Update the typing tests for `np.number` subclasses --- numpy/typing/tests/data/fail/scalars.py | 12 +++++++++ numpy/typing/tests/data/reveal/scalars.py | 30 +++++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/numpy/typing/tests/data/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.py index 0aeff398fc87..099418e67a81 100644 --- a/numpy/typing/tests/data/fail/scalars.py +++ b/numpy/typing/tests/data/fail/scalars.py @@ -1,7 +1,9 @@ +import sys import numpy as np f2: np.float16 f8: np.float64 +c8: np.complex64 # Construction @@ -80,3 +82,13 @@ def func(a: np.float32) -> None: ... func(f2) # E: incompatible type func(f8) # E: incompatible type + +round(c8) # E: No overload variant + +c8.__getnewargs__() # E: Invalid self argument +f2.__getnewargs__() # E: Invalid self argument +f2.is_integer() # E: Invalid self argument +f2.hex() # E: Invalid self argument +np.float16.fromhex("0x0.0p+0") # E: Invalid self argument +f2.__trunc__() # E: Invalid self argument +f2.__getformat__("float") # E: Invalid self argument diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py index d98388422e07..8d1181f84607 100644 --- a/numpy/typing/tests/data/reveal/scalars.py +++ b/numpy/typing/tests/data/reveal/scalars.py @@ -1,3 +1,4 @@ +import sys import numpy as np b: np.bool_ @@ -6,6 +7,7 @@ f8: np.float64 c8: np.complex64 c16: np.complex128 +m: np.timedelta64 U: np.str_ S: np.bytes_ @@ -114,3 +116,31 @@ reveal_type(c16.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]] reveal_type(U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] reveal_type(S.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(f8.as_integer_ratio()) # E: Tuple[builtins.int, builtins.int] +reveal_type(f8.is_integer()) # E: bool +reveal_type(f8.__trunc__()) # E: int +reveal_type(f8.__getformat__("float")) # E: str +reveal_type(f8.hex()) # E: str +reveal_type(np.float64.fromhex("0x0.0p+0")) # E: {float64} + +reveal_type(f8.__getnewargs__()) # E: Tuple[builtins.float] +reveal_type(c16.__getnewargs__()) # E: Tuple[builtins.float, builtins.float] + +reveal_type(i8.numerator) # E: {int64} +reveal_type(i8.denominator) # E: Literal[1] +reveal_type(u8.numerator) # E: {uint64} +reveal_type(u8.denominator) # E: Literal[1] +reveal_type(m.numerator) # E: numpy.timedelta64 +reveal_type(m.denominator) # E: Literal[1] + 
+reveal_type(round(i8)) # E: int +reveal_type(round(i8, 3)) # E: {int64} +reveal_type(round(u8)) # E: int +reveal_type(round(u8, 3)) # E: {uint64} +reveal_type(round(f8)) # E: int +reveal_type(round(f8, 3)) # E: {float64} + +if sys.version_info >= (3, 9): + reveal_type(f8.__ceil__()) # E: int + reveal_type(f8.__floor__()) # E: int From 1765a1857e3e1fb932aab8d587a7ab831cbff728 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 23 Jun 2021 20:14:17 -0500 Subject: [PATCH 16/40] BUG: Fix cast safety and comparisons for zero sized voids These are more complicated and weird things could happen... right now, the only "weird" thing is that some sized to unsized voids casts may be considered as safe when they should not be. I think this is fine... In general, we need to fix all casts to a strict interpretation of V0, S0, and U0 and then then allow "V", "S", and "U" explicitly on the python entry-points Right now, this is as minimal as I could make it work, it isn't as minimal as I would _like_ after a release, but here we go... --- numpy/core/src/multiarray/arrayobject.c | 21 ++++++++++++++------ numpy/core/src/multiarray/convert_datatype.c | 6 +++++- numpy/core/src/multiarray/convert_datatype.h | 4 ++++ numpy/core/tests/test_casting_unittests.py | 3 +++ numpy/core/tests/test_numeric.py | 16 +++++++++++++++ 5 files changed, 43 insertions(+), 7 deletions(-) diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index e7fbb88cd282..0f772c689954 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -41,6 +41,7 @@ maintainer email: oliphant.travis@ieee.org #include "arraytypes.h" #include "scalartypes.h" #include "arrayobject.h" +#include "convert_datatype.h" #include "conversion_utils.h" #include "ctors.h" #include "dtypemeta.h" @@ -1390,9 +1391,13 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) return Py_NotImplemented; } - _res = PyArray_CanCastTypeTo(PyArray_DESCR(self), - PyArray_DESCR(array_other), - NPY_EQUIV_CASTING); + _res = PyArray_CheckCastSafety( + NPY_EQUIV_CASTING, + PyArray_DESCR(self), PyArray_DESCR(array_other), NULL); + if (_res < 0) { + PyErr_Clear(); + _res = 0; + } if (_res == 0) { /* 2015-05-07, 1.10 */ Py_DECREF(array_other); @@ -1441,9 +1446,13 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) return Py_NotImplemented; } - _res = PyArray_CanCastTypeTo(PyArray_DESCR(self), - PyArray_DESCR(array_other), - NPY_EQUIV_CASTING); + _res = PyArray_CheckCastSafety( + NPY_EQUIV_CASTING, + PyArray_DESCR(self), PyArray_DESCR(array_other), NULL); + if (_res < 0) { + PyErr_Clear(); + _res = 0; + } if (_res == 0) { /* 2015-05-07, 1.10 */ Py_DECREF(array_other); diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index d197a4bea31e..19127291a848 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -457,7 +457,7 @@ PyArray_GetCastSafety( * is ignored). * @return 0 for an invalid cast, 1 for a valid and -1 for an error. 
*/ -static int +NPY_NO_EXPORT int PyArray_CheckCastSafety(NPY_CASTING casting, PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype) { @@ -2841,6 +2841,10 @@ cast_to_void_dtype_class( loop_descrs[1]->elsize = given_descrs[0]->elsize; Py_INCREF(given_descrs[0]); loop_descrs[0] = given_descrs[0]; + if (loop_descrs[0]->type_num == NPY_VOID && + loop_descrs[0]->subarray == NULL && loop_descrs[1]->names == NULL) { + return NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + } return NPY_SAFE_CASTING | _NPY_CAST_IS_VIEW; } diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h index ba16d4d1bd5a..22b3859d2ab3 100644 --- a/numpy/core/src/multiarray/convert_datatype.h +++ b/numpy/core/src/multiarray/convert_datatype.h @@ -71,6 +71,10 @@ NPY_NO_EXPORT NPY_CASTING PyArray_GetCastSafety( PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype); +NPY_NO_EXPORT int +PyArray_CheckCastSafety(NPY_CASTING casting, + PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype); + NPY_NO_EXPORT NPY_CASTING legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *self, diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py index 2cec1acd3490..c15de3e888fa 100644 --- a/numpy/core/tests/test_casting_unittests.py +++ b/numpy/core/tests/test_casting_unittests.py @@ -147,6 +147,9 @@ def test_to_void(self): assert not np.can_cast("U1", "V1") # Structured to unstructured is just like any other: assert np.can_cast("d,i", "V", casting="same_kind") + # Unstructured void to unstructured is actually no cast at all: + assert np.can_cast("V3", "V", casting="no") + assert np.can_cast("V0", "V", casting="no") class TestCasting: diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index f5113150e8f7..fe310058a72a 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -1724,6 +1724,22 @@ def test_array_equiv(self): assert_(not res) assert_(type(res) is bool) + @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"]) + def test_compare_unstructured_voids(self, dtype): + zeros = np.zeros(3, dtype=dtype) + + assert_array_equal(zeros, zeros) + assert not (zeros != zeros).any() + + if dtype == "V0": + # Can't test != of actually different data + return + + nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype) + + assert not (zeros == nonzeros).any() + assert (zeros != nonzeros).all() + def assert_array_strict_equal(x, y): assert_array_equal(x, y) From 5b736ccfd3cc6e9526c5a200e3c407e3afdde603 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Thu, 24 Jun 2021 09:37:36 +0100 Subject: [PATCH 17/40] BUG: Correct Cython declaration Correct return type from double to void closes #19312 --- numpy/random/_common.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_common.pxd b/numpy/random/_common.pxd index 4f404b7a11e3..9f2e8c3ca117 100644 --- a/numpy/random/_common.pxd +++ b/numpy/random/_common.pxd @@ -39,7 +39,7 @@ cdef extern from "include/aligned_malloc.h": cdef void *PyArray_calloc_aligned(size_t n, size_t s) cdef void PyArray_free_aligned(void *p) -ctypedef double (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil +ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil ctypedef double (*random_double_0)(void *state) nogil ctypedef double (*random_double_1)(void *state, double a) nogil ctypedef double (*random_double_2)(void *state, double a, double b) nogil From 
be8898ce7af649f7f3b2fa96f8fef0e0b0e0280a Mon Sep 17 00:00:00 2001 From: Gregory Lee Date: Thu, 24 Jun 2021 08:37:01 -0400 Subject: [PATCH 18/40] BUG: protect against access an attribute of a NULL pointer Have PyArray_GetCastSafety return -1 if from is NULL --- numpy/core/src/multiarray/convert_datatype.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index d197a4bea31e..716e5dd3d111 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -417,6 +417,9 @@ PyArray_GetCastSafety( if (to != NULL) { to_dtype = NPY_DTYPE(to); } + if (from == NULL) { + return -1; + } PyObject *meth = PyArray_GetCastingImpl(NPY_DTYPE(from), to_dtype); if (meth == NULL) { return -1; @@ -3293,8 +3296,10 @@ void_to_void_resolve_descriptors( casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW; } } - NPY_CASTING field_casting = PyArray_GetCastSafety( - given_descrs[0]->subarray->base, given_descrs[1]->subarray->base, NULL); + + PyArray_Descr *from_base = (from_sub == NULL) ? NULL : from_sub->base; + PyArray_Descr *to_base = (to_sub == NULL) ? NULL : to_sub->base; + NPY_CASTING field_casting = PyArray_GetCastSafety(from_base, to_base, NULL); if (field_casting < 0) { return -1; } From 6c4e3a9889d3703b57410ea6721deff667d1c386 Mon Sep 17 00:00:00 2001 From: Gregory Lee Date: Thu, 24 Jun 2021 11:16:22 -0400 Subject: [PATCH 19/40] pass descriptor rather than null --- numpy/core/src/multiarray/convert_datatype.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 716e5dd3d111..1bba276d2d16 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -417,9 +417,6 @@ PyArray_GetCastSafety( if (to != NULL) { to_dtype = NPY_DTYPE(to); } - if (from == NULL) { - return -1; - } PyObject *meth = PyArray_GetCastingImpl(NPY_DTYPE(from), to_dtype); if (meth == NULL) { return -1; @@ -3297,8 +3294,8 @@ void_to_void_resolve_descriptors( } } - PyArray_Descr *from_base = (from_sub == NULL) ? NULL : from_sub->base; - PyArray_Descr *to_base = (to_sub == NULL) ? NULL : to_sub->base; + PyArray_Descr *from_base = (from_sub == NULL) ? given_descrs[0] : from_sub->base; + PyArray_Descr *to_base = (to_sub == NULL) ? 
given_descrs[1] : to_sub->base; NPY_CASTING field_casting = PyArray_GetCastSafety(from_base, to_base, NULL); if (field_casting < 0) { return -1; From 4f56383499da051acc204a6133c9480d5d7d7a26 Mon Sep 17 00:00:00 2001 From: Gregory Lee Date: Thu, 24 Jun 2021 11:44:37 -0400 Subject: [PATCH 20/40] TST: test can_cast when only one argument has a subarray --- numpy/core/tests/test_casting_unittests.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py index 2cec1acd3490..34f316d5ddc5 100644 --- a/numpy/core/tests/test_casting_unittests.py +++ b/numpy/core/tests/test_casting_unittests.py @@ -646,3 +646,9 @@ def test_object_to_parametric_internal_error(self): with pytest.raises(TypeError, match="casting from object to the parametric DType"): cast._resolve_descriptors((np.dtype("O"), None)) + + def test_void_to_structured_with_subarray(self): + # test case corresponding to gh-19325 + dtype = np.dtype([("foo", " Date: Thu, 24 Jun 2021 13:41:03 -0400 Subject: [PATCH 21/40] TST: test both argument orders --- numpy/core/tests/test_casting_unittests.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py index 34f316d5ddc5..d1924c1fda17 100644 --- a/numpy/core/tests/test_casting_unittests.py +++ b/numpy/core/tests/test_casting_unittests.py @@ -647,8 +647,10 @@ def test_object_to_parametric_internal_error(self): match="casting from object to the parametric DType"): cast._resolve_descriptors((np.dtype("O"), None)) - def test_void_to_structured_with_subarray(self): + @pytest.mark.parametrize("casting", ["no", "unsafe"]) + def test_void_and_structured_with_subarray(self, casting): # test case corresponding to gh-19325 dtype = np.dtype([("foo", " Date: Sun, 27 Jun 2021 19:50:25 +0200 Subject: [PATCH 22/40] BUG, SIMD: Fix detecting AVX512 features on Darwin On Darwin, machines with AVX512 support, by default, threads are created with AVX512 masked off in XCR0 and an AVX-sized savearea is used. However, AVX512 capabilities are advertised in the commpage and via sysctl. For more information, check: - https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/osfmk/i386/fpu.c#L175-L201 - https://github.com/golang/go/issues/43089 - https://github.com/numpy/numpy/issues/19319 --- numpy/core/src/common/npy_cpu_features.c.src | 24 +++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src index 4f3a95c717a1..1e0f4a57179d 100644 --- a/numpy/core/src/common/npy_cpu_features.c.src +++ b/numpy/core/src/common/npy_cpu_features.c.src @@ -394,8 +394,30 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_FMA] = npy__cpu_have[NPY_CPU_FEATURE_FMA3]; // check AVX512 OS support - if ((xcr & 0xe6) != 0xe6) + int avx512_os = (xcr & 0xe6) == 0xe6; +#if defined(__APPLE__) && defined(__x86_64__) + /** + * On darwin, machines with AVX512 support, by default, threads are created with + * AVX512 masked off in XCR0 and an AVX-sized savearea is used. + * However, AVX512 capabilities are advertised in the commpage and via sysctl. 
+ * for more information, check: + * - https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/osfmk/i386/fpu.c#L175-L201 + * - https://github.com/golang/go/issues/43089 + * - https://github.com/numpy/numpy/issues/19319 + */ + if (!avx512_os) { + npy_uintp commpage64_addr = 0x00007fffffe00000ULL; + npy_uint16 commpage64_ver = *((npy_uint16*)(commpage64_addr + 0x01E)); + // cpu_capabilities64 undefined in versions < 13 + if (commpage64_ver > 12) { + npy_uint64 commpage64_cap = *((npy_uint64*)(commpage64_addr + 0x010)); + avx512_os = (commpage64_cap & 0x0000004000000000ULL) != 0; + } + } +#endif + if (!avx512_os) { return; + } npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0; if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && npy__cpu_have[NPY_CPU_FEATURE_AVX512CD]) { From 2f4caec2785dd8b6e47e1245953fea8ad274e980 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 27 Jun 2021 17:52:14 +0200 Subject: [PATCH 23/40] MAINT: remove `print()`'s in distutils template handling These print statements were not warnings, they're simply printing things out that work as designed. This is never a good design - it should not emit anything but warnings. I considered adding a `quiet` keyword, but that doesn't seem warranted here. [ci skip] --- numpy/distutils/conv_template.py | 1 - numpy/distutils/from_template.py | 1 - 2 files changed, 2 deletions(-) diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py index 90e07f8b1036..c8933d1d4286 100644 --- a/numpy/distutils/conv_template.py +++ b/numpy/distutils/conv_template.py @@ -271,7 +271,6 @@ def resolve_includes(source): if not os.path.isabs(fn): fn = os.path.join(d, fn) if os.path.isfile(fn): - print('Including file', fn) lines.extend(resolve_includes(fn)) else: lines.append(line) diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py index 7add44c7679d..90d1f4c384c7 100644 --- a/numpy/distutils/from_template.py +++ b/numpy/distutils/from_template.py @@ -219,7 +219,6 @@ def resolve_includes(source): if not os.path.isabs(fn): fn = os.path.join(d, fn) if os.path.isfile(fn): - print('Including file', fn) lines.extend(resolve_includes(fn)) else: lines.append(line) From 7bb64d9ca592ef11aa3ad44421c38bba91822b0d Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Fri, 28 May 2021 23:25:44 +0530 Subject: [PATCH 24/40] ENH: SIMD architectures to show_config --- numpy/distutils/misc_util.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index e797745e12db..60696438f346 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -2357,6 +2357,10 @@ def show(): * ``src_dirs``: directories containing library source files * ``define_macros``: preprocessor macros used by ``distutils.setup`` + * ``baseline``: minimum CPU features required + * ``found``: dispatched features supported in the system + * ``not found``: dispatched features that are not supported + in the system Examples -------- @@ -2368,6 +2372,9 @@ def show(): libraries = ['openblas', 'openblas'] library_dirs = ['/usr/local/lib'] """ + from numpy.core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) for name,info_dict in globals().items(): if name[0] == "_" or type(info_dict) is not type({}): continue print(name + ":") @@ -2378,6 +2385,19 @@ def show(): if k == "sources" and len(v) > 200: v = v[:60] + " 
...\n... " + v[-60:] print(" %s = %s" % (k,v)) + + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + + print("Supported SIMD extensions in this NumPy install:") + print(" baseline = %s" % (','.join(__cpu_baseline__))) + print(" found = %s" % (','.join(features_found))) + print(" not found = %s" % (','.join(features_not_found))) + ''')) return target From 844571f5b7d762f017c960535b3c9624cca944ff Mon Sep 17 00:00:00 2001 From: "Thomas J. Fan" Date: Tue, 29 Jun 2021 14:19:02 -0400 Subject: [PATCH 25/40] BUG: Do not raise deprecation warning for all nans in unique (#19301) This PR adjusts np.unique for the edge cases where all values are nan. Fixes gh-19300 --- numpy/lib/arraysetops.py | 4 +++- numpy/lib/tests/test_arraysetops.py | 11 +++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index 7600e17be88b..bd56b6975669 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -339,7 +339,9 @@ def _unique1d(ar, return_index=False, return_inverse=False, aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left') else: aux_firstnan = np.searchsorted(aux, aux[-1], side='left') - mask[1:aux_firstnan] = (aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) + if aux_firstnan > 0: + mask[1:aux_firstnan] = ( + aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) mask[aux_firstnan] = True mask[aux_firstnan + 1:] = False else: diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index d62da9efba0b..13385cd2409d 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -610,6 +610,17 @@ def check_all(a, b, i1, i2, c, dt): assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + # test for gh-19300 + all_nans = [np.nan] * 4 + ua = [np.nan] + ua_idx = [0] + ua_inv = [0, 0, 0, 0] + ua_cnt = [4] + assert_equal(np.unique(all_nans), ua) + assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) + def test_unique_axis_errors(self): assert_raises(TypeError, self._run_axis_tests, object) assert_raises(TypeError, self._run_axis_tests, From 9b38c1ca1ce2255666ea272ccf1a3301b67119a1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 29 Jun 2021 13:06:20 -0500 Subject: [PATCH 26/40] BUG: Fix NULL special case in object-to-any cast code Apparently `np.empty_like` is an easy way to create NULL filled object arrays. In general, it is not typical, which explains how this case could have managed to not be found by our tests or downstream tests. Closes gh-19373 --- numpy/core/src/multiarray/dtype_transfer.c | 4 ++-- numpy/core/tests/test_casting_unittests.py | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index aa8cc84ffa6f..50db627eafd3 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -322,11 +322,11 @@ strided_to_strided_object_to_any( while (N > 0) { memcpy(&src_ref, src, sizeof(src_ref)); - if (PyArray_Pack(data->descr, dst, src_ref) < 0) { + if (PyArray_Pack(data->descr, dst, src_ref ? 
src_ref : Py_None) < 0) { return -1; } - if (data->move_references) { + if (data->move_references && src_ref != NULL) { Py_DECREF(src_ref); memset(src, 0, sizeof(src_ref)); } diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py index 1c465fea1d1c..8398b3cad1f0 100644 --- a/numpy/core/tests/test_casting_unittests.py +++ b/numpy/core/tests/test_casting_unittests.py @@ -657,3 +657,20 @@ def test_void_and_structured_with_subarray(self, casting): expected = casting == "unsafe" assert np.can_cast("V4", dtype, casting=casting) == expected assert np.can_cast(dtype, "V4", casting=casting) == expected + + @pytest.mark.parametrize("dtype", np.typecodes["All"]) + def test_object_casts_NULL_None_equivalence(self, dtype): + # None to casts may succeed or fail, but a NULL'ed array must + # behave the same as one filled with None's. + arr_normal = np.array([None] * 5) + arr_NULLs = np.empty_like([None] * 5) + # If the check fails (maybe it should) the test would lose its purpose: + assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes + + try: + expected = arr_normal.astype(dtype) + except TypeError: + with pytest.raises(TypeError): + arr_NULLs.astype(dtype) + else: + assert_array_equal(expected, arr_NULLs.astype(dtype)) From 7842ce785f33f114d57d2caba9fb91aa28113c24 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 7 Jul 2021 13:23:15 -0600 Subject: [PATCH 27/40] MAINT: Use arm64-graviton2 for testing on travis This avoids the errors we have seen with the lxd containers in recent weeks. --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1486bbb885fb..70f6fbde11e6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -45,7 +45,8 @@ jobs: - python: 3.7 os: linux - arch: arm64 + arch: arm64-graviton2 + virt: vm env: # use OpenBLAS build, not system ATLAS - DOWNLOAD_OPENBLAS=1 From 573dc7bff086a62bc1dfabc551064c686e5c9d64 Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 16 Jul 2021 09:54:41 +0300 Subject: [PATCH 28/40] BUILD: update OpenBLAS to v0.3.17 --- doc/release/upcoming_changes/19462.change.rst | 3 +++ tools/openblas_support.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/19462.change.rst diff --git a/doc/release/upcoming_changes/19462.change.rst b/doc/release/upcoming_changes/19462.change.rst new file mode 100644 index 000000000000..8fbadb394ded --- /dev/null +++ b/doc/release/upcoming_changes/19462.change.rst @@ -0,0 +1,3 @@ +OpenBLAS v0.3.17 +---------------- +Update the OpenBLAS used in testing and in wheels to v0.3.17 diff --git a/tools/openblas_support.py b/tools/openblas_support.py index d11ad173befa..8509326f0d74 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -13,8 +13,8 @@ from urllib.request import urlopen, Request from urllib.error import HTTPError -OPENBLAS_V = '0.3.13' -OPENBLAS_LONG = 'v0.3.13-62-gaf2b0d02' +OPENBLAS_V = '0.3.17' +OPENBLAS_LONG = 'v0.3.17' BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs' BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download' SUPPORTED_PLATFORMS = [ From 8d7ad3748b5f6db0fd6aea35693909d5c36b0f5c Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 12 Jul 2021 08:58:17 -0500 Subject: [PATCH 29/40] MAINT: Avoid unicode characters in division SIMD code comments This avoids unicode characters in the division SIMD code to circumvent problems reading the utf-8 encoded file in windows. 
The proper fix is probably to just assume utf-8 and feel free to use unicode characters in `c.src` files. But here, it doesn't matter too much to just avoid utf-8 quickly. See gh-19454 --- numpy/core/src/umath/loops_arithmetic.dispatch.c.src | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index 19e05f2b57b0..1ddf7c3b1a6f 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -22,17 +22,17 @@ ** Defining the SIMD kernels * * Floor division of signed is based on T. Granlund and P. L. Montgomery - * “Division by invariant integers using multiplication(see [Figure 6.1] + * "Division by invariant integers using multiplication(see [Figure 6.1] * http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.2556)" * For details on TRUNC division see simd/intdiv.h for more clarification *********************************************************************************** - ** Figure 6.1: Signed division by run–time invariant divisor, rounded towards -INF + ** Figure 6.1: Signed division by run-time invariant divisor, rounded towards -INF *********************************************************************************** * For q = FLOOR(a/d), all sword: - * sword −dsign = SRL(d, N − 1); - * uword −nsign = (n < −dsign); - * uword −qsign = EOR(−nsign, −dsign); - * q = TRUNC((n − (−dsign ) + (−nsign))/d) − (−qsign); + * sword -dsign = SRL(d, N - 1); + * uword -nsign = (n < -dsign); + * uword -qsign = EOR(-nsign, -dsign); + * q = TRUNC((n - (-dsign ) + (-nsign))/d) - (-qsign); ********************************************************************************/ #if NPY_SIMD From 16ba6ba513c3175ff7fdf7c71f0ea47763c1df2d Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 2 Jul 2021 04:58:37 +0200 Subject: [PATCH 30/40] BUG, SIMD: Fix infinite loop during count non-zero on GCC-11 The issue appears when the compiler miss inlining a function that returns or accepts a SIMD vector. --- numpy/core/src/multiarray/item_selection.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index fb354ce5473a..2b8ea9e79ace 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2131,7 +2131,7 @@ count_nonzero_bytes_384(const npy_uint64 * w) #if NPY_SIMD /* Count the zero bytes between `*d` and `end`, updating `*d` to point to where to keep counting from. 
*/ -static NPY_INLINE NPY_GCC_OPT_3 npyv_u8 +NPY_FINLINE NPY_GCC_OPT_3 npyv_u8 count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_count) { const npyv_u8 vone = npyv_setall_u8(1); @@ -2150,7 +2150,7 @@ count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_cou return vsum8; } -static NPY_INLINE NPY_GCC_OPT_3 npyv_u16x2 +NPY_FINLINE NPY_GCC_OPT_3 npyv_u16x2 count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end, npy_uint16 max_count) { npyv_u16x2 vsum16; From 8a659e7795178a5cda84d9d708ee62b267c32741 Mon Sep 17 00:00:00 2001 From: Hugo Defois Date: Fri, 2 Jul 2021 15:52:46 +0200 Subject: [PATCH 31/40] BUG: fix a numpy.npiter leak in npyiter_multi_index_set --- numpy/core/src/multiarray/nditer_pywrap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index 7698ae43d07e..73df962e4520 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -1595,8 +1595,8 @@ npyiter_multi_index_set(NewNpyArrayIterObject *self, PyObject *value) for (idim = 0; idim < ndim; ++idim) { PyObject *v = PySequence_GetItem(value, idim); multi_index[idim] = PyLong_AsLong(v); + Py_DECREF(v); if (error_converting(multi_index[idim])) { - Py_XDECREF(v); return -1; } } From a275b5130d2ac1d4edf2be482549e77c131eb724 Mon Sep 17 00:00:00 2001 From: Hugo Defois Date: Sat, 3 Jul 2021 17:42:17 +0200 Subject: [PATCH 32/40] TST: add multi_index set tests on non-error cases --- numpy/core/tests/test_nditer.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index b44343c5755c..b77a276c8248 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -185,6 +185,29 @@ def test_iter_c_or_f_order(): assert_equal([x for x in i], aview.swapaxes(0, 1).ravel(order='A')) +def test_nditer_multi_index_set(): + # Test the multi_index set + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + + # Removes the iteration on two first elements of a[0] + it.multi_index = (0, 2,) + + assert_equal([i for i in it], [2, 3, 4, 5]) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_nditer_multi_index_set_refcount(): + # Test if the reference count on inde variable is decreased + + index = 0 + i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index']) + + start_count = sys.getrefcount(index) + i.multi_index = (index,) + end_count = sys.getrefcount(index) + + assert_equal(start_count, end_count) + def test_iter_best_order_multi_index_1d(): # The multi-indices should be correct with any reordering From 96dcd082ef7f7c2c6123f8c4e7449f3b1abc32c4 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 6 Jul 2021 13:05:06 +0300 Subject: [PATCH 33/40] ENH: fix typo --- numpy/core/tests/test_nditer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index b77a276c8248..adcf921f6016 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -197,7 +197,7 @@ def test_nditer_multi_index_set(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_nditer_multi_index_set_refcount(): - # Test if the reference count on inde variable is decreased + # Test if the reference count on index variable is decreased index = 0 i = np.nditer(np.array([111, 222, 333, 
444]), flags=['multi_index']) From c87de48835823bd7a5e725cb3fe41f395331f0eb Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 12 Jul 2021 11:07:31 +0200 Subject: [PATCH 34/40] TST: Fix a `GenericAlias` test failure for python 3.9.0 --- numpy/typing/tests/test_generic_alias.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py index 8cbdd2e6e073..5f0ac915352c 100644 --- a/numpy/typing/tests/test_generic_alias.py +++ b/numpy/typing/tests/test_generic_alias.py @@ -60,7 +60,6 @@ class TestGenericAlias: ("__call__", lambda n: n(shape=(1,), dtype=np.int64, buffer=BUFFER)), ("subclassing", lambda n: _get_subclass_mro(n)), ("pickle", lambda n: n == pickle.loads(pickle.dumps(n))), - ("__weakref__", lambda n: n == weakref.ref(n)()), ]) def test_pass(self, name: str, func: FuncType) -> None: """Compare `types.GenericAlias` with its numpy-based backport. @@ -75,6 +74,14 @@ def test_pass(self, name: str, func: FuncType) -> None: value_ref = func(NDArray_ref) assert value == value_ref + def test_weakref(self) -> None: + """Test ``__weakref__``.""" + value = weakref.ref(NDArray)() + + if sys.version_info >= (3, 9, 1): # xref bpo-42332 + value_ref = weakref.ref(NDArray_ref)() + assert value == value_ref + @pytest.mark.parametrize("name", GETATTR_NAMES) def test_getattr(self, name: str) -> None: """Test that `getattr` wraps around the underlying type, From 58fa2d6008416708d5f85fb33b7d3b3af615a6b0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 10 Jul 2021 07:52:47 -0600 Subject: [PATCH 35/40] MAINT: Start testing with Python 3.10.0b3. Python 3.10.0b3 is currently available for github/actions, b4 should be out in a couple of days. Time to start tracking the next Python release. --- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index c7d463348f6d..0ff1ceef2489 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -56,7 +56,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.8, 3.9] + python-version: [3.8, 3.9, 3.10.0-beta.3] steps: - uses: actions/checkout@v2 with: From 8cd3546bffecdef1c9bd0812d7aede043af421d5 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 13 Jul 2021 14:23:26 +0200 Subject: [PATCH 36/40] MAINT: Add `ctypes` overloads to `np.dtype` --- numpy/__init__.pyi | 63 ++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e427f7311a3a..c4f58ca6ddf3 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,6 +1,9 @@ import builtins import os import sys +import mmap +import ctypes as ct +import array as _array import datetime as dt from abc import abstractmethod from types import TracebackType @@ -938,51 +941,51 @@ class dtype(Generic[_DTypeScalar_co]): @overload def __new__(cls, dtype: Type[bytes], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... - # `unsignedinteger` string-based representations + # `unsignedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _UInt8Codes, align: bool = ..., copy: bool = ...) -> dtype[uint8]: ... + def __new__(cls, dtype: _UInt8Codes | Type[ct.c_uint8], align: bool = ..., copy: bool = ...) -> dtype[uint8]: ... @overload - def __new__(cls, dtype: _UInt16Codes, align: bool = ..., copy: bool = ...) -> dtype[uint16]: ... 
+ def __new__(cls, dtype: _UInt16Codes | Type[ct.c_uint16], align: bool = ..., copy: bool = ...) -> dtype[uint16]: ... @overload - def __new__(cls, dtype: _UInt32Codes, align: bool = ..., copy: bool = ...) -> dtype[uint32]: ... + def __new__(cls, dtype: _UInt32Codes | Type[ct.c_uint32], align: bool = ..., copy: bool = ...) -> dtype[uint32]: ... @overload - def __new__(cls, dtype: _UInt64Codes, align: bool = ..., copy: bool = ...) -> dtype[uint64]: ... + def __new__(cls, dtype: _UInt64Codes | Type[ct.c_uint64], align: bool = ..., copy: bool = ...) -> dtype[uint64]: ... @overload - def __new__(cls, dtype: _UByteCodes, align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ... + def __new__(cls, dtype: _UByteCodes | Type[ct.c_ubyte], align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ... @overload - def __new__(cls, dtype: _UShortCodes, align: bool = ..., copy: bool = ...) -> dtype[ushort]: ... + def __new__(cls, dtype: _UShortCodes | Type[ct.c_ushort], align: bool = ..., copy: bool = ...) -> dtype[ushort]: ... @overload - def __new__(cls, dtype: _UIntCCodes, align: bool = ..., copy: bool = ...) -> dtype[uintc]: ... + def __new__(cls, dtype: _UIntCCodes | Type[ct.c_uint], align: bool = ..., copy: bool = ...) -> dtype[uintc]: ... @overload - def __new__(cls, dtype: _UIntPCodes, align: bool = ..., copy: bool = ...) -> dtype[uintp]: ... + def __new__(cls, dtype: _UIntPCodes | Type[ct.c_void_p] | Type[ct.c_size_t], align: bool = ..., copy: bool = ...) -> dtype[uintp]: ... @overload - def __new__(cls, dtype: _UIntCodes, align: bool = ..., copy: bool = ...) -> dtype[uint]: ... + def __new__(cls, dtype: _UIntCodes | Type[ct.c_ulong], align: bool = ..., copy: bool = ...) -> dtype[uint]: ... @overload - def __new__(cls, dtype: _ULongLongCodes, align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ... + def __new__(cls, dtype: _ULongLongCodes | Type[ct.c_ulonglong], align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ... - # `signedinteger` string-based representations + # `signedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _Int8Codes, align: bool = ..., copy: bool = ...) -> dtype[int8]: ... + def __new__(cls, dtype: _Int8Codes | Type[ct.c_int8], align: bool = ..., copy: bool = ...) -> dtype[int8]: ... @overload - def __new__(cls, dtype: _Int16Codes, align: bool = ..., copy: bool = ...) -> dtype[int16]: ... + def __new__(cls, dtype: _Int16Codes | Type[ct.c_int16], align: bool = ..., copy: bool = ...) -> dtype[int16]: ... @overload - def __new__(cls, dtype: _Int32Codes, align: bool = ..., copy: bool = ...) -> dtype[int32]: ... + def __new__(cls, dtype: _Int32Codes | Type[ct.c_int32], align: bool = ..., copy: bool = ...) -> dtype[int32]: ... @overload - def __new__(cls, dtype: _Int64Codes, align: bool = ..., copy: bool = ...) -> dtype[int64]: ... + def __new__(cls, dtype: _Int64Codes | Type[ct.c_int64], align: bool = ..., copy: bool = ...) -> dtype[int64]: ... @overload - def __new__(cls, dtype: _ByteCodes, align: bool = ..., copy: bool = ...) -> dtype[byte]: ... + def __new__(cls, dtype: _ByteCodes | Type[ct.c_byte], align: bool = ..., copy: bool = ...) -> dtype[byte]: ... @overload - def __new__(cls, dtype: _ShortCodes, align: bool = ..., copy: bool = ...) -> dtype[short]: ... + def __new__(cls, dtype: _ShortCodes | Type[ct.c_short], align: bool = ..., copy: bool = ...) -> dtype[short]: ... @overload - def __new__(cls, dtype: _IntCCodes, align: bool = ..., copy: bool = ...) -> dtype[intc]: ... 
+ def __new__(cls, dtype: _IntCCodes | Type[ct.c_int], align: bool = ..., copy: bool = ...) -> dtype[intc]: ... @overload - def __new__(cls, dtype: _IntPCodes, align: bool = ..., copy: bool = ...) -> dtype[intp]: ... + def __new__(cls, dtype: _IntPCodes | Type[ct.c_ssize_t], align: bool = ..., copy: bool = ...) -> dtype[intp]: ... @overload - def __new__(cls, dtype: _IntCodes, align: bool = ..., copy: bool = ...) -> dtype[int_]: ... + def __new__(cls, dtype: _IntCodes | Type[ct.c_long], align: bool = ..., copy: bool = ...) -> dtype[int_]: ... @overload - def __new__(cls, dtype: _LongLongCodes, align: bool = ..., copy: bool = ...) -> dtype[longlong]: ... + def __new__(cls, dtype: _LongLongCodes | Type[ct.c_longlong], align: bool = ..., copy: bool = ...) -> dtype[longlong]: ... - # `floating` string-based representations + # `floating` string-based representations and ctypes @overload def __new__(cls, dtype: _Float16Codes, align: bool = ..., copy: bool = ...) -> dtype[float16]: ... @overload @@ -992,11 +995,11 @@ class dtype(Generic[_DTypeScalar_co]): @overload def __new__(cls, dtype: _HalfCodes, align: bool = ..., copy: bool = ...) -> dtype[half]: ... @overload - def __new__(cls, dtype: _SingleCodes, align: bool = ..., copy: bool = ...) -> dtype[single]: ... + def __new__(cls, dtype: _SingleCodes | Type[ct.c_float], align: bool = ..., copy: bool = ...) -> dtype[single]: ... @overload - def __new__(cls, dtype: _DoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[double]: ... + def __new__(cls, dtype: _DoubleCodes | Type[ct.c_double], align: bool = ..., copy: bool = ...) -> dtype[double]: ... @overload - def __new__(cls, dtype: _LongDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ... + def __new__(cls, dtype: _LongDoubleCodes | Type[ct.c_longdouble], align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ... # `complexfloating` string-based representations @overload @@ -1010,9 +1013,9 @@ class dtype(Generic[_DTypeScalar_co]): @overload def __new__(cls, dtype: _CLongDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[clongdouble]: ... - # Miscellaneous string-based representations + # Miscellaneous string-based representations and ctypes @overload - def __new__(cls, dtype: _BoolCodes, align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... + def __new__(cls, dtype: _BoolCodes | Type[ct.c_bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... @overload def __new__(cls, dtype: _TD64Codes, align: bool = ..., copy: bool = ...) -> dtype[timedelta64]: ... @overload @@ -1020,11 +1023,11 @@ class dtype(Generic[_DTypeScalar_co]): @overload def __new__(cls, dtype: _StrCodes, align: bool = ..., copy: bool = ...) -> dtype[str_]: ... @overload - def __new__(cls, dtype: _BytesCodes, align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... + def __new__(cls, dtype: _BytesCodes | Type[ct.c_char], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... @overload def __new__(cls, dtype: _VoidCodes, align: bool = ..., copy: bool = ...) -> dtype[void]: ... @overload - def __new__(cls, dtype: _ObjectCodes, align: bool = ..., copy: bool = ...) -> dtype[object_]: ... + def __new__(cls, dtype: _ObjectCodes | Type[ct.py_object], align: bool = ..., copy: bool = ...) -> dtype[object_]: ... 
# dtype of a dtype is the same dtype @overload From 648110fda77dd7af7d8dc34fd818dce37d68ab9d Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 13 Jul 2021 14:24:43 +0200 Subject: [PATCH 37/40] MAINT: Add a missing `object` overload to `np.dtype` --- numpy/__init__.pyi | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index c4f58ca6ddf3..bd81766142e7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -923,7 +923,7 @@ class dtype(Generic[_DTypeScalar_co]): # other special cases. Order is sometimes important because of the # subtype relationships # - # bool < int < float < complex + # bool < int < float < complex < object # # so we have to make sure the overloads for the narrowest type is # first. @@ -1052,7 +1052,7 @@ class dtype(Generic[_DTypeScalar_co]): align: bool = ..., copy: bool = ..., ) -> dtype[Any]: ... - # Catchall overload + # Catchall overload for void-likes @overload def __new__( cls, @@ -1060,6 +1060,14 @@ class dtype(Generic[_DTypeScalar_co]): align: bool = ..., copy: bool = ..., ) -> dtype[void]: ... + # Catchall overload for object-likes + @overload + def __new__( + cls, + dtype: Type[object], + align: bool = ..., + copy: bool = ..., + ) -> dtype[object_]: ... @overload def __getitem__(self: dtype[void], key: List[str]) -> dtype[void]: ... From 374440e71b3fc222f0969721d3d80d0d7bf71cf8 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 13 Jul 2021 14:32:12 +0200 Subject: [PATCH 38/40] TST: Update the `dtype` typing tests --- numpy/typing/tests/data/fail/dtype.py | 2 -- numpy/typing/tests/data/reveal/dtype.py | 10 ++++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/fail/dtype.py b/numpy/typing/tests/data/fail/dtype.py index 7d419a1d1e5f..0f3810f3c014 100644 --- a/numpy/typing/tests/data/fail/dtype.py +++ b/numpy/typing/tests/data/fail/dtype.py @@ -18,5 +18,3 @@ class Test2: "field2": (int, 3), } ) - -np.dtype[np.float64](np.int64) # E: Argument 1 to "dtype" has incompatible type diff --git a/numpy/typing/tests/data/reveal/dtype.py b/numpy/typing/tests/data/reveal/dtype.py index 215d89ead66c..299fed30ab48 100644 --- a/numpy/typing/tests/data/reveal/dtype.py +++ b/numpy/typing/tests/data/reveal/dtype.py @@ -1,3 +1,4 @@ +import ctypes as ct import numpy as np dtype_obj: np.dtype[np.str_] @@ -22,6 +23,15 @@ reveal_type(np.dtype(bool)) # E: numpy.dtype[numpy.bool_] reveal_type(np.dtype(str)) # E: numpy.dtype[numpy.str_] reveal_type(np.dtype(bytes)) # E: numpy.dtype[numpy.bytes_] +reveal_type(np.dtype(object)) # E: numpy.dtype[numpy.object_] + +# ctypes +reveal_type(np.dtype(ct.c_double)) # E: numpy.dtype[{double}] +reveal_type(np.dtype(ct.c_longlong)) # E: numpy.dtype[{longlong}] +reveal_type(np.dtype(ct.c_uint32)) # E: numpy.dtype[{uint32}] +reveal_type(np.dtype(ct.c_bool)) # E: numpy.dtype[numpy.bool_] +reveal_type(np.dtype(ct.c_char)) # E: numpy.dtype[numpy.bytes_] +reveal_type(np.dtype(ct.py_object)) # E: numpy.dtype[numpy.object_] # Special case for None reveal_type(np.dtype(None)) # E: numpy.dtype[{double}] From cadd4babf1411f01175426d1a4797d3dbb63789b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 13 Jul 2021 15:39:10 +0200 Subject: [PATCH 39/40] DOC: Document that `size_t` is not guaranteed to map to `np.intp` under rare circumstances --- numpy/__init__.pyi | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index bd81766142e7..74c33b16243e 100644 --- a/numpy/__init__.pyi +++ 
b/numpy/__init__.pyi @@ -956,6 +956,9 @@ class dtype(Generic[_DTypeScalar_co]): def __new__(cls, dtype: _UShortCodes | Type[ct.c_ushort], align: bool = ..., copy: bool = ...) -> dtype[ushort]: ... @overload def __new__(cls, dtype: _UIntCCodes | Type[ct.c_uint], align: bool = ..., copy: bool = ...) -> dtype[uintc]: ... + + # NOTE: We're assuming here that `uint_ptr_t == size_t`, + # an assumption that does not hold in rare cases (same for `ssize_t`) @overload def __new__(cls, dtype: _UIntPCodes | Type[ct.c_void_p] | Type[ct.c_size_t], align: bool = ..., copy: bool = ...) -> dtype[uintp]: ... @overload From 7031a8263c5b768da27139b1cd0b4978354ff8c3 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 18 Jul 2021 06:20:41 -0600 Subject: [PATCH 40/40] REL: Prepare for NumPy 1.21.1 release. - Update the release 1.21.1-notes.rst - Add release notes to index - Create the 1.21.1-changelog.rst - Delete towncrier notes fragment --- doc/changelog/1.21.1-changelog.rst | 51 +++++++++++ doc/release/upcoming_changes/19462.change.rst | 3 - doc/source/release.rst | 1 + doc/source/release/1.21.1-notes.rst | 88 ++++++++++++------- 4 files changed, 108 insertions(+), 35 deletions(-) create mode 100644 doc/changelog/1.21.1-changelog.rst delete mode 100644 doc/release/upcoming_changes/19462.change.rst diff --git a/doc/changelog/1.21.1-changelog.rst b/doc/changelog/1.21.1-changelog.rst new file mode 100644 index 000000000000..f219c5012323 --- /dev/null +++ b/doc/changelog/1.21.1-changelog.rst @@ -0,0 +1,51 @@ + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bas van Beek +* Charles Harris +* Ganesh Kathiresan +* Gregory R. Lee +* Hugo Defois + +* Kevin Sheppard +* Matti Picus +* Ralf Gommers +* Sayed Adel +* Sebastian Berg +* Thomas J. Fan + +Pull requests merged +==================== + +A total of 26 pull requests were merged for this release. + +* `#19311 `__: REV,BUG: Replace ``NotImplemented`` with ``typing.Any`` +* `#19324 `__: MAINT: Fixed the return-dtype of ``ndarray.real`` and ``imag`` +* `#19330 `__: MAINT: Replace ``"dtype[Any]"`` with ``dtype`` in the definiton of... +* `#19342 `__: DOC: Fix some docstrings that crash pdf generation. +* `#19343 `__: MAINT: bump scipy-mathjax +* `#19347 `__: BUG: Fix arr.flat.index for large arrays and big-endian machines +* `#19348 `__: ENH: add ``numpy.f2py.get_include`` function +* `#19349 `__: BUG: Fix reference count leak in ufunc dtype handling +* `#19350 `__: MAINT: Annotate missing attributes of ``np.number`` subclasses +* `#19351 `__: BUG: Fix cast safety and comparisons for zero sized voids +* `#19352 `__: BUG: Correct Cython declaration in random +* `#19353 `__: BUG: protect against accessing base attribute of a NULL subarray +* `#19365 `__: BUG, SIMD: Fix detecting AVX512 features on Darwin +* `#19366 `__: MAINT: remove ``print()``'s in distutils template handling +* `#19390 `__: ENH: SIMD architectures to show_config +* `#19391 `__: BUG: Do not raise deprecation warning for all nans in unique... 
+* `#19392 `__: BUG: Fix NULL special case in object-to-any cast code +* `#19430 `__: MAINT: Use arm64-graviton2 for testing on travis +* `#19495 `__: BUILD: update OpenBLAS to v0.3.17 +* `#19496 `__: MAINT: Avoid unicode characters in division SIMD code comments +* `#19499 `__: BUG, SIMD: Fix infinite loop during count non-zero on GCC-11 +* `#19500 `__: BUG: fix a numpy.npiter leak in npyiter_multi_index_set +* `#19501 `__: TST: Fix a ``GenericAlias`` test failure for python 3.9.0 +* `#19502 `__: MAINT: Start testing with Python 3.10.0b3. +* `#19503 `__: MAINT: Add missing dtype overloads for object- and ctypes-based... +* `#19510 `__: REL: Prepare for NumPy 1.21.1 release. + diff --git a/doc/release/upcoming_changes/19462.change.rst b/doc/release/upcoming_changes/19462.change.rst deleted file mode 100644 index 8fbadb394ded..000000000000 --- a/doc/release/upcoming_changes/19462.change.rst +++ /dev/null @@ -1,3 +0,0 @@ -OpenBLAS v0.3.17 ----------------- -Update the OpenBLAS used in testing and in wheels to v0.3.17 diff --git a/doc/source/release.rst b/doc/source/release.rst index 6d208d395b90..4a9d2b1945fa 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release Notes .. toctree:: :maxdepth: 3 + 1.21.1 1.21.0 1.20.3 1.20.2 diff --git a/doc/source/release/1.21.1-notes.rst b/doc/source/release/1.21.1-notes.rst index 43940c950a54..0194327f8159 100644 --- a/doc/source/release/1.21.1-notes.rst +++ b/doc/source/release/1.21.1-notes.rst @@ -1,45 +1,69 @@ -:orphan: +.. currentmodule:: numpy ========================== NumPy 1.21.1 Release Notes ========================== +NumPy 1.21.1 is a maintenance release that fixes bugs discovered after the +1.21.0 release and updates OpenBLAS to v0.3.17 to deal with problems on arm64. +The Python versions supported for this release are 3.7-3.9. The 1.21.x series +is compatible with development Python 3.10. Python 3.10 will be officially +supported after it is released. -Highlights -========== + +.. warning:: + There are unresolved problems compiling NumPy 1.20.0 with gcc-11.1. + * Optimization level `-O3` results in many incorrect warnings when + running the tests. + * On some hardware NumPy will hang in an infinite loop. -New functions -============= - - -Deprecations +Contributors ============ - -Future Changes -============== - - -Expired deprecations +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bas van Beek +* Charles Harris +* Ganesh Kathiresan +* Gregory R. Lee +* Hugo Defois + +* Kevin Sheppard +* Matti Picus +* Ralf Gommers +* Sayed Adel +* Sebastian Berg +* Thomas J. Fan + +Pull requests merged ==================== +A total of 26 pull requests were merged for this release. + +* `#19311 `__: REV,BUG: Replace ``NotImplemented`` with ``typing.Any`` +* `#19324 `__: MAINT: Fixed the return-dtype of ``ndarray.real`` and ``imag`` +* `#19330 `__: MAINT: Replace ``"dtype[Any]"`` with ``dtype`` in the definiton of... +* `#19342 `__: DOC: Fix some docstrings that crash pdf generation.
+* `#19343 `__: MAINT: bump scipy-mathjax +* `#19347 `__: BUG: Fix arr.flat.index for large arrays and big-endian machines +* `#19348 `__: ENH: add ``numpy.f2py.get_include`` function +* `#19349 `__: BUG: Fix reference count leak in ufunc dtype handling +* `#19350 `__: MAINT: Annotate missing attributes of ``np.number`` subclasses +* `#19351 `__: BUG: Fix cast safety and comparisons for zero sized voids +* `#19352 `__: BUG: Correct Cython declaration in random +* `#19353 `__: BUG: protect against accessing base attribute of a NULL subarray +* `#19365 `__: BUG, SIMD: Fix detecting AVX512 features on Darwin +* `#19366 `__: MAINT: remove ``print()``'s in distutils template handling +* `#19390 `__: ENH: SIMD architectures to show_config +* `#19391 `__: BUG: Do not raise deprecation warning for all nans in unique... +* `#19392 `__: BUG: Fix NULL special case in object-to-any cast code +* `#19430 `__: MAINT: Use arm64-graviton2 for testing on travis +* `#19495 `__: BUILD: update OpenBLAS to v0.3.17 +* `#19496 `__: MAINT: Avoid unicode characters in division SIMD code comments +* `#19499 `__: BUG, SIMD: Fix infinite loop during count non-zero on GCC-11 +* `#19500 `__: BUG: fix a numpy.npiter leak in npyiter_multi_index_set +* `#19501 `__: TST: Fix a ``GenericAlias`` test failure for python 3.9.0 +* `#19502 `__: MAINT: Start testing with Python 3.10.0b3. +* `#19503 `__: MAINT: Add missing dtype overloads for object- and ctypes-based... +* `#19510 `__: REL: Prepare for NumPy 1.21.1 release. -Compatibility notes -=================== - - -C API changes -============= - - -New Features -============ - - -Improvements -============ - - -Changes -=======
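
As a closing illustration of the ctypes-aware ``np.dtype`` overloads and the ``Type[object]`` catch-all added in the typing patches above, the short sketch below checks a few of the corresponding runtime conversions. It is an illustrative aside rather than part of the patch series: it only uses public ``numpy`` and ``ctypes`` APIs, and the result noted for ``c_size_t``/``c_ssize_t`` assumes a typical 64-bit platform::

    import ctypes as ct

    import numpy as np

    # Fixed-width ctypes scalars map onto the matching NumPy dtypes; the
    # new overloads in numpy/__init__.pyi describe these long-standing
    # runtime conversions for static type checkers.
    assert np.dtype(ct.c_uint32) == np.dtype(np.uint32)
    assert np.dtype(ct.c_double) == np.dtype(np.float64)
    assert np.dtype(ct.c_bool) == np.dtype(np.bool_)
    assert np.dtype(ct.c_char) == np.dtype("S1")   # numpy.bytes_

    # object itself and ctypes.py_object resolve to the object dtype,
    # which is what the new Type[object] catch-all overload annotates.
    assert np.dtype(object) == np.dtype(np.object_)
    assert np.dtype(ct.py_object) == np.dtype(np.object_)

    # c_size_t and c_ssize_t are assumed to be pointer-sized (uintp/intp);
    # the NOTE added to numpy/__init__.pyi records that this assumption
    # can fail on rare platforms.
    print(np.dtype(ct.c_size_t), np.dtype(ct.c_ssize_t))  # uint64 int64

On platforms where ``size_t`` is not pointer-sized the last line prints different fixed-width types, which is exactly the caveat the new NOTE in ``numpy/__init__.pyi`` documents.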