diff --git a/doc/release/upcoming_changes/27156.change.rst b/doc/release/upcoming_changes/27156.change.rst new file mode 100644 index 000000000000..bd332617279e --- /dev/null +++ b/doc/release/upcoming_changes/27156.change.rst @@ -0,0 +1,9 @@ +NEP 50 promotion state option removed +------------------------------------- +The NEP 50 promotion state settings are now removed. They were always +meant as temporary means for testing. +A warning will be given if the environment variable is set to anything +but ``NPY_PROMOTION_STATE=weak`` while ``_set_promotion_state`` +and ``_get_promotion_state`` are removed. +In case code used ``_no_nep50_warning``, a ``contextlib.nullcontext`` +could be used to replace it when not available. diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 68fbb6ef3d66..051b115ab5f7 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1089,14 +1089,13 @@ Converting data types returned when the value will not overflow or be truncated to an integer when converting to a smaller type. - This is almost the same as the result of - PyArray_CanCastTypeTo(PyArray_MinScalarType(arr), totype, casting), - but it also handles a special case arising because the set - of uint values is not a subset of the int values for types with the - same number of bits. - .. c:function:: PyArray_Descr* PyArray_MinScalarType(PyArrayObject* arr) + .. note:: + With the adoption of NEP 50 in NumPy 2, this function is not used + internally. It is currently provided for backwards compatibility, + but expected to be eventually deprecated. + .. versionadded:: 1.6 If *arr* is an array, returns its data type descriptor, but if @@ -1134,8 +1133,7 @@ Converting data types .. c:function:: int PyArray_ObjectType(PyObject* op, int mintype) - This function is superseded by :c:func:`PyArray_MinScalarType` and/or - :c:func:`PyArray_ResultType`. + This function is superseded by :c:func:`PyArray_ResultType`. 
This function is useful for determining a common type that two or more arrays can be converted to. It only works for non-flexible @@ -3243,30 +3241,18 @@ Array scalars .. c:function:: NPY_SCALARKIND PyArray_ScalarKind( \ int typenum, PyArrayObject** arr) - See the function :c:func:`PyArray_MinScalarType` for an alternative - mechanism introduced in NumPy 1.6.0. + Legacy way to query special promotion for scalar values. This is not + used in NumPy itself anymore and is expected to be deprecated eventually. - Return the kind of scalar represented by *typenum* and the array - in *\*arr* (if *arr* is not ``NULL`` ). The array is assumed to be - rank-0 and only used if *typenum* represents a signed integer. If - *arr* is not ``NULL`` and the first element is negative then - :c:data:`NPY_INTNEG_SCALAR` is returned, otherwise - :c:data:`NPY_INTPOS_SCALAR` is returned. The possible return values - are the enumerated values in :c:type:`NPY_SCALARKIND`. + New DTypes can define promotion rules specific to Python scalars. .. c:function:: int PyArray_CanCoerceScalar( \ char thistype, char neededtype, NPY_SCALARKIND scalar) - See the function :c:func:`PyArray_ResultType` for details of - NumPy type promotion, updated in NumPy 1.6.0. + Legacy way to query special promotion for scalar values. This is not + used in NumPy itself anymore and is expected to be deprecated eventually. - Implements the rules for scalar coercion. Scalars are only - silently coerced from thistype to neededtype if this function - returns nonzero. If scalar is :c:data:`NPY_NOSCALAR`, then this - function is equivalent to :c:func:`PyArray_CanCastSafely`. The rule is - that scalars of the same KIND can be coerced into arrays of the - same KIND. This rule means that high-precision scalars will never - cause low-precision arrays of the same KIND to be upcast. + Use ``PyArray_ResultType`` for similar purposes. 
Data-type descriptors diff --git a/numpy/__init__.py b/numpy/__init__.py index 27e5d2d6801d..8674fb164a70 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -120,8 +120,8 @@ from . import _core from ._core import ( - False_, ScalarType, True_, _get_promotion_state, _no_nep50_warning, - _set_promotion_state, abs, absolute, acos, acosh, add, all, allclose, + False_, ScalarType, True_, + abs, absolute, acos, acosh, add, all, allclose, amax, amin, any, arange, arccos, arccosh, arcsin, arcsinh, arctan, arctan2, arctanh, argmax, argmin, argpartition, argsort, argwhere, around, array, array2string, array_equal, array_equiv, @@ -529,8 +529,11 @@ def hugepage_setup(): _core.multiarray._multiarray_umath._reload_guard() # TODO: Remove the environment variable entirely now that it is "weak" - _core._set_promotion_state( - os.environ.get("NPY_PROMOTION_STATE", "weak")) + if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"): + warnings.warn( + "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 " + "transition and is ignored after NumPy 2.2.", + UserWarning, stacklevel=2) # Tell PyInstaller where to find hook-numpy.py def _pyinstaller_hooks_dir(): diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e73d6f16765b..66778140f431 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3533,11 +3533,6 @@ class errstate: ) -> None: ... def __call__(self, func: _CallType) -> _CallType: ... -@contextmanager -def _no_nep50_warning() -> Generator[None, None, None]: ... -def _get_promotion_state() -> str: ... -def _set_promotion_state(state: str, /) -> None: ... 
- _ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True) class ndenumerate(Generic[_ScalarType_co]): diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index 388854e664a5..03c673fc0ff8 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -14,7 +14,6 @@ from numpy._core.multiarray import asanyarray from numpy._core import numerictypes as nt from numpy._core import _exceptions -from numpy._core._ufunc_config import _no_nep50_warning from numpy._globals import _NoValue # save those O(100) nanoseconds! @@ -135,9 +134,8 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): ret = umr_sum(arr, axis, dtype, out, keepdims, where=where) if isinstance(ret, mu.ndarray): - with _no_nep50_warning(): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) if is_float16_result and out is None: ret = arr.dtype.type(ret) elif hasattr(ret, 'dtype'): @@ -180,9 +178,8 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # matching rcount to arrmean when where is specified as array div = rcount.reshape(arrmean.shape) if isinstance(arrmean, mu.ndarray): - with _no_nep50_warning(): - arrmean = um.true_divide(arrmean, div, out=arrmean, - casting='unsafe', subok=False) + arrmean = um.true_divide(arrmean, div, out=arrmean, + casting='unsafe', subok=False) elif hasattr(arrmean, "dtype"): arrmean = arrmean.dtype.type(arrmean / rcount) else: @@ -212,9 +209,8 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # divide by degrees of freedom if isinstance(ret, mu.ndarray): - with _no_nep50_warning(): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) elif hasattr(ret, 'dtype'): ret = ret.dtype.type(ret / rcount) else: diff --git a/numpy/_core/_ufunc_config.py 
b/numpy/_core/_ufunc_config.py index d60e7cbbda97..b31af64b8a4b 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -14,7 +14,7 @@ __all__ = [ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", - "errstate", '_no_nep50_warning' + "errstate" ] @@ -482,22 +482,3 @@ def inner(*args, **kwargs): _extobj_contextvar.reset(_token) return inner - - -NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False) - -@set_module('numpy') -@contextlib.contextmanager -def _no_nep50_warning(): - """ - Context manager to disable NEP 50 warnings. This context manager is - only relevant if the NEP 50 warnings are enabled globally (which is not - thread/context safe). - - This warning context manager itself is fully safe, however. - """ - token = NO_NEP50_WARNING.set(True) - try: - yield - finally: - NO_NEP50_WARNING.reset(token) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index e2ca115b3728..a13f8d71f520 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -17,7 +17,6 @@ _flagdict, from_dlpack, _place, _reconstruct, _vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version, _get_madvise_hugepage, _set_madvise_hugepage, - _get_promotion_state, _set_promotion_state ) __all__ = [ @@ -40,8 +39,7 @@ 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', 'set_typeDict', 'shares_memory', 'typeinfo', - 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros', - '_get_promotion_state', '_set_promotion_state'] + 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros'] # For backward compatibility, make sure pickle imports # these functions from here @@ -67,8 +65,6 @@ nested_iters.__module__ = 'numpy' promote_types.__module__ = 'numpy' zeros.__module__ = 'numpy' -_get_promotion_state.__module__ = 'numpy' -_set_promotion_state.__module__ = 'numpy' normalize_axis_index.__module__ = 
'numpy.lib.array_utils' diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 39b3de44fabe..da783c7e586d 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -17,8 +17,7 @@ empty, empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter, fromstring, inner, lexsort, matmul, may_share_memory, min_scalar_type, ndarray, nditer, nested_iters, promote_types, putmask, result_type, - shares_memory, vdot, where, zeros, normalize_axis_index, - _get_promotion_state, _set_promotion_state, vecdot + shares_memory, vdot, where, zeros, normalize_axis_index, vecdot ) from . import overrides @@ -28,7 +27,7 @@ from .umath import (multiply, invert, sin, PINF, NAN) from . import numerictypes from ..exceptions import AxisError -from ._ufunc_config import errstate, _no_nep50_warning +from ._ufunc_config import errstate bitwise_not = invert ufunc = type(sin) @@ -53,7 +52,7 @@ 'identity', 'allclose', 'putmask', 'flatnonzero', 'inf', 'nan', 'False_', 'True_', 'bitwise_not', 'full', 'full_like', 'matmul', 'vecdot', 'shares_memory', - 'may_share_memory', '_get_promotion_state', '_set_promotion_state'] + 'may_share_memory'] def _zeros_like_dispatcher( @@ -2457,7 +2456,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): elif isinstance(y, int): y = float(y) - with errstate(invalid='ignore'), _no_nep50_warning(): + with errstate(invalid='ignore'): result = (less_equal(abs(x-y), atol + rtol * abs(y)) & isfinite(y) | (x == y)) diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 9524be8a0c89..931ced5d8176 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -275,41 +275,10 @@ static int #endif ) { PyArray_Descr *descr = PyArray_DescrFromType(NPY_@TYPE@); - int promotion_state = get_npy_promotion_state(); - if (promotion_state == NPY_USE_LEGACY_PROMOTION || ( - promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN - && 
!npy_give_promotion_warnings())) { - /* - * This path will be taken both for the "promotion" case such as - * `uint8_arr + 123` as well as the assignment case. - * The "legacy" path should only ever be taken for assignment - * (legacy promotion will prevent overflows by promoting up) - * so a normal deprecation makes sense. - * When weak promotion is active, we use "future" behavior unless - * warnings were explicitly opt-in. - */ - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "NumPy will stop allowing conversion of out-of-bound " - "Python integers to integer arrays. The conversion " - "of %.100R to %S will fail in the future.\n" - "For the old behavior, usually:\n" - " np.array(value).astype(dtype)\n" - "will give the desired result (the cast overflows).", - obj, descr) < 0) { - Py_DECREF(descr); - return -1; - } - Py_DECREF(descr); - return 0; - } - else { - /* Live in the future, outright error: */ - PyErr_Format(PyExc_OverflowError, - "Python integer %R out of bounds for %S", obj, descr); - Py_DECREF(descr); - return -1; - } - assert(0); + PyErr_Format(PyExc_OverflowError, + "Python integer %R out of bounds for %S", obj, descr); + Py_DECREF(descr); + return -1; } return 0; } diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 550d3e253868..a24b14623957 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -49,18 +49,6 @@ */ NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[] = {0, 3, 5, 10, 10, 20, 20, 20, 20}; -static NPY_TLS int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; - -NPY_NO_EXPORT int -get_npy_promotion_state() { - return npy_promotion_state; -} - -NPY_NO_EXPORT void -set_npy_promotion_state(int new_promotion_state) { - npy_promotion_state = new_promotion_state; -} - static PyObject * PyArray_GetGenericToVoidCastingImpl(void); @@ -74,80 +62,6 @@ static PyObject * PyArray_GetObjectToGenericCastingImpl(void); -/* - * Return 1 if 
promotion warnings should be given and 0 if they are currently - * suppressed in the local context. - */ -NPY_NO_EXPORT int -npy_give_promotion_warnings(void) -{ - PyObject *val; - - if (npy_cache_import_runtime( - "numpy._core._ufunc_config", "NO_NEP50_WARNING", - &npy_runtime_imports.NO_NEP50_WARNING) == -1) { - PyErr_WriteUnraisable(NULL); - return 1; - } - - if (PyContextVar_Get(npy_runtime_imports.NO_NEP50_WARNING, - Py_False, &val) < 0) { - /* Errors should not really happen, but if it does assume we warn. */ - PyErr_WriteUnraisable(NULL); - return 1; - } - Py_DECREF(val); - /* only when the no-warnings context is false, we give warnings */ - return val == Py_False; -} - - -NPY_NO_EXPORT PyObject * -npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg)) { - int promotion_state = get_npy_promotion_state(); - if (promotion_state == NPY_USE_WEAK_PROMOTION) { - return PyUnicode_FromString("weak"); - } - else if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) { - return PyUnicode_FromString("weak_and_warn"); - } - else if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - return PyUnicode_FromString("legacy"); - } - PyErr_SetString(PyExc_SystemError, "invalid promotion state!"); - return NULL; -} - - -NPY_NO_EXPORT PyObject * -npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg) -{ - if (!PyUnicode_Check(arg)) { - PyErr_SetString(PyExc_TypeError, - "_set_promotion_state() argument or NPY_PROMOTION_STATE " - "must be a string."); - return NULL; - } - int new_promotion_state; - if (PyUnicode_CompareWithASCIIString(arg, "weak") == 0) { - new_promotion_state = NPY_USE_WEAK_PROMOTION; - } - else if (PyUnicode_CompareWithASCIIString(arg, "weak_and_warn") == 0) { - new_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN; - } - else if (PyUnicode_CompareWithASCIIString(arg, "legacy") == 0) { - new_promotion_state = NPY_USE_LEGACY_PROMOTION; - } - else { - PyErr_Format(PyExc_TypeError, - "_set_promotion_state() argument or 
NPY_PROMOTION_STATE must be " - "'weak', 'legacy', or 'weak_and_warn' but got '%.100S'", arg); - return NULL; - } - set_npy_promotion_state(new_promotion_state); - Py_RETURN_NONE; -} - /** * Fetch the casting implementation from one DType to another. * @@ -724,26 +638,6 @@ dtype_kind_to_ordering(char kind) } } -/* Converts a type number from unsigned to signed */ -static int -type_num_unsigned_to_signed(int type_num) -{ - switch (type_num) { - case NPY_UBYTE: - return NPY_BYTE; - case NPY_USHORT: - return NPY_SHORT; - case NPY_UINT: - return NPY_INT; - case NPY_ULONG: - return NPY_LONG; - case NPY_ULONGLONG: - return NPY_LONGLONG; - default: - return type_num; - } -} - /*NUMPY_API * Returns true if data of type 'from' may be cast to data of type @@ -789,83 +683,6 @@ static int min_scalar_type_num(char *valueptr, int type_num, int *is_small_unsigned); -/* - * NOTE: This function uses value based casting logic for scalars. It will - * require updates when we phase out value-based-casting. - */ -NPY_NO_EXPORT npy_bool -can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, - PyArray_Descr *to, NPY_CASTING casting) -{ - /* - * If the two dtypes are actually references to the same object - * or if casting type is forced unsafe then always OK. - * - * TODO: Assuming that unsafe casting always works is not actually correct - */ - if (scal_type == to || casting == NPY_UNSAFE_CASTING ) { - return 1; - } - - int valid = PyArray_CheckCastSafety(casting, scal_type, to, NPY_DTYPE(to)); - if (valid == 1) { - /* This is definitely a valid cast. */ - return 1; - } - if (valid < 0) { - /* Probably must return 0, but just keep trying for now. */ - PyErr_Clear(); - } - - /* - * If the scalar isn't a number, value-based casting cannot kick in and - * we must not attempt it. - * (Additional fast-checks would be possible, but probably unnecessary.) - */ - if (!PyTypeNum_ISNUMBER(scal_type->type_num)) { - return 0; - } - - /* - * At this point we have to check value-based casting. 
- */ - PyArray_Descr *dtype; - int is_small_unsigned = 0, type_num; - /* An aligned memory buffer large enough to hold any builtin numeric type */ - npy_longlong value[4]; - - int swap = !PyArray_ISNBO(scal_type->byteorder); - PyDataType_GetArrFuncs(scal_type)->copyswap(&value, scal_data, swap, NULL); - - type_num = min_scalar_type_num((char *)&value, scal_type->type_num, - &is_small_unsigned); - - /* - * If we've got a small unsigned scalar, and the 'to' type - * is not unsigned, then make it signed to allow the value - * to be cast more appropriately. - */ - if (is_small_unsigned && !(PyTypeNum_ISUNSIGNED(to->type_num))) { - type_num = type_num_unsigned_to_signed(type_num); - } - - dtype = PyArray_DescrFromType(type_num); - if (dtype == NULL) { - return 0; - } -#if 0 - printf("min scalar cast "); - PyObject_Print(dtype, stdout, 0); - printf(" to "); - PyObject_Print(to, stdout, 0); - printf("\n"); -#endif - npy_bool ret = PyArray_CanCastTypeTo(dtype, to, casting); - Py_DECREF(dtype); - return ret; -} - - NPY_NO_EXPORT npy_bool can_cast_pyscalar_scalar_to( int flags, PyArray_Descr *to, NPY_CASTING casting) @@ -932,25 +749,14 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, to = NULL; } - if (get_npy_promotion_state() == NPY_USE_LEGACY_PROMOTION) { - /* - * If it's a scalar, check the value. (This only currently matters for - * numeric types and for `to == NULL` it can't be numeric.) - */ - if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr) && to != NULL) { - return can_cast_scalar_to(from, PyArray_DATA(arr), to, casting); - } - } - else { - /* - * If it's a scalar, check the value. (This only currently matters for - * numeric types and for `to == NULL` it can't be numeric.) - */ - if (PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL && to != NULL) { - return can_cast_pyscalar_scalar_to( - PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL, to, - casting); - } + /* + * If it's a scalar, check the value. 
(This only currently matters for + * numeric types and for `to == NULL` it can't be numeric.) + */ + if (PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL && to != NULL) { + return can_cast_pyscalar_scalar_to( + PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL, to, + casting); } /* Otherwise, use the standard rules (same as `PyArray_CanCastTypeTo`) */ @@ -1030,58 +836,6 @@ PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) return (npy_bool) PyArray_CanCastSafely(fromtype, totype); } -/* - * Internal promote types function which handles unsigned integers which - * fit in same-sized signed integers specially. - */ -static PyArray_Descr * -promote_types(PyArray_Descr *type1, PyArray_Descr *type2, - int is_small_unsigned1, int is_small_unsigned2) -{ - if (is_small_unsigned1) { - int type_num1 = type1->type_num; - int type_num2 = type2->type_num; - int ret_type_num; - - if (type_num2 < NPY_NTYPES_LEGACY && !(PyTypeNum_ISBOOL(type_num2) || - PyTypeNum_ISUNSIGNED(type_num2))) { - /* Convert to the equivalent-sized signed integer */ - type_num1 = type_num_unsigned_to_signed(type_num1); - - ret_type_num = _npy_type_promotion_table[type_num1][type_num2]; - /* The table doesn't handle string/unicode/void, check the result */ - if (ret_type_num >= 0) { - return PyArray_DescrFromType(ret_type_num); - } - } - - return PyArray_PromoteTypes(type1, type2); - } - else if (is_small_unsigned2) { - int type_num1 = type1->type_num; - int type_num2 = type2->type_num; - int ret_type_num; - - if (type_num1 < NPY_NTYPES_LEGACY && !(PyTypeNum_ISBOOL(type_num1) || - PyTypeNum_ISUNSIGNED(type_num1))) { - /* Convert to the equivalent-sized signed integer */ - type_num2 = type_num_unsigned_to_signed(type_num2); - - ret_type_num = _npy_type_promotion_table[type_num1][type_num2]; - /* The table doesn't handle string/unicode/void, check the result */ - if (ret_type_num >= 0) { - return PyArray_DescrFromType(ret_type_num); - } - } - - return PyArray_PromoteTypes(type1, type2); - } - else { 
- return PyArray_PromoteTypes(type1, type2); - } - -} - /** * This function should possibly become public API eventually. At this @@ -1576,11 +1330,19 @@ static int min_scalar_type_num(char *valueptr, int type_num, } +/*NUMPY_API + * If arr is a scalar (has 0 dimensions) with a built-in number data type, + * finds the smallest type size/kind which can still represent its data. + * Otherwise, returns the array's data type. + * + * NOTE: This API is a left over from before NumPy 2 (and NEP 50) and should + * probably be eventually deprecated and removed. + */ NPY_NO_EXPORT PyArray_Descr * -PyArray_MinScalarType_internal(PyArrayObject *arr, int *is_small_unsigned) +PyArray_MinScalarType(PyArrayObject *arr) { + int is_small_unsigned; PyArray_Descr *dtype = PyArray_DESCR(arr); - *is_small_unsigned = 0; /* * If the array isn't a numeric scalar, just return the array's dtype. */ @@ -1597,23 +1359,11 @@ PyArray_MinScalarType_internal(PyArrayObject *arr, int *is_small_unsigned) return PyArray_DescrFromType( min_scalar_type_num((char *)&value, - dtype->type_num, is_small_unsigned)); + dtype->type_num, &is_small_unsigned)); } } -/*NUMPY_API - * If arr is a scalar (has 0 dimensions) with a built-in number data type, - * finds the smallest type size/kind which can still represent its data. - * Otherwise, returns the array's data type. 
- * - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_MinScalarType(PyArrayObject *arr) -{ - int is_small_unsigned; - return PyArray_MinScalarType_internal(arr, &is_small_unsigned); -} /* * Provides an ordering for the dtype 'kind' character codes, to help @@ -1814,14 +1564,7 @@ PyArray_ResultType( all_descriptors[i] = descrs[i]; } - int at_least_one_scalar = 0; - int all_pyscalar = ndtypes == 0; for (npy_intp i=0, i_all=ndtypes; i < narrs; i++, i_all++) { - /* Array descr is also the correct "default" for scalars: */ - if (PyArray_NDIM(arrs[i]) == 0) { - at_least_one_scalar = 1; - } - /* * If the original was a Python scalar/literal, we use only the * corresponding abstract DType (and no descriptor) below. @@ -1831,10 +1574,6 @@ PyArray_ResultType( if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_INT) { /* This could even be an object dtype here for large ints */ all_DTypes[i_all] = &PyArray_PyLongDType; - if (PyArray_TYPE(arrs[i]) != NPY_LONG) { - /* Not a "normal" scalar, so we cannot avoid the legacy path */ - all_pyscalar = 0; - } } else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_FLOAT) { all_DTypes[i_all] = &PyArray_PyFloatDType; @@ -1845,7 +1584,6 @@ PyArray_ResultType( else { all_descriptors[i_all] = PyArray_DTYPE(arrs[i]); all_DTypes[i_all] = NPY_DTYPE(all_descriptors[i_all]); - all_pyscalar = 0; } Py_INCREF(all_DTypes[i_all]); } @@ -1906,24 +1644,6 @@ PyArray_ResultType( } } - /* - * Unfortunately, when 0-D "scalar" arrays are involved and mixed, we *may* - * have to use the value-based logic. - * `PyArray_CheckLegacyResultType` may behave differently based on the - * current value of `npy_legacy_promotion`: - * 1. It does nothing (we use the "new" behavior) - * 2. It does nothing, but warns if there the result would differ. - * 3. It replaces the result based on the legacy value-based logic. 
- */ - if (at_least_one_scalar && !all_pyscalar && result->type_num < NPY_NTYPES_LEGACY) { - if (PyArray_CheckLegacyResultType( - &result, narrs, arrs, ndtypes, descrs) < 0) { - Py_DECREF(common_dtype); - Py_DECREF(result); - return NULL; - } - } - Py_DECREF(common_dtype); PyMem_Free(info_on_heap); return result; @@ -1936,145 +1656,6 @@ PyArray_ResultType( } -/* - * Produces the result type of a bunch of inputs, using the UFunc - * type promotion rules. Use this function when you have a set of - * input arrays, and need to determine an output array dtype. - * - * If all the inputs are scalars (have 0 dimensions) or the maximum "kind" - * of the scalars is greater than the maximum "kind" of the arrays, does - * a regular type promotion. - * - * Otherwise, does a type promotion on the MinScalarType - * of all the inputs. Data types passed directly are treated as array - * types. - */ -NPY_NO_EXPORT int -PyArray_CheckLegacyResultType( - PyArray_Descr **new_result, - npy_intp narrs, PyArrayObject **arr, - npy_intp ndtypes, PyArray_Descr **dtypes) -{ - PyArray_Descr *ret = NULL; - int promotion_state = get_npy_promotion_state(); - if (promotion_state == NPY_USE_WEAK_PROMOTION) { - return 0; - } - if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN - && !npy_give_promotion_warnings()) { - return 0; - } - - npy_intp i; - - /* If there's just one type, results must match */ - if (narrs + ndtypes == 1) { - return 0; - } - - int use_min_scalar = should_use_min_scalar(narrs, arr, ndtypes, dtypes); - - /* Loop through all the types, promoting them */ - if (!use_min_scalar) { - - /* Build a single array of all the dtypes */ - PyArray_Descr **all_dtypes = PyArray_malloc( - sizeof(*all_dtypes) * (narrs + ndtypes)); - if (all_dtypes == NULL) { - PyErr_NoMemory(); - return -1; - } - for (i = 0; i < narrs; ++i) { - all_dtypes[i] = PyArray_DESCR(arr[i]); - } - for (i = 0; i < ndtypes; ++i) { - all_dtypes[narrs + i] = dtypes[i]; - } - ret = PyArray_PromoteTypeSequence(all_dtypes, 
narrs + ndtypes); - PyArray_free(all_dtypes); - } - else { - int ret_is_small_unsigned = 0; - - for (i = 0; i < narrs; ++i) { - int tmp_is_small_unsigned; - PyArray_Descr *tmp = PyArray_MinScalarType_internal( - arr[i], &tmp_is_small_unsigned); - if (tmp == NULL) { - Py_XDECREF(ret); - return -1; - } - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - ret_is_small_unsigned = tmp_is_small_unsigned; - } - else { - PyArray_Descr *tmpret = promote_types( - tmp, ret, tmp_is_small_unsigned, ret_is_small_unsigned); - Py_DECREF(tmp); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return -1; - } - - ret_is_small_unsigned = tmp_is_small_unsigned && - ret_is_small_unsigned; - } - } - - for (i = 0; i < ndtypes; ++i) { - PyArray_Descr *tmp = dtypes[i]; - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - Py_INCREF(ret); - } - else { - PyArray_Descr *tmpret = promote_types( - tmp, ret, 0, ret_is_small_unsigned); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return -1; - } - } - } - /* None of the above loops ran */ - if (ret == NULL) { - PyErr_SetString(PyExc_TypeError, - "no arrays or types available to calculate result type"); - } - } - - if (ret == NULL) { - return -1; - } - - int unchanged_result = PyArray_EquivTypes(*new_result, ret); - if (unchanged_result) { - Py_DECREF(ret); - return 0; - } - - if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - Py_SETREF(*new_result, ret); - return 0; - } - - assert(promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN); - if (PyErr_WarnFormat(PyExc_UserWarning, 1, - "result dtype changed due to the removal of value-based " - "promotion from NumPy. Changed from %S to %S.", - ret, *new_result) < 0) { - Py_DECREF(ret); - return -1; - } - Py_DECREF(ret); - return 0; -} - /** * Promotion of descriptors (of arbitrary DType) to their correctly * promoted instances of the given DType. 
diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h index f848ad3b4c8e..5dc6b4deacb6 100644 --- a/numpy/_core/src/multiarray/convert_datatype.h +++ b/numpy/_core/src/multiarray/convert_datatype.h @@ -9,19 +9,6 @@ extern "C" { extern NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[]; -#define NPY_USE_LEGACY_PROMOTION 0 -#define NPY_USE_WEAK_PROMOTION 1 -#define NPY_USE_WEAK_PROMOTION_AND_WARN 2 - -NPY_NO_EXPORT int -npy_give_promotion_warnings(void); - -NPY_NO_EXPORT PyObject * -npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg)); - -NPY_NO_EXPORT PyObject * -npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg); - NPY_NO_EXPORT PyObject * PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to); @@ -53,11 +40,6 @@ PyArray_ValidType(int type); NPY_NO_EXPORT int dtype_kind_to_ordering(char kind); -/* Used by PyArray_CanCastArrayTo and in the legacy ufunc type resolution */ -NPY_NO_EXPORT npy_bool -can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, - PyArray_Descr *to, NPY_CASTING casting); - NPY_NO_EXPORT npy_bool can_cast_pyscalar_scalar_to( int flags, PyArray_Descr *to, NPY_CASTING casting); @@ -133,12 +115,6 @@ simple_cast_resolve_descriptors( NPY_NO_EXPORT int PyArray_InitializeCasts(void); -NPY_NO_EXPORT int -get_npy_promotion_state(); - -NPY_NO_EXPORT void -set_npy_promotion_state(int new_promotion_state); - #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 6681edda1e55..44edea32c861 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -3549,30 +3549,18 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), * TODO: `PyArray_IsScalar` should not be required for new dtypes. * weak-promotion branch is in practice identical to dtype one. 
*/ - if (get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION) { - PyObject *descr = PyObject_GetAttr(from_obj, npy_interned_str.dtype); - if (descr == NULL) { - goto finish; - } - if (!PyArray_DescrCheck(descr)) { - Py_DECREF(descr); - PyErr_SetString(PyExc_TypeError, - "numpy_scalar.dtype did not return a dtype instance."); - goto finish; - } - ret = PyArray_CanCastTypeTo((PyArray_Descr *)descr, d2, casting); - Py_DECREF(descr); + PyObject *descr = PyObject_GetAttr(from_obj, npy_interned_str.dtype); + if (descr == NULL) { + goto finish; } - else { - /* need to convert to object to consider old value-based logic */ - PyArrayObject *arr; - arr = (PyArrayObject *)PyArray_FROM_O(from_obj); - if (arr == NULL) { - goto finish; - } - ret = PyArray_CanCastArrayTo(arr, d2, casting); - Py_DECREF(arr); + if (!PyArray_DescrCheck(descr)) { + Py_DECREF(descr); + PyErr_SetString(PyExc_TypeError, + "numpy_scalar.dtype did not return a dtype instance."); + goto finish; } + ret = PyArray_CanCastTypeTo((PyArray_Descr *)descr, d2, casting); + Py_DECREF(descr); } else if (PyArray_IsPythonNumber(from_obj)) { PyErr_SetString(PyExc_TypeError, @@ -4627,14 +4615,6 @@ static struct PyMethodDef array_module_methods[] = { {"get_handler_version", (PyCFunction) get_handler_version, METH_VARARGS, NULL}, - {"_get_promotion_state", - (PyCFunction)npy__get_promotion_state, - METH_NOARGS, "Get the current NEP 50 promotion state."}, - {"_set_promotion_state", - (PyCFunction)npy__set_promotion_state, - METH_O, "Set the NEP 50 promotion state. 
This is not thread-safe.\n" - "The optional warnings can be safely silenced using the \n" - "`np._no_nep50_warning()` context manager."}, {"_set_numpy_warn_if_no_mem_policy", (PyCFunction)_set_numpy_warn_if_no_mem_policy, METH_O, "Change the warn if no mem policy flag for testing."}, diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 110e2f40ab32..55a99cc5e7c8 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -976,28 +976,8 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } - int current_promotion_state = get_npy_promotion_state(); - - if (force_legacy_promotion && legacy_promotion_is_possible - && current_promotion_state == NPY_USE_LEGACY_PROMOTION - && (ufunc->ntypes != 0 || ufunc->userloops != NULL)) { - /* - * We must use legacy promotion for value-based logic. Call the old - * resolver once up-front to get the "actual" loop dtypes. - * After this (additional) promotion, we can even use normal caching. 
- */ - int cacheable = 1; /* unused, as we modify the original `op_dtypes` */ - if (legacy_promote_using_legacy_type_resolver(ufunc, - ops, signature, op_dtypes, &cacheable, NPY_FALSE) < 0) { - goto handle_error; - } - } - - /* Pause warnings and always use "new" path */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); - set_npy_promotion_state(current_promotion_state); if (info == NULL) { goto handle_error; @@ -1006,26 +986,6 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); - /* If necessary, check if the old result would have been different */ - if (NPY_UNLIKELY(current_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) - && (force_legacy_promotion || promoting_pyscalars) - && npy_give_promotion_warnings()) { - PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; - for (int i = 0; i < nargs; i++) { - check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM( - all_dtypes, i); - } - /* Before calling to the legacy promotion, pretend that is the state: */ - set_npy_promotion_state(NPY_USE_LEGACY_PROMOTION); - int res = legacy_promote_using_legacy_type_resolver(ufunc, - ops, signature, check_dtypes, NULL, NPY_TRUE); - /* Reset the promotion state: */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION_AND_WARN); - if (res < 0) { - goto handle_error; - } - } - /* * In certain cases (only the logical ufuncs really), the loop we found may * not be reduce-compatible. 
Since the machinery can't distinguish a diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index cd28e4405b6d..ecf37e83b586 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -956,10 +956,6 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) if (PyFloat_CheckExact(value)) { if (!IS_SAFE(NPY_DOUBLE, NPY_@TYPE@)) { - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - /* Legacy promotion and weak-and-warn not handled here */ - return PROMOTION_REQUIRED; - } /* Weak promotion is used when self is float or complex: */ if (!PyTypeNum_ISFLOAT(NPY_@TYPE@) && !PyTypeNum_ISCOMPLEX(NPY_@TYPE@)) { return PROMOTION_REQUIRED; @@ -976,19 +972,12 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) * long -> (c)longdouble is safe, so `OTHER_IS_UNKNOWN_OBJECT` will * be returned below for huge integers. */ - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - /* Legacy promotion and weak-and-warn not handled here */ - return PROMOTION_REQUIRED; - } return CONVERT_PYSCALAR; } int overflow; long val = PyLong_AsLongAndOverflow(value, &overflow); if (overflow) { /* handle as if "unsafe" */ - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - return OTHER_IS_UNKNOWN_OBJECT; - } return CONVERT_PYSCALAR; } if (error_converting(val)) { @@ -1000,10 +989,6 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) if (PyComplex_CheckExact(value)) { if (!IS_SAFE(NPY_CDOUBLE, NPY_@TYPE@)) { - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - /* Legacy promotion and weak-and-warn not handled here */ - return PROMOTION_REQUIRED; - } /* Weak promotion is used when self is float or complex: */ if (!PyTypeNum_ISCOMPLEX(NPY_@TYPE@)) { return PROMOTION_REQUIRED; diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 6bd02b0fec87..a531e4a7e0ae 
100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -664,12 +664,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, continue; } - // TODO: Is this equivalent/better by removing the logic which enforces - // that we always use weak promotion in the core? - if (get_npy_promotion_state() == NPY_USE_LEGACY_PROMOTION) { - continue; /* Skip use of special dtypes */ - } - /* * Handle the "weak" Python scalars/literals. We use a special DType * for these. @@ -6065,10 +6059,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, PyArray_DTypeMeta *signature[NPY_MAXARGS] = {NULL}; PyArray_Descr *operation_descrs[NPY_MAXARGS] = {NULL}; - /* This entry-point to promotion lives in the NEP 50 future: */ - int original_promotion_state = get_npy_promotion_state(); - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); - npy_bool promoting_pyscalars = NPY_FALSE; if (_get_fixed_signature(ufunc, NULL, signature_obj, signature) < 0) { @@ -6250,8 +6240,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, Py_DECREF(capsule); finish: - set_npy_promotion_state(original_promotion_state); - Py_XDECREF(result_dtype_tuple); for (int i = 0; i < ufunc->nargs; i++) { Py_XDECREF(signature[i]); diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index cabcff3b9bef..77607fdbedd4 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1919,17 +1919,7 @@ linear_search_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - int promotion_state = get_npy_promotion_state(); - - assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); - /* Always "use" with new promotion in case of Python int/float/complex */ - int use_min_scalar; - if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); - } - else { - use_min_scalar = 
should_use_min_scalar_weak_literals(nin, op); - } + int use_min_scalar = should_use_min_scalar_weak_literals(nin, op); /* If the ufunc has userloops, search for them. */ if (self->userloops) { @@ -2123,17 +2113,7 @@ type_tuple_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - int promotion_state = get_npy_promotion_state(); - - assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); - /* Always "use" with new promotion in case of Python int/float/complex */ - int use_min_scalar; - if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); - } - else { - use_min_scalar = should_use_min_scalar_weak_literals(nin, op); - } + int use_min_scalar = should_use_min_scalar_weak_literals(nin, op); /* Fill in specified_types from the tuple or string */ const char *bad_type_tup_msg = ( diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 869183956f78..5439ce44dc7a 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -13,7 +13,7 @@ from numpy._core._multiarray_tests import create_custom_field_dtype from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT, - IS_PYSTON, _OLD_PROMOTION) + IS_PYSTON) from itertools import permutations import random @@ -1433,34 +1433,25 @@ class TestPromotion: """Test cases related to more complex DType promotions. 
Further promotion tests are defined in `test_numeric.py` """ - @np._no_nep50_warning() - @pytest.mark.parametrize(["other", "expected", "expected_weak"], - [(2**16-1, np.complex64, None), - (2**32-1, np.complex128, np.complex64), - (np.float16(2), np.complex64, None), - (np.float32(2), np.complex64, None), - (np.longdouble(2), np.complex64, np.clongdouble), + @pytest.mark.parametrize(["other", "expected"], + [(2**16-1, np.complex64), + (2**32-1, np.complex64), + (np.float16(2), np.complex64), + (np.float32(2), np.complex64), + (np.longdouble(2), np.clongdouble), # Base of the double value to sidestep any rounding issues: - (np.longdouble(np.nextafter(1.7e308, 0.)), - np.complex128, np.clongdouble), + (np.longdouble(np.nextafter(1.7e308, 0.)), np.clongdouble), # Additionally use "nextafter" so the cast can't round down: - (np.longdouble(np.nextafter(1.7e308, np.inf)), - np.clongdouble, None), + (np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble), # repeat for complex scalars: - (np.complex64(2), np.complex64, None), - (np.clongdouble(2), np.complex64, np.clongdouble), + (np.complex64(2), np.complex64), + (np.clongdouble(2), np.clongdouble), # Base of the double value to sidestep any rounding issues: - (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), - np.complex128, np.clongdouble), + (np.clongdouble(np.nextafter(1.7e308, 0.) 
* 1j), np.clongdouble), # Additionally use "nextafter" so the cast can't round down: - (np.clongdouble(np.nextafter(1.7e308, np.inf)), - np.clongdouble, None), + (np.clongdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble), ]) - def test_complex_other_value_based(self, - weak_promotion, other, expected, expected_weak): - if weak_promotion and expected_weak is not None: - expected = expected_weak - + def test_complex_other_value_based(self, other, expected): # This would change if we modify the value based promotion min_complex = np.dtype(np.complex64) @@ -1511,22 +1502,11 @@ def test_python_integer_promotion(self, val): @pytest.mark.parametrize(["other", "expected"], [(1, rational), (1., np.float64)]) - @np._no_nep50_warning() - def test_float_int_pyscalar_promote_rational( - self, weak_promotion, other, expected): + def test_float_int_pyscalar_promote_rational(self, other, expected): # Note that rationals are a bit awkward as they promote with float64 # or default ints, but not float16 or uint8/int8 (which looks - # inconsistent here). The new promotion fixes this (partially?) - if not weak_promotion and type(other) == float: - # The float version, checks float16 in the legacy path, which fails - # the integer version seems to check int8 (also), so it can - # pass. - with pytest.raises(TypeError, - match=r".* do not have a common DType"): - np.result_type(other, rational) - else: - assert np.result_type(other, rational) == expected - + # inconsistent here). The new promotion fixed this (partially?) 
+ assert np.result_type(other, rational) == expected assert np.result_type(other, rational(1, 2)) == expected @pytest.mark.parametrize(["dtypes", "expected"], [ diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 0a97693f73b0..f58d6f4ac432 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -308,7 +308,6 @@ def test_einsum_views(self): assert_(b.base is a) assert_equal(b, a.swapaxes(0, 1)) - @np._no_nep50_warning() def check_einsum_sums(self, dtype, do_opt=False): dtype = np.dtype(dtype) # Check various sums. Does many sizes to exercise unrolled loops. diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index fbc1bf6a0a6d..92d08f7f5286 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -3,7 +3,7 @@ import numpy as np from numpy import uint16, float16, float32, float64 -from numpy.testing import assert_, assert_equal, _OLD_PROMOTION, IS_WASM +from numpy.testing import assert_, assert_equal, IS_WASM def assert_raises_fpe(strmatch, callable, *args, **kwargs): @@ -93,7 +93,6 @@ def test_half_conversion_from_string(self, string_dt): @pytest.mark.parametrize("offset", [None, "up", "down"]) @pytest.mark.parametrize("shift", [None, "up", "down"]) @pytest.mark.parametrize("float_t", [np.float32, np.float64]) - @np._no_nep50_warning() def test_half_conversion_rounding(self, float_t, shift, offset): # Assumes that round to even is used during casting. 
max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) @@ -460,8 +459,7 @@ def test_half_ufuncs(self): assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) - @np._no_nep50_warning() - def test_half_coercion(self, weak_promotion): + def test_half_coercion(self): """Test that half gets coerced properly with the other types""" a16 = np.array((1,), dtype=float16) a32 = np.array((1,), dtype=float32) @@ -471,14 +469,12 @@ def test_half_coercion(self, weak_promotion): assert np.power(a16, 2).dtype == float16 assert np.power(a16, 2.0).dtype == float16 assert np.power(a16, b16).dtype == float16 - expected_dt = float32 if weak_promotion else float16 - assert np.power(a16, b32).dtype == expected_dt + assert np.power(a16, b32).dtype == float32 assert np.power(a16, a16).dtype == float16 assert np.power(a16, a32).dtype == float32 - expected_dt = float16 if weak_promotion else float64 - assert np.power(b16, 2).dtype == expected_dt - assert np.power(b16, 2.0).dtype == expected_dt + assert np.power(b16, 2).dtype == float16 + assert np.power(b16, 2.0).dtype == float16 assert np.power(b16, b16).dtype, float16 assert np.power(b16, b32).dtype, float32 assert np.power(b16, a16).dtype, float16 @@ -486,8 +482,7 @@ def test_half_coercion(self, weak_promotion): assert np.power(a32, a16).dtype == float32 assert np.power(a32, b16).dtype == float32 - expected_dt = float32 if weak_promotion else float16 - assert np.power(b32, a16).dtype == expected_dt + assert np.power(b32, a16).dtype == float32 assert np.power(b32, b16).dtype == float32 @pytest.mark.skipif(platform.machine() == "armv5tel", diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index ab800cb5b959..688be5338437 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -17,65 +17,40 @@ from numpy.testing import assert_array_equal, IS_WASM 
-@pytest.fixture(scope="module", autouse=True) -def _weak_promotion_enabled(): - state = np._get_promotion_state() - np._set_promotion_state("weak_and_warn") - yield - np._set_promotion_state(state) - - @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for fp errors") def test_nep50_examples(): - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.uint8(1) + 2 + res = np.uint8(1) + 2 assert res.dtype == np.uint8 - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([1], np.uint8) + np.int64(1) + res = np.array([1], np.uint8) + np.int64(1) assert res.dtype == np.int64 - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([1], np.uint8) + np.array(1, dtype=np.int64) + res = np.array([1], np.uint8) + np.array(1, dtype=np.int64) assert res.dtype == np.int64 - with pytest.warns(UserWarning, match="result dtype changed"): - # Note: For "weak_and_warn" promotion state the overflow warning is - # unfortunately not given (because we use the full array path). 
- with np.errstate(over="raise"): - res = np.uint8(100) + 200 + with pytest.warns(RuntimeWarning, match="overflow"): + res = np.uint8(100) + 200 assert res.dtype == np.uint8 - with pytest.warns(Warning) as recwarn: + with pytest.warns(RuntimeWarning, match="overflow"): res = np.float32(1) + 3e100 - # Check that both warnings were given in the one call: - warning = str(recwarn.pop(UserWarning).message) - assert warning.startswith("result dtype changed") - warning = str(recwarn.pop(RuntimeWarning).message) - assert warning.startswith("overflow") - assert len(recwarn) == 0 # no further warnings assert np.isinf(res) assert res.dtype == np.float32 - # Changes, but we don't warn for it (too noisy) res = np.array([0.1], np.float32) == np.float64(0.1) assert res[0] == False - # Additional test, since the above silences the warning: - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([0.1], np.float32) + np.float64(0.1) + res = np.array([0.1], np.float32) + np.float64(0.1) assert res.dtype == np.float64 - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([1.], np.float32) + np.int64(3) + res = np.array([1.], np.float32) + np.int64(3) assert res.dtype == np.float64 @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) def test_nep50_weak_integers(dtype): # Avoids warning (different code path for scalars) - np._set_promotion_state("weak") scalar_type = np.dtype(dtype).type maxint = int(np.iinfo(dtype).max) @@ -94,7 +69,6 @@ def test_nep50_weak_integers(dtype): @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) def test_nep50_weak_integers_with_inexact(dtype): # Avoids warning (different code path for scalars) - np._set_promotion_state("weak") scalar_type = np.dtype(dtype).type too_big_int = int(np.finfo(dtype).max) * 2 @@ -137,7 +111,6 @@ def test_nep50_weak_integers_with_inexact(dtype): @pytest.mark.parametrize("op", [operator.add, operator.pow]) def test_weak_promotion_scalar_path(op): # Some 
additional paths exercising the weak scalars. - np._set_promotion_state("weak") # Integer path: res = op(np.uint8(3), 5) @@ -154,8 +127,6 @@ def test_weak_promotion_scalar_path(op): def test_nep50_complex_promotion(): - np._set_promotion_state("weak") - with pytest.warns(RuntimeWarning, match=".*overflow"): res = np.complex64(3) + complex(2**300) @@ -163,8 +134,6 @@ def test_nep50_complex_promotion(): def test_nep50_integer_conversion_errors(): - # Do not worry about warnings here (auto-fixture will reset). - np._set_promotion_state("weak") # Implementation for error paths is mostly missing (as of writing) with pytest.raises(OverflowError, match=".*uint8"): np.array([1], np.uint8) + 300 @@ -178,51 +147,24 @@ def test_nep50_integer_conversion_errors(): np.uint8(1) + -1 -def test_nep50_integer_regression(): - # Test the old integer promotion rules. When the integer is too large, - # we need to keep using the old-style promotion. - np._set_promotion_state("legacy") - arr = np.array(1) - assert (arr + 2**63).dtype == np.float64 - assert (arr[()] + 2**63).dtype == np.float64 - - def test_nep50_with_axisconcatenator(): - # I promised that this will be an error in the future in the 1.25 - # release notes; test this (NEP 50 opt-in makes the deprecation an error). - np._set_promotion_state("weak") - + # Concatenate/r_ does not promote, so this has to error: with pytest.raises(OverflowError): np.r_[np.arange(5, dtype=np.int8), 255] @pytest.mark.parametrize("ufunc", [np.add, np.power]) -@pytest.mark.parametrize("state", ["weak", "weak_and_warn"]) -def test_nep50_huge_integers(ufunc, state): +def test_nep50_huge_integers(ufunc): # Very large integers are complicated, because they go to uint64 or - # object dtype. This tests covers a few possible paths (some of which - # cannot give the NEP 50 warnings). - np._set_promotion_state(state) - + # object dtype. This tests covers a few possible paths. 
with pytest.raises(OverflowError): ufunc(np.int64(0), 2**63) # 2**63 too large for int64 - if state == "weak_and_warn": - with pytest.warns(UserWarning, - match="result dtype changed.*float64.*uint64"): - with pytest.raises(OverflowError): - ufunc(np.uint64(0), 2**64) - else: - with pytest.raises(OverflowError): - ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 + with pytest.raises(OverflowError): + ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 # However, 2**63 can be represented by the uint64 (and that is used): - if state == "weak_and_warn": - with pytest.warns(UserWarning, - match="result dtype changed.*float64.*uint64"): - res = ufunc(np.uint64(1), 2**63) - else: - res = ufunc(np.uint64(1), 2**63) + res = ufunc(np.uint64(1), 2**63) assert res.dtype == np.uint64 assert res == ufunc(1, 2**63, dtype=object) @@ -240,14 +182,10 @@ def test_nep50_huge_integers(ufunc, state): def test_nep50_in_concat_and_choose(): - np._set_promotion_state("weak_and_warn") - - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.concatenate([np.float32(1), 1.], axis=None) + res = np.concatenate([np.float32(1), 1.], axis=None) assert res.dtype == "float32" - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.choose(1, [np.float32(1), 1.]) + res = np.choose(1, [np.float32(1), 1.]) assert res.dtype == "float32" @@ -263,8 +201,6 @@ def test_nep50_in_concat_and_choose(): ]) @hypothesis.given(data=strategies.data()) def test_expected_promotion(expected, dtypes, optional_dtypes, data): - np._set_promotion_state("weak") - # Sample randomly while ensuring "dtypes" is always present: optional = data.draw(strategies.lists( strategies.sampled_from(dtypes + optional_dtypes))) @@ -284,8 +220,6 @@ def test_expected_promotion(expected, dtypes, optional_dtypes, data): [operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt]) def test_integer_comparison(sctype, other_val, comp): - 
np._set_promotion_state("weak") - # Test that comparisons with integers (especially out-of-bound) ones # works correctly. val_obj = 10 @@ -307,8 +241,6 @@ def test_integer_comparison(sctype, other_val, comp): [np.equal, np.not_equal, np.less_equal, np.less, np.greater_equal, np.greater]) def test_integer_integer_comparison(comp): - np._set_promotion_state("weak") - # Test that the NumPy comparison ufuncs work with large Python integers assert comp(2**200, -2**200) == comp(2**200, -2**200, dtype=object) @@ -342,26 +274,3 @@ def test_oob_creation(sctype, create): assert create(sctype, iinfo.min) == iinfo.min assert create(sctype, iinfo.max) == iinfo.max - - -@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") -def test_thread_local_promotion_state(): - b = threading.Barrier(2) - - def legacy_no_warn(): - np._set_promotion_state("legacy") - b.wait() - assert np._get_promotion_state() == "legacy" - - def weak_warn(): - np._set_promotion_state("weak") - b.wait() - assert np._get_promotion_state() == "weak" - - task1 = threading.Thread(target=legacy_no_warn) - task2 = threading.Thread(target=weak_warn) - - task1.start() - task2.start() - task1.join() - task2.join() diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 7cec42f67dde..ec42590d29ac 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1488,21 +1488,22 @@ def test_can_cast_structured_to_simple(self): assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', casting='unsafe')) - @pytest.mark.xfail(np._get_promotion_state() != "legacy", - reason="NEP 50: no python int/float/complex support (yet)") def test_can_cast_values(self): - # gh-5917 - for dt in sctypes['int'] + sctypes['uint']: - ii = np.iinfo(dt) - assert_(np.can_cast(ii.min, dt)) - assert_(np.can_cast(ii.max, dt)) - assert_(not np.can_cast(ii.min - 1, dt)) - assert_(not np.can_cast(ii.max + 1, dt)) - - for dt in sctypes['float']: - fi = np.finfo(dt) - 
assert_(np.can_cast(fi.min, dt)) - assert_(np.can_cast(fi.max, dt)) + # With NumPy 2 and NEP 50, can_cast errors on Python scalars. We could + # define this as (usually safe) at some point, and already do so + # in `copyto` and ufuncs (but there an error is raised if the integer + # is out of bounds and a warning for out-of-bound floats). + # Raises even for unsafe, previously checked within range (for floats + # that was approximately whether it would overflow to inf). + with pytest.raises(TypeError): + np.can_cast(4, "int8", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4.0, "float64", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4j, "complex128", casting="unsafe") + @pytest.mark.parametrize("dtype", list("?bhilqBHILQefdgFDG") + [rational]) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 35350b01ef3a..1060072d68ba 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -1071,7 +1071,6 @@ def test_longdouble_complex(): @pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names) @pytest.mark.parametrize("subtype", [float, int, complex, np.float16]) -@np._no_nep50_warning() def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp): # This tests that python scalar subclasses behave like a float64 (if they # don't override it). 
diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 26b6a1aa5c27..101273787c62 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -537,9 +537,6 @@ def test_partial_signature_mismatch_with_cache(self): with pytest.raises(TypeError): np.add(np.float16(1), np.uint64(2), sig=("e", "d", None)) - @pytest.mark.xfail(np._get_promotion_state() != "legacy", - reason="NEP 50 impl breaks casting checks when `dtype=` is used " - "together with python scalars.") def test_use_output_signature_for_all_arguments(self): # Test that providing only `dtype=` or `signature=(None, None, dtype)` # is sufficient if falling back to a homogeneous signature works. @@ -2749,7 +2746,6 @@ def test_ufunc_types(ufunc): @pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) if isinstance(getattr(np, x), np.ufunc)]) -@np._no_nep50_warning() def test_ufunc_noncontiguous(ufunc): ''' Check that contiguous and non-contiguous calls to ufuncs diff --git a/numpy/conftest.py b/numpy/conftest.py index 677537e206f0..5662024b2e71 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -150,23 +150,6 @@ def env_setup(monkeypatch): monkeypatch.setenv('PYTHONHASHSEED', '0') -@pytest.fixture(params=[True, False]) -def weak_promotion(request): - """ - Fixture to ensure "legacy" promotion state or change it to use the new - weak promotion (plus warning). `old_promotion` should be used as a - parameter in the function. 
- """ - state = numpy._get_promotion_state() - if request.param: - numpy._set_promotion_state("weak_and_warn") - else: - numpy._set_promotion_state("legacy") - - yield request.param - numpy._set_promotion_state(state) - - if HAVE_SCPDT: @contextmanager diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index bc3ce6409f1c..f1adb24aaf12 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3193,8 +3193,6 @@ def test_linear_interpolation(self, input_dtype, expected_dtype): expected_dtype = np.dtype(expected_dtype) - if np._get_promotion_state() == "legacy": - expected_dtype = np.promote_types(expected_dtype, np.float64) arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype) weights = np.ones_like(arr) if weighted else None diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index ffd9550e7c1d..0745654a0730 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -1855,8 +1855,8 @@ def test_basic_property(self, shape, dtype, upper): b = np.matmul(c.transpose(t).conj(), c) else: b = np.matmul(c, c.transpose(t).conj()) - with np._no_nep50_warning(): - atol = 500 * a.shape[0] * np.finfo(dtype).eps + + atol = 500 * a.shape[0] * np.finfo(dtype).eps assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}') # Check diag(L or U) is real and positive diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index f22df0ddaab8..2bac090a3a3e 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -40,7 +40,7 @@ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', - '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE', 'NOGIL_BUILD', + 'IS_MUSL', '_SUPPORTS_SVE', 'NOGIL_BUILD', 'IS_EDITABLE', 'run_threaded', ] 
@@ -60,8 +60,6 @@ class KnownFailureException(Exception): HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 -_OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy' - IS_MUSL = False # alternate way is # from packaging.tags import sys_tags @@ -466,7 +464,6 @@ def print_assert_equal(test_string, actual, desired): raise AssertionError(msg.getvalue()) -@np._no_nep50_warning() def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): """ Raises an AssertionError if two items are not equal up to desired @@ -593,7 +590,6 @@ def _build_err_msg(): raise AssertionError(_build_err_msg()) -@np._no_nep50_warning() def assert_approx_equal(actual, desired, significant=7, err_msg='', verbose=True): """ @@ -694,7 +690,6 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', raise AssertionError(msg) -@np._no_nep50_warning() def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6, equal_nan=True, equal_inf=True, *, strict=False, names=('ACTUAL', 'DESIRED')): @@ -1027,7 +1022,6 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, strict=strict) -@np._no_nep50_warning() @_rename_parameter(['x', 'y'], ['actual', 'desired'], dep_version='2.0.0') def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', verbose=True):