From f4889614194fc50dd48b0e56ede1c223b5202566 Mon Sep 17 00:00:00 2001 From: SabrinaSimao Date: Fri, 12 Jun 2020 14:13:23 -0300 Subject: [PATCH 0001/1270] unify the docs for np.transpose and ndarray.transpose --- numpy/core/_add_newdocs.py | 3 +++ numpy/core/fromnumeric.py | 1 + 2 files changed, 4 insertions(+) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index d0ed3d381111..a83037132bc7 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -3998,6 +3998,8 @@ Returns a view of the array with axes transposed. + Refer to `numpy.transpose` for full documentation. + For a 1-D array this has no effect, as a transposed vector is simply the same vector. To convert a 1-D array into a 2D column vector, an additional dimension must be added. `np.atleast2d(a).T` achieves this, as does @@ -4027,6 +4029,7 @@ See Also -------- + ndarray.transpose : Method to reverse or permute the axes of an array. ndarray.T : Array property returning the array transposed. ndarray.reshape : Give a new shape to an array without changing its data. diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 2b88ccedfe65..cbab9424bccb 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -624,6 +624,7 @@ def transpose(a, axes=None): See Also -------- + ndarray.transpose : Method to reverse or permute the axes of an array. moveaxis argsort From dd5fbac3ce8bc1128dc7db2503df694f3cf1b51c Mon Sep 17 00:00:00 2001 From: SabrinaSimao Date: Fri, 12 Jun 2020 18:22:47 -0300 Subject: [PATCH 0002/1270] DOC: changed the way the link is being used from transpose to ndarray.transpose I feel like the `ndarray.transpose` has more details, and it's also called by `transpose`, so i changed the way the link between them was being used --- numpy/core/_add_newdocs.py | 4 +--- numpy/core/fromnumeric.py | 4 +++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index a83037132bc7..93be30b39fcc 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -3998,8 +3998,6 @@ Returns a view of the array with axes transposed. - Refer to `numpy.transpose` for full documentation. - For a 1-D array this has no effect, as a transposed vector is simply the same vector. To convert a 1-D array into a 2D column vector, an additional dimension must be added. `np.atleast2d(a).T` achieves this, as does @@ -4029,7 +4027,7 @@ See Also -------- - ndarray.transpose : Method to reverse or permute the axes of an array. + transpose : Equivalent function ndarray.T : Array property returning the array transposed. ndarray.reshape : Give a new shape to an array without changing its data. diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index cbab9424bccb..8c24030f2c87 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -605,6 +605,8 @@ def transpose(a, axes=None): For an array a with two axes, transpose(a) gives the matrix transpose. + Refer to `numpy.ndarray.transpose` for full documentation. + Parameters ---------- a : array_like @@ -624,7 +626,7 @@ def transpose(a, axes=None): See Also -------- - ndarray.transpose : Method to reverse or permute the axes of an array. 
+ ndarray.transpose : Equivalent method moveaxis argsort From 7d7e446fcbeeff70d905bde2eb0264a797488280 Mon Sep 17 00:00:00 2001 From: marload Date: Tue, 14 Jul 2020 12:51:37 +0900 Subject: [PATCH 0003/1270] DOC: Fix Typo --- numpy/core/_add_newdocs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 2461971ebb4b..a3c404a64d99 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -1122,13 +1122,13 @@ add_newdoc('numpy.core.multiarray', 'fromiter', """ - fromiter(iterable, dtype, count=-1) + fromiter(iter, dtype, count=-1) Create a new 1-dimensional array from an iterable object. Parameters ---------- - iterable : iterable object + iter : iterable object An iterable object providing data for the array. dtype : data-type The data-type of the returned array. From 29c0bb32aed55ab4c353113d622c77d0b8541247 Mon Sep 17 00:00:00 2001 From: scimax Date: Sat, 1 Aug 2020 16:32:50 +0200 Subject: [PATCH 0004/1270] Phase unwrapping generalized Phase unwrapping generalized to arbitrary interval size, such as 360 for phase in degree instead of radian. Also integer unwrapping is supported, but the return values are floats. --- numpy/lib/function_base.py | 51 ++++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 16 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 42ea8c7a785d..804f7bef3028 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1487,37 +1487,44 @@ def _unwrap_dispatcher(p, discont=None, axis=None): @array_function_dispatch(_unwrap_dispatcher) -def unwrap(p, discont=pi, axis=-1): - """ - Unwrap by changing deltas between values to 2*pi complement. +# def _unwrap_dispatcher(p, discont=None, axis=None, *, min_val=None, max_val=None): +# return (p,) - Unwrap radian phase `p` by changing absolute jumps greater than - `discont` to their 2*pi complement along the given axis. +# @array_function_dispatch(_unwrap_dispatcher) +def unwrap(p, interval_size= 2*pi, discont=None, axis=-1): + """ + Unwrap by changing deltas between values to complement. + + For the default case where `interval_size= 2*pi`, `discont=pi`, + It unwraps radian phase `p` by changing absolute jumps greater + than `discont` to their 2*pi complement along the given axis. + + In general it unwrapps a signal `p` by changing absolute jumps + greater than `discont` to their `interval_size` complementary values. + Parameters ---------- p : array_like Input array. + interval_size: float, optional + size of the range over which the input wraps. discont : float, optional - Maximum discontinuity between values, default is ``pi``. + Maximum discontinuity between values, default is ``interval_size/2``. axis : int, optional Axis along which unwrap will operate, default is the last axis. - Returns ------- out : ndarray Output array. - See Also -------- rad2deg, deg2rad - Notes ----- - If the discontinuity in `p` is smaller than ``pi``, but larger than - `discont`, no unwrapping is done because taking the 2*pi complement - would only make the discontinuity larger. - + If the discontinuity in `p` is smaller than ``interval_size/2``, + but larger than `discont`, no unwrapping is done because taking + the complement would only make the discontinuity larger. Examples -------- >>> phase = np.linspace(0, np.pi, num=5) @@ -1526,16 +1533,28 @@ def unwrap(p, discont=pi, axis=-1): array([ 0. 
, 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary - + >>> unwrap([0, 1, 2, -1, 0], interval_size=4) + array([0., 1., 2., 3., 4.]) + >>> unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], interval_size=6) + array([1., 2., 3., 4., 5., 6., 7., 8., 9.]) + >>> unwrap([2, 3, 4, 5, 2, 3, 4, 5], interval_size=4) + array([2., 3., 4., 5., 6., 7., 8., 9.]) + >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 + >>> unwrap(phase_deg, interval_size=360) + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) """ + if discont is None: + discont = interval_size/2 p = asarray(p) nd = p.ndim dd = diff(p, axis=axis) slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) slice1 = tuple(slice1) - ddmod = mod(dd + pi, 2*pi) - pi - _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) + ddmod = mod(dd + interval_size/2, interval_size) - interval_size/2 + _nx.copyto(ddmod, interval_size/2, where=(ddmod == -interval_size/2) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') From f08059353890eff34c993d8bd5a8291f629f8fc7 Mon Sep 17 00:00:00 2001 From: scimax Date: Sun, 2 Aug 2020 17:31:25 +0200 Subject: [PATCH 0005/1270] Update numpy/lib/function_base.py unwrap function signature compatible Co-authored-by: Eric Wieser --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 804f7bef3028..cc8e1a191da0 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1492,7 +1492,7 @@ def _unwrap_dispatcher(p, discont=None, axis=None): # @array_function_dispatch(_unwrap_dispatcher) -def unwrap(p, interval_size= 2*pi, discont=None, axis=-1): +def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): """ Unwrap by changing deltas between values to complement. From dfee8851c65276907eedd084ff40334ea7d46865 Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Sun, 2 Aug 2020 18:10:01 +0200 Subject: [PATCH 0006/1270] Tests added according to #14877, obsolete comments removed. --- numpy/lib/function_base.py | 5 ----- numpy/lib/tests/test_function_base.py | 18 +++++++++++++++++- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index cc8e1a191da0..16453ffbe7cd 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1487,11 +1487,6 @@ def _unwrap_dispatcher(p, discont=None, axis=None): @array_function_dispatch(_unwrap_dispatcher) -# def _unwrap_dispatcher(p, discont=None, axis=None, *, min_val=None, max_val=None): -# return (p,) - - -# @array_function_dispatch(_unwrap_dispatcher) def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): """ Unwrap by changing deltas between values to complement. 
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index eb2fc3311aca..878fb3de1070 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1756,7 +1756,23 @@ def test_simple(self): assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) # check that unwrap maintains continuity assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) - + + def test_minmax(self): + # check that unwrap removes jumps greater that 255 + assert_array_equal(unwrap([1, 1 + 256], interval_size=255), [1, 2]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 1000, interval_size=255)) < 255)) + # check simple case + simple_seq = np.array([0, 75, 150, 225, 300]) + wrap_seq = np.mod(simple_seq, 255) + assert_array_equal(unwrap(wrap_seq, interval_size=255), simple_seq) + # check custom discont value + uneven_seq = np.array([0, 75, 150, 225, 300, 430]) + wrap_uneven = np.mod(uneven_seq, 250) + no_discont = unwrap(wrap_uneven, interval_size=250) + assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180]) + sm_discont = unwrap(wrap_uneven, interval_size=250, discont=140) + assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430]) class TestFilterwindows: From 6131310089d6eaca6e52db0e2a1406f6f5a77096 Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Mon, 3 Aug 2020 09:11:44 +0200 Subject: [PATCH 0007/1270] Release Note --- doc/release/upcoming_changes/16987.improvement.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 doc/release/upcoming_changes/16987.improvement.rst diff --git a/doc/release/upcoming_changes/16987.improvement.rst b/doc/release/upcoming_changes/16987.improvement.rst new file mode 100644 index 000000000000..28acc1ce2fa5 --- /dev/null +++ b/doc/release/upcoming_changes/16987.improvement.rst @@ -0,0 +1,14 @@ +Arbitrary `interval_size` option for ``numpy.unwrap`` +----------------------------------------------------- +The size of the interval, over which phases are unwraped, is not restricted to `2 * pi` +anymore. This is especially useful for unwrapping degrees but can also be used for other +intervals. + +.. 
code:: python + + >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 + >>> unwrap(phase_deg, interval_size=360) + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) + From 0b7ad2c5185aaea651181ac3815def7e1c2f6247 Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Mon, 3 Aug 2020 09:25:37 +0200 Subject: [PATCH 0008/1270] dispatcher fixed --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 16453ffbe7cd..c5541cc862b2 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1482,7 +1482,7 @@ def angle(z, deg=False): return a -def _unwrap_dispatcher(p, discont=None, axis=None): +def _unwrap_dispatcher(p, discont=None, axis=None, *, interval_size=2*pi): return (p,) From 2c0d4b8e4411ca046cbf92aaabe3e860499f0e21 Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Mon, 3 Aug 2020 16:54:26 +0200 Subject: [PATCH 0009/1270] Hybrid solution including interval boundaries, and keeping backward compatibility without boundary --- numpy/lib/function_base.py | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index c5541cc862b2..772f44fcf264 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1482,12 +1482,12 @@ def angle(z, deg=False): return a -def _unwrap_dispatcher(p, discont=None, axis=None, *, interval_size=2*pi): +def _unwrap_dispatcher(p, discont=None, axis=None, *, interval_size=2*pi, min_val=None, max_val=None): return (p,) @array_function_dispatch(_unwrap_dispatcher) -def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): +def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi, min_val=None, max_val=None): """ Unwrap by changing deltas between values to complement. @@ -1502,12 +1502,25 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): ---------- p : array_like Input array. - interval_size: float, optional - size of the range over which the input wraps. discont : float, optional Maximum discontinuity between values, default is ``interval_size/2``. axis : int, optional Axis along which unwrap will operate, default is the last axis. + interval_size: float, optional + Size of the range over which the input wraps. By default, it is 2 pi. + If ``min_val`` and ``max_val`` are given, ``interval_size`` is ignored + and the interval size is ``max_val - min_val``. + min_val, max_val: float, optional + Boundaries of the interval over which the input array is expected to + wrap. By default, they are ``None`` and the interval is considered as + ``[-interval_size, interval_size]``. In case the first value of the + phase input array, ``p[0]``, is outside of the interval + ``[min_val, max_val]`` it will be corrected by an integral multiple of + the interval size such that it will be within the + boundaries. + Both boundaries require each other. If only one boundary is + provided without the other, it will be ignored. 
+ Returns ------- out : ndarray @@ -1540,11 +1553,20 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): 180., 220., 260., 300., 340., 380., 420., 460., 500., 540.]) """ - if discont is None: - discont = interval_size/2 p = asarray(p) nd = p.ndim dd = diff(p, axis=axis) + offset = 0 + if (not min_val is None) and (not max_val is None): + interval_size = max_val - min_val + slice0list = [slice(None, None)]*nd # full slices + slice0list[axis] = 0 + slice0 = tuple(slice0list) + offset_mul = (p[slice0] - min_val)//interval_size + slice0list[axis] = None + offset = -offset_mul[tuple(slice0list)]*interval_size + if discont is None: + discont = interval_size/2 slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) slice1 = tuple(slice1) @@ -1552,8 +1574,9 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): _nx.copyto(ddmod, interval_size/2, where=(ddmod == -interval_size/2) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) + p += offset up = array(p, copy=True, dtype='d') - up[slice1] = p[slice1] + ph_correct.cumsum(axis) + up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up From f14555480722147ff301b5beed038e54b985a345 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 11 Aug 2020 09:52:36 +0800 Subject: [PATCH 0010/1270] use usimd to reconstruct PackInner --- numpy/core/setup.py | 1 + numpy/core/src/common/simd/avx2/operators.h | 3 + numpy/core/src/common/simd/neon/operators.h | 9 ++ numpy/core/src/common/simd/sse/operators.h | 3 + numpy/core/src/multiarray/compiled_base.c | 150 +----------------- .../src/multiarray/compiled_base.dispatch.c | 103 ++++++++++++ numpy/core/src/multiarray/compiled_base.h | 23 +++ 7 files changed, 148 insertions(+), 144 deletions(-) create mode 100644 numpy/core/src/multiarray/compiled_base.dispatch.c diff --git a/numpy/core/setup.py b/numpy/core/setup.py index aede12080017..39bfc56cad09 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -839,6 +839,7 @@ def get_mathlib_info(*args): join('src', 'multiarray', 'buffer.c'), join('src', 'multiarray', 'calculation.c'), join('src', 'multiarray', 'compiled_base.c'), + join('src', 'multiarray', 'compiled_base.dispatch.c'), join('src', 'multiarray', 'common.c'), join('src', 'multiarray', 'convert.c'), join('src', 'multiarray', 'convert_datatype.c'), diff --git a/numpy/core/src/common/simd/avx2/operators.h b/numpy/core/src/common/simd/avx2/operators.h index c1d30413fe5e..d9d26d2625a5 100644 --- a/numpy/core/src/common/simd/avx2/operators.h +++ b/numpy/core/src/common/simd/avx2/operators.h @@ -197,4 +197,7 @@ NPY_FINLINE __m256i npyv_cmpge_u32(__m256i a, __m256i b) #define npyv_cmpge_f32(A, B) _mm256_castps_si256(_mm256_cmp_ps(A, B, _CMP_GE_OQ)) #define npyv_cmpge_f64(A, B) _mm256_castpd_si256(_mm256_cmp_pd(A, B, _CMP_GE_OQ)) +// Create mask from the most significant bit of each 8-bit element +#define npyv_movemask_u8(a) _mm256_movemask_epi8(a) + #endif // _NPY_SIMD_AVX2_OPERATORS_H diff --git a/numpy/core/src/common/simd/neon/operators.h b/numpy/core/src/common/simd/neon/operators.h index c1ad4ba12a6f..970bef1c550e 100644 --- a/numpy/core/src/common/simd/neon/operators.h +++ b/numpy/core/src/common/simd/neon/operators.h @@ -215,4 +215,13 @@ #define npyv_cmple_f32(A, B) npyv_cmpge_f32(B, A) #define npyv_cmple_f64(A, B) npyv_cmpge_f64(B, A) +// Create mask from the most significant bit of each 8-bit element +NPY_INLINE int32_t npyv_movemask_u8(uint8x16_t input) +{ + int8x8_t m0 = vcreate_s8(0x0706050403020100ULL); + uint8x16_t v0 = 
vshlq_u8(vshrq_n_u8(input, 7), vcombine_s8(m0, m0)); + uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(v0))); + return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 8); +} + #endif // _NPY_SIMD_NEON_OPERATORS_H diff --git a/numpy/core/src/common/simd/sse/operators.h b/numpy/core/src/common/simd/sse/operators.h index 6e32ca4fd9b3..52cd0ad393d8 100644 --- a/numpy/core/src/common/simd/sse/operators.h +++ b/numpy/core/src/common/simd/sse/operators.h @@ -255,4 +255,7 @@ NPY_FINLINE __m128i npyv_shr_s64(__m128i a, int c) #define npyv_cmpge_f32(a, b) _mm_castps_si128(_mm_cmpge_ps(a, b)) #define npyv_cmpge_f64(a, b) _mm_castpd_si128(_mm_cmpge_pd(a, b)) +// Create mask from the most significant bit of each 8-bit element +#define npyv_movemask_u8(a) _mm_movemask_epi8(a) + #endif // _NPY_SIMD_SSE_OPERATORS_H diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index a8e4aa78907b..68e2fd2645cc 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1,19 +1,4 @@ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include -#include -#include - -#define _MULTIARRAYMODULE -#include "numpy/arrayobject.h" -#include "numpy/npy_3kcompat.h" -#include "numpy/npy_math.h" -#include "npy_config.h" -#include "templ_common.h" /* for npy_mul_with_overflow_intp */ -#include "lowlevel_strided_loops.h" /* for npy_bswap8 */ -#include "alloc.h" -#include "ctors.h" -#include "common.h" - +#include "compiled_base.h" /* * Returns -1 if the array is monotonic decreasing, @@ -1500,26 +1485,6 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) Py_RETURN_NONE; } -#if defined NPY_HAVE_SSE2_INTRINSICS -#include -#endif - -#ifdef NPY_HAVE_NEON - typedef npy_uint64 uint64_unaligned __attribute__((aligned(16))); - static NPY_INLINE int32_t - sign_mask(uint8x16_t input) - { - int8x8_t m0 = vcreate_s8(0x0706050403020100ULL); - uint8x16_t v0 = vshlq_u8(vshrq_n_u8(input, 7), vcombine_s8(m0, m0)); - uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(v0))); - return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 8); - } -#endif -/* - * This function packs boolean values in the input array into the bits of a - * byte array. Truth values are determined as usual: 0 is false, everything - * else is true. - */ static NPY_INLINE void pack_inner(const char *inptr, npy_intp element_size, /* in bytes */ @@ -1530,114 +1495,11 @@ pack_inner(const char *inptr, npy_intp out_stride, char order) { - /* - * Loop through the elements of inptr. - * Determine whether or not it is nonzero. - * Yes: set corresponding bit (and adjust build value) - * No: move on - * Every 8th value, set the value of build and increment the outptr - */ - npy_intp index = 0; - int remain = n_in % 8; /* uneven bits */ - -#if defined NPY_HAVE_SSE2_INTRINSICS && defined HAVE__M_FROM_INT64 - if (in_stride == 1 && element_size == 1 && n_out > 2) { - __m128i zero = _mm_setzero_si128(); - /* don't handle non-full 8-byte remainder */ - npy_intp vn_out = n_out - (remain ? 
1 : 0); - vn_out -= (vn_out & 1); - for (index = 0; index < vn_out; index += 2) { - unsigned int r; - npy_uint64 a = *(npy_uint64*)inptr; - npy_uint64 b = *(npy_uint64*)(inptr + 8); - if (order == 'b') { - a = npy_bswap8(a); - b = npy_bswap8(b); - } - - /* note x86 can load unaligned */ - __m128i v = _mm_set_epi64(_m_from_int64(b), _m_from_int64(a)); - /* false -> 0x00 and true -> 0xFF (there is no cmpneq) */ - v = _mm_cmpeq_epi8(v, zero); - v = _mm_cmpeq_epi8(v, zero); - /* extract msb of 16 bytes and pack it into 16 bit */ - r = _mm_movemask_epi8(v); - /* store result */ - memcpy(outptr, &r, 1); - outptr += out_stride; - memcpy(outptr, (char*)&r + 1, 1); - outptr += out_stride; - inptr += 16; - } - } -#elif defined NPY_HAVE_NEON - if (in_stride == 1 && element_size == 1 && n_out > 2) { - /* don't handle non-full 8-byte remainder */ - npy_intp vn_out = n_out - (remain ? 1 : 0); - vn_out -= (vn_out & 1); - for (index = 0; index < vn_out; index += 2) { - unsigned int r; - npy_uint64 a = *((uint64_unaligned*)inptr); - npy_uint64 b = *((uint64_unaligned*)(inptr + 8)); - if (order == 'b') { - a = npy_bswap8(a); - b = npy_bswap8(b); - } - uint64x2_t v = vcombine_u64(vcreate_u64(a), vcreate_u64(b)); - uint64x2_t zero = vdupq_n_u64(0); - /* false -> 0x00 and true -> 0xFF */ - v = vreinterpretq_u64_u8(vmvnq_u8(vceqq_u8(vreinterpretq_u8_u64(v), vreinterpretq_u8_u64(zero)))); - /* extract msb of 16 bytes and pack it into 16 bit */ - uint8x16_t input = vreinterpretq_u8_u64(v); - r = sign_mask(input); - /* store result */ - memcpy(outptr, &r, 1); - outptr += out_stride; - memcpy(outptr, (char*)&r + 1, 1); - outptr += out_stride; - inptr += 16; - } - } -#endif - - if (remain == 0) { /* assumes n_in > 0 */ - remain = 8; - } - /* Don't reset index. Just handle remainder of above block */ - for (; index < n_out; index++) { - unsigned char build = 0; - int i, maxi; - npy_intp j; - - maxi = (index == n_out - 1) ? remain : 8; - if (order == 'b') { - for (i = 0; i < maxi; i++) { - build <<= 1; - for (j = 0; j < element_size; j++) { - build |= (inptr[j] != 0); - } - inptr += in_stride; - } - if (index == n_out - 1) { - build <<= 8 - remain; - } - } - else - { - for (i = 0; i < maxi; i++) { - build >>= 1; - for (j = 0; j < element_size; j++) { - build |= (inptr[j] != 0) ? 128 : 0; - } - inptr += in_stride; - } - if (index == n_out - 1) { - build >>= 8 - remain; - } - } - *outptr = (char)build; - outptr += out_stride; - } + #ifndef NPY_DISABLE_OPTIMIZATION + #include "compiled_base.dispatch.h" + #endif + NPY_CPU_DISPATCH_CALL(return compiled_base_pack_inner, + (inptr, element_size, n_in, in_stride, outptr, n_out, out_stride, order)) } static PyObject * diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c new file mode 100644 index 000000000000..e914816ade28 --- /dev/null +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -0,0 +1,103 @@ +/** + * @targets $maxopt baseline + * SSE2 + * VSX VSX2 + * NEON ASIMDDP + */ +#include "compiled_base.h" + +/* + * This function packs boolean values in the input array into the bits of a + * byte array. Truth values are determined as usual: 0 is false, everything + * else is true. + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) +(const char *inptr, npy_intp element_size, npy_intp n_in, npy_intp in_stride, char *outptr, npy_intp n_out, npy_intp out_stride, char order) +{ + /* + * Loop through the elements of inptr. + * Determine whether or not it is nonzero. 
+ * Yes: set corresponding bit (and adjust build value) + * No: move on + * Every 8th value, set the value of build and increment the outptr + */ + npy_intp index = 0; + int remain = n_in % 8; /* uneven bits */ + +#if defined NPY_SIMD + if (in_stride == 1 && element_size == 1 && n_out > 2) { + npyv_u64 zero = npyv_zero_u64(); + /* don't handle non-full 8-byte remainder */ + npy_intp vn_out = n_out - (remain ? 1 : 0); + vn_out -= (vn_out & 1); + const int vstep = npyv_nlanes_u64; + npy_uint64 a[4]; + for (index = 0; index < vn_out; index += vstep) { + unsigned int r; + for(int i = 0; i < vstep; i++) { + a[i] = *(npy_uint64*)(inptr + 8 * i); + if (order == 'b') { + a[i] = npy_bswap8(a[i]); + } + } + /* note x86 can load unaligned */ + npyv_u64 v; + if (vstep == 4) { + v = npyv_setf_u64(a[3], a[2], a[1], a[0]); + } else { + v = npyv_setf_u64(a[1], a[0]); + } + /* false -> 0x00 and true -> 0xFF (there is no cmpneq) */ + v = npyv_cmpeq_u8(v, zero); + v = npyv_cmpeq_u8(v, zero); + /* extract msb of 16 bytes and pack it into 16 bit */ + r = npyv_movemask_u8(v); + /* store result */ + for (int i = 0; i < vstep; i++) { + memcpy(outptr, (char*)&r + i, 1); + outptr += out_stride; + } + inptr += 8 * vstep; + } + } +#endif + + if (remain == 0) { /* assumes n_in > 0 */ + remain = 8; + } + /* Don't reset index. Just handle remainder of above block */ + for (; index < n_out; index++) { + unsigned char build = 0; + int i, maxi; + npy_intp j; + + maxi = (index == n_out - 1) ? remain : 8; + if (order == 'b') { + for (i = 0; i < maxi; i++) { + build <<= 1; + for (j = 0; j < element_size; j++) { + build |= (inptr[j] != 0); + } + inptr += in_stride; + } + if (index == n_out - 1) { + build <<= 8 - remain; + } + } + else + { + for (i = 0; i < maxi; i++) { + build >>= 1; + for (j = 0; j < element_size; j++) { + build |= (inptr[j] != 0) ? 
128 : 0; + } + inptr += in_stride; + } + if (index == n_out - 1) { + build >>= 8 - remain; + } + } + *outptr = (char)build; + outptr += out_stride; + } +} \ No newline at end of file diff --git a/numpy/core/src/multiarray/compiled_base.h b/numpy/core/src/multiarray/compiled_base.h index 082139910717..632ac0550d5c 100644 --- a/numpy/core/src/multiarray/compiled_base.h +++ b/numpy/core/src/multiarray/compiled_base.h @@ -1,6 +1,29 @@ #ifndef _NPY_PRIVATE__COMPILED_BASE_H_ #define _NPY_PRIVATE__COMPILED_BASE_H_ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#include +#include +#include + +#define _MULTIARRAYMODULE +#include "numpy/arrayobject.h" +#include "numpy/npy_3kcompat.h" #include +#include "numpy/npy_math.h" +#include "npy_config.h" +#include "templ_common.h" /* for npy_mul_with_overflow_intp */ +#include "lowlevel_strided_loops.h" /* for npy_bswap8 */ +#include "alloc.h" +#include "simd/simd.h" +#include "ctors.h" +#include "common.h" + +#ifndef NPY_DISABLE_OPTIMIZATION + #include "compiled_base.dispatch.h" +#endif + +NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void compiled_base_pack_inner, +(const char *inptr, npy_intp element_size, npy_intp n_in, npy_intp in_stride, char *outptr, npy_intp n_out, npy_intp out_stride, char order)) NPY_NO_EXPORT PyObject * arr_insert(PyObject *, PyObject *, PyObject *); From 24afdab04507d92b47c7fba48f71b0ba0b012e28 Mon Sep 17 00:00:00 2001 From: scimax Date: Mon, 17 Aug 2020 22:19:46 +0200 Subject: [PATCH 0011/1270] shorter slice Co-authored-by: Eric Wieser --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 772f44fcf264..88926ff5bf06 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1559,7 +1559,7 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi, min_val=None, max_va offset = 0 if (not min_val is None) and (not max_val is None): interval_size = max_val - min_val - slice0list = [slice(None, None)]*nd # full slices + slice0list = [slice(None)]*nd # full slices slice0list[axis] = 0 slice0 = tuple(slice0list) offset_mul = (p[slice0] - min_val)//interval_size From 5baccbc3f3171b2e8b7787e3d889dfca062c6c38 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 18 Aug 2020 10:58:28 +0800 Subject: [PATCH 0012/1270] add neon adapt --- numpy/core/src/common/simd/avx2/conversion.h | 4 ++++ numpy/core/src/common/simd/avx512/conversion.h | 4 ++++ numpy/core/src/common/simd/neon/conversion.h | 4 ++++ numpy/core/src/common/simd/sse/conversion.h | 4 ++++ numpy/core/src/common/simd/vsx/conversion.h | 4 ++++ numpy/core/src/multiarray/compiled_base.dispatch.c | 6 +++--- 6 files changed, 23 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/conversion.h b/numpy/core/src/common/simd/avx2/conversion.h index 9fd86016d3d9..86e7ab6eb187 100644 --- a/numpy/core/src/common/simd/avx2/conversion.h +++ b/numpy/core/src/common/simd/avx2/conversion.h @@ -29,4 +29,8 @@ #define npyv_cvt_b32_f32(BL) _mm256_castps_si256(BL) #define npyv_cvt_b64_f64(BL) _mm256_castpd_si256(BL) +// convert integer vectors between different sizes +#define npyv_cvt_u64_u8(A) A +#define npyv_cvt_u8_u64(A) A + #endif // _NPY_SIMD_AVX2_CVT_H diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index 0f7e27de3a0a..1160480c1f31 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -51,4 +51,8 @@ #define npyv_cvt_b32_f32(A) 
npyv_cvt_b32_u32(_mm512_castps_si512(A)) #define npyv_cvt_b64_f64(A) npyv_cvt_b64_u64(_mm512_castpd_si512(A)) +// convert integer vectors between different sizes +#define npyv_cvt_u64_u8(A) A +#define npyv_cvt_u8_u64(A) A + #endif // _NPY_SIMD_AVX512_CVT_H diff --git a/numpy/core/src/common/simd/neon/conversion.h b/numpy/core/src/common/simd/neon/conversion.h index b286931d1404..60bc263b74f3 100644 --- a/numpy/core/src/common/simd/neon/conversion.h +++ b/numpy/core/src/common/simd/neon/conversion.h @@ -29,4 +29,8 @@ #define npyv_cvt_b32_f32(BL) vreinterpretq_u32_f32(BL) #define npyv_cvt_b64_f64(BL) vreinterpretq_u64_f64(BL) +// convert integer vectors between different sizes +#define npyv_cvt_u64_u8(A) vreinterpretq_u8_u64(A) +#define npyv_cvt_u8_u64(A) vreinterpretq_u64_u8(A) + #endif // _NPY_SIMD_NEON_CVT_H diff --git a/numpy/core/src/common/simd/sse/conversion.h b/numpy/core/src/common/simd/sse/conversion.h index ea9660d13a30..36ed18d8c761 100644 --- a/numpy/core/src/common/simd/sse/conversion.h +++ b/numpy/core/src/common/simd/sse/conversion.h @@ -29,4 +29,8 @@ #define npyv_cvt_b32_f32(A) _mm_castps_si128(A) #define npyv_cvt_b64_f64(A) _mm_castpd_si128(A) +// convert integer vectors between different sizes +#define npyv_cvt_u64_u8(A) A +#define npyv_cvt_u8_u64(A) A + #endif // _NPY_SIMD_SSE_CVT_H diff --git a/numpy/core/src/common/simd/vsx/conversion.h b/numpy/core/src/common/simd/vsx/conversion.h index 6ed135990ccc..49e474136cd6 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -29,4 +29,8 @@ #define npyv_cvt_b32_f32(A) ((npyv_b32) A) #define npyv_cvt_b64_f64(A) ((npyv_b64) A) +// convert integer vectors between different sizes +#define npyv_cvt_u64_u8(A) ((npyv_u8) A) +#define npyv_cvt_u8_u64(A) ((npyv_u64) A) + #endif // _NPY_SIMD_VSX_CVT_H diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c index e914816ade28..3eccfe3a44e5 100644 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -48,10 +48,10 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) v = npyv_setf_u64(a[1], a[0]); } /* false -> 0x00 and true -> 0xFF (there is no cmpneq) */ - v = npyv_cmpeq_u8(v, zero); - v = npyv_cmpeq_u8(v, zero); + v = npyv_cvt_u8_u64(npyv_cmpeq_u8(npyv_cvt_u64_u8(v), npyv_cvt_u64_u8(zero))); + v = npyv_cvt_u8_u64(npyv_cmpeq_u8(npyv_cvt_u64_u8(v), npyv_cvt_u64_u8(zero))); /* extract msb of 16 bytes and pack it into 16 bit */ - r = npyv_movemask_u8(v); + r = npyv_movemask_u8(npyv_cvt_u64_u8(v)); /* store result */ for (int i = 0; i < vstep; i++) { memcpy(outptr, (char*)&r + i, 1); From e03d99dd780f11b0ab760cab89e5020a63af869a Mon Sep 17 00:00:00 2001 From: Rohit Sanjay Date: Sun, 12 Jul 2020 22:58:32 +0530 Subject: [PATCH 0013/1270] DOC: added examples for rfft2 and irfft2 docstring --- numpy/fft/_pocketfft.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 83ac86036bc0..cd972b32702b 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -1242,6 +1242,15 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None): This is really just `rfftn` with different default behavior. For more details see `rfftn`. 
+ Examples + -------- + >>> a = np.ones((2, 2)) + >>> np.fft.rfft2(a) + array([[4.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]) + >>> np.fft.rfft2(a, axes=(1, 1)) + array([[2.+0.j, 2.+0.j], + [2.+0.j, 2.+0.j]]) """ return rfftn(a, s, axes, norm) @@ -1399,5 +1408,13 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None): This is really `irfftn` with different defaults. For more details see `irfftn`. + Examples + -------- + >>> a = np.zeros((3, 3)) + >>> a[0, 0] = 3 * 3 + >>> np.fft.irfft2(a) + array([[0.75, 0.75, 0.75, 0.75], + [0.75, 0.75, 0.75, 0.75], + [0.75, 0.75, 0.75, 0.75]]) """ return irfftn(a, s, axes, norm) From 64847eddebe1bd933504c9daa61477e04ff59197 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Mon, 17 Aug 2020 23:16:50 -0700 Subject: [PATCH 0014/1270] DOC: update rfft2/irfft2 examples. Update rfft2 example to conform to fft2 example and switch the irfft2 example to illustrate round-tripping. --- numpy/fft/_pocketfft.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index cd972b32702b..2066b95eac2d 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -1244,13 +1244,13 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None): Examples -------- - >>> a = np.ones((2, 2)) + >>> a = np.mgrid[:5, :5][0] >>> np.fft.rfft2(a) - array([[4.+0.j, 0.+0.j], - [0.+0.j, 0.+0.j]]) - >>> np.fft.rfft2(a, axes=(1, 1)) - array([[2.+0.j, 2.+0.j], - [2.+0.j, 2.+0.j]]) + array([[ 50. +0.j , 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j ]]) """ return rfftn(a, s, axes, norm) @@ -1410,11 +1410,13 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None): Examples -------- - >>> a = np.zeros((3, 3)) - >>> a[0, 0] = 3 * 3 - >>> np.fft.irfft2(a) - array([[0.75, 0.75, 0.75, 0.75], - [0.75, 0.75, 0.75, 0.75], - [0.75, 0.75, 0.75, 0.75]]) + >>> a = np.mgrid[:5, :5][0] + >>> A = np.fft.rfft2(a) + >>> np.fft.irfft2(A, s=a.shape) + array([[0., 0., 0., 0., 0.], + [1., 1., 1., 1., 1.], + [2., 2., 2., 2., 2.], + [3., 3., 3., 3., 3.], + [4., 4., 4., 4., 4.]]) """ return irfftn(a, s, axes, norm) From 748b21b665bdabe76207b94f7cbf61e5f4869946 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 19 Aug 2020 11:31:06 +0800 Subject: [PATCH 0015/1270] add AVX2 dispatch --- numpy/core/src/multiarray/compiled_base.dispatch.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c index 3eccfe3a44e5..8f71cd3237e2 100644 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -1,6 +1,6 @@ /** * @targets $maxopt baseline - * SSE2 + * SSE2 AVX2 * VSX VSX2 * NEON ASIMDDP */ @@ -29,8 +29,9 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) npyv_u64 zero = npyv_zero_u64(); /* don't handle non-full 8-byte remainder */ npy_intp vn_out = n_out - (remain ? 
1 : 0); - vn_out -= (vn_out & 1); const int vstep = npyv_nlanes_u64; + vn_out -= (vn_out & (vstep - 1)); + // Maximum paraller abillity: handle 8 64bits at one time npy_uint64 a[4]; for (index = 0; index < vn_out; index += vstep) { unsigned int r; @@ -43,9 +44,9 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) /* note x86 can load unaligned */ npyv_u64 v; if (vstep == 4) { - v = npyv_setf_u64(a[3], a[2], a[1], a[0]); + v = npyv_set_u64(a[0], a[1], a[2], a[3]); } else { - v = npyv_setf_u64(a[1], a[0]); + v = npyv_set_u64(a[0], a[1]); } /* false -> 0x00 and true -> 0xFF (there is no cmpneq) */ v = npyv_cvt_u8_u64(npyv_cmpeq_u8(npyv_cvt_u64_u8(v), npyv_cvt_u64_u8(zero))); From c30f729df230eb2426ee883818d7c4369c8a1c08 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 19 Aug 2020 14:29:28 +0800 Subject: [PATCH 0016/1270] add allowlist --- numpy/core/src/multiarray/compiled_base.dispatch.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c index 8f71cd3237e2..a6e331736cda 100644 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -24,14 +24,14 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) npy_intp index = 0; int remain = n_in % 8; /* uneven bits */ -#if defined NPY_SIMD +#if defined(NPY_HAVE_SSE) || defined(NPY_HAVE_AVX2) || defined(NPY_HAVE_NEON) if (in_stride == 1 && element_size == 1 && n_out > 2) { npyv_u64 zero = npyv_zero_u64(); /* don't handle non-full 8-byte remainder */ npy_intp vn_out = n_out - (remain ? 1 : 0); const int vstep = npyv_nlanes_u64; vn_out -= (vn_out & (vstep - 1)); - // Maximum paraller abillity: handle 8 64bits at one time + // Maximum paraller abillity: handle four 64bits at one time npy_uint64 a[4]; for (index = 0; index < vn_out; index += vstep) { unsigned int r; From 72a5a89865805a0a07d397e84d2897fc8c5f8557 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 20 Aug 2020 09:29:18 +0800 Subject: [PATCH 0017/1270] merge declarations and initialization --- .../src/multiarray/compiled_base.dispatch.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c index a6e331736cda..d288ebde3f8d 100644 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -31,9 +31,9 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) npy_intp vn_out = n_out - (remain ? 1 : 0); const int vstep = npyv_nlanes_u64; vn_out -= (vn_out & (vstep - 1)); - // Maximum paraller abillity: handle four 64bits at one time - npy_uint64 a[4]; for (index = 0; index < vn_out; index += vstep) { + // Maximum paraller abillity: handle four 64bits at one time + npy_uint64 a[4]; unsigned int r; for(int i = 0; i < vstep; i++) { a[i] = *(npy_uint64*)(inptr + 8 * i); @@ -69,14 +69,11 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) /* Don't reset index. Just handle remainder of above block */ for (; index < n_out; index++) { unsigned char build = 0; - int i, maxi; - npy_intp j; - - maxi = (index == n_out - 1) ? remain : 8; + int maxi = (index == n_out - 1) ? 
remain : 8; if (order == 'b') { - for (i = 0; i < maxi; i++) { + for (int i = 0; i < maxi; i++) { build <<= 1; - for (j = 0; j < element_size; j++) { + for (npy_intp j = 0; j < element_size; j++) { build |= (inptr[j] != 0); } inptr += in_stride; @@ -87,9 +84,9 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) } else { - for (i = 0; i < maxi; i++) { + for (int i = 0; i < maxi; i++) { build >>= 1; - for (j = 0; j < element_size; j++) { + for (npy_intp j = 0; j < element_size; j++) { build |= (inptr[j] != 0) ? 128 : 0; } inptr += in_stride; From 16e3bec1744c39915e7ec3e490529c01e2cbde7a Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Thu, 20 Aug 2020 11:06:58 +0200 Subject: [PATCH 0018/1270] Rolling back to only. Improved documentation --- numpy/lib/function_base.py | 31 +++++-------------------------- 1 file changed, 5 insertions(+), 26 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 88926ff5bf06..910c2bc43d86 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1482,18 +1482,19 @@ def angle(z, deg=False): return a -def _unwrap_dispatcher(p, discont=None, axis=None, *, interval_size=2*pi, min_val=None, max_val=None): +def _unwrap_dispatcher(p, discont=None, axis=None, *, interval_size=2*pi): return (p,) @array_function_dispatch(_unwrap_dispatcher) -def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi, min_val=None, max_val=None): +def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): """ Unwrap by changing deltas between values to complement. For the default case where `interval_size= 2*pi`, `discont=pi`, It unwraps radian phase `p` by changing absolute jumps greater - than `discont` to their 2*pi complement along the given axis. + than `discont` to their 2*pi complement along the given axis. Jumps equal + to `discont` are not changed. In general it unwrapps a signal `p` by changing absolute jumps greater than `discont` to their `interval_size` complementary values. @@ -1507,19 +1508,7 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi, min_val=None, max_va axis : int, optional Axis along which unwrap will operate, default is the last axis. interval_size: float, optional - Size of the range over which the input wraps. By default, it is 2 pi. - If ``min_val`` and ``max_val`` are given, ``interval_size`` is ignored - and the interval size is ``max_val - min_val``. - min_val, max_val: float, optional - Boundaries of the interval over which the input array is expected to - wrap. By default, they are ``None`` and the interval is considered as - ``[-interval_size, interval_size]``. In case the first value of the - phase input array, ``p[0]``, is outside of the interval - ``[min_val, max_val]`` it will be corrected by an integral multiple of - the interval size such that it will be within the - boundaries. - Both boundaries require each other. If only one boundary is - provided without the other, it will be ignored. + Size of the range over which the input wraps. By default, it is ``2 pi``. 
Returns ------- @@ -1556,15 +1545,6 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi, min_val=None, max_va p = asarray(p) nd = p.ndim dd = diff(p, axis=axis) - offset = 0 - if (not min_val is None) and (not max_val is None): - interval_size = max_val - min_val - slice0list = [slice(None)]*nd # full slices - slice0list[axis] = 0 - slice0 = tuple(slice0list) - offset_mul = (p[slice0] - min_val)//interval_size - slice0list[axis] = None - offset = -offset_mul[tuple(slice0list)]*interval_size if discont is None: discont = interval_size/2 slice1 = [slice(None, None)]*nd # full slices @@ -1574,7 +1554,6 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi, min_val=None, max_va _nx.copyto(ddmod, interval_size/2, where=(ddmod == -interval_size/2) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) - p += offset up = array(p, copy=True, dtype='d') up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up From de7aa73c56d8128e3fa5cca1e99ff352fffdf0c5 Mon Sep 17 00:00:00 2001 From: scimax Date: Thu, 20 Aug 2020 11:49:22 +0200 Subject: [PATCH 0019/1270] Minor code clean up Co-authored-by: Eric Wieser --- doc/release/upcoming_changes/16987.improvement.rst | 5 ++--- numpy/lib/function_base.py | 6 ++++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/release/upcoming_changes/16987.improvement.rst b/doc/release/upcoming_changes/16987.improvement.rst index 28acc1ce2fa5..9ff81380afed 100644 --- a/doc/release/upcoming_changes/16987.improvement.rst +++ b/doc/release/upcoming_changes/16987.improvement.rst @@ -9,6 +9,5 @@ intervals. >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 >>> unwrap(phase_deg, interval_size=360) array([-180., -140., -100., -60., -20., 20., 60., 100., 140., - 180., 220., 260., 300., 340., 380., 420., 460., 500., - 540.]) - + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 910c2bc43d86..3115f7a80664 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1482,7 +1482,7 @@ def angle(z, deg=False): return a -def _unwrap_dispatcher(p, discont=None, axis=None, *, interval_size=2*pi): +def _unwrap_dispatcher(p, discont=None, axis=None, *, interval_size=None): return (p,) @@ -1551,11 +1551,13 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): slice1[axis] = slice(1, None) slice1 = tuple(slice1) ddmod = mod(dd + interval_size/2, interval_size) - interval_size/2 + # the above line made `ddmod[abs(dd) == interval_size/2] == -interval_size/2`. + # correct these such that `ddmod[abs(dd) == interval_size/2] == sign(dd)*interval_size/2`. _nx.copyto(ddmod, interval_size/2, where=(ddmod == -interval_size/2) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') - up[slice1] = p[slice1] + ph_correct.cumsum(axis) + up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up From 11ae4340173c644768368755bba93ced112b4505 Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Thu, 20 Aug 2020 11:59:52 +0200 Subject: [PATCH 0020/1270] Renamed new argument for unwraping. 
Extended release note --- .../upcoming_changes/16987.improvement.rst | 9 ++++-- numpy/lib/function_base.py | 32 +++++++++---------- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/doc/release/upcoming_changes/16987.improvement.rst b/doc/release/upcoming_changes/16987.improvement.rst index 9ff81380afed..e85fbda915a5 100644 --- a/doc/release/upcoming_changes/16987.improvement.rst +++ b/doc/release/upcoming_changes/16987.improvement.rst @@ -1,4 +1,4 @@ -Arbitrary `interval_size` option for ``numpy.unwrap`` +Arbitrary `period` option for ``numpy.unwrap`` ----------------------------------------------------- The size of the interval, over which phases are unwraped, is not restricted to `2 * pi` anymore. This is especially useful for unwrapping degrees but can also be used for other @@ -7,7 +7,12 @@ intervals. .. code:: python >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 - >>> unwrap(phase_deg, interval_size=360) + >>> phase_deg + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + -180., -140., -100., -60., -20., 20., 60., 100., 140., + -180.]) + + >>> unwrap(phase_deg, period=360) array([-180., -140., -100., -60., -20., 20., 60., 100., 140., 180., 220., 260., 300., 340., 380., 420., 460., 500., 540.]) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 3115f7a80664..ceea92849903 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1482,32 +1482,32 @@ def angle(z, deg=False): return a -def _unwrap_dispatcher(p, discont=None, axis=None, *, interval_size=None): +def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): return (p,) @array_function_dispatch(_unwrap_dispatcher) -def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): +def unwrap(p, discont=None, axis=-1, *, period=2*pi): """ Unwrap by changing deltas between values to complement. - For the default case where `interval_size= 2*pi`, `discont=pi`, + For the default case where `period= 2*pi`, `discont=pi`, It unwraps radian phase `p` by changing absolute jumps greater than `discont` to their 2*pi complement along the given axis. Jumps equal to `discont` are not changed. In general it unwrapps a signal `p` by changing absolute jumps - greater than `discont` to their `interval_size` complementary values. + greater than `discont` to their `period` complementary values. Parameters ---------- p : array_like Input array. discont : float, optional - Maximum discontinuity between values, default is ``interval_size/2``. + Maximum discontinuity between values, default is ``period/2``. axis : int, optional Axis along which unwrap will operate, default is the last axis. - interval_size: float, optional + period: float, optional Size of the range over which the input wraps. By default, it is ``2 pi``. Returns @@ -1519,7 +1519,7 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): rad2deg, deg2rad Notes ----- - If the discontinuity in `p` is smaller than ``interval_size/2``, + If the discontinuity in `p` is smaller than ``period/2``, but larger than `discont`, no unwrapping is done because taking the complement would only make the discontinuity larger. Examples @@ -1530,14 +1530,14 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. 
]) # may vary - >>> unwrap([0, 1, 2, -1, 0], interval_size=4) + >>> unwrap([0, 1, 2, -1, 0], period=4) array([0., 1., 2., 3., 4.]) - >>> unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], interval_size=6) + >>> unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) array([1., 2., 3., 4., 5., 6., 7., 8., 9.]) - >>> unwrap([2, 3, 4, 5, 2, 3, 4, 5], interval_size=4) + >>> unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) array([2., 3., 4., 5., 6., 7., 8., 9.]) >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 - >>> unwrap(phase_deg, interval_size=360) + >>> unwrap(phase_deg, period=360) array([-180., -140., -100., -60., -20., 20., 60., 100., 140., 180., 220., 260., 300., 340., 380., 420., 460., 500., 540.]) @@ -1546,14 +1546,14 @@ def unwrap(p, discont=None, axis=-1, *, interval_size=2*pi): nd = p.ndim dd = diff(p, axis=axis) if discont is None: - discont = interval_size/2 + discont = period/2 slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) slice1 = tuple(slice1) - ddmod = mod(dd + interval_size/2, interval_size) - interval_size/2 - # the above line made `ddmod[abs(dd) == interval_size/2] == -interval_size/2`. - # correct these such that `ddmod[abs(dd) == interval_size/2] == sign(dd)*interval_size/2`. - _nx.copyto(ddmod, interval_size/2, where=(ddmod == -interval_size/2) & (dd > 0)) + ddmod = mod(dd + period/2, period) - period/2 + # for `mask = (abs(dd) == period/2)`, the above line made `ddmod[mask] == -period/2`. + # correct these such that `ddmod[mask] == sign(dd[mask])*period/2`. + _nx.copyto(ddmod, period/2, where=(ddmod == -period/2) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') From 8cf0872290fa05b6adc18d53e45e121ef9f74bd3 Mon Sep 17 00:00:00 2001 From: scimax Date: Thu, 20 Aug 2020 12:01:53 +0200 Subject: [PATCH 0021/1270] Update numpy/lib/tests/test_function_base.py Co-authored-by: Eric Wieser --- numpy/lib/tests/test_function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 878fb3de1070..44ff7ea81dad 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1757,7 +1757,7 @@ def test_simple(self): # check that unwrap maintains continuity assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) - def test_minmax(self): + def test_period(self): # check that unwrap removes jumps greater that 255 assert_array_equal(unwrap([1, 1 + 256], interval_size=255), [1, 2]) # check that unwrap maintains continuity From 81e08d9a04d2a7e2aab231ac86909e3837e5630e Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 20 Aug 2020 19:48:12 +0800 Subject: [PATCH 0022/1270] remove npyv_cvt and fix typos. 
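This drops the ad-hoc `npyv_cvt_u64_u8` / `npyv_cvt_u8_u64` macros from the per-architecture conversion headers and switches the call sites in compiled_base.dispatch.c to the generic `npyv_reinterpret_*` intrinsics that the universal SIMD layer is assumed to provide (the hunks below rely on them without defining them). For reference, the substitution excerpted from the dispatch hunk has the form:

    /* before: one-off cast macros added only for this file */
    v = npyv_cvt_u8_u64(npyv_cmpeq_u8(npyv_cvt_u64_u8(v), npyv_cvt_u64_u8(zero)));
    /* after: reuse the generic reinterpret intrinsics instead */
    v = npyv_reinterpret_u8_u64(npyv_cmpeq_u8(npyv_reinterpret_u8_u64(v), npyv_reinterpret_u8_u64(zero)));
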
--- numpy/core/src/common/simd/avx2/conversion.h | 4 ---- numpy/core/src/common/simd/avx512/conversion.h | 4 ---- numpy/core/src/common/simd/neon/conversion.h | 4 ---- numpy/core/src/common/simd/sse/conversion.h | 4 ---- numpy/core/src/common/simd/vsx/conversion.h | 4 ---- numpy/core/src/multiarray/compiled_base.dispatch.c | 12 +++++------- 6 files changed, 5 insertions(+), 27 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/conversion.h b/numpy/core/src/common/simd/avx2/conversion.h index 86e7ab6eb187..9fd86016d3d9 100644 --- a/numpy/core/src/common/simd/avx2/conversion.h +++ b/numpy/core/src/common/simd/avx2/conversion.h @@ -29,8 +29,4 @@ #define npyv_cvt_b32_f32(BL) _mm256_castps_si256(BL) #define npyv_cvt_b64_f64(BL) _mm256_castpd_si256(BL) -// convert integer vectors between different sizes -#define npyv_cvt_u64_u8(A) A -#define npyv_cvt_u8_u64(A) A - #endif // _NPY_SIMD_AVX2_CVT_H diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index 1160480c1f31..0f7e27de3a0a 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -51,8 +51,4 @@ #define npyv_cvt_b32_f32(A) npyv_cvt_b32_u32(_mm512_castps_si512(A)) #define npyv_cvt_b64_f64(A) npyv_cvt_b64_u64(_mm512_castpd_si512(A)) -// convert integer vectors between different sizes -#define npyv_cvt_u64_u8(A) A -#define npyv_cvt_u8_u64(A) A - #endif // _NPY_SIMD_AVX512_CVT_H diff --git a/numpy/core/src/common/simd/neon/conversion.h b/numpy/core/src/common/simd/neon/conversion.h index 60bc263b74f3..b286931d1404 100644 --- a/numpy/core/src/common/simd/neon/conversion.h +++ b/numpy/core/src/common/simd/neon/conversion.h @@ -29,8 +29,4 @@ #define npyv_cvt_b32_f32(BL) vreinterpretq_u32_f32(BL) #define npyv_cvt_b64_f64(BL) vreinterpretq_u64_f64(BL) -// convert integer vectors between different sizes -#define npyv_cvt_u64_u8(A) vreinterpretq_u8_u64(A) -#define npyv_cvt_u8_u64(A) vreinterpretq_u64_u8(A) - #endif // _NPY_SIMD_NEON_CVT_H diff --git a/numpy/core/src/common/simd/sse/conversion.h b/numpy/core/src/common/simd/sse/conversion.h index 36ed18d8c761..ea9660d13a30 100644 --- a/numpy/core/src/common/simd/sse/conversion.h +++ b/numpy/core/src/common/simd/sse/conversion.h @@ -29,8 +29,4 @@ #define npyv_cvt_b32_f32(A) _mm_castps_si128(A) #define npyv_cvt_b64_f64(A) _mm_castpd_si128(A) -// convert integer vectors between different sizes -#define npyv_cvt_u64_u8(A) A -#define npyv_cvt_u8_u64(A) A - #endif // _NPY_SIMD_SSE_CVT_H diff --git a/numpy/core/src/common/simd/vsx/conversion.h b/numpy/core/src/common/simd/vsx/conversion.h index 49e474136cd6..6ed135990ccc 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -29,8 +29,4 @@ #define npyv_cvt_b32_f32(A) ((npyv_b32) A) #define npyv_cvt_b64_f64(A) ((npyv_b64) A) -// convert integer vectors between different sizes -#define npyv_cvt_u64_u8(A) ((npyv_u8) A) -#define npyv_cvt_u8_u64(A) ((npyv_u64) A) - #endif // _NPY_SIMD_VSX_CVT_H diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c index d288ebde3f8d..1cdcc08afa0f 100644 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -1,7 +1,6 @@ /** * @targets $maxopt baseline * SSE2 AVX2 - * VSX VSX2 * NEON ASIMDDP */ #include "compiled_base.h" @@ -34,8 +33,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) for (index = 0; index < vn_out; 
index += vstep) { // Maximum paraller abillity: handle four 64bits at one time npy_uint64 a[4]; - unsigned int r; - for(int i = 0; i < vstep; i++) { + for (int i = 0; i < vstep; i++) { a[i] = *(npy_uint64*)(inptr + 8 * i); if (order == 'b') { a[i] = npy_bswap8(a[i]); @@ -49,10 +47,10 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) v = npyv_set_u64(a[0], a[1]); } /* false -> 0x00 and true -> 0xFF (there is no cmpneq) */ - v = npyv_cvt_u8_u64(npyv_cmpeq_u8(npyv_cvt_u64_u8(v), npyv_cvt_u64_u8(zero))); - v = npyv_cvt_u8_u64(npyv_cmpeq_u8(npyv_cvt_u64_u8(v), npyv_cvt_u64_u8(zero))); + v = npyv_reinterpret_u8_u64(npyv_cmpeq_u8(npyv_reinterpret_u8_u64(v), npyv_reinterpret_u8_u64(zero))); + v = npyv_reinterpret_u8_u64(npyv_cmpeq_u8(npyv_reinterpret_u8_u64(v), npyv_reinterpret_u8_u64(zero))); /* extract msb of 16 bytes and pack it into 16 bit */ - r = npyv_movemask_u8(npyv_cvt_u64_u8(v)); + unsigned int r = npyv_movemask_u8(npyv_reinterpret_u8_u64(v)); /* store result */ for (int i = 0; i < vstep; i++) { memcpy(outptr, (char*)&r + i, 1); @@ -98,4 +96,4 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) *outptr = (char)build; outptr += out_stride; } -} \ No newline at end of file +} From c3ea9b680e458a91e0a7e97ce61d6853c379385f Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Thu, 20 Aug 2020 14:34:22 +0200 Subject: [PATCH 0023/1270] Integer input returning integer output Co-authored-by: Eric Wieser --- numpy/lib/function_base.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index ceea92849903..491b259150e5 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1550,13 +1550,21 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) slice1 = tuple(slice1) - ddmod = mod(dd + period/2, period) - period/2 - # for `mask = (abs(dd) == period/2)`, the above line made `ddmod[mask] == -period/2`. - # correct these such that `ddmod[mask] == sign(dd[mask])*period/2`. - _nx.copyto(ddmod, period/2, where=(ddmod == -period/2) & (dd > 0)) + dtype = np.result_type(dd, period) + if _nx.issubdtype(dtype, _nx.integer): + interval_low = -(period // 2) + interval_high = -interval_low + else: + interval_low = -period / 2 + interval_high = -interval_low + ddmod = mod(dd - interval_low, period) + interval_low + if period % 2 == 0: + # for `mask = (abs(dd) == period/2)`, the above line made `ddmod[mask] == -period/2`. + # correct these such that `ddmod[mask] == sign(dd[mask])*period/2`. 
+ _nx.copyto(ddmod, interval_high, where=(ddmod == interval_low) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) - up = array(p, copy=True, dtype='d') + up = array(p, copy=True, dtype=dtype) up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up From 07a584bbe432ff7a58f3aebccd31bfc6fe4c8a18 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 21 Aug 2020 12:08:13 +0800 Subject: [PATCH 0024/1270] add AVX512/VSX support and small optimize --- numpy/core/src/common/simd/avx2/operators.h | 2 +- numpy/core/src/common/simd/avx512/operators.h | 12 +++++++ numpy/core/src/common/simd/neon/operators.h | 2 +- numpy/core/src/common/simd/sse/operators.h | 2 +- numpy/core/src/common/simd/vsx/operators.h | 7 ++++ numpy/core/src/multiarray/compiled_base.c | 2 +- .../src/multiarray/compiled_base.dispatch.c | 33 ++++++++----------- numpy/core/src/multiarray/compiled_base.h | 2 +- 8 files changed, 38 insertions(+), 24 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/operators.h b/numpy/core/src/common/simd/avx2/operators.h index d9d26d2625a5..74517c3f4e7b 100644 --- a/numpy/core/src/common/simd/avx2/operators.h +++ b/numpy/core/src/common/simd/avx2/operators.h @@ -198,6 +198,6 @@ NPY_FINLINE __m256i npyv_cmpge_u32(__m256i a, __m256i b) #define npyv_cmpge_f64(A, B) _mm256_castpd_si256(_mm256_cmp_pd(A, B, _CMP_GE_OQ)) // Create mask from the most significant bit of each 8-bit element -#define npyv_movemask_u8(a) _mm256_movemask_epi8(a) +#define npyv_movemask_b8(a) _mm256_movemask_epi8(a) #endif // _NPY_SIMD_AVX2_OPERATORS_H diff --git a/numpy/core/src/common/simd/avx512/operators.h b/numpy/core/src/common/simd/avx512/operators.h index f76ea5e2d6c5..72e84608d320 100644 --- a/numpy/core/src/common/simd/avx512/operators.h +++ b/numpy/core/src/common/simd/avx512/operators.h @@ -256,4 +256,16 @@ #define npyv_cmpge_f32(A, B) _mm512_cmp_ps_mask(A, B, _CMP_GE_OQ) #define npyv_cmpge_f64(A, B) _mm512_cmp_pd_mask(A, B, _CMP_GE_OQ) +// Create mask from the most significant bit of each 8-bit element +// AVX512F & AVX512BW +NPY_FINLINE npy_uint64 npyv_movemask_b8(npyv_b8 mask) +{ +#ifdef NPY_HAVE_AVX512BW + return (npy_uint64)_cvtmask64_u64(mask); +#else + int mask_lo = _mm256_movemask_epi8(_mm512_castsi512_si256(mask)); + int mask_hi = _mm256_movemask_epi8(_mm512_extracti64x4_epi64(mask, 1)); + return (unsigned)mask_lo | ((npy_uint64)(unsigned)mask_hi << 32); +#endif +} #endif // _NPY_SIMD_AVX512_OPERATORS_H diff --git a/numpy/core/src/common/simd/neon/operators.h b/numpy/core/src/common/simd/neon/operators.h index 970bef1c550e..61d100225888 100644 --- a/numpy/core/src/common/simd/neon/operators.h +++ b/numpy/core/src/common/simd/neon/operators.h @@ -216,7 +216,7 @@ #define npyv_cmple_f64(A, B) npyv_cmpge_f64(B, A) // Create mask from the most significant bit of each 8-bit element -NPY_INLINE int32_t npyv_movemask_u8(uint8x16_t input) +NPY_INLINE int32_t npyv_movemask_b8(uint8x16_t input) { int8x8_t m0 = vcreate_s8(0x0706050403020100ULL); uint8x16_t v0 = vshlq_u8(vshrq_n_u8(input, 7), vcombine_s8(m0, m0)); diff --git a/numpy/core/src/common/simd/sse/operators.h b/numpy/core/src/common/simd/sse/operators.h index 52cd0ad393d8..96835a3dfbaa 100644 --- a/numpy/core/src/common/simd/sse/operators.h +++ b/numpy/core/src/common/simd/sse/operators.h @@ -256,6 +256,6 @@ NPY_FINLINE __m128i npyv_shr_s64(__m128i a, int c) #define npyv_cmpge_f64(a, b) _mm_castpd_si128(_mm_cmpge_pd(a, b)) // Create mask from the most significant bit of each 8-bit element -#define npyv_movemask_u8(a) 
_mm_movemask_epi8(a) +#define npyv_movemask_b8(a) _mm_movemask_epi8(a) #endif // _NPY_SIMD_SSE_OPERATORS_H diff --git a/numpy/core/src/common/simd/vsx/operators.h b/numpy/core/src/common/simd/vsx/operators.h index ca020d9e0e8c..b99eef08ebf9 100644 --- a/numpy/core/src/common/simd/vsx/operators.h +++ b/numpy/core/src/common/simd/vsx/operators.h @@ -213,4 +213,11 @@ NPY_FINLINE npyv_f64 npyv_not_f64(npyv_f64 a) #define npyv_cmple_f32(A, B) npyv_cmpge_f32(B, A) #define npyv_cmple_f64(A, B) npyv_cmpge_f64(B, A) +NPY_FINLINE npy_uint64 npyv_movemask_b8(npyv_b8 mask) +{ + const npyv_u8 bperm = {120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0}; + npyv_s32 vmask = (npyv_s32)vec_vbpermq((npyv_u8)mask, bperm); + return (npy_uint64)vec_extract(vmask, 2); +} + #endif // _NPY_SIMD_VSX_OPERATORS_H diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 68e2fd2645cc..6b509d887ef6 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1498,7 +1498,7 @@ pack_inner(const char *inptr, #ifndef NPY_DISABLE_OPTIMIZATION #include "compiled_base.dispatch.h" #endif - NPY_CPU_DISPATCH_CALL(return compiled_base_pack_inner, + NPY_CPU_DISPATCH_CALL(simd_compiled_base_pack_inner, (inptr, element_size, n_in, in_stride, outptr, n_out, out_stride, order)) } diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c index 1cdcc08afa0f..f4c67983aa45 100644 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -1,6 +1,7 @@ /** * @targets $maxopt baseline - * SSE2 AVX2 + * SSE2 AVX2 AVX512F + * VSX VSX2 * NEON ASIMDDP */ #include "compiled_base.h" @@ -10,7 +11,7 @@ * byte array. Truth values are determined as usual: 0 is false, everything * else is true. */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(simd_compiled_base_pack_inner) (const char *inptr, npy_intp element_size, npy_intp n_in, npy_intp in_stride, char *outptr, npy_intp n_out, npy_intp out_stride, char order) { /* @@ -23,34 +24,28 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(compiled_base_pack_inner) npy_intp index = 0; int remain = n_in % 8; /* uneven bits */ -#if defined(NPY_HAVE_SSE) || defined(NPY_HAVE_AVX2) || defined(NPY_HAVE_NEON) +#ifdef NPY_SIMD if (in_stride == 1 && element_size == 1 && n_out > 2) { - npyv_u64 zero = npyv_zero_u64(); + npyv_u8 v_zero = npyv_zero_u8(); /* don't handle non-full 8-byte remainder */ npy_intp vn_out = n_out - (remain ? 
1 : 0); const int vstep = npyv_nlanes_u64; vn_out -= (vn_out & (vstep - 1)); for (index = 0; index < vn_out; index += vstep) { - // Maximum paraller abillity: handle four 64bits at one time - npy_uint64 a[4]; + // Maximum paraller abillity: handle eight 64bits at one time + npy_uint64 a[8]; for (int i = 0; i < vstep; i++) { a[i] = *(npy_uint64*)(inptr + 8 * i); - if (order == 'b') { + + } + if (order == 'b') { + for (int i = 0; i < vstep; i++) { a[i] = npy_bswap8(a[i]); } } - /* note x86 can load unaligned */ - npyv_u64 v; - if (vstep == 4) { - v = npyv_set_u64(a[0], a[1], a[2], a[3]); - } else { - v = npyv_set_u64(a[0], a[1]); - } - /* false -> 0x00 and true -> 0xFF (there is no cmpneq) */ - v = npyv_reinterpret_u8_u64(npyv_cmpeq_u8(npyv_reinterpret_u8_u64(v), npyv_reinterpret_u8_u64(zero))); - v = npyv_reinterpret_u8_u64(npyv_cmpeq_u8(npyv_reinterpret_u8_u64(v), npyv_reinterpret_u8_u64(zero))); - /* extract msb of 16 bytes and pack it into 16 bit */ - unsigned int r = npyv_movemask_u8(npyv_reinterpret_u8_u64(v)); + npyv_u8 v = npyv_set_u64(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]); + npyv_b8 bmask = npyv_cmpneq_u8(v, v_zero); + npy_uint64 r = npyv_movemask_b8(bmask); /* store result */ for (int i = 0; i < vstep; i++) { memcpy(outptr, (char*)&r + i, 1); diff --git a/numpy/core/src/multiarray/compiled_base.h b/numpy/core/src/multiarray/compiled_base.h index 632ac0550d5c..7cf242a2c783 100644 --- a/numpy/core/src/multiarray/compiled_base.h +++ b/numpy/core/src/multiarray/compiled_base.h @@ -22,7 +22,7 @@ #include "compiled_base.dispatch.h" #endif -NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void compiled_base_pack_inner, +NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void simd_compiled_base_pack_inner, (const char *inptr, npy_intp element_size, npy_intp n_in, npy_intp in_stride, char *outptr, npy_intp n_out, npy_intp out_stride, char order)) NPY_NO_EXPORT PyObject * From 26c6ec3394f0d9345a562a73a6848426948da95c Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 21 Aug 2020 14:44:30 +0800 Subject: [PATCH 0025/1270] remove blankline and add convert in NEON/VSX --- numpy/core/src/multiarray/compiled_base.dispatch.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c index f4c67983aa45..095a47f993dc 100644 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -36,14 +36,14 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(simd_compiled_base_pack_inner) npy_uint64 a[8]; for (int i = 0; i < vstep; i++) { a[i] = *(npy_uint64*)(inptr + 8 * i); - } if (order == 'b') { for (int i = 0; i < vstep; i++) { a[i] = npy_bswap8(a[i]); } } - npyv_u8 v = npyv_set_u64(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]); + npyv_u8 v = npyv_reinterpret_u8_u64(npyv_set_u64(a[0], a[1], a[2], a[3], + a[4], a[5], a[6], a[7])); npyv_b8 bmask = npyv_cmpneq_u8(v, v_zero); npy_uint64 r = npyv_movemask_b8(bmask); /* store result */ From 00dcda244bc1eb58cb3b4f30c7b18a71a8569194 Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Fri, 21 Aug 2020 22:55:29 +0200 Subject: [PATCH 0026/1270] Updated incorrect argument in tests. boundary correction for int and float. 
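(Illustrative note, not part of the original commit text.) The extra `boundary_ambiguous` flag exists because a jump of exactly +period/2 and a jump of exactly -period/2 wrap to the same residue, so the sign of the original jump has to break the tie. For integer input that tie can only occur when the period is even, which is why the float branch always applies the correction while the integer branch checks `rem == 0`. A minimal sketch of the ambiguity, using only NumPy calls that already exist:

    import numpy as np

    period = 4
    dd = np.array([2, -2])          # jumps of +period/2 and -period/2
    # map the jumps into [-period/2, period/2): both collapse to -period/2
    ddmod = np.mod(dd + period // 2, period) - period // 2
    print(ddmod)                    # [-2 -2]
    # restore the sign of the jump that was originally +period/2
    np.copyto(ddmod, period // 2, where=(ddmod == -(period // 2)) & (dd > 0))
    print(ddmod)                    # [ 2 -2]
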
Co-authored-by: Eric Wieser --- numpy/lib/function_base.py | 4 +++- numpy/lib/tests/test_function_base.py | 11 ++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 491b259150e5..2a942a13a572 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1554,11 +1554,13 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): if _nx.issubdtype(dtype, _nx.integer): interval_low = -(period // 2) interval_high = -interval_low + boundary_ambiguous = (period % 2 == 0) else: interval_low = -period / 2 interval_high = -interval_low + boundary_ambiguous = True ddmod = mod(dd - interval_low, period) + interval_low - if period % 2 == 0: + if boundary_ambiguous: # for `mask = (abs(dd) == period/2)`, the above line made `ddmod[mask] == -period/2`. # correct these such that `ddmod[mask] == sign(dd[mask])*period/2`. _nx.copyto(ddmod, interval_high, where=(ddmod == interval_low) & (dd > 0)) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 44ff7ea81dad..2ebde9aec6d9 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1759,20 +1759,21 @@ def test_simple(self): def test_period(self): # check that unwrap removes jumps greater that 255 - assert_array_equal(unwrap([1, 1 + 256], interval_size=255), [1, 2]) + assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2]) # check that unwrap maintains continuity - assert_(np.all(diff(unwrap(rand(10) * 1000, interval_size=255)) < 255)) + assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255)) # check simple case simple_seq = np.array([0, 75, 150, 225, 300]) wrap_seq = np.mod(simple_seq, 255) - assert_array_equal(unwrap(wrap_seq, interval_size=255), simple_seq) + assert_array_equal(unwrap(wrap_seq, period=255), simple_seq) # check custom discont value uneven_seq = np.array([0, 75, 150, 225, 300, 430]) wrap_uneven = np.mod(uneven_seq, 250) - no_discont = unwrap(wrap_uneven, interval_size=250) + no_discont = unwrap(wrap_uneven, period=250) assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180]) - sm_discont = unwrap(wrap_uneven, interval_size=250, discont=140) + sm_discont = unwrap(wrap_uneven, period=250, discont=140) assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430]) + assert sm_discont.dtype == wrap_uneven.dtype class TestFilterwindows: From 8755c7f102363bee2572a08a5d9f83671af7957d Mon Sep 17 00:00:00 2001 From: scimax Date: Fri, 21 Aug 2020 23:08:14 +0200 Subject: [PATCH 0027/1270] Code cleanup Co-authored-by: Eric Wieser --- numpy/lib/function_base.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 2a942a13a572..4f0377efec2d 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1552,13 +1552,12 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): slice1 = tuple(slice1) dtype = np.result_type(dd, period) if _nx.issubdtype(dtype, _nx.integer): - interval_low = -(period // 2) - interval_high = -interval_low - boundary_ambiguous = (period % 2 == 0) + interval_high, rem = divmod(period, 2) + boundary_ambiguous = rem == 0 else: - interval_low = -period / 2 - interval_high = -interval_low + interval_high = period / 2 boundary_ambiguous = True + interval_low = -interval_high ddmod = mod(dd - interval_low, period) + interval_low if boundary_ambiguous: # for `mask = (abs(dd) == period/2)`, the above line made `ddmod[mask] == -period/2`. 
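Taken together, the unwrap changes above amount to mapping each first difference of the signal into the interval [-period/2, period/2) and then undoing only the jumps larger than `discont`. The following is a hedged, self-contained sketch of that core logic for 1-D floating-point input only; it is not the actual NumPy source and omits the `axis` handling and the integer `divmod` branch shown in the diffs above:

    import numpy as np

    def unwrap_sketch(p, period=2 * np.pi, discont=None):
        # Illustrative 1-D, float-only re-implementation of the logic added
        # in the patches above; the real np.unwrap also handles `axis` and
        # preserves integer dtypes via divmod().
        p = np.asarray(p, dtype=float)
        if discont is None:
            discont = period / 2
        dd = np.diff(p)
        interval_high = period / 2
        interval_low = -interval_high
        # map each jump into [-period/2, period/2), then restore the sign of
        # jumps that sit exactly on the +period/2 boundary
        ddmod = np.mod(dd - interval_low, period) + interval_low
        np.copyto(ddmod, interval_high, where=(ddmod == interval_low) & (dd > 0))
        ph_correct = ddmod - dd
        ph_correct[np.abs(dd) < discont] = 0
        return np.concatenate(([p[0]], p[1:] + np.cumsum(ph_correct)))

    # mirrors the new tests: a ramp wrapped with period 255 is recovered
    wrapped = np.array([0, 75, 150, 225, 300]) % 255
    print(unwrap_sketch(wrapped, period=255))   # [  0.  75. 150. 225. 300.]

On the wrapped ramp used in the new tests this recovers [0, 75, 150, 225, 300] (as floats here; the patched implementation keeps the input's integer dtype).
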
From 8a6bcbcfe8db6cf2fd784631b480d5865077ad98 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 19 Aug 2020 08:23:04 -0600 Subject: [PATCH 0028/1270] MAINT: Use numpy version for f2py version. Because f2py is released as part of numpy, the old svn versioning no longer makes sense and it was decided to use the numpy version instead. --- numpy/f2py/__version__.py | 9 +-------- numpy/f2py/capi_maps.py | 2 -- numpy/f2py/common_rules.py | 2 -- numpy/f2py/f2py2e.py | 20 ++++++++------------ numpy/f2py/rules.py | 15 ++++++--------- numpy/tests/test_scripts.py | 4 ++-- 6 files changed, 17 insertions(+), 35 deletions(-) diff --git a/numpy/f2py/__version__.py b/numpy/f2py/__version__.py index 104c2e1a899e..e20d7c1dbb38 100644 --- a/numpy/f2py/__version__.py +++ b/numpy/f2py/__version__.py @@ -1,8 +1 @@ -major = 2 - -try: - from __svn_version__ import version - version_info = (major, version) - version = '%s_%s' % version_info -except (ImportError, ValueError): - version = str(major) +from numpy.version import version diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index fabbfc4c24ac..472ddde43e66 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -11,8 +11,6 @@ Pearu Peterson """ -__version__ = "$Revision: 1.60 $"[10:-1] - from . import __version__ f2py_version = __version__.version diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py index 90483e55be83..937d8bc723bd 100644 --- a/numpy/f2py/common_rules.py +++ b/numpy/f2py/common_rules.py @@ -13,8 +13,6 @@ Pearu Peterson """ -__version__ = "$Revision: 1.19 $"[10:-1] - from . import __version__ f2py_version = __version__.version diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 71a049e410f6..a91a06340e16 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -29,18 +29,14 @@ from . import capi_maps f2py_version = __version__.version +numpy_version = __version__.version errmess = sys.stderr.write # outmess=sys.stdout.write show = pprint.pprint outmess = auxfuncs.outmess -try: - from numpy import __version__ as numpy_version -except ImportError: - numpy_version = 'N/A' - -__usage__ = """\ -Usage: +__usage__ =\ +f"""Usage: 1) To construct extension module sources: @@ -97,8 +93,8 @@ --[no-]latex-doc Create (or not) module.tex. Default is --no-latex-doc. --short-latex Create 'incomplete' LaTeX document (without commands - \\documentclass, \\tableofcontents, and \\begin{document}, - \\end{document}). + \\documentclass, \\tableofcontents, and \\begin{{document}}, + \\end{{document}}). --[no-]rest-doc Create (or not) module.rst. Default is --no-rest-doc. @@ -167,12 +163,12 @@ array. Integer sets the threshold for array sizes when a message should be shown. -Version: %s -numpy Version: %s +Version: {f2py_version} +numpy Version: {numpy_version} Requires: Python 3.5 or higher. License: NumPy license (see LICENSE.txt in the NumPy source code) Copyright 1999 - 2011 Pearu Peterson all rights reserved. -http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version) +http://cens.ioc.ee/projects/f2py2e/""" def scaninputline(inputline): diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index a14f60194408..f1490527eafd 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -50,18 +50,15 @@ Pearu Peterson """ -__version__ = "$Revision: 1.129 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -from .. 
import version as _numpy_version -numpy_version = _numpy_version.version - import os import time import copy +# __version__.version is now the same as the NumPy version +from . import __version__ +f2py_version = __version__.version +numpy_version = __version__.version + from .auxfuncs import ( applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote, @@ -202,7 +199,7 @@ \tif (PyErr_Occurred()) \t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;} \td = PyModule_GetDict(m); -\ts = PyUnicode_FromString(\"$R""" + """evision: $\"); +\ts = PyUnicode_FromString(\"#f2py_version#\"); \tPyDict_SetItemString(d, \"__version__\", s); \tPy_DECREF(s); \ts = PyUnicode_FromString( diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index a0f2ba70a4a1..e67a829471dc 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -38,9 +38,9 @@ def find_f2py_commands(): def test_f2py(f2py_cmd): # test that we can run f2py script stdout = subprocess.check_output([f2py_cmd, '-v']) - assert_equal(stdout.strip(), b'2') + assert_equal(stdout.strip(), np.__version__.encode('ascii')) def test_pep338(): stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v']) - assert_equal(stdout.strip(), b'2') + assert_equal(stdout.strip(), np.__version__.encode('ascii')) From fde3fdb05eda00a4c84d6e52184c43928b320035 Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Sun, 23 Aug 2020 17:19:02 +0200 Subject: [PATCH 0029/1270] Minor corrections in unwrapping docstrings Co-authored-by: Eric Wieser --- doc/release/upcoming_changes/16987.improvement.rst | 4 ++-- numpy/lib/function_base.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/release/upcoming_changes/16987.improvement.rst b/doc/release/upcoming_changes/16987.improvement.rst index e85fbda915a5..e25b2832fa2a 100644 --- a/doc/release/upcoming_changes/16987.improvement.rst +++ b/doc/release/upcoming_changes/16987.improvement.rst @@ -9,8 +9,8 @@ intervals. >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 >>> phase_deg array([-180., -140., -100., -60., -20., 20., 60., 100., 140., - -180., -140., -100., -60., -20., 20., 60., 100., 140., - -180.]) + -180., -140., -100., -60., -20., 20., 60., 100., 140., + -180.]) >>> unwrap(phase_deg, period=360) array([-180., -140., -100., -60., -20., 20., 60., 100., 140., diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 4f0377efec2d..7c819eaf38df 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1531,11 +1531,11 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. 
]) # may vary >>> unwrap([0, 1, 2, -1, 0], period=4) - array([0., 1., 2., 3., 4.]) + array([0, 1, 2, 3, 4]) >>> unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) - array([1., 2., 3., 4., 5., 6., 7., 8., 9.]) + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) - array([2., 3., 4., 5., 6., 7., 8., 9.]) + array([2, 3, 4, 5, 6, 7, 8, 9]) >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 >>> unwrap(phase_deg, period=360) array([-180., -140., -100., -60., -20., 20., 60., 100., 140., From 7c56c83d39677deb00fc42e31a96a7118fe255a0 Mon Sep 17 00:00:00 2001 From: scimax Date: Sun, 23 Aug 2020 21:55:48 +0200 Subject: [PATCH 0030/1270] Apply suggestions from code review Co-authored-by: Eric Wieser --- .../upcoming_changes/16987.improvement.rst | 5 ++--- numpy/lib/function_base.py | 19 ++++++++++--------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/doc/release/upcoming_changes/16987.improvement.rst b/doc/release/upcoming_changes/16987.improvement.rst index e25b2832fa2a..04a17e98f0a3 100644 --- a/doc/release/upcoming_changes/16987.improvement.rst +++ b/doc/release/upcoming_changes/16987.improvement.rst @@ -1,8 +1,7 @@ Arbitrary `period` option for ``numpy.unwrap`` ----------------------------------------------------- -The size of the interval, over which phases are unwraped, is not restricted to `2 * pi` -anymore. This is especially useful for unwrapping degrees but can also be used for other -intervals. +The size of the interval over which phases are unwrapped is no longer restricted to `2 * pi`. +This is especially useful for unwrapping degrees, but can also be used for other intervals. .. code:: python diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 7c819eaf38df..41dec5e80919 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1489,15 +1489,15 @@ def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): @array_function_dispatch(_unwrap_dispatcher) def unwrap(p, discont=None, axis=-1, *, period=2*pi): """ - Unwrap by changing deltas between values to complement. - - For the default case where `period= 2*pi`, `discont=pi`, - It unwraps radian phase `p` by changing absolute jumps greater - than `discont` to their 2*pi complement along the given axis. Jumps equal - to `discont` are not changed. - - In general it unwrapps a signal `p` by changing absolute jumps - greater than `discont` to their `period` complementary values. + Unwrap by taking the complement of large deltas with respect to the period. + + This unwraps a signal `p` by changing elements which have an absolute + difference from their predecessor of more than ``max(discont, period/2)`` + to their `period`-complementary values. + + For the default case where `period` is :math:`2\pi` and is `discont` is :math:`\pi`, + this unwraps a radian phase `p` such that adjacent differences are never + greater than :math:`\pi` by adding :math:`2k\pi` for some integer :math:`k`. Parameters ---------- @@ -1505,6 +1505,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): Input array. discont : float, optional Maximum discontinuity between values, default is ``period/2``. + Values below ``period/2`` are treated as if they were ``period/2``. axis : int, optional Axis along which unwrap will operate, default is the last axis. 
period: float, optional From a84b28afb9fdd0547148ba186d59fcaba9e82a8d Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Sun, 23 Aug 2020 22:05:12 +0200 Subject: [PATCH 0031/1270] Comment in docs on discontinuity in unwrap --- numpy/lib/function_base.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 41dec5e80919..04b051973b57 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1504,8 +1504,9 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): p : array_like Input array. discont : float, optional - Maximum discontinuity between values, default is ``period/2``. - Values below ``period/2`` are treated as if they were ``period/2``. + Maximum discontinuity between values, default is ``period/2``. + Values below ``period/2`` are treated as if they were ``period/2``. To have an effect + different from the default, `discont` should be larger than ``period/2``. axis : int, optional Axis along which unwrap will operate, default is the last axis. period: float, optional From 36d2f9fa01e18008b6fd61840f816081849b1f0e Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 24 Aug 2020 10:33:48 +0800 Subject: [PATCH 0032/1270] split headers according to three rules --- numpy/core/src/multiarray/compiled_base.dispatch.c | 4 ++-- numpy/core/src/multiarray/compiled_base.h | 8 -------- .../core/src/multiarray/compiled_base_pack_inner.h | 13 +++++++++++++ 3 files changed, 15 insertions(+), 10 deletions(-) create mode 100644 numpy/core/src/multiarray/compiled_base_pack_inner.h diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c index 095a47f993dc..5dd84737d366 100644 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -4,7 +4,7 @@ * VSX VSX2 * NEON ASIMDDP */ -#include "compiled_base.h" +#include "compiled_base_pack_inner.h" /* * This function packs boolean values in the input array into the bits of a @@ -24,7 +24,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(simd_compiled_base_pack_inner) npy_intp index = 0; int remain = n_in % 8; /* uneven bits */ -#ifdef NPY_SIMD +#if NPY_SIMD if (in_stride == 1 && element_size == 1 && n_out > 2) { npyv_u8 v_zero = npyv_zero_u8(); /* don't handle non-full 8-byte remainder */ diff --git a/numpy/core/src/multiarray/compiled_base.h b/numpy/core/src/multiarray/compiled_base.h index 7cf242a2c783..6b383967a2f4 100644 --- a/numpy/core/src/multiarray/compiled_base.h +++ b/numpy/core/src/multiarray/compiled_base.h @@ -14,17 +14,9 @@ #include "templ_common.h" /* for npy_mul_with_overflow_intp */ #include "lowlevel_strided_loops.h" /* for npy_bswap8 */ #include "alloc.h" -#include "simd/simd.h" #include "ctors.h" #include "common.h" -#ifndef NPY_DISABLE_OPTIMIZATION - #include "compiled_base.dispatch.h" -#endif - -NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void simd_compiled_base_pack_inner, -(const char *inptr, npy_intp element_size, npy_intp n_in, npy_intp in_stride, char *outptr, npy_intp n_out, npy_intp out_stride, char order)) - NPY_NO_EXPORT PyObject * arr_insert(PyObject *, PyObject *, PyObject *); NPY_NO_EXPORT PyObject * diff --git a/numpy/core/src/multiarray/compiled_base_pack_inner.h b/numpy/core/src/multiarray/compiled_base_pack_inner.h new file mode 100644 index 000000000000..ff4588f04cf8 --- /dev/null +++ b/numpy/core/src/multiarray/compiled_base_pack_inner.h @@ -0,0 +1,13 @@ +#ifndef _NPY_PRIVATE__COMPILED_BASE_PACK_INNER_H_ +#define 
_NPY_PRIVATE__COMPILED_BASE_PACK_INNER_H_ +#include +#include "simd/simd.h" +#include "lowlevel_strided_loops.h" /* for npy_bswap8 */ +#ifndef NPY_DISABLE_OPTIMIZATION + #include "compiled_base.dispatch.h" +#endif + +NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void simd_compiled_base_pack_inner, +(const char *inptr, npy_intp element_size, npy_intp n_in, npy_intp in_stride, char *outptr, npy_intp n_out, npy_intp out_stride, char order)) + +#endif \ No newline at end of file From d0a4f453b4c514d6778e8b83177c96ac3882b95b Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 25 Aug 2020 14:19:31 +0800 Subject: [PATCH 0033/1270] adjust the header --- numpy/core/src/multiarray/compiled_base.c | 17 +++++++++++++++-- .../src/multiarray/compiled_base.dispatch.c | 3 +++ numpy/core/src/multiarray/compiled_base.h | 15 --------------- .../src/multiarray/compiled_base_pack_inner.h | 8 ++++---- 4 files changed, 22 insertions(+), 21 deletions(-) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 6b509d887ef6..3d6472c96b74 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1,5 +1,18 @@ -#include "compiled_base.h" - +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#include +#include +#include + +#define _MULTIARRAYMODULE +#include "numpy/arrayobject.h" +#include "numpy/npy_3kcompat.h" +#include "numpy/npy_math.h" +#include "npy_config.h" +#include "templ_common.h" /* for npy_mul_with_overflow_intp */ +#include "lowlevel_strided_loops.h" /* for npy_bswap8 */ +#include "alloc.h" +#include "ctors.h" +#include "common.h" /* * Returns -1 if the array is monotonic decreasing, * +1 if the array is monotonic increasing, diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c index 5dd84737d366..8a9ae428bbf9 100644 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ b/numpy/core/src/multiarray/compiled_base.dispatch.c @@ -4,6 +4,9 @@ * VSX VSX2 * NEON ASIMDDP */ +#include +#include "simd/simd.h" +#include "lowlevel_strided_loops.h" /* for npy_bswap8 */ #include "compiled_base_pack_inner.h" /* diff --git a/numpy/core/src/multiarray/compiled_base.h b/numpy/core/src/multiarray/compiled_base.h index 6b383967a2f4..082139910717 100644 --- a/numpy/core/src/multiarray/compiled_base.h +++ b/numpy/core/src/multiarray/compiled_base.h @@ -1,21 +1,6 @@ #ifndef _NPY_PRIVATE__COMPILED_BASE_H_ #define _NPY_PRIVATE__COMPILED_BASE_H_ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include -#include -#include - -#define _MULTIARRAYMODULE -#include "numpy/arrayobject.h" -#include "numpy/npy_3kcompat.h" #include -#include "numpy/npy_math.h" -#include "npy_config.h" -#include "templ_common.h" /* for npy_mul_with_overflow_intp */ -#include "lowlevel_strided_loops.h" /* for npy_bswap8 */ -#include "alloc.h" -#include "ctors.h" -#include "common.h" NPY_NO_EXPORT PyObject * arr_insert(PyObject *, PyObject *, PyObject *); diff --git a/numpy/core/src/multiarray/compiled_base_pack_inner.h b/numpy/core/src/multiarray/compiled_base_pack_inner.h index ff4588f04cf8..2cc036ac73c9 100644 --- a/numpy/core/src/multiarray/compiled_base_pack_inner.h +++ b/numpy/core/src/multiarray/compiled_base_pack_inner.h @@ -1,8 +1,8 @@ #ifndef _NPY_PRIVATE__COMPILED_BASE_PACK_INNER_H_ #define _NPY_PRIVATE__COMPILED_BASE_PACK_INNER_H_ -#include -#include "simd/simd.h" -#include "lowlevel_strided_loops.h" /* for npy_bswap8 */ +#include +#include "npy_cpu_dispatch.h" + #ifndef NPY_DISABLE_OPTIMIZATION 
#include "compiled_base.dispatch.h" #endif @@ -10,4 +10,4 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void simd_compiled_base_pack_inner, (const char *inptr, npy_intp element_size, npy_intp n_in, npy_intp in_stride, char *outptr, npy_intp n_out, npy_intp out_stride, char order)) -#endif \ No newline at end of file +#endif From d31cc73b3e8772550d551931a8b0a5d8f3aae9c9 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Wed, 2 Sep 2020 12:40:27 +0100 Subject: [PATCH 0034/1270] Update numpy/lib/function_base.py --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 04b051973b57..ebfba1af19fd 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1488,7 +1488,7 @@ def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): @array_function_dispatch(_unwrap_dispatcher) def unwrap(p, discont=None, axis=-1, *, period=2*pi): - """ + r""" Unwrap by taking the complement of large deltas with respect to the period. This unwraps a signal `p` by changing elements which have an absolute From 974603643e54a276e7485e9e1fd5080780713035 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 10 Sep 2020 17:43:53 +0800 Subject: [PATCH 0035/1270] add blank line between include and comment --- numpy/core/src/multiarray/compiled_base.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 3d6472c96b74..36101fb81229 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -13,6 +13,7 @@ #include "alloc.h" #include "ctors.h" #include "common.h" + /* * Returns -1 if the array is monotonic decreasing, * +1 if the array is monotonic increasing, From 1d0ca535fd7808b391b30148e562ff8977c2ad7d Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 10 Sep 2020 19:32:13 +0800 Subject: [PATCH 0036/1270] add coma --- numpy/core/src/multiarray/compiled_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index db936d9e11c4..7ba9db5dce1c 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1513,7 +1513,7 @@ pack_inner(const char *inptr, #include "compiled_base.dispatch.h" #endif NPY_CPU_DISPATCH_CALL(simd_compiled_base_pack_inner, - (inptr, element_size, n_in, in_stride, outptr, n_out, out_stride, order)) + (inptr, element_size, n_in, in_stride, outptr, n_out, out_stride, order)); } static PyObject * From dbb347295ac7414818ab5068abe31dc19a63563c Mon Sep 17 00:00:00 2001 From: Max Kellermeier Date: Fri, 11 Sep 2020 17:57:30 +0200 Subject: [PATCH 0037/1270] fix CI fails by blank lines --- numpy/lib/function_base.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index ebfba1af19fd..d6dc7e5b95a4 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1516,14 +1516,17 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): ------- out : ndarray Output array. + See Also -------- rad2deg, deg2rad + Notes ----- If the discontinuity in `p` is smaller than ``period/2``, but larger than `discont`, no unwrapping is done because taking the complement would only make the discontinuity larger. 
+ Examples -------- >>> phase = np.linspace(0, np.pi, num=5) From 35f7783a8566c2b0db917e44c5e32c932545cf17 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 14 Sep 2020 10:02:53 +0800 Subject: [PATCH 0038/1270] Add NPY_HAVE_AVX512BW_MASK --- numpy/core/src/common/simd/avx512/operators.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/common/simd/avx512/operators.h b/numpy/core/src/common/simd/avx512/operators.h index 72e84608d320..05978a6cf3b5 100644 --- a/numpy/core/src/common/simd/avx512/operators.h +++ b/numpy/core/src/common/simd/avx512/operators.h @@ -260,8 +260,10 @@ // AVX512F & AVX512BW NPY_FINLINE npy_uint64 npyv_movemask_b8(npyv_b8 mask) { -#ifdef NPY_HAVE_AVX512BW +#ifdef NPY_HAVE_AVX512BW_MASK return (npy_uint64)_cvtmask64_u64(mask); +#elif NPY_HAVE_AVX512BW + return (npy_uint64)mask; #else int mask_lo = _mm256_movemask_epi8(_mm512_castsi512_si256(mask)); int mask_hi = _mm256_movemask_epi8(_mm512_extracti64x4_epi64(mask, 1)); From 94b2d8602b01ef86b979475f16fe17cb9a49decc Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 14 Sep 2020 10:52:21 +0800 Subject: [PATCH 0039/1270] abandon dispatch way because packbits is not ufunc --- numpy/core/setup.py | 1 - numpy/core/src/multiarray/compiled_base.c | 89 +++++++++++++++-- .../src/multiarray/compiled_base.dispatch.c | 97 ------------------- .../src/multiarray/compiled_base_pack_inner.h | 13 --- 4 files changed, 83 insertions(+), 117 deletions(-) delete mode 100644 numpy/core/src/multiarray/compiled_base.dispatch.c delete mode 100644 numpy/core/src/multiarray/compiled_base_pack_inner.h diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 39a787897122..92dcacede412 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -821,7 +821,6 @@ def get_mathlib_info(*args): join('src', 'multiarray', 'buffer.c'), join('src', 'multiarray', 'calculation.c'), join('src', 'multiarray', 'compiled_base.c'), - join('src', 'multiarray', 'compiled_base.dispatch.c'), join('src', 'multiarray', 'common.c'), join('src', 'multiarray', 'convert.c'), join('src', 'multiarray', 'convert_datatype.c'), diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 7ba9db5dce1c..70550308f8a2 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -13,7 +13,7 @@ #include "alloc.h" #include "ctors.h" #include "common.h" - +#include "simd/simd.h" /* * Returns -1 if the array is monotonic decreasing, * +1 if the array is monotonic increasing, @@ -1499,6 +1499,11 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) Py_RETURN_NONE; } +/* + * This function packs boolean values in the input array into the bits of a + * byte array. Truth values are determined as usual: 0 is false, everything + * else is true. + */ static NPY_INLINE void pack_inner(const char *inptr, npy_intp element_size, /* in bytes */ @@ -1509,11 +1514,83 @@ pack_inner(const char *inptr, npy_intp out_stride, char order) { - #ifndef NPY_DISABLE_OPTIMIZATION - #include "compiled_base.dispatch.h" - #endif - NPY_CPU_DISPATCH_CALL(simd_compiled_base_pack_inner, - (inptr, element_size, n_in, in_stride, outptr, n_out, out_stride, order)); + /* + * Loop through the elements of inptr. + * Determine whether or not it is nonzero. 
+ * Yes: set corresponding bit (and adjust build value) + * No: move on + * Every 8th value, set the value of build and increment the outptr + */ + npy_intp index = 0; + int remain = n_in % 8; /* uneven bits */ + +#if NPY_SIMD + if (in_stride == 1 && element_size == 1 && n_out > 2) { + npyv_u8 v_zero = npyv_zero_u8(); + /* don't handle non-full 8-byte remainder */ + npy_intp vn_out = n_out - (remain ? 1 : 0); + const int vstep = npyv_nlanes_u64; + vn_out -= (vn_out & (vstep - 1)); + for (index = 0; index < vn_out; index += vstep) { + // Maximum paraller abillity: handle eight 64bits at one time + npy_uint64 a[8]; + for (int i = 0; i < vstep; i++) { + a[i] = *(npy_uint64*)(inptr + 8 * i); + } + if (order == 'b') { + for (int i = 0; i < vstep; i++) { + a[i] = npy_bswap8(a[i]); + } + } + npyv_u8 v = npyv_reinterpret_u8_u64(npyv_set_u64(a[0], a[1], a[2], a[3], + a[4], a[5], a[6], a[7])); + npyv_b8 bmask = npyv_cmpneq_u8(v, v_zero); + npy_uint64 r = npyv_movemask_b8(bmask); + /* store result */ + for (int i = 0; i < vstep; i++) { + memcpy(outptr, (char*)&r + i, 1); + outptr += out_stride; + } + inptr += 8 * vstep; + } + } +#endif + + if (remain == 0) { /* assumes n_in > 0 */ + remain = 8; + } + /* Don't reset index. Just handle remainder of above block */ + for (; index < n_out; index++) { + unsigned char build = 0; + int maxi = (index == n_out - 1) ? remain : 8; + if (order == 'b') { + for (int i = 0; i < maxi; i++) { + build <<= 1; + for (npy_intp j = 0; j < element_size; j++) { + build |= (inptr[j] != 0); + } + inptr += in_stride; + } + if (index == n_out - 1) { + build <<= 8 - remain; + } + } + else + { + for (int i = 0; i < maxi; i++) { + build >>= 1; + for (npy_intp j = 0; j < element_size; j++) { + build |= (inptr[j] != 0) ? 128 : 0; + } + inptr += in_stride; + } + if (index == n_out - 1) { + build >>= 8 - remain; + } + } + *outptr = (char)build; + outptr += out_stride; + } } static PyObject * diff --git a/numpy/core/src/multiarray/compiled_base.dispatch.c b/numpy/core/src/multiarray/compiled_base.dispatch.c deleted file mode 100644 index 8a9ae428bbf9..000000000000 --- a/numpy/core/src/multiarray/compiled_base.dispatch.c +++ /dev/null @@ -1,97 +0,0 @@ -/** - * @targets $maxopt baseline - * SSE2 AVX2 AVX512F - * VSX VSX2 - * NEON ASIMDDP - */ -#include -#include "simd/simd.h" -#include "lowlevel_strided_loops.h" /* for npy_bswap8 */ -#include "compiled_base_pack_inner.h" - -/* - * This function packs boolean values in the input array into the bits of a - * byte array. Truth values are determined as usual: 0 is false, everything - * else is true. - */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(simd_compiled_base_pack_inner) -(const char *inptr, npy_intp element_size, npy_intp n_in, npy_intp in_stride, char *outptr, npy_intp n_out, npy_intp out_stride, char order) -{ - /* - * Loop through the elements of inptr. - * Determine whether or not it is nonzero. - * Yes: set corresponding bit (and adjust build value) - * No: move on - * Every 8th value, set the value of build and increment the outptr - */ - npy_intp index = 0; - int remain = n_in % 8; /* uneven bits */ - -#if NPY_SIMD - if (in_stride == 1 && element_size == 1 && n_out > 2) { - npyv_u8 v_zero = npyv_zero_u8(); - /* don't handle non-full 8-byte remainder */ - npy_intp vn_out = n_out - (remain ? 
1 : 0); - const int vstep = npyv_nlanes_u64; - vn_out -= (vn_out & (vstep - 1)); - for (index = 0; index < vn_out; index += vstep) { - // Maximum paraller abillity: handle eight 64bits at one time - npy_uint64 a[8]; - for (int i = 0; i < vstep; i++) { - a[i] = *(npy_uint64*)(inptr + 8 * i); - } - if (order == 'b') { - for (int i = 0; i < vstep; i++) { - a[i] = npy_bswap8(a[i]); - } - } - npyv_u8 v = npyv_reinterpret_u8_u64(npyv_set_u64(a[0], a[1], a[2], a[3], - a[4], a[5], a[6], a[7])); - npyv_b8 bmask = npyv_cmpneq_u8(v, v_zero); - npy_uint64 r = npyv_movemask_b8(bmask); - /* store result */ - for (int i = 0; i < vstep; i++) { - memcpy(outptr, (char*)&r + i, 1); - outptr += out_stride; - } - inptr += 8 * vstep; - } - } -#endif - - if (remain == 0) { /* assumes n_in > 0 */ - remain = 8; - } - /* Don't reset index. Just handle remainder of above block */ - for (; index < n_out; index++) { - unsigned char build = 0; - int maxi = (index == n_out - 1) ? remain : 8; - if (order == 'b') { - for (int i = 0; i < maxi; i++) { - build <<= 1; - for (npy_intp j = 0; j < element_size; j++) { - build |= (inptr[j] != 0); - } - inptr += in_stride; - } - if (index == n_out - 1) { - build <<= 8 - remain; - } - } - else - { - for (int i = 0; i < maxi; i++) { - build >>= 1; - for (npy_intp j = 0; j < element_size; j++) { - build |= (inptr[j] != 0) ? 128 : 0; - } - inptr += in_stride; - } - if (index == n_out - 1) { - build >>= 8 - remain; - } - } - *outptr = (char)build; - outptr += out_stride; - } -} diff --git a/numpy/core/src/multiarray/compiled_base_pack_inner.h b/numpy/core/src/multiarray/compiled_base_pack_inner.h deleted file mode 100644 index 2cc036ac73c9..000000000000 --- a/numpy/core/src/multiarray/compiled_base_pack_inner.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _NPY_PRIVATE__COMPILED_BASE_PACK_INNER_H_ -#define _NPY_PRIVATE__COMPILED_BASE_PACK_INNER_H_ -#include -#include "npy_cpu_dispatch.h" - -#ifndef NPY_DISABLE_OPTIMIZATION - #include "compiled_base.dispatch.h" -#endif - -NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void simd_compiled_base_pack_inner, -(const char *inptr, npy_intp element_size, npy_intp n_in, npy_intp in_stride, char *outptr, npy_intp n_out, npy_intp out_stride, char order)) - -#endif From e6bea5f79c127a53a9252d8d3f51108815c60267 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Mon, 14 Sep 2020 10:16:37 +0100 Subject: [PATCH 0040/1270] Apply suggestions from code review Cleanup whitespace --- numpy/lib/function_base.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index d6dc7e5b95a4..ceb6148c5b03 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1526,7 +1526,6 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): If the discontinuity in `p` is smaller than ``period/2``, but larger than `discont`, no unwrapping is done because taking the complement would only make the discontinuity larger. 
- Examples -------- >>> phase = np.linspace(0, np.pi, num=5) @@ -1544,8 +1543,8 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 >>> unwrap(phase_deg, period=360) array([-180., -140., -100., -60., -20., 20., 60., 100., 140., - 180., 220., 260., 300., 340., 380., 420., 460., 500., - 540.]) + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) """ p = asarray(p) nd = p.ndim From ba6fc3ae7594f599ba3f561efb2697427da8a3f5 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Mon, 14 Sep 2020 10:17:03 +0100 Subject: [PATCH 0041/1270] Cleanup whitespace --- numpy/lib/function_base.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index ceb6148c5b03..87929d696b9a 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1511,7 +1511,6 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): Axis along which unwrap will operate, default is the last axis. period: float, optional Size of the range over which the input wraps. By default, it is ``2 pi``. - Returns ------- out : ndarray From 985c2a64b46885ef22942df8350517a6163dfd9a Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Mon, 14 Sep 2020 10:17:34 +0100 Subject: [PATCH 0042/1270] Add missing newline --- numpy/lib/function_base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 87929d696b9a..d1afdf96a059 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1525,6 +1525,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): If the discontinuity in `p` is smaller than ``period/2``, but larger than `discont`, no unwrapping is done because taking the complement would only make the discontinuity larger. + Examples -------- >>> phase = np.linspace(0, np.pi, num=5) From d7322c71f1dcd4b8defea12dba6bf35f28aa2b56 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Mon, 14 Sep 2020 10:18:06 +0100 Subject: [PATCH 0043/1270] Cleanup whitespace --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index d1afdf96a059..dde0217cd587 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1498,7 +1498,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): For the default case where `period` is :math:`2\pi` and is `discont` is :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences are never greater than :math:`\pi` by adding :math:`2k\pi` for some integer :math:`k`. 
- + Parameters ---------- p : array_like From e54a06ca81dd93cfda4570ae03740711092e08d3 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Mon, 14 Sep 2020 10:18:55 +0100 Subject: [PATCH 0044/1270] Add missing whitespace --- numpy/lib/tests/test_function_base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 2ebde9aec6d9..f881ddf00792 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1775,6 +1775,7 @@ def test_period(self): assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430]) assert sm_discont.dtype == wrap_uneven.dtype + class TestFilterwindows: def test_hanning(self): From 42f57cb343f192c956a0553d801df1ff04656b06 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 17 Sep 2020 10:05:39 +0800 Subject: [PATCH 0045/1270] fix typo --- numpy/core/src/multiarray/compiled_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 70550308f8a2..e11e19d9572c 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1532,7 +1532,7 @@ pack_inner(const char *inptr, const int vstep = npyv_nlanes_u64; vn_out -= (vn_out & (vstep - 1)); for (index = 0; index < vn_out; index += vstep) { - // Maximum paraller abillity: handle eight 64bits at one time + // Maximum parallel ability: handle eight 64-bit integers at one time npy_uint64 a[8]; for (int i = 0; i < vstep; i++) { a[i] = *(npy_uint64*)(inptr + 8 * i); From c480ffba021f83287f23dd386f011c48c37c53b5 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 18 Sep 2020 10:12:20 +0800 Subject: [PATCH 0046/1270] test avx2 --- numpy/core/src/common/simd/avx2/avx2.h | 2 +- numpy/core/src/common/simd/simd.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/avx2.h b/numpy/core/src/common/simd/avx2/avx2.h index c99d628ee408..27a0fddda5a4 100644 --- a/numpy/core/src/common/simd/avx2/avx2.h +++ b/numpy/core/src/common/simd/avx2/avx2.h @@ -1,7 +1,7 @@ #ifndef _NPY_SIMD_H_ #error "Not a standalone header" #endif - +#include #define NPY_SIMD 256 #define NPY_SIMD_WIDTH 32 #define NPY_SIMD_F64 1 diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 2f39c8427b5d..03a83741c8c6 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -10,7 +10,7 @@ #include "numpy/npy_common.h" #include "npy_cpu_dispatch.h" #include "simd_utils.h" - +#define NPY_HAVE_AVX2 #ifdef __cplusplus extern "C" { #endif From 888634543fc591f70abfb62e350afbac67b66f6d Mon Sep 17 00:00:00 2001 From: takanori-pskq Date: Sat, 10 Oct 2020 15:00:04 +0900 Subject: [PATCH 0047/1270] DOC: Add module template --- doc/source/_templates/autosummary/module.rst | 38 ++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 doc/source/_templates/autosummary/module.rst diff --git a/doc/source/_templates/autosummary/module.rst b/doc/source/_templates/autosummary/module.rst new file mode 100644 index 000000000000..c2b9e45fb2ae --- /dev/null +++ b/doc/source/_templates/autosummary/module.rst @@ -0,0 +1,38 @@ +{% extends "!autosummary/module.rst" %} + +{% block attributes %} +{% if attributes %} + .. rubric:: Module Attributes + + .. autosummary:: + :toctree: + {% for item in attributes %} + {{ item }} + {%- endfor %} +{% endif %} +{% endblock %} + +{% block functions %} +{% if functions %} + .. 
rubric:: Functions + + .. autosummary:: + :toctree: + {% for item in functions %} + {{ item }} + {%- endfor %} +{% endif %} +{% endblock %} + +{% block classes %} +{% if classes %} + .. rubric:: Classes + + .. autosummary:: + :toctree: + {% for item in classes %} + {{ item }} + {%- endfor %} +{% endif %} +{% endblock %} + From 05debfd0faf61552152d440676e0fb67041bf881 Mon Sep 17 00:00:00 2001 From: Takanori H Date: Thu, 29 Oct 2020 21:57:22 +0900 Subject: [PATCH 0048/1270] Update doc/source/_templates/autosummary/module.rst Co-authored-by: Eric Wieser --- doc/source/_templates/autosummary/module.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/source/_templates/autosummary/module.rst b/doc/source/_templates/autosummary/module.rst index c2b9e45fb2ae..e1f428d6598e 100644 --- a/doc/source/_templates/autosummary/module.rst +++ b/doc/source/_templates/autosummary/module.rst @@ -1,5 +1,8 @@ {% extends "!autosummary/module.rst" %} +{# This file is almost the same as the default, but adds :toctree: to the autosummary directives. + The original can be found at `sphinx/ext/autosummary/templates/autosummary/module.rst`. #} + {% block attributes %} {% if attributes %} .. rubric:: Module Attributes @@ -35,4 +38,3 @@ {%- endfor %} {% endif %} {% endblock %} - From 6c8aa42ce668aa57ddc3347df35998fc32e7240a Mon Sep 17 00:00:00 2001 From: takanori-pskq Date: Fri, 6 Nov 2020 01:40:02 +0000 Subject: [PATCH 0049/1270] DOC: Fix for buildling with sphinx 3 --- doc/source/conf.py | 10 +- doc/source/reference/arrays.scalars.rst | 99 ++++++++----- doc/source/reference/c-api/array.rst | 90 ++++++------ doc/source/reference/c-api/coremath.rst | 10 +- doc/source/reference/c-api/dtype.rst | 134 +++++++++++------- doc/source/reference/c-api/iterator.rst | 4 +- .../reference/c-api/types-and-structures.rst | 22 +-- .../random/bit_generators/mt19937.rst | 3 +- .../reference/random/bit_generators/pcg64.rst | 3 +- .../random/bit_generators/philox.rst | 3 +- .../reference/random/bit_generators/sfc64.rst | 3 +- doc/source/reference/random/generator.rst | 3 +- doc/source/reference/random/legacy.rst | 3 +- doc/source/user/c-info.beyond-basics.rst | 6 +- doc/source/user/c-info.how-to-extend.rst | 10 +- doc_requirements.txt | 2 +- 16 files changed, 236 insertions(+), 169 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 381a01612401..e76f60063408 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -4,7 +4,7 @@ import sys # Minimum version, enforced by sphinx -needs_sphinx = '2.2.0' +needs_sphinx = '3.2.0' # This is a nasty hack to use platform-agnostic names for types in the @@ -146,6 +146,14 @@ def setup(app): app.add_config_value('python_version_major', str(sys.version_info.major), 'env') app.add_lexer('NumPyC', NumPyLexer) +# While these objects do have type `module`, the names are aliases for modules +# elsewhere. Sphinx does not support referring to modules by an aliases name, +# so we make the alias look like a "real" module for it. +# If we deemed it desirable, we could in future make these real modules, which +# would make `from numpy.char import split` work. 
+sys.modules['numpy.char'] = numpy.char +sys.modules['numpy.testing.dec'] = numpy.testing.dec + # ----------------------------------------------------------------------------- # HTML output # ----------------------------------------------------------------------------- diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 4b5da2e13a5c..29438f70f4c9 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -94,112 +94,141 @@ Python Boolean scalar. .. tip:: The default data type in NumPy is :class:`float_`. .. autoclass:: numpy.generic - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.number - :exclude-members: + :members: __init__ + :exclude-members: __init__ Integer types ~~~~~~~~~~~~~ .. autoclass:: numpy.integer - :exclude-members: + :members: __init__ + :exclude-members: __init__ Signed integer types ++++++++++++++++++++ .. autoclass:: numpy.signedinteger - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.byte - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.short - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.intc - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.int_ - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.longlong - :exclude-members: + :members: __init__ + :exclude-members: __init__ Unsigned integer types ++++++++++++++++++++++ .. autoclass:: numpy.unsignedinteger - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.ubyte - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.ushort - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.uintc - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.uint - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.ulonglong - :exclude-members: + :members: __init__ + :exclude-members: __init__ Inexact types ~~~~~~~~~~~~~ .. autoclass:: numpy.inexact - :exclude-members: + :members: __init__ + :exclude-members: __init__ Floating-point types ++++++++++++++++++++ .. autoclass:: numpy.floating - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.half - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.single - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.double - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.longdouble - :exclude-members: + :members: __init__ + :exclude-members: __init__ Complex floating-point types ++++++++++++++++++++++++++++ .. autoclass:: numpy.complexfloating - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.csingle - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.cdouble - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.clongdouble - :exclude-members: + :members: __init__ + :exclude-members: __init__ Other types ~~~~~~~~~~~ .. autoclass:: numpy.bool_ - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.datetime64 - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. 
autoclass:: numpy.timedelta64 - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.object_ - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. note:: @@ -222,16 +251,20 @@ arrays. (In the character codes ``#`` is an integer denoting how many elements the data type consists of.) .. autoclass:: numpy.flexible - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.bytes_ - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.str_ - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. autoclass:: numpy.void - :exclude-members: + :members: __init__ + :exclude-members: __init__ .. warning:: diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 3aa541b79e64..1673f1d6b1f7 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -22,8 +22,8 @@ Array structure and data access These macros access the :c:type:`PyArrayObject` structure members and are defined in ``ndarraytypes.h``. The input argument, *arr*, can be any -:c:type:`PyObject *` that is directly interpretable as a -:c:type:`PyArrayObject *` (any instance of the :c:data:`PyArray_Type` +:c:expr:`PyObject *` that is directly interpretable as a +:c:expr:`PyArrayObject *` (any instance of the :c:data:`PyArray_Type` and its sub-types). .. c:function:: int PyArray_NDIM(PyArrayObject *arr) @@ -825,7 +825,7 @@ General check of Python Type Evaluates true if *op* is an instance of (a subclass of) :c:data:`PyArray_Type` and has 0 dimensions. -.. c:function:: PyArray_IsScalar(op, cls) +.. c:macro:: PyArray_IsScalar(op, cls) Evaluates true if *op* is an instance of ``Py{cls}ArrType_Type``. @@ -864,8 +864,8 @@ Data-type checking For the typenum macros, the argument is an integer representing an enumerated array data type. For the array type checking macros the -argument must be a :c:type:`PyObject *` that can be directly interpreted as a -:c:type:`PyArrayObject *`. +argument must be a :c:expr:`PyObject *` that can be directly interpreted as a +:c:expr:`PyArrayObject *`. .. c:function:: int PyTypeNum_ISUNSIGNED(int num) @@ -1022,7 +1022,7 @@ argument must be a :c:type:`PyObject *` that can be directly interpret .. c:function:: int PyArray_EquivByteorders(int b1, int b2) - True if byteorder characters ( :c:data:`NPY_LITTLE`, + True if byteorder characters *b1* and *b2* ( :c:data:`NPY_LITTLE`, :c:data:`NPY_BIG`, :c:data:`NPY_NATIVE`, :c:data:`NPY_IGNORE` ) are either equal or equivalent as to their specification of a native byte order. Thus, on a little-endian machine :c:data:`NPY_LITTLE` @@ -2781,14 +2781,14 @@ Data-type descriptors Data-type objects must be reference counted so be aware of the action on the data-type reference of different C-API calls. The standard rule is that when a data-type object is returned it is a - new reference. Functions that take :c:type:`PyArray_Descr *` objects and + new reference. Functions that take :c:expr:`PyArray_Descr *` objects and return arrays steal references to the data-type their inputs unless otherwise noted. Therefore, you must own a reference to any data-type object used as input to such a function. .. c:function:: int PyArray_DescrCheck(PyObject* obj) - Evaluates as true if *obj* is a data-type object ( :c:type:`PyArray_Descr *` ). + Evaluates as true if *obj* is a data-type object ( :c:expr:`PyArray_Descr *` ). .. 
c:function:: PyArray_Descr* PyArray_DescrNew(PyArray_Descr* obj) @@ -3485,10 +3485,6 @@ Miscellaneous Macros Evaluates as True if arrays *a1* and *a2* have the same shape. -.. c:var:: a - -.. c:var:: b - .. c:macro:: PyArray_MAX(a,b) Returns the maximum of *a* and *b*. If (*a*) or (*b*) are @@ -3547,22 +3543,22 @@ Miscellaneous Macros Enumerated Types ^^^^^^^^^^^^^^^^ -.. c:type:: NPY_SORTKIND +.. c:enum:: NPY_SORTKIND A special variable-type which can take on different values to indicate the sorting algorithm being used. - .. c:var:: NPY_QUICKSORT + .. c:enumerator:: NPY_QUICKSORT - .. c:var:: NPY_HEAPSORT + .. c:enumerator:: NPY_HEAPSORT - .. c:var:: NPY_MERGESORT + .. c:enumerator:: NPY_MERGESORT - .. c:var:: NPY_STABLESORT + .. c:enumerator:: NPY_STABLESORT Used as an alias of :c:data:`NPY_MERGESORT` and vica versa. - .. c:var:: NPY_NSORTS + .. c:enumerator:: NPY_NSORTS Defined to be the number of sorts. It is fixed at three by the need for backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and @@ -3570,90 +3566,90 @@ Enumerated Types of several stable sorting algorithms depending on the data type. -.. c:type:: NPY_SCALARKIND +.. c:enum:: NPY_SCALARKIND A special variable type indicating the number of "kinds" of scalars distinguished in determining scalar-coercion rules. This variable can take on the values: - .. c:var:: NPY_NOSCALAR + .. c:enumerator:: NPY_NOSCALAR - .. c:var:: NPY_BOOL_SCALAR + .. c:enumerator:: NPY_BOOL_SCALAR - .. c:var:: NPY_INTPOS_SCALAR + .. c:enumerator:: NPY_INTPOS_SCALAR - .. c:var:: NPY_INTNEG_SCALAR + .. c:enumerator:: NPY_INTNEG_SCALAR - .. c:var:: NPY_FLOAT_SCALAR + .. c:enumerator:: NPY_FLOAT_SCALAR - .. c:var:: NPY_COMPLEX_SCALAR + .. c:enumerator:: NPY_COMPLEX_SCALAR - .. c:var:: NPY_OBJECT_SCALAR + .. c:enumerator:: NPY_OBJECT_SCALAR - .. c:var:: NPY_NSCALARKINDS + .. c:enumerator:: NPY_NSCALARKINDS Defined to be the number of scalar kinds (not including :c:data:`NPY_NOSCALAR`). -.. c:type:: NPY_ORDER +.. c:enum:: NPY_ORDER An enumeration type indicating the element order that an array should be interpreted in. When a brand new array is created, generally only **NPY_CORDER** and **NPY_FORTRANORDER** are used, whereas when one or more inputs are provided, the order can be based on them. - .. c:var:: NPY_ANYORDER + .. c:enumerator:: NPY_ANYORDER Fortran order if all the inputs are Fortran, C otherwise. - .. c:var:: NPY_CORDER + .. c:enumerator:: NPY_CORDER C order. - .. c:var:: NPY_FORTRANORDER + .. c:enumerator:: NPY_FORTRANORDER Fortran order. - .. c:var:: NPY_KEEPORDER + .. c:enumerator:: NPY_KEEPORDER An order as close to the order of the inputs as possible, even if the input is in neither C nor Fortran order. -.. c:type:: NPY_CLIPMODE +.. c:enum:: NPY_CLIPMODE A variable type indicating the kind of clipping that should be applied in certain functions. - .. c:var:: NPY_RAISE + .. c:enumerator:: NPY_RAISE The default for most operations, raises an exception if an index is out of bounds. - .. c:var:: NPY_CLIP + .. c:enumerator:: NPY_CLIP Clips an index to the valid range if it is out of bounds. - .. c:var:: NPY_WRAP + .. c:enumerator:: NPY_WRAP Wraps an index to the valid range if it is out of bounds. -.. c:type:: NPY_SEARCHSIDE +.. c:enum:: NPY_SEARCHSIDE A variable type indicating whether the index returned should be that of the first suitable location (if :c:data:`NPY_SEARCHLEFT`) or of the last (if :c:data:`NPY_SEARCHRIGHT`). - .. c:var:: NPY_SEARCHLEFT + .. c:enumerator:: NPY_SEARCHLEFT - .. c:var:: NPY_SEARCHRIGHT + .. 
c:enumerator:: NPY_SEARCHRIGHT -.. c:type:: NPY_SELECTKIND +.. c:enum:: NPY_SELECTKIND A variable type indicating the selection algorithm being used. - .. c:var:: NPY_INTROSELECT + .. c:enumerator:: NPY_INTROSELECT -.. c:type:: NPY_CASTING +.. c:enum:: NPY_CASTING .. versionadded:: 1.6 @@ -3661,25 +3657,25 @@ Enumerated Types be. This is used by the iterator added in NumPy 1.6, and is intended to be used more broadly in a future version. - .. c:var:: NPY_NO_CASTING + .. c:enumerator:: NPY_NO_CASTING Only allow identical types. - .. c:var:: NPY_EQUIV_CASTING + .. c:enumerator:: NPY_EQUIV_CASTING Allow identical and casts involving byte swapping. - .. c:var:: NPY_SAFE_CASTING + .. c:enumerator:: NPY_SAFE_CASTING Only allow casts which will not cause values to be rounded, truncated, or otherwise changed. - .. c:var:: NPY_SAME_KIND_CASTING + .. c:enumerator:: NPY_SAME_KIND_CASTING Allow any safe casts, and casts between types of the same kind. For example, float64 -> float32 is permitted with this rule. - .. c:var:: NPY_UNSAFE_CASTING + .. c:enumerator:: NPY_UNSAFE_CASTING Allow any cast, no matter what kind of data loss may occur. diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index 338c584a1024..cec83b150d21 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -46,30 +46,30 @@ Floating point classification corresponding single and extension precision macro are available with the suffix F and L. -.. c:function:: int npy_isnan(x) +.. c:macro:: npy_isnan(x) This is a macro, and is equivalent to C99 isnan: works for single, double and extended precision, and return a non 0 value is x is a NaN. -.. c:function:: int npy_isfinite(x) +.. c:macro:: npy_isfinite(x) This is a macro, and is equivalent to C99 isfinite: works for single, double and extended precision, and return a non 0 value is x is neither a NaN nor an infinity. -.. c:function:: int npy_isinf(x) +.. c:macro:: npy_isinf(x) This is a macro, and is equivalent to C99 isinf: works for single, double and extended precision, and return a non 0 value is x is infinite (positive and negative). -.. c:function:: int npy_signbit(x) +.. c:macro:: npy_signbit(x) This is a macro, and is equivalent to C99 signbit: works for single, double and extended precision, and return a non 0 value is x has the signbit set (that is the number is negative). -.. c:function:: double npy_copysign(double x, double y) +.. c:macro:: npy_copysign(x, y) This is a function equivalent to C99 copysign: return x with the same sign as y. Works for any value, including inf and nan. Single and extended diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst index a1a53cdb600b..382e45dc0aa7 100644 --- a/doc/source/reference/c-api/dtype.rst +++ b/doc/source/reference/c-api/dtype.rst @@ -25,157 +25,157 @@ select the precision desired. Enumerated Types ---------------- -.. c:var:: NPY_TYPES +.. c:enumerator:: NPY_TYPES There is a list of enumerated types defined providing the basic 24 data types plus some useful generic names. Whenever the code requires a type number, one of these enumerated types is requested. The types are all called ``NPY_{NAME}``: -.. c:var:: NPY_BOOL +.. c:enumerator:: NPY_BOOL The enumeration value for the boolean type, stored as one byte. It may only be set to the values 0 and 1. -.. c:var:: NPY_BYTE -.. c:var:: NPY_INT8 +.. c:enumerator:: NPY_BYTE +.. c:enumerator:: NPY_INT8 The enumeration value for an 8-bit/1-byte signed integer. 
-.. c:var:: NPY_SHORT -.. c:var:: NPY_INT16 +.. c:enumerator:: NPY_SHORT +.. c:enumerator:: NPY_INT16 The enumeration value for a 16-bit/2-byte signed integer. -.. c:var:: NPY_INT -.. c:var:: NPY_INT32 +.. c:enumerator:: NPY_INT +.. c:enumerator:: NPY_INT32 The enumeration value for a 32-bit/4-byte signed integer. -.. c:var:: NPY_LONG +.. c:enumerator:: NPY_LONG Equivalent to either NPY_INT or NPY_LONGLONG, depending on the platform. -.. c:var:: NPY_LONGLONG -.. c:var:: NPY_INT64 +.. c:enumerator:: NPY_LONGLONG +.. c:enumerator:: NPY_INT64 The enumeration value for a 64-bit/8-byte signed integer. -.. c:var:: NPY_UBYTE -.. c:var:: NPY_UINT8 +.. c:enumerator:: NPY_UBYTE +.. c:enumerator:: NPY_UINT8 The enumeration value for an 8-bit/1-byte unsigned integer. -.. c:var:: NPY_USHORT -.. c:var:: NPY_UINT16 +.. c:enumerator:: NPY_USHORT +.. c:enumerator:: NPY_UINT16 The enumeration value for a 16-bit/2-byte unsigned integer. -.. c:var:: NPY_UINT -.. c:var:: NPY_UINT32 +.. c:enumerator:: NPY_UINT +.. c:enumerator:: NPY_UINT32 The enumeration value for a 32-bit/4-byte unsigned integer. -.. c:var:: NPY_ULONG +.. c:enumerator:: NPY_ULONG Equivalent to either NPY_UINT or NPY_ULONGLONG, depending on the platform. -.. c:var:: NPY_ULONGLONG -.. c:var:: NPY_UINT64 +.. c:enumerator:: NPY_ULONGLONG +.. c:enumerator:: NPY_UINT64 The enumeration value for a 64-bit/8-byte unsigned integer. -.. c:var:: NPY_HALF -.. c:var:: NPY_FLOAT16 +.. c:enumerator:: NPY_HALF +.. c:enumerator:: NPY_FLOAT16 The enumeration value for a 16-bit/2-byte IEEE 754-2008 compatible floating point type. -.. c:var:: NPY_FLOAT -.. c:var:: NPY_FLOAT32 +.. c:enumerator:: NPY_FLOAT +.. c:enumerator:: NPY_FLOAT32 The enumeration value for a 32-bit/4-byte IEEE 754 compatible floating point type. -.. c:var:: NPY_DOUBLE -.. c:var:: NPY_FLOAT64 +.. c:enumerator:: NPY_DOUBLE +.. c:enumerator:: NPY_FLOAT64 The enumeration value for a 64-bit/8-byte IEEE 754 compatible floating point type. -.. c:var:: NPY_LONGDOUBLE +.. c:enumerator:: NPY_LONGDOUBLE The enumeration value for a platform-specific floating point type which is at least as large as NPY_DOUBLE, but larger on many platforms. -.. c:var:: NPY_CFLOAT -.. c:var:: NPY_COMPLEX64 +.. c:enumerator:: NPY_CFLOAT +.. c:enumerator:: NPY_COMPLEX64 The enumeration value for a 64-bit/8-byte complex type made up of two NPY_FLOAT values. -.. c:var:: NPY_CDOUBLE -.. c:var:: NPY_COMPLEX128 +.. c:enumerator:: NPY_CDOUBLE +.. c:enumerator:: NPY_COMPLEX128 The enumeration value for a 128-bit/16-byte complex type made up of two NPY_DOUBLE values. -.. c:var:: NPY_CLONGDOUBLE +.. c:enumerator:: NPY_CLONGDOUBLE The enumeration value for a platform-specific complex floating point type which is made up of two NPY_LONGDOUBLE values. -.. c:var:: NPY_DATETIME +.. c:enumerator:: NPY_DATETIME The enumeration value for a data type which holds dates or datetimes with a precision based on selectable date or time units. -.. c:var:: NPY_TIMEDELTA +.. c:enumerator:: NPY_TIMEDELTA The enumeration value for a data type which holds lengths of times in integers of selectable date or time units. -.. c:var:: NPY_STRING +.. c:enumerator:: NPY_STRING The enumeration value for ASCII strings of a selectable size. The strings have a fixed maximum size within a given array. -.. c:var:: NPY_UNICODE +.. c:enumerator:: NPY_UNICODE The enumeration value for UCS4 strings of a selectable size. The strings have a fixed maximum size within a given array. -.. c:var:: NPY_OBJECT +.. 
c:enumerator:: NPY_OBJECT The enumeration value for references to arbitrary Python objects. -.. c:var:: NPY_VOID +.. c:enumerator:: NPY_VOID Primarily used to hold struct dtypes, but can contain arbitrary binary data. Some useful aliases of the above types are -.. c:var:: NPY_INTP +.. c:enumerator:: NPY_INTP The enumeration value for a signed integer type which is the same size as a (void \*) pointer. This is the type used by all arrays of indices. -.. c:var:: NPY_UINTP +.. c:enumerator:: NPY_UINTP The enumeration value for an unsigned integer type which is the same size as a (void \*) pointer. -.. c:var:: NPY_MASK +.. c:enumerator:: NPY_MASK The enumeration value of the type used for masks, such as with the :c:data:`NPY_ITER_ARRAYMASK` iterator flag. This is equivalent to :c:data:`NPY_UINT8`. -.. c:var:: NPY_DEFAULT_TYPE +.. c:enumerator:: NPY_DEFAULT_TYPE The default type to use when no dtype is explicitly specified, for example when calling np.zero(shape). This is equivalent to @@ -297,9 +297,13 @@ Boolean Unsigned versions of the integers can be defined by pre-pending a 'u' to the front of the integer name. -.. c:type:: npy_(u)byte +.. c:type:: npy_byte - (unsigned) char + char + +.. c:type:: npy_ubyte + + unsigned char .. c:type:: npy_short @@ -309,14 +313,14 @@ to the front of the integer name. unsigned short -.. c:type:: npy_uint - - unsigned int - .. c:type:: npy_int int +.. c:type:: npy_uint + + unsigned int + .. c:type:: npy_int16 16-bit integer @@ -341,13 +345,21 @@ to the front of the integer name. 64-bit unsigned integer -.. c:type:: npy_(u)long +.. c:type:: npy_long - (unsigned) long int + long int -.. c:type:: npy_(u)longlong +.. c:type:: npy_ulong - (unsigned long long int) + unsigned long int + +.. c:type:: npy_longlong + + long long int + +.. c:type:: npy_ulonglong + + unsigned long long int .. c:type:: npy_intp @@ -367,18 +379,30 @@ to the front of the integer name. 16-bit float -.. c:type:: npy_(c)float +.. c:type:: npy_float 32-bit float -.. c:type:: npy_(c)double +.. c:type:: npy_cfloat + + 32-bit complex float + +.. c:type:: npy_double 64-bit double -.. c:type:: npy_(c)longdouble +.. c:type:: npy_cdouble + + 64-bit complex double + +.. c:type:: npy_longdouble long double +.. c:type:: npy_clongdouble + + long complex double + complex types are structures with **.real** and **.imag** members (in that order). diff --git a/doc/source/reference/c-api/iterator.rst b/doc/source/reference/c-api/iterator.rst index ae96bb3fb056..add96e3b4698 100644 --- a/doc/source/reference/c-api/iterator.rst +++ b/doc/source/reference/c-api/iterator.rst @@ -370,7 +370,7 @@ Construction and Destruction arrays or structured arrays containing an object type) may be accepted and used in the iterator. If this flag is enabled, the caller must be sure to check whether - :c:func:`NpyIter_IterationNeedsAPI(iter)` is true, in which case + :c:expr:`NpyIter_IterationNeedsAPI(iter)` is true, in which case it may not release the GIL during iteration. .. c:macro:: NPY_ITER_ZEROSIZE_OK @@ -738,7 +738,7 @@ Construction and Destruction the iterator. Any cached functions or pointers from the iterator must be retrieved again! - After calling this function, :c:func:`NpyIter_HasMultiIndex(iter)` will + After calling this function, :c:expr:`NpyIter_HasMultiIndex(iter)` will return false. Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. 
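The iterator hunks above move the inline NpyIter_IterationNeedsAPI(iter) and NpyIter_HasMultiIndex(iter) references from the :c:func: role to :c:expr:, since the newer Sphinx C domain treats a call expression as an expression rather than a function target. For context only, here is a minimal C sketch (not taken from the patch; it uses the public NpyIter API and assumes a non-empty float64 input, with error handling trimmed) of the pattern those docs describe: release the GIL only when the iterator reports that no Python API calls are needed during iteration.

#include <Python.h>
#include <numpy/arrayobject.h>

/* Sum a non-empty float64 array; assumes import_array() ran at module init. */
static int
sum_float64(PyArrayObject *arr, double *out)
{
    NpyIter *iter = NpyIter_New(arr, NPY_ITER_READONLY, NPY_KEEPORDER,
                                NPY_NO_CASTING, NULL);
    if (iter == NULL) {
        return -1;
    }
    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
    char **dataptr = NpyIter_GetDataPtrArray(iter);
    int needs_api = NpyIter_IterationNeedsAPI(iter);
    double total = 0.0;

    NPY_BEGIN_THREADS_DEF;
    if (!needs_api) {
        NPY_BEGIN_THREADS;    /* only safe to drop the GIL in this case */
    }
    do {
        total += *(double *)dataptr[0];
    } while (iternext(iter));
    if (!needs_api) {
        NPY_END_THREADS;
    }
    *out = total;
    return NpyIter_Deallocate(iter) == NPY_SUCCEED ? 0 : -1;
}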
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 6a9c4a9cf766..042f9714793f 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -7,7 +7,7 @@ Python Types and C-Structures Several new types are defined in the C-code. Most of these are accessible from Python, but a few are not exposed due to their limited -use. Every new Python type has an associated :c:type:`PyObject *` with an +use. Every new Python type has an associated :c:expr:`PyObject *` with an internal structure that includes a pointer to a "method table" that defines how the new object behaves in Python. When you receive a Python object into C code, you always get a pointer to a @@ -61,7 +61,7 @@ hierarchy of actual Python types. PyArray_Type and PyArrayObject ------------------------------ -.. c:var:: PyArray_Type +.. c:var:: PyTypeObject PyArray_Type The Python type of the ndarray is :c:data:`PyArray_Type`. In C, every ndarray is a pointer to a :c:type:`PyArrayObject` structure. The ob_type @@ -177,7 +177,7 @@ PyArray_Type and PyArrayObject PyArrayDescr_Type and PyArray_Descr ----------------------------------- -.. c:var:: PyArrayDescr_Type +.. c:var:: PyTypeObject PyArrayDescr_Type The :c:data:`PyArrayDescr_Type` is the built-in type of the data-type-descriptor objects used to describe how the bytes comprising @@ -612,7 +612,7 @@ PyArrayDescr_Type and PyArray_Descr Either ``NULL`` or a dictionary containing low-level casting functions for user- defined data-types. Each function is - wrapped in a :c:type:`PyCapsule *` and keyed by + wrapped in a :c:expr:`PyCapsule *` and keyed by the data-type number. .. c:member:: NPY_SCALARKIND scalarkind(PyArrayObject* arr) @@ -730,7 +730,7 @@ The :c:data:`PyArray_Type` can also be sub-typed. PyUFunc_Type and PyUFuncObject ------------------------------ -.. c:var:: PyUFunc_Type +.. c:var:: PyTypeObject PyUFunc_Type The ufunc object is implemented by creation of the :c:data:`PyUFunc_Type`. It is a very simple type that implements only @@ -982,7 +982,7 @@ PyUFunc_Type and PyUFuncObject PyArrayIter_Type and PyArrayIterObject -------------------------------------- -.. c:var:: PyArrayIter_Type +.. c:var:: PyTypeObject PyArrayIter_Type This is an iterator object that makes it easy to loop over an N-dimensional array. It is the object returned from the flat @@ -1086,13 +1086,13 @@ the internal structure of the iterator object, and merely interact with it through the use of the macros :c:func:`PyArray_ITER_NEXT` (it), :c:func:`PyArray_ITER_GOTO` (it, dest), or :c:func:`PyArray_ITER_GOTO1D` (it, index). All of these macros require the argument *it* to be a -:c:type:`PyArrayIterObject *`. +:c:expr:`PyArrayIterObject *`. PyArrayMultiIter_Type and PyArrayMultiIterObject ------------------------------------------------ -.. c:var:: PyArrayMultiIter_Type +.. c:var:: PyTypeObject PyArrayMultiIter_Type This type provides an iterator that encapsulates the concept of broadcasting. It allows :math:`N` arrays to be broadcast together @@ -1154,7 +1154,7 @@ PyArrayMultiIter_Type and PyArrayMultiIterObject PyArrayNeighborhoodIter_Type and PyArrayNeighborhoodIterObject -------------------------------------------------------------- -.. c:var:: PyArrayNeighborhoodIter_Type +.. c:var:: PyTypeObject PyArrayNeighborhoodIter_Type This is an iterator object that makes it easy to loop over an N-dimensional neighborhood. 
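The hunks above spell out the variable's type in each declaration (.. c:var:: PyTypeObject PyArray_Type and friends), which the newer Sphinx C domain expects. As a reminder of what those type objects mean in extension code, a small hedged sketch (not from the patch): an ndarray handed to C arrives as a PyObject * whose ob_type is PyArray_Type or a subtype, which is exactly what PyArray_Check() tests before the cast to PyArrayObject * is made.

#include <Python.h>
#include <numpy/arrayobject.h>

/* Assumes import_array() was called at module initialization. */
static int
ndim_or_minus_one(PyObject *obj)
{
    if (!PyArray_Check(obj)) {
        /* obj->ob_type is not PyArray_Type or a subtype of it */
        return -1;
    }
    PyArrayObject *arr = (PyArrayObject *)obj;   /* now a safe cast */
    return PyArray_NDIM(arr);
}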
@@ -1193,7 +1193,7 @@ PyArrayNeighborhoodIter_Type and PyArrayNeighborhoodIterObject PyArrayFlags_Type and PyArrayFlagsObject ---------------------------------------- -.. c:var:: PyArrayFlags_Type +.. c:var:: PyTypeObject PyArrayFlags_Type When the flags attribute is retrieved from Python, a special builtin object of this type is constructed. This special type makes @@ -1442,7 +1442,7 @@ for completeness and assistance in understanding the code. to define a 1-d loop for a ufunc for every defined signature of a user-defined data-type. -.. c:var:: PyArrayMapIter_Type +.. c:var:: PyTypeObject PyArrayMapIter_Type Advanced indexing is handled with this Python type. It is simply a loose wrapper around the C-structure containing the variables diff --git a/doc/source/reference/random/bit_generators/mt19937.rst b/doc/source/reference/random/bit_generators/mt19937.rst index 71875db4e5dd..d05ea7c6f09a 100644 --- a/doc/source/reference/random/bit_generators/mt19937.rst +++ b/doc/source/reference/random/bit_generators/mt19937.rst @@ -4,7 +4,8 @@ Mersenne Twister (MT19937) .. currentmodule:: numpy.random .. autoclass:: MT19937 - :exclude-members: + :members: __init__ + :exclude-members: __init__ State ===== diff --git a/doc/source/reference/random/bit_generators/pcg64.rst b/doc/source/reference/random/bit_generators/pcg64.rst index edac4620baa3..889965f77bc2 100644 --- a/doc/source/reference/random/bit_generators/pcg64.rst +++ b/doc/source/reference/random/bit_generators/pcg64.rst @@ -4,7 +4,8 @@ Permuted Congruential Generator (64-bit, PCG64) .. currentmodule:: numpy.random .. autoclass:: PCG64 - :exclude-members: + :members: __init__ + :exclude-members: __init__ State ===== diff --git a/doc/source/reference/random/bit_generators/philox.rst b/doc/source/reference/random/bit_generators/philox.rst index 8eba2d3511e4..3c2fa4cc5aa1 100644 --- a/doc/source/reference/random/bit_generators/philox.rst +++ b/doc/source/reference/random/bit_generators/philox.rst @@ -4,7 +4,8 @@ Philox Counter-based RNG .. currentmodule:: numpy.random .. autoclass:: Philox - :exclude-members: + :members: __init__ + :exclude-members: __init__ State ===== diff --git a/doc/source/reference/random/bit_generators/sfc64.rst b/doc/source/reference/random/bit_generators/sfc64.rst index d34124a33355..8cb255bc154d 100644 --- a/doc/source/reference/random/bit_generators/sfc64.rst +++ b/doc/source/reference/random/bit_generators/sfc64.rst @@ -4,7 +4,8 @@ SFC64 Small Fast Chaotic PRNG .. currentmodule:: numpy.random .. autoclass:: SFC64 - :exclude-members: + :members: __init__ + :exclude-members: __init__ State ===== diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst index 8706e1de2c80..a359d22538e7 100644 --- a/doc/source/reference/random/generator.rst +++ b/doc/source/reference/random/generator.rst @@ -15,7 +15,8 @@ can be changed by passing an instantized BitGenerator to ``Generator``. .. autofunction:: default_rng .. autoclass:: Generator - :exclude-members: + :members: __init__ + :exclude-members: __init__ Accessing the BitGenerator ========================== diff --git a/doc/source/reference/random/legacy.rst b/doc/source/reference/random/legacy.rst index 6cf4775b81ba..42437dbb6173 100644 --- a/doc/source/reference/random/legacy.rst +++ b/doc/source/reference/random/legacy.rst @@ -48,7 +48,8 @@ using the state of the `RandomState`: .. 
autoclass:: RandomState - :exclude-members: + :members: __init__ + :exclude-members: __init__ Seeding and State ================= diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 124162d6ce17..289a7951b792 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -172,8 +172,8 @@ iterators so that all that needs to be done to advance to the next element in each array is for PyArray_ITER_NEXT to be called for each of the inputs. This incrementing is automatically performed by :c:func:`PyArray_MultiIter_NEXT` ( ``obj`` ) macro (which can handle a -multiterator ``obj`` as either a :c:type:`PyArrayMultiObject *` or a -:c:type:`PyObject *`). The data from input number ``i`` is available using +multiterator ``obj`` as either a :c:expr:`PyArrayMultiObject *` or a +:c:expr:`PyObject *`). The data from input number ``i`` is available using :c:func:`PyArray_MultiIter_DATA` ( ``obj``, ``i`` ) and the total (broadcasted) size as :c:func:`PyArray_MultiIter_SIZE` ( ``obj``). An example of using this feature follows. @@ -400,7 +400,7 @@ describe the desired behavior of the type. Typically, a new C-structure is also created to contain the instance-specific information needed for each object of the type as well. For example, :c:data:`&PyArray_Type` is a pointer to the type-object table for the ndarray -while a :c:type:`PyArrayObject *` variable is a pointer to a particular instance +while a :c:expr:`PyArrayObject *` variable is a pointer to a particular instance of an ndarray (one of the members of the ndarray structure is, in turn, a pointer to the type- object table :c:data:`&PyArray_Type`). Finally :c:func:`PyType_Ready` () must be called for diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst index 845ce0a74c31..ebb4b7518d1d 100644 --- a/doc/source/user/c-info.how-to-extend.rst +++ b/doc/source/user/c-info.how-to-extend.rst @@ -174,7 +174,7 @@ rule. There are several converter functions defined in the NumPy C-API that may be of use. In particular, the :c:func:`PyArray_DescrConverter` function is very useful to support arbitrary data-type specification. This function transforms any valid data-type Python object into a -:c:type:`PyArray_Descr *` object. Remember to pass in the address of the +:c:expr:`PyArray_Descr *` object. Remember to pass in the address of the C-variables that should be filled in. There are lots of examples of how to use :c:func:`PyArg_ParseTuple` @@ -192,7 +192,7 @@ It is important to keep in mind that you get a *borrowed* reference to the object when using the "O" format string. However, the converter functions usually require some form of memory handling. In this example, if the conversion is successful, *dtype* will hold a new -reference to a :c:type:`PyArray_Descr *` object, while *input* will hold a +reference to a :c:expr:`PyArray_Descr *` object, while *input* will hold a borrowed reference. Therefore, if this conversion were mixed with another conversion (say to an integer) and the data-type conversion was successful but the integer conversion failed, then you would need @@ -213,9 +213,9 @@ The :c:func:`Py_BuildValue` (format_string, c_variables...) function makes it easy to build tuples of Python objects from C variables. Pay special attention to the difference between 'N' and 'O' in the format string or you can easily create memory leaks. 
The 'O' format string -increments the reference count of the :c:type:`PyObject *` C-variable it +increments the reference count of the :c:expr:`PyObject *` C-variable it corresponds to, while the 'N' format string steals a reference to the -corresponding :c:type:`PyObject *` C-variable. You should use 'N' if you have +corresponding :c:expr:`PyObject *` C-variable. You should use 'N' if you have already created a reference for the object and just want to give that reference to the tuple. You should use 'O' if you only have a borrowed reference to an object and need to create one to provide for the @@ -510,7 +510,7 @@ by providing default values for common use cases. Getting at ndarray memory and accessing elements of the ndarray --------------------------------------------------------------- -If obj is an ndarray (:c:type:`PyArrayObject *`), then the data-area of the +If obj is an ndarray (:c:expr:`PyArrayObject *`), then the data-area of the ndarray is pointed to by the void* pointer :c:func:`PyArray_DATA` (obj) or the char* pointer :c:func:`PyArray_BYTES` (obj). Remember that (in general) this data-area may not be aligned according to the data-type, it may diff --git a/doc_requirements.txt b/doc_requirements.txt index f69cc5dcb406..0650e0a58d3b 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx>=2.2.0,<3.0 +sphinx>=3.2 numpydoc==1.1.0 ipython scipy From 179038f94c213b528fbfb7ac6d55c94f95d18a62 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sat, 7 Nov 2020 13:28:07 +0530 Subject: [PATCH 0050/1270] ENH: Added libdiv --- numpy/core/setup.py | 7 +++ numpy/core/src/umath/fast_loop_macros.h | 13 +++++- numpy/core/src/umath/loops.c.src | 58 +++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 2 deletions(-) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 68aa0a8513fb..a3eb16a5ceae 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -386,6 +386,10 @@ def check_mathlib(config_cmd): "MATHLIB env variable") return mathlibs +def check_libdivide(): + return os.environ.get('NPY_USE_LIBDIVIDE') is not None + + def visibility_define(config): """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty string).""" @@ -442,6 +446,9 @@ def generate_config_h(ext, build_dir): mathlibs = check_mathlib(config_cmd) moredefs.append(('MATHLIB', ','.join(mathlibs))) + # Check if libdivide needs to be used + check_libdivide() and moredefs.append('USE_LIBDIVIDE') + check_math_capabilities(config_cmd, ext, moredefs, mathlibs) moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h index 74bf016436dd..6fe0824cb882 100644 --- a/numpy/core/src/umath/fast_loop_macros.h +++ b/numpy/core/src/umath/fast_loop_macros.h @@ -46,12 +46,21 @@ abs_ptrdiff(char *a, char *b) npy_intp i;\ for(i = 0; i < n; i++, ip1 += is1, op1 += os1, op2 += os2) -/** (ip1, ip2) -> (op1) */ -#define BINARY_LOOP\ +#define BINARY_LOOP_BASE\ char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\ npy_intp n = dimensions[0];\ npy_intp i;\ + +#define BINARY_LOOP_FIXED\ + for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) + +#define BINARY_LOOP_SLIDING\ + for(i = 0; i < n; i++, ip1 += is1, op1 += os1) + +/** (ip1, ip2) -> (op1) */ +#define BINARY_LOOP\ + BINARY_LOOP_BASE\ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) /** (ip1, ip2) -> 
(op1, op2) */ diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index ef3d5a21a413..d30f5a64ca0a 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -19,6 +19,7 @@ #include "ufunc_object.h" #include /* for memchr */ +#include /* * cutoff blocksize for pairwise summation @@ -826,6 +827,7 @@ NPY_NO_EXPORT void * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong# * #c = ,,,l,ll# + * #div = s32, s32, s32, s64, s64# */ NPY_NO_EXPORT NPY_GCC_OPT_3 void @@ -840,6 +842,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : (in < 0 ? -1 : 0)); } +#ifndef USE_LIBDIVIDE NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { @@ -865,6 +868,61 @@ NPY_NO_EXPORT void } } } +#else +NPY_NO_EXPORT void +@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP_BASE + + if(!is2) { + struct libdivide_@div@_t fast_d = libdivide_@div@_gen(*(int*)ip2); + const @type@ in2 = *(@type@ *)ip2; + BINARY_LOOP_FIXED { + const @type@ in1 = *(@type@ *)ip1; + /* + * FIXME: On x86 at least, dividing the smallest representable integer + * by -1 causes a SIFGPE (division overflow). We treat this case here + * (to avoid a SIGFPE crash at python level), but a good solution would + * be to treat integer division problems separately from FPU exceptions + * (i.e. a different approach than npy_set_floatstatus_divbyzero()). + */ + if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { + npy_set_floatstatus_divbyzero(); + *((@type@ *)op1) = 0; + } + else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { + *((@type@ *)op1) = libdivide_@div@_do(in1, &fast_d) - 1; + } + else { + *((@type@ *)op1) = libdivide_@div@_do(in1, &fast_d); + } + } + } + else { + BINARY_LOOP_SLIDING { // XXX Lot of repeated code + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + /* + * FIXME: On x86 at least, dividing the smallest representable integer + * by -1 causes a SIFGPE (division overflow). We treat this case here + * (to avoid a SIGFPE crash at python level), but a good solution would + * be to treat integer division problems separately from FPU exceptions + * (i.e. a different approach than npy_set_floatstatus_divbyzero()). 
+ */ + if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { + npy_set_floatstatus_divbyzero(); + *((@type@ *)op1) = 0; + } + else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { + *((@type@ *)op1) = in1/in2 - 1; + } + else { + *((@type@ *)op1) = in1/in2; + } + } + } +} +#endif NPY_NO_EXPORT void @TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) From e89175b20efe9383a805dc6515d615f3f8792f25 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sat, 7 Nov 2020 21:58:09 +0530 Subject: [PATCH 0051/1270] ENH: Fixed typos in header | use in2 over ip2 --- numpy/core/src/umath/fast_loop_macros.h | 6 +++--- numpy/core/src/umath/loops.c.src | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h index 6fe0824cb882..90dcad3685d6 100644 --- a/numpy/core/src/umath/fast_loop_macros.h +++ b/numpy/core/src/umath/fast_loop_macros.h @@ -52,16 +52,16 @@ abs_ptrdiff(char *a, char *b) npy_intp n = dimensions[0];\ npy_intp i;\ -#define BINARY_LOOP_FIXED\ +#define BINARY_LOOP_SLIDING\ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) -#define BINARY_LOOP_SLIDING\ +#define BINARY_LOOP_FIXED\ for(i = 0; i < n; i++, ip1 += is1, op1 += os1) /** (ip1, ip2) -> (op1) */ #define BINARY_LOOP\ BINARY_LOOP_BASE\ - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) + BINARY_LOOP_SLIDING /** (ip1, ip2) -> (op1, op2) */ #define BINARY_LOOP_TWO_OUT\ diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index d30f5a64ca0a..ad50f021b926 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -875,8 +875,8 @@ NPY_NO_EXPORT void BINARY_LOOP_BASE if(!is2) { - struct libdivide_@div@_t fast_d = libdivide_@div@_gen(*(int*)ip2); const @type@ in2 = *(@type@ *)ip2; + struct libdivide_@div@_t fast_d = libdivide_@div@_gen(in2); BINARY_LOOP_FIXED { const @type@ in1 = *(@type@ *)ip1; /* From 565759be07004e1994ce8497f5573fd73cded7d0 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sun, 8 Nov 2020 12:13:11 +0530 Subject: [PATCH 0052/1270] ENH: Added optimal divisor --- numpy/core/setup.py | 5 ++ numpy/core/src/umath/loops.c.src | 100 ++++++++++++++++++++++++------- 2 files changed, 82 insertions(+), 23 deletions(-) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index a3eb16a5ceae..ca108863c355 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -389,6 +389,8 @@ def check_mathlib(config_cmd): def check_libdivide(): return os.environ.get('NPY_USE_LIBDIVIDE') is not None +def check_optimal_divisor(): + return os.environ.get('NPY_USE_OPTIMAL_DIVISOR') is not None def visibility_define(config): """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty @@ -449,6 +451,9 @@ def generate_config_h(ext, build_dir): # Check if libdivide needs to be used check_libdivide() and moredefs.append('USE_LIBDIVIDE') + # Check if optimal divisor code needs to be used + check_optimal_divisor() and moredefs.append('USE_OPTIMAL_DIVISOR') + check_math_capabilities(config_cmd, ext, moredefs, mathlibs) moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index ad50f021b926..3a7543b99421 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -842,33 +842,61 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void UNARY_LOOP_FAST(@type@, @type@, 
*out = in > 0 ? 1 : (in < 0 ? -1 : 0)); } -#ifndef USE_LIBDIVIDE +#ifdef USE_LIBDIVIDE NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; + BINARY_LOOP_BASE + + if(!is2) { const @type@ in2 = *(@type@ *)ip2; - /* - * FIXME: On x86 at least, dividing the smallest representable integer - * by -1 causes a SIFGPE (division overflow). We treat this case here - * (to avoid a SIGFPE crash at python level), but a good solution would - * be to treat integer division problems separately from FPU exceptions - * (i.e. a different approach than npy_set_floatstatus_divbyzero()). - */ - if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op1) = in1/in2 - 1; + struct libdivide_@div@_t fast_d = libdivide_@div@_gen(in2); + BINARY_LOOP_FIXED { + const @type@ in1 = *(@type@ *)ip1; + /* + * FIXME: On x86 at least, dividing the smallest representable integer + * by -1 causes a SIFGPE (division overflow). We treat this case here + * (to avoid a SIGFPE crash at python level), but a good solution would + * be to treat integer division problems separately from FPU exceptions + * (i.e. a different approach than npy_set_floatstatus_divbyzero()). + */ + if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { + npy_set_floatstatus_divbyzero(); + *((@type@ *)op1) = 0; + } + else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { + *((@type@ *)op1) = libdivide_@div@_do(in1, &fast_d) - 1; + } + else { + *((@type@ *)op1) = libdivide_@div@_do(in1, &fast_d); + } } - else { - *((@type@ *)op1) = in1/in2; + } + else { + BINARY_LOOP_SLIDING { // XXX Lot of repeated code + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + /* + * FIXME: On x86 at least, dividing the smallest representable integer + * by -1 causes a SIFGPE (division overflow). We treat this case here + * (to avoid a SIGFPE crash at python level), but a good solution would + * be to treat integer division problems separately from FPU exceptions + * (i.e. a different approach than npy_set_floatstatus_divbyzero()). 
+ */ + if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { + npy_set_floatstatus_divbyzero(); + *((@type@ *)op1) = 0; + } + else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { + *((@type@ *)op1) = in1/in2 - 1; + } + else { + *((@type@ *)op1) = in1/in2; + } } } } -#else +#elif defined(USE_OPTIMAL_DIVISOR) NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { @@ -876,7 +904,7 @@ NPY_NO_EXPORT void if(!is2) { const @type@ in2 = *(@type@ *)ip2; - struct libdivide_@div@_t fast_d = libdivide_@div@_gen(in2); + const float in2_f = (float) in2; BINARY_LOOP_FIXED { const @type@ in1 = *(@type@ *)ip1; /* @@ -890,11 +918,11 @@ NPY_NO_EXPORT void npy_set_floatstatus_divbyzero(); *((@type@ *)op1) = 0; } - else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op1) = libdivide_@div@_do(in1, &fast_d) - 1; + else if ((in1 > 0) != (in2 > 0)) { + *((@type@ *)op1) = floor(in1/in2_f); } else { - *((@type@ *)op1) = libdivide_@div@_do(in1, &fast_d); + *((@type@ *)op1) = in1/in2; } } } @@ -922,6 +950,32 @@ NPY_NO_EXPORT void } } } +#else +NPY_NO_EXPORT void +@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + /* + * FIXME: On x86 at least, dividing the smallest representable integer + * by -1 causes a SIFGPE (division overflow). We treat this case here + * (to avoid a SIGFPE crash at python level), but a good solution would + * be to treat integer division problems separately from FPU exceptions + * (i.e. a different approach than npy_set_floatstatus_divbyzero()). + */ + if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { + npy_set_floatstatus_divbyzero(); + *((@type@ *)op1) = 0; + } + else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { + *((@type@ *)op1) = in1/in2 - 1; + } + else { + *((@type@ *)op1) = in1/in2; + } + } +} #endif NPY_NO_EXPORT void From d0c934cf1627eecdc2771e0dec945804669bb019 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sun, 8 Nov 2020 13:59:29 +0530 Subject: [PATCH 0053/1270] ENH: Added libdivide header --- numpy/core/include/numpy/libdivide.h | 2079 ++++++++++++++++++++++++++ numpy/core/src/umath/loops.c.src | 7 +- 2 files changed, 2085 insertions(+), 1 deletion(-) create mode 100644 numpy/core/include/numpy/libdivide.h diff --git a/numpy/core/include/numpy/libdivide.h b/numpy/core/include/numpy/libdivide.h new file mode 100644 index 000000000000..81057b7b43de --- /dev/null +++ b/numpy/core/include/numpy/libdivide.h @@ -0,0 +1,2079 @@ +// libdivide.h - Optimized integer division +// https://libdivide.com +// +// Copyright (C) 2010 - 2019 ridiculous_fish, +// Copyright (C) 2016 - 2019 Kim Walisch, +// +// libdivide is dual-licensed under the Boost or zlib licenses. +// You may use libdivide under the terms of either of these. +// See LICENSE.txt for more details. 
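For orientation, a minimal usage sketch (not taken from this patch) of the call pattern the new @TYPE@_divide fast path relies on when the divisor stride is zero, i.e. one scalar divisor shared by the whole inner loop. libdivide_s32_gen() and libdivide_s32_do() are the functions declared further down in this header; the real loop additionally screens out a zero divisor and corrects the truncated quotient toward floor division (the "- 1" branch taken when the operands have opposite sign and a nonzero remainder).

#include <stdint.h>
#include <stddef.h>
#include "libdivide.h"

/* Assumes d != 0: libdivide_s32_gen() aborts on a zero divisor, which the
 * NumPy loop handles separately via npy_set_floatstatus_divbyzero(). */
static void
divide_all_by(const int32_t *src, int32_t *dst, size_t n, int32_t d)
{
    /* one-time "magic number" setup for the divisor */
    struct libdivide_s32_t fast_d = libdivide_s32_gen(d);
    for (size_t i = 0; i < n; i++) {
        /* each element is divided with a multiply + shift instead of a hardware idiv */
        dst[i] = libdivide_s32_do(src[i], &fast_d);
    }
}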
+ +#ifndef LIBDIVIDE_H +#define LIBDIVIDE_H + +#define LIBDIVIDE_VERSION "3.0" +#define LIBDIVIDE_VERSION_MAJOR 3 +#define LIBDIVIDE_VERSION_MINOR 0 + +#include + +#if defined(__cplusplus) + #include + #include + #include +#else + #include + #include +#endif + +#if defined(LIBDIVIDE_AVX512) + #include +#elif defined(LIBDIVIDE_AVX2) + #include +#elif defined(LIBDIVIDE_SSE2) + #include +#endif + +#if defined(_MSC_VER) + #include + // disable warning C4146: unary minus operator applied + // to unsigned type, result still unsigned + #pragma warning(disable: 4146) + #define LIBDIVIDE_VC +#endif + +#if !defined(__has_builtin) + #define __has_builtin(x) 0 +#endif + +#if defined(__SIZEOF_INT128__) + #define HAS_INT128_T + // clang-cl on Windows does not yet support 128-bit division + #if !(defined(__clang__) && defined(LIBDIVIDE_VC)) + #define HAS_INT128_DIV + #endif +#endif + +#if defined(__x86_64__) || defined(_M_X64) + #define LIBDIVIDE_X86_64 +#endif + +#if defined(__i386__) + #define LIBDIVIDE_i386 +#endif + +#if defined(__GNUC__) || defined(__clang__) + #define LIBDIVIDE_GCC_STYLE_ASM +#endif + +#if defined(__cplusplus) || defined(LIBDIVIDE_VC) + #define LIBDIVIDE_FUNCTION __FUNCTION__ +#else + #define LIBDIVIDE_FUNCTION __func__ +#endif + +#define LIBDIVIDE_ERROR(msg) \ + do { \ + fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, msg); \ + abort(); \ + } while (0) + +#if defined(LIBDIVIDE_ASSERTIONS_ON) + #define LIBDIVIDE_ASSERT(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, #x); \ + abort(); \ + } \ + } while (0) +#else + #define LIBDIVIDE_ASSERT(x) +#endif + +#ifdef __cplusplus +namespace libdivide { +#endif + +// pack divider structs to prevent compilers from padding. +// This reduces memory usage by up to 43% when using a large +// array of libdivide dividers and improves performance +// by up to 10% because of reduced memory bandwidth. +#pragma pack(push, 1) + +struct libdivide_u32_t { + uint32_t magic; + uint8_t more; +}; + +struct libdivide_s32_t { + int32_t magic; + uint8_t more; +}; + +struct libdivide_u64_t { + uint64_t magic; + uint8_t more; +}; + +struct libdivide_s64_t { + int64_t magic; + uint8_t more; +}; + +struct libdivide_u32_branchfree_t { + uint32_t magic; + uint8_t more; +}; + +struct libdivide_s32_branchfree_t { + int32_t magic; + uint8_t more; +}; + +struct libdivide_u64_branchfree_t { + uint64_t magic; + uint8_t more; +}; + +struct libdivide_s64_branchfree_t { + int64_t magic; + uint8_t more; +}; + +#pragma pack(pop) + +// Explanation of the "more" field: +// +// * Bits 0-5 is the shift value (for shift path or mult path). +// * Bit 6 is the add indicator for mult path. +// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative +// divisor indicator so that we can efficiently use sign extension to +// create a bitmask with all bits set to 1 (if the divisor is negative) +// or 0 (if the divisor is positive). 
+// +// u32: [0-4] shift value +// [5] ignored +// [6] add indicator +// magic number of 0 indicates shift path +// +// s32: [0-4] shift value +// [5] ignored +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// u64: [0-5] shift value +// [6] add indicator +// magic number of 0 indicates shift path +// +// s64: [0-5] shift value +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// In s32 and s64 branchfree modes, the magic number is negated according to +// whether the divisor is negated. In branchfree strategy, it is not negated. + +enum { + LIBDIVIDE_32_SHIFT_MASK = 0x1F, + LIBDIVIDE_64_SHIFT_MASK = 0x3F, + LIBDIVIDE_ADD_MARKER = 0x40, + LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 +}; + +static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d); +static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d); +static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d); +static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d); + +static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d); +static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d); +static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d); +static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d); + +static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom); + +static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom); + +static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom); + +static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) { + uint64_t xl = x, yl = y; + uint64_t rl = xl * yl; + return (uint32_t)(rl >> 32); +} + +static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) { + int64_t xl = x, yl = y; + int64_t rl = xl * yl; + // needs to be arithmetic shift + return (int32_t)(rl >> 32); +} + +static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) { +#if defined(LIBDIVIDE_VC) && \ + 
defined(LIBDIVIDE_X86_64) + return __umulh(x, y); +#elif defined(HAS_INT128_T) + __uint128_t xl = x, yl = y; + __uint128_t rl = xl * yl; + return (uint64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t y0 = (uint32_t)(y & mask); + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + uint64_t x0y1 = x0 * (uint64_t)y1; + uint64_t x1y0 = x1 * (uint64_t)y0; + uint64_t x1y1 = x1 * (uint64_t)y1; + uint64_t temp = x1y0 + x0y0_hi; + uint64_t temp_lo = temp & mask; + uint64_t temp_hi = temp >> 32; + + return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32); +#endif +} + +static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __mulh(x, y); +#elif defined(HAS_INT128_T) + __int128_t xl = x, yl = y; + __int128_t rl = xl * yl; + return (int64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t y0 = (uint32_t)(y & mask); + int32_t x1 = (int32_t)(x >> 32); + int32_t y1 = (int32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + int64_t t = x1 * (int64_t)y0 + x0y0_hi; + int64_t w1 = x0 * (int64_t)y1 + (t & mask); + + return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32); +#endif +} + +static inline int32_t libdivide_count_leading_zeros32(uint32_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clz) + // Fast way to count leading zeros + return __builtin_clz(val); +#elif defined(LIBDIVIDE_VC) + unsigned long result; + if (_BitScanReverse(&result, val)) { + return 31 - result; + } + return 0; +#else + if (val == 0) + return 32; + int32_t result = 8; + uint32_t hi = 0xFFU << 24; + while ((val & hi) == 0) { + hi >>= 8; + result += 8; + } + while (val & hi) { + result -= 1; + hi <<= 1; + } + return result; +#endif +} + +static inline int32_t libdivide_count_leading_zeros64(uint64_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clzll) + // Fast way to count leading zeros + return __builtin_clzll(val); +#elif defined(LIBDIVIDE_VC) && defined(_WIN64) + unsigned long result; + if (_BitScanReverse64(&result, val)) { + return 63 - result; + } + return 0; +#else + uint32_t hi = val >> 32; + uint32_t lo = val & 0xFFFFFFFF; + if (hi != 0) return libdivide_count_leading_zeros32(hi); + return 32 + libdivide_count_leading_zeros32(lo); +#endif +} + +// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit +// uint {v}. The result must fit in 32 bits. +// Returns the quotient directly and the remainder in *r +static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint32_t result; + __asm__("divl %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#else + uint64_t n = ((uint64_t)u1 << 32) | u0; + uint32_t result = (uint32_t)(n / v); + *r = (uint32_t)(n - result * (uint64_t)v); + return result; +#endif +} + +// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit +// uint {v}. The result must fit in 64 bits. 
+// Returns the quotient directly and the remainder in *r +static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { +#if defined(LIBDIVIDE_X86_64) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint64_t result; + __asm__("divq %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#elif defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t n = ((__uint128_t)u1 << 64) | u0; + uint64_t result = (uint64_t)(n / v); + *r = (uint64_t)(n - result * (__uint128_t)v); + return result; +#else + // Code taken from Hacker's Delight: + // http://www.hackersdelight.org/HDcode/divlu.c. + // License permits inclusion here per: + // http://www.hackersdelight.org/permissions.htm + + const uint64_t b = (1ULL << 32); // Number base (32 bits) + uint64_t un1, un0; // Norm. dividend LSD's + uint64_t vn1, vn0; // Norm. divisor digits + uint64_t q1, q0; // Quotient digits + uint64_t un64, un21, un10; // Dividend digit pairs + uint64_t rhat; // A remainder + int32_t s; // Shift amount for norm + + // If overflow, set rem. to an impossible value, + // and return the largest possible quotient + if (u1 >= v) { + *r = (uint64_t) -1; + return (uint64_t) -1; + } + + // count leading zeros + s = libdivide_count_leading_zeros64(v); + if (s > 0) { + // Normalize divisor + v = v << s; + un64 = (u1 << s) | (u0 >> (64 - s)); + un10 = u0 << s; // Shift dividend left + } else { + // Avoid undefined behavior of (u0 >> 64). + // The behavior is undefined if the right operand is + // negative, or greater than or equal to the length + // in bits of the promoted left operand. + un64 = u1; + un10 = u0; + } + + // Break divisor up into two 32-bit digits + vn1 = v >> 32; + vn0 = v & 0xFFFFFFFF; + + // Break right half of dividend into two digits + un1 = un10 >> 32; + un0 = un10 & 0xFFFFFFFF; + + // Compute the first quotient digit, q1 + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + while (q1 >= b || q1 * vn0 > b * rhat + un1) { + q1 = q1 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + // Multiply and subtract + un21 = un64 * b + un1 - q1 * v; + + // Compute the second quotient digit + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= b || q0 * vn0 > b * rhat + un0) { + q0 = q0 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +#endif +} + +// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0) +static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) { + if (signed_shift > 0) { + uint32_t shift = signed_shift; + *u1 <<= shift; + *u1 |= *u0 >> (64 - shift); + *u0 <<= shift; + } + else if (signed_shift < 0) { + uint32_t shift = -signed_shift; + *u0 >>= shift; + *u0 |= *u1 << (64 - shift); + *u1 >>= shift; + } +} + +// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder. 
+static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) { +#if defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t ufull = u_hi; + __uint128_t vfull = v_hi; + ufull = (ufull << 64) | u_lo; + vfull = (vfull << 64) | v_lo; + uint64_t res = (uint64_t)(ufull / vfull); + __uint128_t remainder = ufull - (vfull * res); + *r_lo = (uint64_t)remainder; + *r_hi = (uint64_t)(remainder >> 64); + return res; +#else + // Adapted from "Unsigned Doubleword Division" in Hacker's Delight + // We want to compute u / v + typedef struct { uint64_t hi; uint64_t lo; } u128_t; + u128_t u = {u_hi, u_lo}; + u128_t v = {v_hi, v_lo}; + + if (v.hi == 0) { + // divisor v is a 64 bit value, so we just need one 128/64 division + // Note that we are simpler than Hacker's Delight here, because we know + // the quotient fits in 64 bits whereas Hacker's Delight demands a full + // 128 bit quotient + *r_hi = 0; + return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo); + } + // Here v >= 2**64 + // We know that v.hi != 0, so count leading zeros is OK + // We have 0 <= n <= 63 + uint32_t n = libdivide_count_leading_zeros64(v.hi); + + // Normalize the divisor so its MSB is 1 + u128_t v1t = v; + libdivide_u128_shift(&v1t.hi, &v1t.lo, n); + uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64 + + // To ensure no overflow + u128_t u1 = u; + libdivide_u128_shift(&u1.hi, &u1.lo, -1); + + // Get quotient from divide unsigned insn. + uint64_t rem_ignored; + uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored); + + // Undo normalization and division of u by 2. + u128_t q0 = {0, q1}; + libdivide_u128_shift(&q0.hi, &q0.lo, n); + libdivide_u128_shift(&q0.hi, &q0.lo, -63); + + // Make q0 correct or too small by 1 + // Equivalent to `if (q0 != 0) q0 = q0 - 1;` + if (q0.hi != 0 || q0.lo != 0) { + q0.hi -= (q0.lo == 0); // borrow + q0.lo -= 1; + } + + // Now q0 is correct. + // Compute q0 * v as q0v + // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo) + // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) + + // (q0.lo * v.hi << 64) + q0.lo * v.lo) + // Each term is 128 bit + // High half of full product (upper 128 bits!) are dropped + u128_t q0v = {0, 0}; + q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo); + q0v.lo = q0.lo*v.lo; + + // Compute u - q0v as u_q0v + // This is the remainder + u128_t u_q0v = u; + u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow + u_q0v.lo -= q0v.lo; + + // Check if u_q0v >= v + // This checks if our remainder is larger than the divisor + if ((u_q0v.hi > v.hi) || + (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) { + // Increment q0 + q0.lo += 1; + q0.hi += (q0.lo == 0); // carry + + // Subtract v from remainder + u_q0v.hi -= v.hi + (u_q0v.lo < v.lo); + u_q0v.lo -= v.lo; + } + + *r_hi = u_q0v.hi; + *r_lo = u_q0v.lo; + + LIBDIVIDE_ASSERT(q0.hi == 0); + return q0.lo; +#endif +} + +////////// UINT32 + +static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u32_t result; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. 
+ result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint8_t more; + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint32_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && (e < (1U << floor_log_2_d))) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 33-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases. + } + return result; +} + +struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { + return libdivide_internal_u32_gen(d, 0); +} + +struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1); + struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)}; + return ret; +} + +uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint32_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_32_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. + return q >> more; + } + } +} + +uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + uint32_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + return 1U << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(32 + shift) + // Therefore we have d = 2^(32 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint32_t hi_dividend = 1U << shift; + uint32_t rem_ignored; + return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. 
+ uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling th + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; + } +} + +uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + return 1U << (shift + 1); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling th + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; + } +} + +/////////// UINT64 + +static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u64_t result; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint64_t proposed_m, rem; + uint8_t more; + // (1 << (64 + floor_log_2_d)) / d + proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint64_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 65-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. 
If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases, + // which is why we do it outside of the if statement. + } + return result; +} + +struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { + return libdivide_internal_u64_gen(d, 0); +} + +struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1); + struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)}; + return ret; +} + +uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint64_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_64_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. + return q >> more; + } + } +} + +uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + uint64_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(64 + shift) + // Therefore we have d = 2^(64 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint64_t hi_dividend = 1ULL << shift; + uint64_t rem_ignored; + return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << (shift + 1); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. 
This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +/////////// SINT32 + +static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s32_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint32_t ud = (uint32_t)d; + uint32_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and normal paths are exactly the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + LIBDIVIDE_ASSERT(floor_log_2_d >= 1); + + uint8_t more; + // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word + // is 0 and the high word is floor_log_2_d - 1 + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem); + const uint32_t e = absD - rem; + + // We are going to start with a power of floor_log_2_d - 1. + // This works if works if e < 2**floor_log_2_d. + if (!branchfree && e < (1U << floor_log_2_d)) { + // This power works + more = floor_log_2_d - 1; + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + + proposed_m += 1; + int32_t magic = (int32_t)proposed_m; + + // Mark if we are negative. Note we only negate the magic number in the + // branchfull case. 
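+        //
+        // Editorial example: for d = 7 this path yields
+        // magic = (int32_t)0x92492493 with shift = 2 and LIBDIVIDE_ADD_MARKER
+        // set.  For d = -7 the same values are derived from absD, the magic is
+        // negated (branchfull only), and LIBDIVIDE_NEGATIVE_DIVISOR records the
+        // sign of d so the do() routines can give the quotient the right sign.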
+ if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; + } + return result; +} + +struct libdivide_s32_t libdivide_s32_gen(int32_t d) { + return libdivide_internal_s32_gen(d, 0); +} + +struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) { + struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1); + struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more}; + return result; +} + +int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + uint32_t sign = (int8_t)more >> 7; + uint32_t mask = (1U << shift) - 1; + uint32_t uq = numer + ((numer >> 31) & mask); + int32_t q = (int32_t)uq; + q >>= shift; + q = (q ^ sign) - sign; + return q; + } else { + uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint32_t)numer ^ sign) - sign; + } + int32_t q = (int32_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + int32_t magic = denom->magic; + int32_t q = libdivide_mullhi_s32(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + uint32_t q_sign = (uint32_t)(q >> 31); + q += q_sign & ((1U << shift) - is_power_of_2); + + // Now arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + if (!denom->magic) { + uint32_t absD = 1U << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int32_t)absD; + } else { + // Unsigned math is much easier + // We negate the magic number only in the branchfull case, and we don't + // know which case we're in. However we have enough information to + // determine the correct sign of the magic number. The divisor was + // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set, + // the magic number's sign is opposite that of the divisor. + // We want to compute the positive magic number. + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + // Handle the power of 2 case (including branchfree) + if (denom->magic == 0) { + int32_t result = 1U << shift; + return negative_divisor ? -result : result; + } + + uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30 + uint32_t q = (uint32_t)(n / d); + int32_t result = (int32_t)q; + result += 1; + return negative_divisor ? 
-result : result; + } +} + +int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) { + return libdivide_s32_recover((const struct libdivide_s32_t *)denom); +} + +///////////// SINT64 + +static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s64_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint64_t ud = (uint64_t)d; + uint64_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and non-branchfree cases are the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word + // is 0 and the high word is floor_log_2_d - 1 + uint8_t more; + uint64_t rem, proposed_m; + proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem); + const uint64_t e = absD - rem; + + // We are going to start with a power of floor_log_2_d - 1. + // This works if works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d - 1; + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; + // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we + // also set ADD_MARKER this is an annoying optimization that + // enables algorithm #4 to avoid the mask. 
However we always set it + // in the branchfree case + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + proposed_m += 1; + int64_t magic = (int64_t)proposed_m; + + // Mark if we are negative + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; + } + return result; +} + +struct libdivide_s64_t libdivide_s64_gen(int64_t d) { + return libdivide_internal_s64_gen(d, 0); +} + +struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) { + struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1); + struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more}; + return ret; +} + +int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { // shift path + uint64_t mask = (1ULL << shift) - 1; + uint64_t uq = numer + ((numer >> 63) & mask); + int64_t q = (int64_t)uq; + q >>= shift; + // must be arithmetic shift and then sign-extend + int64_t sign = (int8_t)more >> 7; + q = (q ^ sign) - sign; + return q; + } else { + uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint64_t)numer ^ sign) - sign; + } + int64_t q = (int64_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + int64_t magic = denom->magic; + int64_t q = libdivide_mullhi_s64(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2. + uint64_t is_power_of_2 = (magic == 0); + uint64_t q_sign = (uint64_t)(q >> 63); + q += q_sign & ((1ULL << shift) - is_power_of_2); + + // Arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + if (denom->magic == 0) { // shift path + uint64_t absD = 1ULL << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int64_t)absD; + } else { + // Unsigned math is much easier + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + uint64_t d = (uint64_t)(magic_was_negated ? 
-denom->magic : denom->magic); + uint64_t n_hi = 1ULL << shift, n_lo = 0; + uint64_t rem_ignored; + uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored); + int64_t result = (int64_t)(q + 1); + if (negative_divisor) { + result = -result; + } + return result; + } +} + +int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) { + return libdivide_s64_recover((const struct libdivide_s64_t *)denom); +} + +#if defined(LIBDIVIDE_AVX512) + +static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom); +static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom); +static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom); +static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom); + +static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline __m512i libdivide_s64_signbits(__m512i v) {; + return _mm512_srai_epi64(v, 63); +} + +static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) { + return _mm512_srai_epi64(v, amt); +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. +// https://stackoverflow.com/a/28827013 +static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) { + __m512i lomask = _mm512_set1_epi64(0xffffffff); + __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1); + __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1); + __m512i w0 = _mm512_mul_epu32(x, y); + __m512i w1 = _mm512_mul_epu32(x, yh); + __m512i w2 = _mm512_mul_epu32(xh, y); + __m512i w3 = _mm512_mul_epu32(xh, yh); + __m512i w0h = _mm512_srli_epi64(w0, 32); + __m512i s1 = _mm512_add_epi64(w1, w0h); + __m512i s1l = _mm512_and_si512(s1, lomask); + __m512i s1h = _mm512_srli_epi64(s1, 32); + __m512i s2 = _mm512_add_epi64(w2, s1l); + __m512i s2h = _mm512_srli_epi64(s2, 32); + __m512i hi = _mm512_add_epi64(w3, s1h); + hi = _mm512_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. 
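+// Editorial note: the signed high-half product below follows from the
+// unsigned one via the identity
+//     mulhi_s(x, y) = mulhi_u(x, y) - (x < 0 ? y : 0) - (y < 0 ? x : 0),
+// which is exactly what the two sign-bit masks (t1, t2) implement.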
+static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) { + __m512i p = libdivide_mullhi_u64_vector(x, y); + __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y); + __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x); + p = _mm512_sub_epi64(p, t1); + p = _mm512_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi32(numers, more); + } + else { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, shift); + } + else { + return _mm512_srli_epi32(q, more); + } + } +} + +__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi64(numers, more); + } + else { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, shift); + } + else { + return _mm512_srli_epi64(q, more); + } + } +} + +__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm512_srai_epi32(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= shift + q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) { 
+ int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic)); + q = _mm512_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31 + __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2); + q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm512_srai_epi32(q, shift); // q >>= shift + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi64(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + q = _mm512_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. 
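+    //
+    // Small worked example (editorial): with d = 4 (a power of two, so
+    // magic == 0 and shift == 2) a lane holding n = -7 has q = -7 here.
+    // Adding (1 << 2) - 1 = 3 gives -4, and the arithmetic shift by 2 yields
+    // -1, matching C's truncated -7 / 4; the plain shift alone would round
+    // toward negative infinity and produce -2.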
+ uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2); + q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_AVX2) + +static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom); +static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom); +static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom); +static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom); + +static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm256_srai_epi64(v, 63) (from AVX512). +static inline __m256i libdivide_s64_signbits(__m256i v) { + __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm256_srai_epi64 (from AVX512). +static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) { + const int b = 64 - amt; + __m256i m = _mm256_set1_epi64x(1ULL << (b - 1)); + __m256i x = _mm256_srli_epi64(v, amt); + __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
+// https://stackoverflow.com/a/28827013 +static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) { + __m256i lomask = _mm256_set1_epi64x(0xffffffff); + __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m256i w0h = _mm256_srli_epi64(w0, 32); + __m256i s1 = _mm256_add_epi64(w1, w0h); + __m256i s1l = _mm256_and_si256(s1, lomask); + __m256i s1h = _mm256_srli_epi64(s1, 32); + __m256i s2 = _mm256_add_epi64(w2, s1l); + __m256i s2h = _mm256_srli_epi64(s2, 32); + __m256i hi = _mm256_add_epi64(w3, s1h); + hi = _mm256_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) { + __m256i p = libdivide_mullhi_u64_vector(x, y); + __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y); + __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x); + p = _mm256_sub_epi64(p, t1); + p = _mm256_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi32(numers, more); + } + else { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, shift); + } + else { + return _mm256_srli_epi32(q, more); + } + } +} + +__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi64(numers, more); + } + else { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, shift); + } + else { + return _mm256_srli_epi64(q, more); + } + } +} + +__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi32(mask); + // q = numer + ((numer >> 
31) & roundToZeroTweak); + __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm256_srai_epi32(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= shift + q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic)); + q = _mm256_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31 + __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2); + q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm256_srai_epi32(q, shift); // q >>= shift + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + q = _mm256_add_epi64(q, 
numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_SSE2) + +static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom); +static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom); +static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom); +static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom); + +static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm_srai_epi64(v, 63) (from AVX512). +static inline __m128i libdivide_s64_signbits(__m128i v) { + __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm_srai_epi64 (from AVX512). +static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { + const int b = 64 - amt; + __m128i m = _mm_set1_epi64x(1ULL << (b - 1)); + __m128i x = _mm_srli_epi64(v, amt); + __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) { + __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); + __m128i a1X3X = _mm_srli_epi64(a, 32); + __m128i mask = _mm_set_epi32(-1, 0, -1, 0); + __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask); + return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// SSE2 does not have a signed multiplication instruction, but we can convert +// unsigned to signed pretty efficiently. Again, b is just a 32 bit value +// repeated four times. +static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) { + __m128i p = libdivide_mullhi_u32_vector(a, b); + // t1 = (a >> 31) & y, arithmetic shift + __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); + __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); + p = _mm_sub_epi32(p, t1); + p = _mm_sub_epi32(p, t2); + return p; +} + +// Here, y is assumed to contain one 64-bit value repeated. 
+// https://stackoverflow.com/a/28827013 +static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) { + __m128i lomask = _mm_set1_epi64x(0xffffffff); + __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m128i w0h = _mm_srli_epi64(w0, 32); + __m128i s1 = _mm_add_epi64(w1, w0h); + __m128i s1l = _mm_and_si128(s1, lomask); + __m128i s1h = _mm_srli_epi64(s1, 32); + __m128i s2 = _mm_add_epi64(w2, s1l); + __m128i s2h = _mm_srli_epi64(s2, 32); + __m128i hi = _mm_add_epi64(w3, s1h); + hi = _mm_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) { + __m128i p = libdivide_mullhi_u64_vector(x, y); + __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); + __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); + p = _mm_sub_epi64(p, t1); + p = _mm_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi32(numers, more); + } + else { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, shift); + } + else { + return _mm_srli_epi32(q, more); + } + } +} + +__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi64(numers, more); + } + else { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, shift); + } + else { + return _mm_srli_epi64(q, more); + } + } +} + +__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); + q = 
_mm_srai_epi32(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); + } + // q >>= shift + q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic)); + q = _mm_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31 + __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2); + q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm_srai_epi32(q, shift); // q >>= shift + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + q = _mm_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. 
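+    //
+    // This mirrors the scalar libdivide_s64_branchfree_do() above.  As an
+    // editorial example, for d = 8 (magic == 0, shift == 3) a lane holding
+    // n = -20 becomes -20 + 7 = -13, and -13 >> 3 == -2, the truncated
+    // quotient of -20 / 8.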
+    uint32_t is_power_of_2 = (magic == 0);
+    __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+    __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2);
+    q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+    q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+    q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+#endif
+
+/////////// C++ stuff
+
+#ifdef __cplusplus
+
+// The C++ divider class is templated on both an integer type
+// (like uint64_t) and an algorithm type.
+// * BRANCHFULL is the default algorithm type.
+// * BRANCHFREE is the branchfree algorithm type.
+enum {
+    BRANCHFULL,
+    BRANCHFREE
+};
+
+#if defined(LIBDIVIDE_AVX512)
+    #define LIBDIVIDE_VECTOR_TYPE __m512i
+#elif defined(LIBDIVIDE_AVX2)
+    #define LIBDIVIDE_VECTOR_TYPE __m256i
+#elif defined(LIBDIVIDE_SSE2)
+    #define LIBDIVIDE_VECTOR_TYPE __m128i
+#endif
+
+#if !defined(LIBDIVIDE_VECTOR_TYPE)
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
+#else
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+        LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
+            return libdivide_##ALGO##_do_vector(n, &denom); \
+        }
+#endif
+
+// The DISPATCHER_GEN() macro generates C++ methods (for the given integer
+// and algorithm types) that redirect to libdivide's C API.
+#define DISPATCHER_GEN(T, ALGO) \
+    libdivide_##ALGO##_t denom; \
+    dispatcher() { } \
+    dispatcher(T d) \
+        : denom(libdivide_##ALGO##_gen(d)) \
+    { } \
+    T divide(T n) const { \
+        return libdivide_##ALGO##_do(n, &denom); \
+    } \
+    LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+    T recover() const { \
+        return libdivide_##ALGO##_recover(&denom); \
+    }
+
+// The dispatcher selects a specific division algorithm for a given
+// type and ALGO using partial template specialization.
+template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
+
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
+
+// This is the main divider class for use by the user (C++ API).
+// The actual division algorithm is selected using the dispatcher struct
+// based on the integer and algorithm template parameters.
+template<typename T, int ALGO = BRANCHFULL>
+class divider {
+public:
+    // We leave the default constructor empty so that creating
+    // an array of dividers and then initializing them
+    // later doesn't slow us down.
+    divider() { }
+
+    // Constructor that takes the divisor as a parameter
+    divider(T d) : div(d) { }
+
+    // Divides n by the divisor
+    T divide(T n) const {
+        return div.divide(n);
+    }
+
+    // Recovers the divisor, returns the value that was
+    // used to initialize this divider object.
+    T recover() const {
+        return div.recover();
+    }
+
+    bool operator==(const divider& other) const {
+        return div.denom.magic == other.div.denom.magic &&
+               div.denom.more == other.div.denom.more;
+    }
+
+    bool operator!=(const divider& other) const {
+        return !(*this == other);
+    }
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Treats the vector as packed integer values with the same type as
+    // the divider (e.g. s32, u32, s64, u64) and divides each of
+    // them by the divider, returning the packed quotients.
+    LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
+        return div.divide(n);
+    }
+#endif
+
+private:
+    // Storage for the actual divisor
+    dispatcher<std::is_integral<T>::value,
+               std::is_signed<T>::value, sizeof(T), ALGO> div;
+};
+
+// Overload of operator / for scalar division
+template<typename T, int ALGO>
+T operator/(T n, const divider<T, ALGO>& div) {
+    return div.divide(n);
+}
+
+// Overload of operator /= for scalar division
+template<typename T, int ALGO>
+T& operator/=(T& n, const divider<T, ALGO>& div) {
+    n = div.divide(n);
+    return n;
+}
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Overload of operator / for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
+        return div.divide(n);
+    }
+    // Overload of operator /= for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
+        n = div.divide(n);
+        return n;
+    }
+#endif
+
+// libdivide::branchfree_divider<T>
+template <typename T>
+using branchfree_divider = divider<T, BRANCHFREE>;
+
+}  // namespace libdivide
+
+#endif  // __cplusplus
+
+#endif  // LIBDIVIDE_H
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 3a7543b99421..114cfed8b553 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -19,7 +19,12 @@
 #include "ufunc_object.h"
 
 #include <string.h> /* for memchr */
-#include 
+
+/* Use Libdivide for faster division */
+/* TODO Explore placing specialised versions in `numpy/core/src/common/simd` */
+#ifdef USE_LIBDIVIDE
+#include "numpy/libdivide.h"
+#endif
 
 /*
  * cutoff blocksize for pairwise summation

From b02399ac1c0838a84c6d966ef2c34cd60c82c30c Mon Sep 17 00:00:00 2001
From: Ganesh Kathiresan
Date: Sun, 8 Nov 2020 18:08:32 +0530
Subject: [PATCH 0054/1270] ENH: Made libdivide default

---
 numpy/core/setup.py              | 14 +++-----
 numpy/core/src/umath/loops.c.src | 58 ++------------------------------
 2 files changed, 6 insertions(+), 66 deletions(-)

diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index ca108863c355..448499926f57 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -386,11 +386,8 @@ def check_mathlib(config_cmd):
                               "MATHLIB env variable")
     return mathlibs
 
-def check_libdivide():
-    return os.environ.get('NPY_USE_LIBDIVIDE') is not None
-
-def check_optimal_divisor():
-    return os.environ.get('NPY_USE_OPTIMAL_DIVISOR') is not None
+def check_use_legacy_division():
+    return os.environ.get('NPY_USE_LEGACY_DIVISION') is not None
 
 def visibility_define(config):
     """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
@@ -448,11 +445,8 @@ def generate_config_h(ext, build_dir):
         mathlibs = check_mathlib(config_cmd)
         moredefs.append(('MATHLIB', ','.join(mathlibs)))
 
-        # Check if libdivide needs to be used
-        check_libdivide() and moredefs.append('USE_LIBDIVIDE')
-
-        # Check if optimal divisor code needs to be used
-        check_optimal_divisor() and moredefs.append('USE_OPTIMAL_DIVISOR')
+        # Check if legacy division needs to be used
+        check_use_legacy_division() and moredefs.append('USE_LEGACY_DIVISION')
 
         check_math_capabilities(config_cmd, ext, moredefs, mathlibs)
         moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 114cfed8b553..0a493affbf96 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -22,7 +22,7 @@
 /* Use Libdivide for faster division */
 /* TODO Explore placing specialised versions in `numpy/core/src/common/simd` */
-#ifdef USE_LIBDIVIDE +#ifndef USE_LEGACY_DIVISION #include "numpy/libdivide.h" #endif @@ -847,7 +847,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : (in < 0 ? -1 : 0)); } -#ifdef USE_LIBDIVIDE +#ifndef USE_LEGACY_DIVISION NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { @@ -901,60 +901,6 @@ NPY_NO_EXPORT void } } } -#elif defined(USE_OPTIMAL_DIVISOR) -NPY_NO_EXPORT void -@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP_BASE - - if(!is2) { - const @type@ in2 = *(@type@ *)ip2; - const float in2_f = (float) in2; - BINARY_LOOP_FIXED { - const @type@ in1 = *(@type@ *)ip1; - /* - * FIXME: On x86 at least, dividing the smallest representable integer - * by -1 causes a SIFGPE (division overflow). We treat this case here - * (to avoid a SIGFPE crash at python level), but a good solution would - * be to treat integer division problems separately from FPU exceptions - * (i.e. a different approach than npy_set_floatstatus_divbyzero()). - */ - if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else if ((in1 > 0) != (in2 > 0)) { - *((@type@ *)op1) = floor(in1/in2_f); - } - else { - *((@type@ *)op1) = in1/in2; - } - } - } - else { - BINARY_LOOP_SLIDING { // XXX Lot of repeated code - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - /* - * FIXME: On x86 at least, dividing the smallest representable integer - * by -1 causes a SIFGPE (division overflow). We treat this case here - * (to avoid a SIGFPE crash at python level), but a good solution would - * be to treat integer division problems separately from FPU exceptions - * (i.e. a different approach than npy_set_floatstatus_divbyzero()). - */ - if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op1) = in1/in2 - 1; - } - else { - *((@type@ *)op1) = in1/in2; - } - } - } -} #else NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) From f0ddb7c6839fc2799cce891cb7ef71c2b0dfb097 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sun, 8 Nov 2020 18:23:19 +0530 Subject: [PATCH 0055/1270] ENH: Handled divide by 0 case --- numpy/core/src/umath/loops.c.src | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 0a493affbf96..43f839931482 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -851,11 +851,12 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { + static const struct libdivide_@div@_t EmptyStruct; BINARY_LOOP_BASE if(!is2) { const @type@ in2 = *(@type@ *)ip2; - struct libdivide_@div@_t fast_d = libdivide_@div@_gen(in2); + struct libdivide_@div@_t fast_d = in2 ? 
libdivide_@div@_gen(in2) : EmptyStruct; BINARY_LOOP_FIXED { const @type@ in1 = *(@type@ *)ip1; /* From 11728e8a3cf967455aacc36e2d2237745b07c1e7 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 9 Nov 2020 03:32:54 +0200 Subject: [PATCH 0056/1270] BUG, Benchmark: fix passing optimization build options to asv --- benchmarks/asv_compare.conf.json.tpl | 4 +++- benchmarks/asv_pip_nopep517.py | 15 +++++++++++++++ runtests.py | 1 + 3 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 benchmarks/asv_pip_nopep517.py diff --git a/benchmarks/asv_compare.conf.json.tpl b/benchmarks/asv_compare.conf.json.tpl index 1f339077c66d..03d13d985c8d 100644 --- a/benchmarks/asv_compare.conf.json.tpl +++ b/benchmarks/asv_compare.conf.json.tpl @@ -78,7 +78,9 @@ "build_command" : [ "python setup.py build {numpy_build_options}", - "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}" + // pip ignores '--global-option' when pep517 is enabled, we also enabling pip verbose to + // be reached from asv `--verbose` so we can verify the build options. + "PIP_NO_BUILD_ISOLATION=false python {build_dir}/benchmarks/asv_pip_nopep517.py -v {numpy_global_options} --no-deps --no-index -w {build_cache_dir} {build_dir}" ], // The commits after which the regression search in `asv publish` // should start looking for regressions. Dictionary whose keys are diff --git a/benchmarks/asv_pip_nopep517.py b/benchmarks/asv_pip_nopep517.py new file mode 100644 index 000000000000..9ba165493085 --- /dev/null +++ b/benchmarks/asv_pip_nopep517.py @@ -0,0 +1,15 @@ +""" +This file is used by asv_compare.conf.json.tpl. +""" +import subprocess, sys +# pip ignores '--global-option' when pep517 is enabled therefore we disable it. +cmd = [sys.executable, '-mpip', 'wheel', '--no-use-pep517'] +try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True) +except Exception as e: + output = str(e.output) +if "no such option" in output: + print("old version of pip, escape '--no-use-pep517'") + cmd.pop() + +subprocess.run(cmd + sys.argv[1:]) diff --git a/runtests.py b/runtests.py index 87e26768b351..20295ef33673 100755 --- a/runtests.py +++ b/runtests.py @@ -524,6 +524,7 @@ def asv_compare_config(bench_path, args, h_commits): is_cached = asv_substitute_config(conf_path, nconf_path, numpy_build_options = ' '.join([f'\\"{v}\\"' for v in build]), + numpy_global_options= ' '.join([f'--global-option=\\"{v}\\"' for v in ["build"] + build]) ) if not is_cached: asv_clear_cache(bench_path, h_commits) From 97ba579bd17043b8885ff8e13970a2a38bd7a981 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 9 Nov 2020 16:35:36 +0800 Subject: [PATCH 0057/1270] Optimize the performance of multiply --- .../core/src/multiarray/einsum_sumprod.c.src | 193 +++++++++--------- 1 file changed, 91 insertions(+), 102 deletions(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index c58e742874d0..f5478bf8f81c 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -17,7 +17,8 @@ #include "einsum_sumprod.h" #include "einsum_debug.h" - +#include "simd/simd.h" +#include "common.h" #ifdef NPY_HAVE_SSE_INTRINSICS #define EINSUM_USE_SSE1 1 @@ -41,6 +42,28 @@ #define EINSUM_IS_SSE_ALIGNED(x) ((((npy_intp)x)&0xf) == 0) +// ARM/Neon don't have instructions for aligned memory access +#ifdef NPY_HAVE_NEON + #define EINSUM_IS_ALIGNED(x) 0 +#else + #define EINSUM_IS_ALIGNED(x) 
npy_is_aligned(x, NPY_SIMD_WIDTH) +#endif + +/** + * This macro is used to enable a scalar loop which advances 4 elements at a + * time, which appears after a main SIMD loop gated by `CHK` that unrolls by + * `NPY_SIMD_WIDTH * unroll_by` elements, and before a non-unrolled scalar loop + * that finishes up all the remaining scalars. The purpose of the unrolled loop + * is to enable auto-vectorization in cases when all of the following are true: + * + * - optimization is allowed + * - either: + * - we did not run the SIMD loop at all, due to NPV being disabled. + * - the SIMD loop was larger than 128bit, so there are likely to be many + * elements left to process. + */ +#define EINSUM_UNROLL_4_SCALARS(CHK) (!defined(NPY_DISABLE_OPTIMIZATION) && (!(CHK) || NPY_SIMD > 128)) + /**********************************************/ /**begin repeat @@ -56,6 +79,10 @@ * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_float, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# + * #sfx = s8, s16, s32, long, s64, + * u8, u16, u32, ulong, u64, + * half, f32, f64, longdouble, + * f32, f64, clongdouble# * #to = ,,,,, * ,,,,, * npy_float_to_half,,,, @@ -76,6 +103,10 @@ * 0*5, * 0,0,1,0, * 0*3# + * #NPYV_CHK = 0*5, + * 0*5, + * 0, NPY_SIMD, NPY_SIMD_F64, 0, + * 0*3# */ /**begin repeat1 @@ -250,115 +281,73 @@ static void @type@ *data0 = (@type@ *)dataptr[0]; @type@ *data1 = (@type@ *)dataptr[1]; @type@ *data_out = (@type@ *)dataptr[2]; - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, b; -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, b; -#endif - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_two (%d)\n", (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - data_out[@i@] = @to@(@from@(data0[@i@]) * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ - case 0: - return; - } - -#if EINSUM_USE_SSE1 && @float32@ + // NPYV check for @type@, in X86, 128bits intrinsincs have a side effect in optimization +#if @NPYV_CHK@ /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) && - EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_load_ps(data_out+@i@)); - _mm_store_ps(data_out+@i@, b); -/**end repeat2**/ - data0 += 8; - data1 += 8; - data_out += 8; + const int is_aligned = EINSUM_IS_ALIGNED(data0) && EINSUM_IS_ALIGNED(data1) && + EINSUM_IS_ALIGNED(data_out); + const int vstep = npyv_nlanes_@sfx@; + + /**begin repeat2 + * #cond = if(is_aligned), else# + * #ld = loada, load# + * #st = storea, store# + */ + @cond@ { + const npy_intp vstepx4 = vstep * 4; + for (; count >= vstepx4; count -= vstepx4, data0 += vstepx4, data1 += vstepx4, data_out += vstepx4) { + /**begin repeat3 + * #i = 0, 1, 2, 3# + */ + npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(data0 + vstep * @i@); + npyv_@sfx@ b@i@ = npyv_@ld@_@sfx@(data1 + vstep * @i@); + npyv_@sfx@ c@i@ = npyv_@ld@_@sfx@(data_out + vstep * @i@); + /**end repeat3**/ + /**begin repeat3 + * #i = 0, 1, 2, 3# + */ + npyv_@sfx@ abc@i@ = npyv_muladd_@sfx@(a@i@, b@i@, c@i@); + /**end repeat3**/ + /**begin repeat3 + * #i = 0, 1, 2, 3# + */ + npyv_@st@_@sfx@(data_out + vstep * @i@, abc@i@); + /**end repeat3**/ } - - /* Finish off the loop */ - goto 
finish_after_unrolled_loop; } -#elif EINSUM_USE_SSE2 && @float64@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) && - EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@)); - b = _mm_add_pd(a, _mm_load_pd(data_out+@i@)); - _mm_store_pd(data_out+@i@, b); -/**end repeat2**/ - data0 += 8; - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; + /**end repeat2**/ + npyv_cleanup(); +#endif // NPYV check for @type@ + +#if EINSUM_UNROLL_4_SCALARS(@NPYV_CHK@) + for (; count >= 4; count -= 4, data0 += 4, data1 += 4, data_out += 4) { + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + const @type@ a@i@ = @from@(data0[@i@]); + const @type@ b@i@ = @from@(data1[@i@]); + const @type@ c@i@ = @from@(data_out[@i@]); + /**end repeat2**/ + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + const @type@ abc@i@ = a@i@ * b@i@ + c@i@; + /**end repeat2**/ + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + data_out[@i@] = @to@(abc@i@); + /**end repeat2**/ } #endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@)); - _mm_storeu_ps(data_out+@i@, b); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@)); - b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@)); - _mm_storeu_pd(data_out+@i@, b); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - data_out[@i@] = @to@(@from@(data0[@i@]) * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ -#endif - data0 += 8; - data1 += 8; - data_out += 8; + for (; count > 0; --count, ++data0, ++data1, ++data_out) { + const @type@ a = @from@(*data0); + const @type@ b = @from@(*data1); + const @type@ c = @from@(*data_out); + *data_out = @to@(a * b + c); } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; } /* Some extra specializations for the two operand case */ From 72dcc042ee572f5522b2731807a15c1fefac1315 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 9 Nov 2020 18:46:13 +0530 Subject: [PATCH 0058/1270] ENH: Added libdivide zlib license --- LICENSES_bundled.txt | 5 +++++ .../core/include/numpy/libdivide/LICENSE.txt | 21 +++++++++++++++++++ .../include/numpy/{ => libdivide}/libdivide.h | 0 numpy/core/src/umath/loops.c.src | 2 +- 4 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 numpy/core/include/numpy/libdivide/LICENSE.txt rename numpy/core/include/numpy/{ => libdivide}/libdivide.h (100%) diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt index 00b7473777ca..80557d3e6ee7 100644 --- a/LICENSES_bundled.txt +++ b/LICENSES_bundled.txt @@ -15,3 +15,8 @@ Name: dragon4 Files: numpy/core/src/multiarray/dragon4.c License: MIT For license text, see numpy/core/src/multiarray/dragon4.c + +Name: libdivide +Files: numpy/core/include/numpy/libdivide/* +License: zlib + For license text, see numpy/core/include/numpy/libdivide/LICENSE.txt diff --git a/numpy/core/include/numpy/libdivide/LICENSE.txt b/numpy/core/include/numpy/libdivide/LICENSE.txt new file mode 100644 index 000000000000..d72a7c388d40 --- /dev/null +++ 
b/numpy/core/include/numpy/libdivide/LICENSE.txt @@ -0,0 +1,21 @@ + zlib License + ------------ + + Copyright (C) 2010 - 2019 ridiculous_fish, + Copyright (C) 2016 - 2019 Kim Walisch, + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. diff --git a/numpy/core/include/numpy/libdivide.h b/numpy/core/include/numpy/libdivide/libdivide.h similarity index 100% rename from numpy/core/include/numpy/libdivide.h rename to numpy/core/include/numpy/libdivide/libdivide.h diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 43f839931482..ae99d5bf3daa 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -23,7 +23,7 @@ /* Use Libdivide for faster division */ /* TODO Explore placing specialised versions in `numpy/core/src/common/simd` */ #ifndef USE_LEGACY_DIVISION -#include "numpy/libdivide.h" +#include "numpy/libdivide/libdivide.h" #endif /* From 19835d291fd67b57976a818b08a20e9d9734c787 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 10 Nov 2020 23:25:45 +0530 Subject: [PATCH 0059/1270] ENH: Removed empty structure --- numpy/core/src/umath/loops.c.src | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index ae99d5bf3daa..c82626385ce9 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -851,12 +851,13 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { - static const struct libdivide_@div@_t EmptyStruct; BINARY_LOOP_BASE if(!is2) { const @type@ in2 = *(@type@ *)ip2; - struct libdivide_@div@_t fast_d = in2 ? libdivide_@div@_gen(in2) : EmptyStruct; + + /* Creating a divisor of 0 is treated as an error by libdivide */ + struct libdivide_@div@_t fast_d = in2 ? 
libdivide_@div@_gen(in2) : (struct libdivide_@div@_t){0}; BINARY_LOOP_FIXED { const @type@ in1 = *(@type@ *)ip1; /* From 3975a28d8b3efa385c58a0196f55d7d377e21a77 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 11 Nov 2020 11:08:41 +0530 Subject: [PATCH 0060/1270] ENH: Auto generate libdivide structs --- numpy/core/src/umath/loops.c.src | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index c82626385ce9..061dbb230f55 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -832,7 +832,6 @@ NPY_NO_EXPORT void * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong# * #c = ,,,l,ll# - * #div = s32, s32, s32, s64, s64# */ NPY_NO_EXPORT NPY_GCC_OPT_3 void @@ -847,6 +846,19 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : (in < 0 ? -1 : 0)); } +/* Using nested loops, few more fields to be added in the future */ +/**begin repeat1 + * #kind = t, gen, do# + */ +/* Libdivde only supports 32 and 64 bit types + * We try to pick the best possible one */ +#if NPY_BITSOF_@TYPE@ <= 32 +#define libdivide_@type@_@kind@ libdivide_s32_@kind@ +#else +#define libdivide_@type@_@kind@ libdivide_s64_@kind@ +#endif +/**end repeat1**/ + #ifndef USE_LEGACY_DIVISION NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) @@ -857,7 +869,7 @@ NPY_NO_EXPORT void const @type@ in2 = *(@type@ *)ip2; /* Creating a divisor of 0 is treated as an error by libdivide */ - struct libdivide_@div@_t fast_d = in2 ? libdivide_@div@_gen(in2) : (struct libdivide_@div@_t){0}; + struct libdivide_@type@_t fast_d = in2 ? 
libdivide_@type@_gen(in2) : (struct libdivide_@type@_t){0}; BINARY_LOOP_FIXED { const @type@ in1 = *(@type@ *)ip1; /* @@ -872,10 +884,10 @@ NPY_NO_EXPORT void *((@type@ *)op1) = 0; } else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op1) = libdivide_@div@_do(in1, &fast_d) - 1; + *((@type@ *)op1) = libdivide_@type@_do(in1, &fast_d) - 1; } else { - *((@type@ *)op1) = libdivide_@div@_do(in1, &fast_d); + *((@type@ *)op1) = libdivide_@type@_do(in1, &fast_d); } } } From 90e6cf529e27543a974b53c0aa912329c6374f21 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 11 Nov 2020 11:45:08 +0530 Subject: [PATCH 0061/1270] ENH: Logic to optimize % --- numpy/core/src/umath/loops.c.src | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 061dbb230f55..a58ee5a788b8 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -883,11 +883,12 @@ NPY_NO_EXPORT void npy_set_floatstatus_divbyzero(); *((@type@ *)op1) = 0; } - else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op1) = libdivide_@type@_do(in1, &fast_d) - 1; - } else { *((@type@ *)op1) = libdivide_@type@_do(in1, &fast_d); + + if((*((@type@ *)op1) <= 0) && (*((@type@ *)op1) * in2 != in1)) { + *((@type@ *)op1) = *((@type@ *)op1) - 1; + } } } } From 969aa039d962818459a434ffdd3976865c87afe1 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 11 Nov 2020 12:10:12 +0530 Subject: [PATCH 0062/1270] ENH: Fix breaking case --- numpy/core/src/umath/loops.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index a58ee5a788b8..45a9ccef4c8b 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -886,7 +886,7 @@ NPY_NO_EXPORT void else { *((@type@ *)op1) = libdivide_@type@_do(in1, &fast_d); - if((*((@type@ *)op1) <= 0) && (*((@type@ *)op1) * in2 != in1)) { + if(((in1 > 0) != (in2 > 0)) && (*((@type@ *)op1) * in2 != in1)) { *((@type@ *)op1) = *((@type@ *)op1) - 1; } } From 44a3a31d23fe9429da4cff067685b8adbbe0106a Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 11 Nov 2020 16:19:06 +0530 Subject: [PATCH 0063/1270] ENH: Change comments Co-authored-by: Eric Wieser --- numpy/core/src/umath/loops.c.src | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 45a9ccef4c8b..c99a6d7f3836 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -846,12 +846,11 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : (in < 0 ? 
-1 : 0)); } -/* Using nested loops, few more fields to be added in the future */ +/* Libdivide only supports 32 and 64 bit types + * We try to pick the best possible one */ /**begin repeat1 * #kind = t, gen, do# */ -/* Libdivde only supports 32 and 64 bit types - * We try to pick the best possible one */ #if NPY_BITSOF_@TYPE@ <= 32 #define libdivide_@type@_@kind@ libdivide_s32_@kind@ #else From b3d70efeef31b953fa977a61311035c9e02e9ad2 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 11 Nov 2020 19:06:07 +0530 Subject: [PATCH 0064/1270] ENH: Improved floor division (#17727) --- doc/release/upcoming_changes/17727.improvement.rst | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100755 doc/release/upcoming_changes/17727.improvement.rst diff --git a/doc/release/upcoming_changes/17727.improvement.rst b/doc/release/upcoming_changes/17727.improvement.rst new file mode 100755 index 000000000000..83054a3ea1da --- /dev/null +++ b/doc/release/upcoming_changes/17727.improvement.rst @@ -0,0 +1,8 @@ +Improved performance in integer division of NumPy arrays +-------------------------------------------------------- +Integer division of NumPy arrays now uses libdivide. +With builtin support for SSE2, AVX2 and AVX512 vector +division from libdivide and other minor improvements, +there is a large speedup. +The ``//`` operator and ``np.floor_divide`` makes use +of the new changes. From 931134bfa428e0aa50fff8583fa526c2da1bbc53 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 11 Nov 2020 22:15:08 +0530 Subject: [PATCH 0065/1270] ENH: Added asv benchmarks --- benchmarks/benchmarks/bench_ufunc.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 9f45a72575ff..c388da5b5adc 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -134,6 +134,17 @@ def time_less_than_scalar2(self, dtype): (self.d < 1) +class CustomScalarInt(Benchmark): + params = [10**size for size in range(1, 8)] + param_names = ['size'] + + def setup(self, size): + self.x = np.arange(size) + + def time_floor_divide(self, size): + self.x//8 + + class Scalar(Benchmark): def setup(self): self.x = np.asarray(1.0) From 6e2e281a270652cee0028e4e1e98a1c19b57b11b Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Thu, 12 Nov 2020 08:59:37 +0530 Subject: [PATCH 0066/1270] ENH: Change comments Co-authored-by: Sebastian Berg --- numpy/core/src/umath/loops.c.src | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index c99a6d7f3836..fe60993a762b 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -21,7 +21,6 @@ #include /* for memchr */ /* Use Libdivide for faster division */ -/* TODO Explore placing specialised versions in `numpy/core/src/common/simd` */ #ifndef USE_LEGACY_DIVISION #include "numpy/libdivide/libdivide.h" #endif From 90a84af7ae1edd91cc5a45069ad6a824e436d3cd Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Thu, 12 Nov 2020 09:00:10 +0530 Subject: [PATCH 0067/1270] ENH: Linting Co-authored-by: Sebastian Berg --- numpy/core/src/umath/loops.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index fe60993a762b..448e774ccd48 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -884,7 +884,7 @@ NPY_NO_EXPORT void else { *((@type@ *)op1) = 
libdivide_@type@_do(in1, &fast_d); - if(((in1 > 0) != (in2 > 0)) && (*((@type@ *)op1) * in2 != in1)) { + if (((in1 > 0) != (in2 > 0)) && (*((@type@ *)op1) * in2 != in1)) { *((@type@ *)op1) = *((@type@ *)op1) - 1; } } From 61c3d38e3293c08f48621ca52808097845252f83 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Thu, 12 Nov 2020 09:13:03 +0530 Subject: [PATCH 0068/1270] MAINT: Added libdivide as linguist-vendored --- .gitattributes | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitattributes b/.gitattributes index bce3dbe6daad..f4b6c0dcfbd1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -14,6 +14,7 @@ doc/release/*.rst merge=union numpy/linalg/lapack_lite/f2c.c linguist-vendored numpy/linalg/lapack_lite/f2c.h linguist-vendored tools/npy_tempita/* linguist-vendored +numpy/core/include/numpy/libdivide/* linguist-vendored # Mark some files as generated numpy/linalg/lapack_lite/f2c_*.c linguist-generated From 827bc38a21f8dbeb3b992a26751ef723577cb7d9 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Thu, 12 Nov 2020 10:05:32 +0530 Subject: [PATCH 0069/1270] ENH: Removed legacy division --- numpy/core/setup.py | 6 ------ numpy/core/src/umath/loops.c.src | 37 ++++++-------------------------- 2 files changed, 6 insertions(+), 37 deletions(-) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 448499926f57..68aa0a8513fb 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -386,9 +386,6 @@ def check_mathlib(config_cmd): "MATHLIB env variable") return mathlibs -def check_use_legacy_division(): - return os.environ.get('NPY_USE_LEGACY_DIVISION') is not None - def visibility_define(config): """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty string).""" @@ -445,9 +442,6 @@ def generate_config_h(ext, build_dir): mathlibs = check_mathlib(config_cmd) moredefs.append(('MATHLIB', ','.join(mathlibs))) - # Check if legacy division needs to be used - check_use_legacy_division() and moredefs.append('USE_LEGACY_DIVISION') - check_math_capabilities(config_cmd, ext, moredefs, mathlibs) moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 448e774ccd48..b37f4c4272f4 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -857,7 +857,6 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void #endif /**end repeat1**/ -#ifndef USE_LEGACY_DIVISION NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { @@ -884,6 +883,7 @@ NPY_NO_EXPORT void else { *((@type@ *)op1) = libdivide_@type@_do(in1, &fast_d); + /* Negative quotients needs to be rounded down */ if (((in1 > 0) != (in2 > 0)) && (*((@type@ *)op1) * in2 != in1)) { *((@type@ *)op1) = *((@type@ *)op1) - 1; } @@ -905,42 +905,17 @@ NPY_NO_EXPORT void npy_set_floatstatus_divbyzero(); *((@type@ *)op1) = 0; } - else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op1) = in1/in2 - 1; - } else { *((@type@ *)op1) = in1/in2; + + /* Negative quotients needs to be rounded down */ + if (((in1 > 0) != (in2 > 0)) && (*((@type@ *)op1) * in2 != in1)) { + *((@type@ *)op1) = *((@type@ *)op1) - 1; + } } } } } -#else -NPY_NO_EXPORT void -@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - /* - * FIXME: On x86 at least, dividing the smallest 
representable integer - * by -1 causes a SIFGPE (division overflow). We treat this case here - * (to avoid a SIGFPE crash at python level), but a good solution would - * be to treat integer division problems separately from FPU exceptions - * (i.e. a different approach than npy_set_floatstatus_divbyzero()). - */ - if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op1) = in1/in2 - 1; - } - else { - *((@type@ *)op1) = in1/in2; - } - } -} -#endif NPY_NO_EXPORT void @TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) From 0ce0ebd3b895678f2a59797564e17a0aedad6872 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Thu, 12 Nov 2020 09:38:17 +0530 Subject: [PATCH 0070/1270] ENH: Improved floor division (#17727) --- .../{17727.improvement.rst => 17727.performance.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename doc/release/upcoming_changes/{17727.improvement.rst => 17727.performance.rst} (100%) diff --git a/doc/release/upcoming_changes/17727.improvement.rst b/doc/release/upcoming_changes/17727.performance.rst similarity index 100% rename from doc/release/upcoming_changes/17727.improvement.rst rename to doc/release/upcoming_changes/17727.performance.rst From c85c44a8091dffc921ac81059280f99b9d4dc198 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Fri, 13 Nov 2020 12:02:03 +0530 Subject: [PATCH 0071/1270] ENH: Added libdivide to timedelta --- numpy/core/src/umath/loops.c.src | 87 +++++++++++++++++++++++++------- 1 file changed, 68 insertions(+), 19 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index b37f4c4272f4..a7c0cb365f22 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -862,7 +862,7 @@ NPY_NO_EXPORT void { BINARY_LOOP_BASE - if(!is2) { + if (!is2) { const @type@ in2 = *(@type@ *)ip2; /* Creating a divisor of 0 is treated as an error by libdivide */ @@ -1403,14 +1403,33 @@ TIMEDELTA_dm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const NPY_NO_EXPORT void TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { - BINARY_LOOP { - const npy_timedelta in1 = *(npy_timedelta *)ip1; + BINARY_LOOP_BASE + + if (!is2) { const npy_int64 in2 = *(npy_int64 *)ip2; - if (in1 == NPY_DATETIME_NAT || in2 == 0) { - *((npy_timedelta *)op1) = NPY_DATETIME_NAT; + + /* Creating a divisor of 0 is treated as an error by libdivide */ + struct libdivide_s64_t fast_d = in2 ? 
libdivide_s64_gen(in2) : (struct libdivide_s64_t){0}; + BINARY_LOOP_FIXED { + const npy_timedelta in1 = *(npy_timedelta *)ip1; + if (in1 == NPY_DATETIME_NAT || in2 == 0) { + *((npy_timedelta *)op1) = NPY_DATETIME_NAT; + } + else { + *((npy_timedelta *)op1) = libdivide_s64_do(in1, &fast_d);; + } } - else { - *((npy_timedelta *)op1) = in1 / in2; + } + else { + BINARY_LOOP_SLIDING { + const npy_timedelta in1 = *(npy_timedelta *)ip1; + const npy_int64 in2 = *(npy_int64 *)ip2; + if (in1 == NPY_DATETIME_NAT || in2 == 0) { + *((npy_timedelta *)op1) = NPY_DATETIME_NAT; + } + else { + *((npy_timedelta *)op1) = in1 / in2; + } } } } @@ -1482,23 +1501,53 @@ TIMEDELTA_mm_m_remainder(char **args, npy_intp const *dimensions, npy_intp const NPY_NO_EXPORT void TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { - BINARY_LOOP { - const npy_timedelta in1 = *(npy_timedelta *)ip1; + /* TODO: This code is similar to array floor divide*/ + BINARY_LOOP_BASE + + if (!is2) { const npy_timedelta in2 = *(npy_timedelta *)ip2; - if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) { - npy_set_floatstatus_invalid(); - *((npy_int64 *)op1) = 0; - } - else if (in2 == 0) { - npy_set_floatstatus_divbyzero(); - *((npy_int64 *)op1) = 0; + + /* Creating a divisor of 0 is treated as an error by libdivide */ + struct libdivide_s64_t fast_d = in2 ? libdivide_s64_gen(in2) : (struct libdivide_s64_t){0}; + BINARY_LOOP_FIXED { + const npy_timedelta in1 = *(npy_timedelta *)ip1; + if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) { + npy_set_floatstatus_invalid(); + *((npy_int64 *)op1) = 0; + } + else if (in2 == 0) { + npy_set_floatstatus_divbyzero(); + *((npy_int64 *)op1) = 0; + } + else { + *((npy_int64 *)op1) = libdivide_s64_do(in1, &fast_d); + + /* Negative quotients needs to be rounded down */ + if (((in1 > 0) != (in2 > 0)) && (*((npy_int64 *)op1) * in2 != in1)) { + *((npy_int64 *)op1) = *((npy_int64 *)op1) - 1; + } + } } - else { - if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((npy_int64 *)op1) = in1/in2 - 1; + } + else { + BINARY_LOOP_SLIDING { + const npy_timedelta in1 = *(npy_timedelta *)ip1; + const npy_timedelta in2 = *(npy_timedelta *)ip2; + if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) { + npy_set_floatstatus_invalid(); + *((npy_int64 *)op1) = 0; + } + else if (in2 == 0) { + npy_set_floatstatus_divbyzero(); + *((npy_int64 *)op1) = 0; } else { *((npy_int64 *)op1) = in1/in2; + + /* Negative quotients needs to be rounded down */ + if (((in1 > 0) != (in2 > 0)) && (*((npy_int64 *)op1) * in2 != in1)) { + *((npy_int64 *)op1) = *((npy_int64 *)op1) - 1; + } } } } From 594dd5d97ec9989f19de96f064930a955478b9a4 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 16 Nov 2020 19:24:40 +0800 Subject: [PATCH 0072/1270] fix misleading comment --- numpy/core/src/multiarray/einsum_sumprod.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index f5478bf8f81c..c9ab71e28523 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -283,7 +283,7 @@ static void @type@ *data_out = (@type@ *)dataptr[2]; NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_two (%d)\n", (int)count); - // NPYV check for @type@, in X86, 128bits intrinsincs have a side effect in optimization + // NPYV check for @type@ #if @NPYV_CHK@ /* Use aligned instructions if possible */ const int is_aligned = 
EINSUM_IS_ALIGNED(data0) && EINSUM_IS_ALIGNED(data1) && From 1923034242aa7d09a25c7cd70f0a27d280c68f71 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 16 Nov 2020 21:27:11 +0000 Subject: [PATCH 0073/1270] ENH, SIMD: Add new NPYV intrinsics pack(0) - add bitfield conversion for boolean vectors - add reverse elements of each 64-bit lane - add testing cases --- numpy/core/src/_simd/_simd.dispatch.c.src | 67 +++++++++++++------ numpy/core/src/common/simd/avx2/conversion.h | 22 ++++-- numpy/core/src/common/simd/avx2/reorder.h | 32 +++++++++ .../core/src/common/simd/avx512/conversion.h | 31 +++++++++ numpy/core/src/common/simd/avx512/reorder.h | 56 ++++++++++++++++ numpy/core/src/common/simd/neon/conversion.h | 66 ++++++++++++++---- numpy/core/src/common/simd/neon/reorder.h | 9 +++ numpy/core/src/common/simd/sse/conversion.h | 21 ++++-- numpy/core/src/common/simd/sse/reorder.h | 41 ++++++++++++ numpy/core/src/common/simd/vsx/conversion.h | 22 ++++++ numpy/core/src/common/simd/vsx/reorder.h | 41 ++++++++++++ numpy/core/tests/test_simd.py | 40 +++++++++++ 12 files changed, 409 insertions(+), 39 deletions(-) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index 18c38387165c..e3dbcdece581 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -9,9 +9,9 @@ #include "_simd_arg.inc" #include "_simd_easyintrin.inc" -/************************************************************************* - * Defining NPYV intrinsics as module functions - *************************************************************************/ +//######################################################################### +//## Defining NPYV intrinsics as module functions +//######################################################################### /**begin repeat * #sfx = u8, s8, u16, s16, u32, s32, u64, s64, f32, f64# * #bsfx = b8, b8, b16, b16, b32, b32, b64, b64, b32, b64# @@ -22,6 +22,7 @@ * #div_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #fused_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #sum_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# + * #rev64_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 0# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# * #shr_imm = 0, 0, 16, 16, 32, 32, 64, 64, 0, 0# @@ -227,7 +228,6 @@ err: /**end repeat1**/ #endif // @ncont_sup@ - /*************************** * Misc ***************************/ @@ -289,6 +289,10 @@ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@) SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@x2, v@sfx@, v@sfx@) /**end repeat1**/ +#if @rev64_sup@ +SIMD_IMPL_INTRIN_1(rev64_@sfx@, v@sfx@, v@sfx@) +#endif + /*************************** * Operators ***************************/ @@ -370,14 +374,26 @@ SIMD_IMPL_INTRIN_1(@intrin@_@sfx@, v@sfx@, v@sfx@) #endif // simd_sup /**end repeat**/ -/*************************** +/************************************************************************* * Variant - ***************************/ + ************************************************************************/ SIMD_IMPL_INTRIN_0N(cleanup) - /************************************************************************* - * Attach module functions - *************************************************************************/ + * A special section for boolean intrinsics outside the main repeater + ************************************************************************/ +/*************************** + * Conversions + ***************************/ +// Convert mask vector to integer bitfield 
+/**begin repeat + * #bsfx = b8, b16, b32, b64# + */ +SIMD_IMPL_INTRIN_1(tobits_@bsfx@, u64, v@bsfx@) +/**end repeat**/ + +//######################################################################### +//## Attach module functions +//######################################################################### static PyMethodDef simd__intrinsics_methods[] = { /**begin repeat * #sfx = u8, s8, u16, s16, u32, s32, u64, s64, f32, f64# @@ -389,6 +405,7 @@ static PyMethodDef simd__intrinsics_methods[] = { * #div_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #fused_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #sum_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# + * #rev64_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 0# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# * #shr_imm = 0, 0, 16, 16, 32, 32, 64, 64, 0, 0# @@ -416,7 +433,6 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ #endif // ncont_sup - /*************************** * Misc ***************************/ @@ -444,8 +460,9 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@) SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ -SIMD_INTRIN_DEF(cvt_@sfx@_@bsfx@) -SIMD_INTRIN_DEF(cvt_@bsfx@_@sfx@) +#if @rev64_sup@ +SIMD_INTRIN_DEF(rev64_@sfx@) +#endif /*************************** * Operators @@ -517,23 +534,35 @@ SIMD_INTRIN_DEF(sum_@sfx@) SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ #endif - #endif // simd_sup /**end repeat**/ +/************************************************************************* + * Variant + ************************************************************************/ +SIMD_INTRIN_DEF(cleanup) +/************************************************************************* + * A special section for boolean intrinsics outside the main repeater + ************************************************************************/ /*************************** - * Variant + * Conversions ***************************/ -SIMD_INTRIN_DEF(cleanup) -/***************************/ +// Convert mask vector to integer bitfield +/**begin repeat + * #bsfx = b8, b16, b32, b64# + */ +SIMD_INTRIN_DEF(tobits_@bsfx@) +/**end repeat**/ + +/************************************************************************/ {NULL, NULL, 0, NULL} }; // PyMethodDef #endif // NPY_SIMD -/************************************************************************* - * Defining a separate module for each target - *************************************************************************/ +//######################################################################### +//## Defining a separate module for each target +//######################################################################### NPY_VISIBILITY_HIDDEN PyObject * NPY_CPU_DISPATCH_CURFX(simd_create_module)(void) { diff --git a/numpy/core/src/common/simd/avx2/conversion.h b/numpy/core/src/common/simd/avx2/conversion.h index 9fd86016d3d9..f72678b548a4 100644 --- a/numpy/core/src/common/simd/avx2/conversion.h +++ b/numpy/core/src/common/simd/avx2/conversion.h @@ -14,8 +14,8 @@ #define npyv_cvt_s32_b32(A) A #define npyv_cvt_u64_b64(A) A #define npyv_cvt_s64_b64(A) A -#define npyv_cvt_f32_b32(A) _mm256_castsi256_ps(A) -#define npyv_cvt_f64_b64(A) _mm256_castsi256_pd(A) +#define npyv_cvt_f32_b32 _mm256_castsi256_ps +#define npyv_cvt_f64_b64 _mm256_castsi256_pd // convert integer types to mask types #define npyv_cvt_b8_u8(BL) BL @@ -26,7 +26,21 @@ #define npyv_cvt_b32_s32(BL) BL #define npyv_cvt_b64_u64(BL) BL #define npyv_cvt_b64_s64(BL) BL -#define npyv_cvt_b32_f32(BL) _mm256_castps_si256(BL) -#define npyv_cvt_b64_f64(BL) 
_mm256_castpd_si256(BL) +#define npyv_cvt_b32_f32 _mm256_castps_si256 +#define npyv_cvt_b64_f64 _mm256_castpd_si256 + +// convert boolean vector to integer bitfield +NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) +{ return (npy_uint32)_mm256_movemask_epi8(a); } + +NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) +{ + __m128i pack = _mm_packs_epi16(_mm256_castsi256_si128(a), _mm256_extracti128_si256(a, 1)); + return (npy_uint16)_mm_movemask_epi8(pack); +} +NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) +{ return (npy_uint8)_mm256_movemask_ps(_mm256_castsi256_ps(a)); } +NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) +{ return (npy_uint8)_mm256_movemask_pd(_mm256_castsi256_pd(a)); } #endif // _NPY_SIMD_AVX2_CVT_H diff --git a/numpy/core/src/common/simd/avx2/reorder.h b/numpy/core/src/common/simd/avx2/reorder.h index 5a9e68e32286..4d6ec8f759b5 100644 --- a/numpy/core/src/common/simd/avx2/reorder.h +++ b/numpy/core/src/common/simd/avx2/reorder.h @@ -94,4 +94,36 @@ NPY_FINLINE npyv_f64x2 npyv_zip_f64(__m256d a, __m256d b) return npyv_combine_f64(ab0, ab1); } +// Reverse elements of each 64-bit lane +NPY_FINLINE npyv_u8 npyv_rev64_u8(npyv_u8 a) +{ + const __m256i idx = _mm256_setr_epi8( + 7, 6, 5, 4, 3, 2, 1, 0,/*64*/15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0,/*64*/15, 14, 13, 12, 11, 10, 9, 8 + ); + return _mm256_shuffle_epi8(a, idx); +} +#define npyv_rev64_s8 npyv_rev64_u8 + +NPY_FINLINE npyv_u16 npyv_rev64_u16(npyv_u16 a) +{ + const __m256i idx = _mm256_setr_epi8( + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9, + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9 + ); + return _mm256_shuffle_epi8(a, idx); +} +#define npyv_rev64_s16 npyv_rev64_u16 + +NPY_FINLINE npyv_u32 npyv_rev64_u32(npyv_u32 a) +{ + return _mm256_shuffle_epi32(a, _MM_SHUFFLE(2, 3, 0, 1)); +} +#define npyv_rev64_s32 npyv_rev64_u32 + +NPY_FINLINE npyv_f32 npyv_rev64_f32(npyv_f32 a) +{ + return _mm256_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1)); +} + #endif // _NPY_SIMD_AVX2_REORDER_H diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index 0f7e27de3a0a..bd92abccd44c 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -51,4 +51,35 @@ #define npyv_cvt_b32_f32(A) npyv_cvt_b32_u32(_mm512_castps_si512(A)) #define npyv_cvt_b64_f64(A) npyv_cvt_b64_u64(_mm512_castpd_si512(A)) +// convert boolean vectors to integer bitfield +NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) +{ +#ifdef NPY_HAVE_AVX512BW_MASK + return (npy_uint64)_cvtmask64_u64(a); +#elif NPY_HAVE_AVX512BW + return (npy_uint64)a; +#else + int mask_lo = _mm256_movemask_epi8(npyv512_lower_si256(a)); + int mask_hi = _mm256_movemask_epi8(npyv512_higher_si256(a)); + return (unsigned)mask_lo | ((npy_uint64)(unsigned)mask_hi << 32); +#endif +} +NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) +{ +#ifdef NPY_HAVE_AVX512BW_MASK + return (npy_uint32)_cvtmask32_u32(a); +#elif NPY_HAVE_AVX512BW + return (npy_uint32)a; +#else + __m256i pack = _mm256_packs_epi16( + npyv512_lower_si256(a), npyv512_higher_si256(a) + ); + return (npy_uint32)_mm256_movemask_epi8(_mm256_permute4x64_epi64(pack, _MM_SHUFFLE(3, 1, 2, 0))); +#endif +} +NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) +{ return (npy_uint16)a; } +NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) +{ return (npy_uint8)a; } + #endif // _NPY_SIMD_AVX512_CVT_H diff --git a/numpy/core/src/common/simd/avx512/reorder.h b/numpy/core/src/common/simd/avx512/reorder.h index 
cdbae7aaccee..f043004ecc45 100644 --- a/numpy/core/src/common/simd/avx512/reorder.h +++ b/numpy/core/src/common/simd/avx512/reorder.h @@ -167,4 +167,60 @@ NPY_FINLINE npyv_f64x2 npyv_zip_f64(__m512d a, __m512d b) return r; } +// Reverse elements of each 64-bit lane +NPY_FINLINE npyv_u8 npyv_rev64_u8(npyv_u8 a) +{ +#ifdef NPY_HAVE_AVX512BW + const __m512i idx = npyv_set_u8( + 7, 6, 5, 4, 3, 2, 1, 0,/*64*/15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0,/*64*/15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0,/*64*/15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0,/*64*/15, 14, 13, 12, 11, 10, 9, 8 + ); + return _mm512_shuffle_epi8(a, idx); +#else + const __m256i idx = _mm256_setr_epi8( + 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 + ); + __m256i lo = _mm256_shuffle_epi8(npyv512_lower_si256(a), idx); + __m256i hi = _mm256_shuffle_epi8(npyv512_higher_si256(a), idx); + return npyv512_combine_si256(lo, hi); +#endif +} +#define npyv_rev64_s8 npyv_rev64_u8 + +NPY_FINLINE npyv_u16 npyv_rev64_u16(npyv_u16 a) +{ +#ifdef NPY_HAVE_AVX512BW + const __m512i idx = npyv_set_u8( + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9, + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9, + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9, + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9 + ); + return _mm512_shuffle_epi8(a, idx); +#else + const __m256i idx = _mm256_setr_epi8( + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9, + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9 + ); + __m256i lo = _mm256_shuffle_epi8(npyv512_lower_si256(a), idx); + __m256i hi = _mm256_shuffle_epi8(npyv512_higher_si256(a), idx); + return npyv512_combine_si256(lo, hi); +#endif +} +#define npyv_rev64_s16 npyv_rev64_u16 + +NPY_FINLINE npyv_u32 npyv_rev64_u32(npyv_u32 a) +{ + return _mm512_shuffle_epi32(a, _MM_SHUFFLE(2, 3, 0, 1)); +} +#define npyv_rev64_s32 npyv_rev64_u32 + +NPY_FINLINE npyv_f32 npyv_rev64_f32(npyv_f32 a) +{ + return _mm512_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1)); +} + #endif // _NPY_SIMD_AVX512_REORDER_H diff --git a/numpy/core/src/common/simd/neon/conversion.h b/numpy/core/src/common/simd/neon/conversion.h index b286931d1404..f9840b1cb710 100644 --- a/numpy/core/src/common/simd/neon/conversion.h +++ b/numpy/core/src/common/simd/neon/conversion.h @@ -7,26 +7,68 @@ // convert boolean vectors to integer vectors #define npyv_cvt_u8_b8(A) A -#define npyv_cvt_s8_b8(A) vreinterpretq_s8_u8(A) +#define npyv_cvt_s8_b8 vreinterpretq_s8_u8 #define npyv_cvt_u16_b16(A) A -#define npyv_cvt_s16_b16(A) vreinterpretq_s16_u16(A) +#define npyv_cvt_s16_b16 vreinterpretq_s16_u16 #define npyv_cvt_u32_b32(A) A -#define npyv_cvt_s32_b32(A) vreinterpretq_s32_u32(A) +#define npyv_cvt_s32_b32 vreinterpretq_s32_u32 #define npyv_cvt_u64_b64(A) A -#define npyv_cvt_s64_b64(A) vreinterpretq_s64_u64(A) -#define npyv_cvt_f32_b32(A) vreinterpretq_f32_u32(A) -#define npyv_cvt_f64_b64(A) vreinterpretq_f64_u64(A) +#define npyv_cvt_s64_b64 vreinterpretq_s64_u64 +#define npyv_cvt_f32_b32 vreinterpretq_f32_u32 +#define npyv_cvt_f64_b64 vreinterpretq_f64_u64 // convert integer vectors to boolean vectors #define npyv_cvt_b8_u8(BL) BL -#define npyv_cvt_b8_s8(BL) vreinterpretq_u8_s8(BL) +#define npyv_cvt_b8_s8 vreinterpretq_u8_s8 #define npyv_cvt_b16_u16(BL) BL -#define npyv_cvt_b16_s16(BL) vreinterpretq_u16_s16(BL) +#define npyv_cvt_b16_s16 vreinterpretq_u16_s16 #define npyv_cvt_b32_u32(BL) BL -#define npyv_cvt_b32_s32(BL) 
vreinterpretq_u32_s32(BL) +#define npyv_cvt_b32_s32 vreinterpretq_u32_s32 #define npyv_cvt_b64_u64(BL) BL -#define npyv_cvt_b64_s64(BL) vreinterpretq_u64_s64(BL) -#define npyv_cvt_b32_f32(BL) vreinterpretq_u32_f32(BL) -#define npyv_cvt_b64_f64(BL) vreinterpretq_u64_f64(BL) +#define npyv_cvt_b64_s64 vreinterpretq_u64_s64 +#define npyv_cvt_b32_f32 vreinterpretq_u32_f32 +#define npyv_cvt_b64_f64 vreinterpretq_u64_f64 + +// convert boolean vector to integer bitfield +NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) +{ + const npyv_u8 scale = npyv_set_u8(1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128); + npyv_u8 seq_scale = vandq_u8(a, scale); +#if NPY_SIMD_F64 + npy_uint8 sumlo = vaddv_u8(vget_low_u8(seq_scale)); + npy_uint8 sumhi = vaddv_u8(vget_high_u8(seq_scale)); + return sumlo + ((int)sumhi << 8); +#else + npyv_u64 sumh = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(seq_scale))); + return vgetq_lane_u64(sumh, 0) + ((int)vgetq_lane_u64(sumh, 1) << 8); +#endif +} +NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) +{ + const npyv_u16 scale = npyv_set_u16(1, 2, 4, 8, 16, 32, 64, 128); + npyv_u16 seq_scale = vandq_u16(a, scale); +#if NPY_SIMD_F64 + return vaddvq_u16(seq_scale); +#else + npyv_u64 sumh = vpaddlq_u32(vpaddlq_u16(seq_scale)); + return vgetq_lane_u64(sumh, 0) + vgetq_lane_u64(sumh, 1); +#endif +} +NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) +{ + const npyv_u32 scale = npyv_set_u32(1, 2, 4, 8); + npyv_u32 seq_scale = vandq_u32(a, scale); +#if NPY_SIMD_F64 + return vaddvq_u32(seq_scale); +#else + npyv_u64 sumh = vpaddlq_u32(seq_scale); + return vgetq_lane_u64(sumh, 0) + vgetq_lane_u64(sumh, 1); +#endif +} +NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) +{ + npyv_u64 bit = vshrq_n_u64(a, 63); + return vgetq_lane_u64(bit, 0) | ((int)vgetq_lane_u64(bit, 1) << 1); +} #endif // _NPY_SIMD_NEON_CVT_H diff --git a/numpy/core/src/common/simd/neon/reorder.h b/numpy/core/src/common/simd/neon/reorder.h index 712a77982ee6..50b06ed11c01 100644 --- a/numpy/core/src/common/simd/neon/reorder.h +++ b/numpy/core/src/common/simd/neon/reorder.h @@ -107,4 +107,13 @@ NPYV_IMPL_NEON_COMBINE(npyv_f64, f64) #define npyv_zip_u64 npyv_combine_u64 #define npyv_zip_s64 npyv_combine_s64 +// Reverse elements of each 64-bit lane +#define npyv_rev64_u8 vrev64q_u8 +#define npyv_rev64_s8 vrev64q_s8 +#define npyv_rev64_u16 vrev64q_u16 +#define npyv_rev64_s16 vrev64q_s16 +#define npyv_rev64_u32 vrev64q_u32 +#define npyv_rev64_s32 vrev64q_s32 +#define npyv_rev64_f32 vrev64q_f32 + #endif // _NPY_SIMD_NEON_REORDER_H diff --git a/numpy/core/src/common/simd/sse/conversion.h b/numpy/core/src/common/simd/sse/conversion.h index ea9660d13a30..ab4beea96db6 100644 --- a/numpy/core/src/common/simd/sse/conversion.h +++ b/numpy/core/src/common/simd/sse/conversion.h @@ -14,8 +14,8 @@ #define npyv_cvt_s32_b32(BL) BL #define npyv_cvt_u64_b64(BL) BL #define npyv_cvt_s64_b64(BL) BL -#define npyv_cvt_f32_b32(BL) _mm_castsi128_ps(BL) -#define npyv_cvt_f64_b64(BL) _mm_castsi128_pd(BL) +#define npyv_cvt_f32_b32 _mm_castsi128_ps +#define npyv_cvt_f64_b64 _mm_castsi128_pd // convert integer types to mask types #define npyv_cvt_b8_u8(A) A @@ -26,7 +26,20 @@ #define npyv_cvt_b32_s32(A) A #define npyv_cvt_b64_u64(A) A #define npyv_cvt_b64_s64(A) A -#define npyv_cvt_b32_f32(A) _mm_castps_si128(A) -#define npyv_cvt_b64_f64(A) _mm_castpd_si128(A) +#define npyv_cvt_b32_f32 _mm_castps_si128 +#define npyv_cvt_b64_f64 _mm_castpd_si128 + +// convert boolean vector to integer bitfield +NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) +{ return 
(npy_uint16)_mm_movemask_epi8(a); } +NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) +{ + __m128i pack = _mm_packs_epi16(a, a); + return (npy_uint8)_mm_movemask_epi8(pack); +} +NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) +{ return (npy_uint8)_mm_movemask_ps(_mm_castsi128_ps(a)); } +NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) +{ return (npy_uint8)_mm_movemask_pd(_mm_castsi128_pd(a)); } #endif // _NPY_SIMD_SSE_CVT_H diff --git a/numpy/core/src/common/simd/sse/reorder.h b/numpy/core/src/common/simd/sse/reorder.h index 3f68b4ad70e4..d96ab9c5688b 100644 --- a/numpy/core/src/common/simd/sse/reorder.h +++ b/numpy/core/src/common/simd/sse/reorder.h @@ -81,4 +81,45 @@ NPYV_IMPL_SSE_ZIP(npyv_s64, s64, epi64) NPYV_IMPL_SSE_ZIP(npyv_f32, f32, ps) NPYV_IMPL_SSE_ZIP(npyv_f64, f64, pd) +// Reverse elements of each 64-bit lane +NPY_FINLINE npyv_u16 npyv_rev64_u16(npyv_u16 a) +{ +#ifdef NPY_HAVE_SSSE3 + const __m128i idx = _mm_setr_epi8( + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9 + ); + return _mm_shuffle_epi8(a, idx); +#else + __m128i lo = _mm_shufflelo_epi16(a, _MM_SHUFFLE(0, 1, 2, 3)); + return _mm_shufflehi_epi16(lo, _MM_SHUFFLE(0, 1, 2, 3)); +#endif +} +#define npyv_rev64_s16 npyv_rev64_u16 + +NPY_FINLINE npyv_u8 npyv_rev64_u8(npyv_u8 a) +{ +#ifdef NPY_HAVE_SSSE3 + const __m128i idx = _mm_setr_epi8( + 7, 6, 5, 4, 3, 2, 1, 0,/*64*/15, 14, 13, 12, 11, 10, 9, 8 + ); + return _mm_shuffle_epi8(a, idx); +#else + __m128i rev16 = npyv_rev64_u16(a); + // swap 8bit pairs + return _mm_or_si128(_mm_slli_epi16(rev16, 8), _mm_srli_epi16(rev16, 8)); +#endif +} +#define npyv_rev64_s8 npyv_rev64_u8 + +NPY_FINLINE npyv_u32 npyv_rev64_u32(npyv_u32 a) +{ + return _mm_shuffle_epi32(a, _MM_SHUFFLE(2, 3, 0, 1)); +} +#define npyv_rev64_s32 npyv_rev64_u32 + +NPY_FINLINE npyv_f32 npyv_rev64_f32(npyv_f32 a) +{ + return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1)); +} + #endif // _NPY_SIMD_SSE_REORDER_H diff --git a/numpy/core/src/common/simd/vsx/conversion.h b/numpy/core/src/common/simd/vsx/conversion.h index 6ed135990ccc..5803e1cdd7a0 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -29,4 +29,26 @@ #define npyv_cvt_b32_f32(A) ((npyv_b32) A) #define npyv_cvt_b64_f64(A) ((npyv_b64) A) +// convert boolean vector to integer bitfield +NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) +{ + const npyv_u8 qperm = npyv_set_u8(120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0); + return vec_extract((npyv_u32)vec_vbpermq((npyv_u8)a, qperm), 2); +} +NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) +{ + const npyv_u8 qperm = npyv_setf_u8(128, 112, 96, 80, 64, 48, 32, 16, 0); + return vec_extract((npyv_u32)vec_vbpermq((npyv_u8)a, qperm), 2); +} +NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) +{ + const npyv_u8 qperm = npyv_setf_u8(128, 96, 64, 32, 0); + return vec_extract((npyv_u32)vec_vbpermq((npyv_u8)a, qperm), 2); +} +NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) +{ + npyv_u64 bit = npyv_shri_u64((npyv_u64)a, 63); + return vec_extract(bit, 0) | (int)vec_extract(bit, 1) << 1; +} + #endif // _NPY_SIMD_VSX_CVT_H diff --git a/numpy/core/src/common/simd/vsx/reorder.h b/numpy/core/src/common/simd/vsx/reorder.h index bfb9115fac81..6533e50933d2 100644 --- a/numpy/core/src/common/simd/vsx/reorder.h +++ b/numpy/core/src/common/simd/vsx/reorder.h @@ -62,4 +62,45 @@ NPYV_IMPL_VSX_COMBINE_ZIP(npyv_s64, s64) NPYV_IMPL_VSX_COMBINE_ZIP(npyv_f32, f32) NPYV_IMPL_VSX_COMBINE_ZIP(npyv_f64, f64) +// Reverse elements of each 64-bit lane 
+NPY_FINLINE npyv_u8 npyv_rev64_u8(npyv_u8 a) +{ +#if defined(NPY_HAVE_VSX3) && ((defined(__GNUC__) && __GNUC__ > 7) || defined(__IBMC__)) + return (npyv_u8)vec_revb((npyv_u64)a); +#elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX_ASM) + npyv_u8 ret; + __asm__ ("xxbrd %x0,%x1" : "=wa" (ret) : "wa" (a)); + return ret; +#else + const npyv_u8 idx = npyv_set_u8( + 7, 6, 5, 4, 3, 2, 1, 0,/*64*/15, 14, 13, 12, 11, 10, 9, 8 + ); + return vec_perm(a, a, idx); +#endif +} +NPY_FINLINE npyv_s8 npyv_rev64_s8(npyv_s8 a) +{ return (npyv_s8)npyv_rev64_u8((npyv_u8)a); } + +NPY_FINLINE npyv_u16 npyv_rev64_u16(npyv_u16 a) +{ + const npyv_u8 idx = npyv_set_u8( + 6, 7, 4, 5, 2, 3, 0, 1,/*64*/14, 15, 12, 13, 10, 11, 8, 9 + ); + return vec_perm(a, a, idx); +} +NPY_FINLINE npyv_s16 npyv_rev64_s16(npyv_s16 a) +{ return (npyv_s16)npyv_rev64_u16((npyv_u16)a); } + +NPY_FINLINE npyv_u32 npyv_rev64_u32(npyv_u32 a) +{ + const npyv_u8 idx = npyv_set_u8( + 4, 5, 6, 7, 0, 1, 2, 3,/*64*/12, 13, 14, 15, 8, 9, 10, 11 + ); + return vec_perm(a, a, idx); +} +NPY_FINLINE npyv_s32 npyv_rev64_s32(npyv_s32 a) +{ return (npyv_s32)npyv_rev64_u32((npyv_u32)a); } +NPY_FINLINE npyv_f32 npyv_rev64_f32(npyv_f32 a) +{ return (npyv_f32)npyv_rev64_u32((npyv_u32)a); } + #endif // _NPY_SIMD_VSX_REORDER_H diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 13e8d5ede460..196003cdd3e3 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -92,6 +92,32 @@ def _nan(self): v = self.npyv.setall_u32(0x7fc00000) return self.npyv.reinterpret_f32_u32(v)[0] +class _SIMD_BOOL(_Test_Utility): + """ + To test all boolean vector types at once + """ + def _data(self, start=None, count=None, reverse=False): + nlanes = getattr(self.npyv, "nlanes_u" + self.sfx[1:]) + true_mask = self._true_mask() + rng = range(nlanes) + if reverse: + rng = reversed(rng) + return [true_mask if x % 2 else 0 for x in rng] + + def _load_b(self, data): + len_str = self.sfx[1:] + load = getattr(self.npyv, "load_u" + len_str) + cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}") + return cvt(load(data)) + + def test_tobits(self): + data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)]) + for data in (self._data(), self._data(reverse=True)): + vdata = self._load_b(data) + data_bits = data2bits(data) + tobits = bin(self.tobits(vdata)) + assert tobits == bin(data_bits) + class _SIMD_INT(_Test_Utility): """ To test all integer vector types at once @@ -459,6 +485,18 @@ def test_reorder(self): vzip = self.zip(vdata_a, vdata_b) assert vzip == (data_zipl, data_ziph) + def test_reorder_rev64(self): + # Reverse elements of each 64-bit lane + ssize = self._scalar_size() + if ssize == 64: + return + data_rev64 = [ + y for x in range(0, self.nlanes, 64//ssize) + for y in reversed(range(x, x + 64//ssize)) + ] + rev64 = self.rev64(self.load(range(self.nlanes))) + assert rev64 == data_rev64 + def test_operators_comparison(self): if self._is_fp(): data_a = self._data() @@ -594,10 +632,12 @@ def test_arithmetic_reduce_sum(self): vsum = self.sum(vdata) assert vsum == data_sum +bool_sfx = ("b8", "b16", "b32", "b64") int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64") fp_sfx = ("f32", "f64") all_sfx = int_sfx + fp_sfx tests_registry = { + bool_sfx: _SIMD_BOOL, int_sfx : _SIMD_INT, fp_sfx : _SIMD_FP, all_sfx : _SIMD_ALL From dcbfe242064b032d3f0772a10e714aeb19458cb3 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 17 Nov 2020 01:10:24 +0000 Subject: [PATCH 0074/1270] BLD, SIMD: Add check for inline ASM VSX 
support --- numpy/distutils/ccompiler_opt.py | 2 +- numpy/distutils/checks/extra_vsx_asm.c | 36 ++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 numpy/distutils/checks/extra_vsx_asm.c diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index 3eba6e32af91..20dbb5c00ddb 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -276,7 +276,7 @@ class _Config: ), # IBM/Power ## Power7/ISA 2.06 - VSX = dict(interest=1, headers="altivec.h"), + VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), ## Power8/ISA 2.07 VSX2 = dict(interest=2, implies="VSX", implies_detect=False), ## Power9/ISA 3.00 diff --git a/numpy/distutils/checks/extra_vsx_asm.c b/numpy/distutils/checks/extra_vsx_asm.c new file mode 100644 index 000000000000..b73a6f43808e --- /dev/null +++ b/numpy/distutils/checks/extra_vsx_asm.c @@ -0,0 +1,36 @@ +/** + * Testing ASM VSX register number fixer '%x' + * + * old versions of CLANG doesn't support %x in the inline asm template + * which fixes register number when using any of the register constraints wa, wd, wf. + * + * xref: + * - https://bugs.llvm.org/show_bug.cgi?id=31837 + * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html + */ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include + +#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__)) + #define vsx_ld vec_vsx_ld + #define vsx_st vec_vsx_st +#else + #define vsx_ld vec_xl + #define vsx_st vec_xst +#endif + +int main(void) +{ + float z4[] = {0, 0, 0, 0}; + signed int zout[] = {0, 0, 0, 0}; + + __vector float vz4 = vsx_ld(0, z4); + __vector signed int asm_ret = vsx_ld(0, zout); + + __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret)); + + vsx_st(asm_ret, 0, zout); + return zout[0]; +} From 7f5392d2581e789917b8ba5352d821277d5de8ab Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 17 Nov 2020 22:55:45 +0100 Subject: [PATCH 0075/1270] ENH: Add `_BoolLike` to the union defining `_IntLike` --- numpy/typing/_scalars.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/typing/_scalars.py b/numpy/typing/_scalars.py index e4fc28b074ac..d1a801cc233d 100644 --- a/numpy/typing/_scalars.py +++ b/numpy/typing/_scalars.py @@ -7,8 +7,10 @@ _CharLike = Union[str, bytes] +# The 6 `Like` type-aliases below represent all scalars that can be +# coerced into `` (with the casting rule `same_kind`) _BoolLike = Union[bool, np.bool_] -_IntLike = Union[int, np.integer] +_IntLike = Union[_BoolLike, int, np.integer] _FloatLike = Union[_IntLike, float, np.floating] _ComplexLike = Union[_FloatLike, complex, np.complexfloating] _NumberLike = Union[int, float, complex, np.number, np.bool_] From a3703dc8b95b533bd2626a53a483f532155dcc4f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 17 Nov 2020 22:57:04 +0100 Subject: [PATCH 0076/1270] ENH: Add `_UIntLike` and `_TD64Like` --- numpy/typing/__init__.py | 2 ++ numpy/typing/_scalars.py | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index cbde75462332..d8186a631bb1 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -175,9 +175,11 @@ class _8Bit(_16Bit): ... 
# type: ignore[misc] from ._scalars import ( _CharLike, _BoolLike, + _UIntLike, _IntLike, _FloatLike, _ComplexLike, + _TD64Like, _NumberLike, _ScalarLike, _VoidLike, diff --git a/numpy/typing/_scalars.py b/numpy/typing/_scalars.py index d1a801cc233d..90b2eff7b464 100644 --- a/numpy/typing/_scalars.py +++ b/numpy/typing/_scalars.py @@ -10,11 +10,13 @@ # The 6 `Like` type-aliases below represent all scalars that can be # coerced into `` (with the casting rule `same_kind`) _BoolLike = Union[bool, np.bool_] +_UIntLike = Union[_BoolLike, np.unsignedinteger] _IntLike = Union[_BoolLike, int, np.integer] _FloatLike = Union[_IntLike, float, np.floating] _ComplexLike = Union[_FloatLike, complex, np.complexfloating] -_NumberLike = Union[int, float, complex, np.number, np.bool_] +_TD64Like = Union[_IntLike, np.timedelta64] +_NumberLike = Union[int, float, complex, np.number, np.bool_] _ScalarLike = Union[ int, float, From 0da188da1e7a65b85b73d2499f7397fea89ff7bd Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 17 Nov 2020 22:58:16 +0100 Subject: [PATCH 0077/1270] MAINT: Simplify existing `_Like` annotations --- numpy/__init__.pyi | 49 +++++++++++++++++++-------------------- numpy/typing/_callable.py | 2 +- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 579dd03d63f7..cfa53b5c0cbd 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -16,6 +16,7 @@ from numpy.typing import ( _IntLike, _FloatLike, _ComplexLike, + _TD64Like, _NumberLike, _SupportsDType, _VoidDTypeLike, @@ -986,10 +987,8 @@ _SortSide = Literal["left", "right"] _ArrayLikeBool = Union[_BoolLike, Sequence[_BoolLike], ndarray] _ArrayLikeIntOrBool = Union[ _IntLike, - _BoolLike, ndarray, Sequence[_IntLike], - Sequence[_BoolLike], Sequence[Sequence[Any]], # TODO: wait for support for recursive types ] @@ -1392,7 +1391,7 @@ class _ArrayOrScalarCommon: @overload def take( self, - indices: Union[_IntLike, _BoolLike], + indices: _IntLike, axis: Optional[int] = ..., out: None = ..., mode: _ModeKind = ..., @@ -1752,12 +1751,12 @@ class datetime64(generic): __value: int, __format: Union[_CharLike, Tuple[_CharLike, _IntLike]] ) -> None: ... - def __add__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ... - def __radd__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ... + def __add__(self, other: _TD64Like) -> datetime64: ... + def __radd__(self, other: _TD64Like) -> datetime64: ... @overload def __sub__(self, other: datetime64) -> timedelta64: ... @overload - def __sub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ... + def __sub__(self, other: _TD64Like) -> datetime64: ... def __rsub__(self, other: datetime64) -> timedelta64: ... # Support for `__index__` was added in python 3.8 (bpo-20092) @@ -1776,20 +1775,20 @@ class integer(number[_NBit_co]): # type: ignore def __index__(self) -> int: ... __truediv__: _IntTrueDiv[_NBit_co] __rtruediv__: _IntTrueDiv[_NBit_co] - def __mod__(self, value: Union[_IntLike, integer]) -> integer: ... - def __rmod__(self, value: Union[_IntLike, integer]) -> integer: ... + def __mod__(self, value: _IntLike) -> integer: ... + def __rmod__(self, value: _IntLike) -> integer: ... def __invert__(self: _IntType) -> _IntType: ... # Ensure that objects annotated as `integer` support bit-wise operations - def __lshift__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... - def __rlshift__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... 
- def __rshift__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... - def __rrshift__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... - def __and__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... - def __rand__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... - def __or__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... - def __ror__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... - def __xor__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... - def __rxor__(self, other: Union[_IntLike, _BoolLike]) -> integer: ... + def __lshift__(self, other: _IntLike) -> integer: ... + def __rlshift__(self, other: _IntLike) -> integer: ... + def __rshift__(self, other: _IntLike) -> integer: ... + def __rrshift__(self, other: _IntLike) -> integer: ... + def __and__(self, other: _IntLike) -> integer: ... + def __rand__(self, other: _IntLike) -> integer: ... + def __or__(self, other: _IntLike) -> integer: ... + def __ror__(self, other: _IntLike) -> integer: ... + def __xor__(self, other: _IntLike) -> integer: ... + def __rxor__(self, other: _IntLike) -> integer: ... class signedinteger(integer[_NBit_co]): def __init__(self, __value: _IntValue = ...) -> None: ... @@ -1835,12 +1834,12 @@ class timedelta64(generic): def __neg__(self: _ArraySelf) -> _ArraySelf: ... def __pos__(self: _ArraySelf) -> _ArraySelf: ... def __abs__(self: _ArraySelf) -> _ArraySelf: ... - def __add__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ... - def __radd__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ... - def __sub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ... - def __rsub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ... - def __mul__(self, other: Union[_FloatLike, _BoolLike]) -> timedelta64: ... - def __rmul__(self, other: Union[_FloatLike, _BoolLike]) -> timedelta64: ... + def __add__(self, other: _TD64Like) -> timedelta64: ... + def __radd__(self, other: _TD64Like) -> timedelta64: ... + def __sub__(self, other: _TD64Like) -> timedelta64: ... + def __rsub__(self, other: _TD64Like) -> timedelta64: ... + def __mul__(self, other: _FloatLike) -> timedelta64: ... + def __rmul__(self, other: _FloatLike) -> timedelta64: ... __truediv__: _TD64Div[float64] __floordiv__: _TD64Div[int64] def __rtruediv__(self, other: timedelta64) -> float64: ... @@ -1941,7 +1940,7 @@ complex128 = complexfloating[_64Bit, _64Bit] class flexible(generic): ... # type: ignore class void(flexible): - def __init__(self, __value: Union[_IntLike, _BoolLike, bytes]): ... + def __init__(self, __value: Union[_IntLike, bytes]): ... @property def real(self: _ArraySelf) -> _ArraySelf: ... @property diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 91b7a4ec21c0..97dc3c2e3dce 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -101,7 +101,7 @@ def __call__(self, __other: _NumberType) -> _NumberType: ... class _BoolTrueDiv(Protocol): @overload - def __call__(self, __other: Union[float, _IntLike, _BoolLike]) -> float64: ... + def __call__(self, __other: Union[float, _IntLike]) -> float64: ... @overload def __call__(self, __other: complex) -> complex128: ... 
@overload From cba1d6f1cab31b1dca0e7ec7e6ee40ead786ca08 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 17 Nov 2020 08:46:41 +0000 Subject: [PATCH 0078/1270] BLD: Enable Werror=undef in travis --- numpy/core/include/numpy/npy_common.h | 14 ++++++-- numpy/core/include/numpy/npy_math.h | 6 ++-- numpy/core/src/umath/loops.c.src | 2 +- numpy/core/src/umath/simd.inc.src | 34 ++++++++++---------- numpy/core/src/umath/ufunc_type_resolution.c | 5 +++ numpy/random/src/pcg64/pcg64.h | 6 ++-- tools/travis-test.sh | 7 ++++ 7 files changed, 48 insertions(+), 26 deletions(-) diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h index 78d6d31207f1..9ce5caa66056 100644 --- a/numpy/core/include/numpy/npy_common.h +++ b/numpy/core/include/numpy/npy_common.h @@ -10,13 +10,23 @@ #include #endif +// compile time environment variables +#ifndef NPY_RELAXED_STRIDES_CHECKING + #define NPY_RELAXED_STRIDES_CHECKING 0 +#endif +#ifndef NPY_RELAXED_STRIDES_DEBUG + #define NPY_RELAXED_STRIDES_DEBUG 0 +#endif + /* * using static inline modifiers when defining npy_math functions * allows the compiler to make optimizations when possible */ -#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD #ifndef NPY_INLINE_MATH -#define NPY_INLINE_MATH 1 +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + #define NPY_INLINE_MATH 1 +#else + #define NPY_INLINE_MATH 0 #endif #endif diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h index dbade058f9dc..7d71c36ccd3b 100644 --- a/numpy/core/include/numpy/npy_math.h +++ b/numpy/core/include/numpy/npy_math.h @@ -211,7 +211,7 @@ double npy_spacing(double x); /* use builtins to avoid function calls in tight loops * only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISNAN +#ifdef HAVE___BUILTIN_ISNAN #define npy_isnan(x) __builtin_isnan(x) #else #ifndef NPY_HAVE_DECL_ISNAN @@ -227,7 +227,7 @@ double npy_spacing(double x); /* only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISFINITE +#ifdef HAVE___BUILTIN_ISFINITE #define npy_isfinite(x) __builtin_isfinite(x) #else #ifndef NPY_HAVE_DECL_ISFINITE @@ -242,7 +242,7 @@ double npy_spacing(double x); #endif /* only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISINF +#ifdef HAVE___BUILTIN_ISINF #define npy_isinf(x) __builtin_isinf(x) #else #ifndef NPY_HAVE_DECL_ISINF diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index c9efdeb4efb6..be9923bceece 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -570,7 +570,7 @@ NPY_NO_EXPORT void /**begin repeat1 * #isa = , _avx2# * #ISA = , AVX2# - * #CHK = 1, HAVE_ATTRIBUTE_TARGET_AVX2# + * #CHK = 1, defined(HAVE_ATTRIBUTE_TARGET_AVX2)# * #ATTR = , NPY_GCC_TARGET_AVX2# */ diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 71ee7e07ee70..a118fb0d091e 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -864,7 +864,7 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) } } } -#elif __AVX2__ +#elif defined __AVX2__ const npy_intp vector_size_bytes = 32; LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) op[i] = ip1[i] @OP@ ip2[i]; @@ -1006,7 +1006,7 @@ sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i } -#elif __AVX2__ +#elif defined __AVX2__ const npy_intp vector_size_bytes = 32; const @vtype256@ a = 
@vpre256@_set1_@vsuf@(ip1[0]); LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) @@ -1073,7 +1073,7 @@ sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i } } -#elif __AVX2__ +#elif defined __AVX2__ const npy_intp vector_size_bytes = 32; const @vtype256@ b = @vpre256@_set1_@vsuf@(ip2[0]); LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) @@ -2116,7 +2116,7 @@ AVX512_SKX_@func@_@TYPE@(npy_bool* op, @type@* ip, const npy_intp array_size, co x1 = avx512_masked_gather_@vsuffix@(zeros_f, ip, vindex_ip, load_mask); } #if @is_signbit@ - x1 = _mm512_and_@vsuffix@(x1,signbit); + x1 = _mm512_and_@vsuffix@(x1,signbit); #endif @mask@ fpclassmask = _mm512_fpclass_@vsuffix@_mask(x1, @IMM8@); @@ -2209,7 +2209,7 @@ AVX512_SKX_ldexp_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const } @vtype1@ out = _mm512_scalef_@vsuffix@(x1, _mm512_cvtepi32_@vsuffix@(x2)); - + if (stride_op == 1) { _mm512_mask_storeu_@vsuffix@(op, load_mask, out); } @@ -3035,7 +3035,7 @@ AVX512F_exp_DOUBLE(npy_double * op, __m512d mTable_tail_1 = _mm512_loadu_pd(&(EXP_Table_tail[8*1])); __m512d mTable_tail_2 = _mm512_loadu_pd(&(EXP_Table_tail[8*2])); __m512d mTable_tail_3 = _mm512_loadu_pd(&(EXP_Table_tail[8*3])); - + __mmask8 overflow_mask = avx512_get_partial_load_mask_pd(0, num_lanes); __mmask8 load_mask = avx512_get_full_load_mask_pd(); __mmask8 xmin_mask, xmax_mask, inf_mask, nan_mask, nearzero_mask; @@ -3059,12 +3059,12 @@ AVX512F_exp_DOUBLE(npy_double * op, xmax_mask = _mm512_cmp_pd_mask(x, mTH_max, _CMP_GT_OQ); xmin_mask = _mm512_cmp_pd_mask(x, mTH_min, _CMP_LT_OQ); inf_mask = _mm512_cmp_pd_mask(x, mTH_inf, _CMP_EQ_OQ); - __m512i x_abs = _mm512_and_epi64(_mm512_castpd_si512(x), + __m512i x_abs = _mm512_and_epi64(_mm512_castpd_si512(x), _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF)); - nearzero_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(x_abs), + nearzero_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(x_abs), mTH_nearzero, _CMP_LT_OQ); nearzero_mask = _mm512_kxor(nearzero_mask, nan_mask); - overflow_mask = _mm512_kor(overflow_mask, + overflow_mask = _mm512_kor(overflow_mask, _mm512_kxor(xmax_mask, inf_mask)); x = avx512_set_masked_lanes_pd(x, zeros_d, _mm512_kor(_mm512_kor(nan_mask, xmin_mask), @@ -3072,7 +3072,7 @@ AVX512F_exp_DOUBLE(npy_double * op, /* z = x * 32/ln2 */ __m512d z = _mm512_mul_pd(x, InvLn2N); - + /* round to nearest */ __m512d kd = _mm512_add_pd(z, mShift); __m512i ki = _mm512_castpd_si512(kd); @@ -3099,9 +3099,9 @@ AVX512F_exp_DOUBLE(npy_double * op, __m512d tail = avx512_permute_x4var_pd(mTable_tail_0, mTable_tail_1, mTable_tail_2, mTable_tail_3, j); - /* + /* * s = top + tail; - * exp(x) = 2^m * (top + (tail + s * p)); + * exp(x) = 2^m * (top + (tail + s * p)); */ __m512d s = _mm512_add_pd(top, tail); __m512d res = _mm512_fmadd_pd(s, p, tail); @@ -3109,9 +3109,9 @@ AVX512F_exp_DOUBLE(npy_double * op, res= _mm512_scalef_pd(res, _mm512_div_pd(kd, _mm512_set1_pd(32))); /* return special cases */ - res = avx512_set_masked_lanes_pd(res, _mm512_add_pd(x, ones_d), + res = avx512_set_masked_lanes_pd(res, _mm512_add_pd(x, ones_d), nearzero_mask); - res = avx512_set_masked_lanes_pd(res, _mm512_set1_pd(NPY_NAN), + res = avx512_set_masked_lanes_pd(res, _mm512_set1_pd(NPY_NAN), nan_mask); res = avx512_set_masked_lanes_pd(res, mTH_inf, xmax_mask); res = avx512_set_masked_lanes_pd(res, zeros_d, xmin_mask); @@ -3131,8 +3131,8 @@ AVX512F_exp_DOUBLE(npy_double * op, /* * Vectorized implementation of log double using AVX512 - * Reference: - * [1] Tang, Ping Tak Peter. 
Table-lookup algorithms for elementary functions + * Reference: + * [1] Tang, Ping Tak Peter. Table-lookup algorithms for elementary functions * and their error analysis. No. CONF-9106103-1. Argonne National Lab., * IL (USA), 1991. * [2] Tang, Ping-Tak Peter. "Table-driven implementation of the logarithm @@ -3197,7 +3197,7 @@ AVX512F_log_DOUBLE(npy_double * op, __m512d mLUT_TOP_@i@ = _mm512_loadu_pd(&(LOG_TABLE_TOP[8*@i@])); __m512d mLUT_TAIL_@i@ = _mm512_loadu_pd(&(LOG_TABLE_TAIL[8*@i@])); - + /**end repeat**/ __mmask8 load_mask = avx512_get_full_load_mask_pd(); diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 3abeb2c5a202..3ce06322f589 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -12,6 +12,11 @@ #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION +// printif debug tracing +#ifndef NPY_UF_DBG_TRACING + #define NPY_UF_DBG_TRACING 0 +#endif + #include #include "Python.h" diff --git a/numpy/random/src/pcg64/pcg64.h b/numpy/random/src/pcg64/pcg64.h index 2a7217dd9ba8..31899cbc1469 100644 --- a/numpy/random/src/pcg64/pcg64.h +++ b/numpy/random/src/pcg64/pcg64.h @@ -57,11 +57,11 @@ #define inline __forceinline #endif -#if __GNUC_GNU_INLINE__ && !defined(__cplusplus) +#if defined(__GNUC_GNU_INLINE__) && !defined(__cplusplus) #error Nonstandard GNU inlining semantics. Compile with -std=c99 or better. #endif -#if __cplusplus +#ifdef __cplusplus extern "C" { #endif @@ -255,7 +255,7 @@ typedef pcg_state_setseq_128 pcg64_random_t; #define pcg64_advance_r pcg_setseq_128_advance_r #define PCG64_INITIALIZER PCG_STATE_SETSEQ_128_INITIALIZER -#if __cplusplus +#ifdef __cplusplus } #endif diff --git a/tools/travis-test.sh b/tools/travis-test.sh index bd2229ee7d8b..0d018fb660a3 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -46,6 +46,13 @@ setup_base() # the advantage that it tests that numpy is 'pip install' compatible, # see e.g. gh-2766... if [ -z "$USE_DEBUG" ]; then + # activates '-Werror=undef' when DEBUG isn't enabled since _cffi_backend' + # extension breaks the build due to the following error: + # + # error: "HAVE_FFI_PREP_CIF_VAR" is not defined, evaluates to 0 [-Werror=undef] + # #if !HAVE_FFI_PREP_CIF_VAR && defined(__arm64__) && defined(__APPLE__) + # + export CFLAGS="$CFLAGS -Werror=undef" $PIP install -v . 2>&1 | tee log else # The job run with USE_DEBUG=1 on travis needs this. From 95d6052902fc4763cbceee51ec08a3fff3dc6b1f Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 19 Nov 2020 11:04:30 +0800 Subject: [PATCH 0079/1270] optimize the remaining elements using npyv_load_tillz --- .../core/src/multiarray/einsum_sumprod.c.src | 27 ++++++------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index c9ab71e28523..efe9a59db609 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -49,21 +49,6 @@ #define EINSUM_IS_ALIGNED(x) npy_is_aligned(x, NPY_SIMD_WIDTH) #endif -/** - * This macro is used to enable a scalar loop which advances 4 elements at a - * time, which appears after a main SIMD loop gated by `CHK` that unrolls by - * `NPY_SIMD_WIDTH * unroll_by` elements, and before a non-unrolled scalar loop - * that finishes up all the remaining scalars. 
The purpose of the unrolled loop - * is to enable auto-vectorization in cases when all of the following are true: - * - * - optimization is allowed - * - either: - * - we did not run the SIMD loop at all, due to NPV being disabled. - * - the SIMD loop was larger than 128bit, so there are likely to be many - * elements left to process. - */ -#define EINSUM_UNROLL_4_SCALARS(CHK) (!defined(NPY_DISABLE_OPTIMIZATION) && (!(CHK) || NPY_SIMD > 128)) - /**********************************************/ /**begin repeat @@ -318,10 +303,14 @@ static void } } /**end repeat2**/ + for (; count > 0; count -= vstep, data0 += vstep, data1 += vstep, data_out += vstep) { + npyv_@sfx@ a = npyv_load_tillz_@sfx@(data0, count); + npyv_@sfx@ b = npyv_load_tillz_@sfx@(data1, count); + npyv_@sfx@ c = npyv_load_tillz_@sfx@(data_out, count); + npyv_store_till_@sfx@(data_out, count, npyv_muladd_@sfx@(a, b, c)); + } npyv_cleanup(); -#endif // NPYV check for @type@ - -#if EINSUM_UNROLL_4_SCALARS(@NPYV_CHK@) +#else for (; count >= 4; count -= 4, data0 += 4, data1 += 4, data_out += 4) { /**begin repeat2 * #i = 0, 1, 2, 3# @@ -341,7 +330,7 @@ static void data_out[@i@] = @to@(abc@i@); /**end repeat2**/ } -#endif +#endif // NPYV check for @type@ for (; count > 0; --count, ++data0, ++data1, ++data_out) { const @type@ a = @from@(*data0); const @type@ b = @from@(*data1); From f921f0d13bb34d82503bfa2b3bff24d095bb9385 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 19 Nov 2020 16:44:08 +0800 Subject: [PATCH 0080/1270] add guard #ifndef NPY_DISABLE_OPTIMIZATION --- numpy/core/src/multiarray/einsum_sumprod.c.src | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index efe9a59db609..caba0e00ad29 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -311,6 +311,7 @@ static void } npyv_cleanup(); #else +#ifndef NPY_DISABLE_OPTIMIZATION for (; count >= 4; count -= 4, data0 += 4, data1 += 4, data_out += 4) { /**begin repeat2 * #i = 0, 1, 2, 3# @@ -330,13 +331,15 @@ static void data_out[@i@] = @to@(abc@i@); /**end repeat2**/ } -#endif // NPYV check for @type@ +#endif // !NPY_DISABLE_OPTIMIZATION for (; count > 0; --count, ++data0, ++data1, ++data_out) { const @type@ a = @from@(*data0); const @type@ b = @from@(*data1); const @type@ c = @from@(*data_out); *data_out = @to@(a * b + c); } +#endif // NPYV check for @type@ + } /* Some extra specializations for the two operand case */ From 0517f134365808f8b81c6646cad1b0fe431f6d99 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Fri, 20 Nov 2020 23:47:45 +0530 Subject: [PATCH 0081/1270] TST: Added UT for floor divide --- numpy/core/tests/test_umath.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 818b2ad6c842..bd7dd23d829a 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -249,6 +249,29 @@ def test_division_int(self): assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) + @pytest.mark.parametrize("input_dtype", + [np.int8, np.int16, np.int32, np.int64]) + def test_division_int_boundary(self, input_dtype): + class ListWithDiv(list): + def __floordiv__(self, divisor): + return [i//divisor for i in self] + + iinfo = np.iinfo(input_dtype) + + # Create array with min, 25th percentile, 0, 75th percentile, max + arr = 
ListWithDiv([iinfo.min, iinfo.min//2, 0, iinfo.max//2, iinfo.max]) + dividends = [iinfo.min, iinfo.min//2, iinfo.max//2, iinfo.max] + a = np.array(arr, dtype = input_dtype) + + for dividend in dividends: + div_a = a // dividend + div_arr = arr // dividend + assert_(all(div_a == div_arr)) + + with np.errstate(divide='raise'): + with pytest.raises(FloatingPointError): + a // 0 + def test_division_complex(self): # check that implementation is correct msg = "Complex division implementation check" From a769d6f402b6aba2ebe9268635872fc1166d9510 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sat, 21 Nov 2020 18:41:21 +0530 Subject: [PATCH 0082/1270] ENH: Improved floor division (#17727) --- doc/release/upcoming_changes/17727.performance.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/release/upcoming_changes/17727.performance.rst b/doc/release/upcoming_changes/17727.performance.rst index 83054a3ea1da..c3a08bc8e04a 100755 --- a/doc/release/upcoming_changes/17727.performance.rst +++ b/doc/release/upcoming_changes/17727.performance.rst @@ -1,8 +1,7 @@ Improved performance in integer division of NumPy arrays -------------------------------------------------------- -Integer division of NumPy arrays now uses libdivide. -With builtin support for SSE2, AVX2 and AVX512 vector -division from libdivide and other minor improvements, -there is a large speedup. +Integer division of NumPy arrays now uses libdivide when +the divisor is a constant. With the usage of libdivde and +other minor optimizations, there is a large speedup. The ``//`` operator and ``np.floor_divide`` makes use of the new changes. From 0e2116f2d61ed15e69ca1b4e31e8709f17b86f71 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sat, 21 Nov 2020 19:23:20 +0530 Subject: [PATCH 0083/1270] ENH: Optimized 0 divisor cases --- numpy/core/src/umath/fast_loop_macros.h | 3 + numpy/core/src/umath/loops.c.src | 113 +++++++++++++++--------- 2 files changed, 74 insertions(+), 42 deletions(-) diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h index 90dcad3685d6..7ff4d1602956 100644 --- a/numpy/core/src/umath/fast_loop_macros.h +++ b/numpy/core/src/umath/fast_loop_macros.h @@ -58,6 +58,9 @@ abs_ptrdiff(char *a, char *b) #define BINARY_LOOP_FIXED\ for(i = 0; i < n; i++, ip1 += is1, op1 += os1) +#define BINARY_LOOP_ZERO\ + for(i = 0; i < n; i++, op1 += os1) + /** (ip1, ip2) -> (op1) */ #define BINARY_LOOP\ BINARY_LOOP_BASE\ diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index a7c0cb365f22..bfd23924c701 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -862,36 +862,45 @@ NPY_NO_EXPORT void { BINARY_LOOP_BASE - if (!is2) { + /* When the divisor is a constant, use libdivde for faster division */ + if (steps[1] == 0) { const @type@ in2 = *(@type@ *)ip2; - /* Creating a divisor of 0 is treated as an error by libdivide */ - struct libdivide_@type@_t fast_d = in2 ? libdivide_@type@_gen(in2) : (struct libdivide_@type@_t){0}; - BINARY_LOOP_FIXED { - const @type@ in1 = *(@type@ *)ip1; - /* - * FIXME: On x86 at least, dividing the smallest representable integer - * by -1 causes a SIFGPE (division overflow). We treat this case here - * (to avoid a SIGFPE crash at python level), but a good solution would - * be to treat integer division problems separately from FPU exceptions - * (i.e. a different approach than npy_set_floatstatus_divbyzero()). 
- */ - if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { + /* If divisor is 0, we need not compute anything*/ + if (in2 == 0) { + BINARY_LOOP_ZERO { npy_set_floatstatus_divbyzero(); *((@type@ *)op1) = 0; } - else { - *((@type@ *)op1) = libdivide_@type@_do(in1, &fast_d); + } + else { + struct libdivide_@type@_t fast_d = libdivide_@type@_gen(in2); + BINARY_LOOP_FIXED { + const @type@ in1 = *(@type@ *)ip1; + /* + * FIXME: On x86 at least, dividing the smallest representable integer + * by -1 causes a SIFGPE (division overflow). We treat this case here + * (to avoid a SIGFPE crash at python level), but a good solution would + * be to treat integer division problems separately from FPU exceptions + * (i.e. a different approach than npy_set_floatstatus_divbyzero()). + */ + if (in1 == NPY_MIN_@TYPE@ && in2 == -1) { + npy_set_floatstatus_divbyzero(); + *((@type@ *)op1) = 0; + } + else { + *((@type@ *)op1) = libdivide_@type@_do(in1, &fast_d); - /* Negative quotients needs to be rounded down */ - if (((in1 > 0) != (in2 > 0)) && (*((@type@ *)op1) * in2 != in1)) { - *((@type@ *)op1) = *((@type@ *)op1) - 1; + /* Negative quotients needs to be rounded down */ + if (((in1 > 0) != (in2 > 0)) && (*((@type@ *)op1) * in2 != in1)) { + *((@type@ *)op1) = *((@type@ *)op1) - 1; + } } } } } else { - BINARY_LOOP_SLIDING { // XXX Lot of repeated code + BINARY_LOOP_SLIDING { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; /* @@ -1405,18 +1414,27 @@ TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *s { BINARY_LOOP_BASE - if (!is2) { + /* When the divisor is a constant, use libdivde for faster division */ + if (steps[1] == 0) { const npy_int64 in2 = *(npy_int64 *)ip2; - /* Creating a divisor of 0 is treated as an error by libdivide */ - struct libdivide_s64_t fast_d = in2 ? libdivide_s64_gen(in2) : (struct libdivide_s64_t){0}; - BINARY_LOOP_FIXED { - const npy_timedelta in1 = *(npy_timedelta *)ip1; - if (in1 == NPY_DATETIME_NAT || in2 == 0) { + /* If divisor is 0, we need not compute anything */ + if (in2 == 0) { + BINARY_LOOP_ZERO { + npy_set_floatstatus_divbyzero(); *((npy_timedelta *)op1) = NPY_DATETIME_NAT; } - else { - *((npy_timedelta *)op1) = libdivide_s64_do(in1, &fast_d);; + } + else { + struct libdivide_s64_t fast_d = libdivide_s64_gen(in2); + BINARY_LOOP_FIXED { + const npy_timedelta in1 = *(npy_timedelta *)ip1; + if (in1 == NPY_DATETIME_NAT) { + *((npy_timedelta *)op1) = NPY_DATETIME_NAT; + } + else { + *((npy_timedelta *)op1) = libdivide_s64_do(in1, &fast_d);; + } } } } @@ -1501,30 +1519,41 @@ TIMEDELTA_mm_m_remainder(char **args, npy_intp const *dimensions, npy_intp const NPY_NO_EXPORT void TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { - /* TODO: This code is similar to array floor divide*/ + /* NOTE: This code is similar to array floor divide*/ BINARY_LOOP_BASE - if (!is2) { + /* When the divisor is a constant, use libdivde for faster division */ + if (steps[1] == 0) { const npy_timedelta in2 = *(npy_timedelta *)ip2; - /* Creating a divisor of 0 is treated as an error by libdivide */ - struct libdivide_s64_t fast_d = in2 ? 
libdivide_s64_gen(in2) : (struct libdivide_s64_t){0}; - BINARY_LOOP_FIXED { - const npy_timedelta in1 = *(npy_timedelta *)ip1; - if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) { - npy_set_floatstatus_invalid(); + /* If divisor is 0 or NAT, we need not compute anything */ + if (in2 == 0) { + BINARY_LOOP_ZERO { + npy_set_floatstatus_divbyzero(); *((npy_int64 *)op1) = 0; } - else if (in2 == 0) { - npy_set_floatstatus_divbyzero(); + } + else if (in2 == NPY_DATETIME_NAT) { + BINARY_LOOP_ZERO { + npy_set_floatstatus_invalid(); *((npy_int64 *)op1) = 0; } - else { - *((npy_int64 *)op1) = libdivide_s64_do(in1, &fast_d); + } + else { + struct libdivide_s64_t fast_d = libdivide_s64_gen(in2); + BINARY_LOOP_FIXED { + const npy_timedelta in1 = *(npy_timedelta *)ip1; + if (in1 == NPY_DATETIME_NAT) { + npy_set_floatstatus_invalid(); + *((npy_int64 *)op1) = 0; + } + else { + *((npy_int64 *)op1) = libdivide_s64_do(in1, &fast_d); - /* Negative quotients needs to be rounded down */ - if (((in1 > 0) != (in2 > 0)) && (*((npy_int64 *)op1) * in2 != in1)) { - *((npy_int64 *)op1) = *((npy_int64 *)op1) - 1; + /* Negative quotients needs to be rounded down */ + if (((in1 > 0) != (in2 > 0)) && (*((npy_int64 *)op1) * in2 != in1)) { + *((npy_int64 *)op1) = *((npy_int64 *)op1) - 1; + } } } } From f93ca93e93a9a215d25751cee442665018e345e6 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sat, 21 Nov 2020 20:04:27 +0530 Subject: [PATCH 0084/1270] TST: Minor changes to floor divide | Added cases for timedelta divide --- numpy/core/tests/test_umath.py | 53 ++++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 12 deletions(-) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index bd7dd23d829a..846968c118bb 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -252,25 +252,54 @@ def test_division_int(self): @pytest.mark.parametrize("input_dtype", [np.int8, np.int16, np.int32, np.int64]) def test_division_int_boundary(self, input_dtype): - class ListWithDiv(list): - def __floordiv__(self, divisor): - return [i//divisor for i in self] - iinfo = np.iinfo(input_dtype) - # Create array with min, 25th percentile, 0, 75th percentile, max - arr = ListWithDiv([iinfo.min, iinfo.min//2, 0, iinfo.max//2, iinfo.max]) - dividends = [iinfo.min, iinfo.min//2, iinfo.max//2, iinfo.max] - a = np.array(arr, dtype = input_dtype) + # Create list with min, 25th percentile, 0, 75th percentile, max + lst = [iinfo.min, iinfo.min//2, 0, iinfo.max//2, iinfo.max] + divisors = [iinfo.min, iinfo.min//2, iinfo.max//2, iinfo.max] + a = np.array(lst, dtype=input_dtype) - for dividend in dividends: - div_a = a // dividend - div_arr = arr // dividend - assert_(all(div_a == div_arr)) + for divisor in divisors: + div_a = a // divisor + b = a.copy(); b //= divisor + div_lst = [i // divisor for i in lst] + assert_(all(div_a == div_lst)) + assert_(all(div_a == b)) with np.errstate(divide='raise'): with pytest.raises(FloatingPointError): a // 0 + with pytest.raises(FloatingPointError): + a //= 0 + + @pytest.mark.parametrize( + "dividend,divisor,quotient", + [(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12), + (np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12), + (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12), + (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12), + (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1), + (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0), + (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')), + (np.timedelta64(2,'Y'), -2, 
np.timedelta64(-1,'Y')), + (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')), + (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), + (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), + (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')), + (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')), + ]) + def test_division_int_timedelta(self, dividend, divisor, quotient): + # If either divisor is 0 or quotient is Nat, check for division by 0 + if divisor and (isinstance(quotient, int) or not np.isnat(quotient)): + assert_(dividend // divisor == quotient) + # Test for arrays as well + assert_(all( + np.array([dividend]*5) // divisor \ + == np.array([quotient]*5))) + else: + with np.errstate(divide='raise', invalid='raise'): + with pytest.raises(FloatingPointError): + dividend // divisor def test_division_complex(self): # check that implementation is correct From 285d810bcbaa883c23282f067d51f7329e8869b1 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sun, 22 Nov 2020 11:33:09 +0530 Subject: [PATCH 0085/1270] ENH: Remove looping definitions | Renamed fast loop macros --- numpy/core/src/umath/fast_loop_macros.h | 15 +++--------- numpy/core/src/umath/loops.c.src | 32 ++++++++++++------------- 2 files changed, 19 insertions(+), 28 deletions(-) diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h index 7ff4d1602956..5c22c6f1c2f8 100644 --- a/numpy/core/src/umath/fast_loop_macros.h +++ b/numpy/core/src/umath/fast_loop_macros.h @@ -46,7 +46,7 @@ abs_ptrdiff(char *a, char *b) npy_intp i;\ for(i = 0; i < n; i++, ip1 += is1, op1 += os1, op2 += os2) -#define BINARY_LOOP_BASE\ +#define BINARY_DEFS\ char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\ npy_intp n = dimensions[0];\ @@ -55,15 +55,9 @@ abs_ptrdiff(char *a, char *b) #define BINARY_LOOP_SLIDING\ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) -#define BINARY_LOOP_FIXED\ - for(i = 0; i < n; i++, ip1 += is1, op1 += os1) - -#define BINARY_LOOP_ZERO\ - for(i = 0; i < n; i++, op1 += os1) - /** (ip1, ip2) -> (op1) */ #define BINARY_LOOP\ - BINARY_LOOP_BASE\ + BINARY_DEFS\ BINARY_LOOP_SLIDING /** (ip1, ip2) -> (op1, op2) */ @@ -167,10 +161,7 @@ abs_ptrdiff(char *a, char *b) #define IVDEP_LOOP #endif #define BASE_BINARY_LOOP_INP(tin, tout, op) \ - char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ - npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\ - npy_intp n = dimensions[0];\ - npy_intp i;\ + BINARY_DEFS\ IVDEP_LOOP \ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) { \ const tin in1 = *(tin *)ip1; \ diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index bfd23924c701..29d9959b44f9 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -847,20 +847,20 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void /* Libdivide only supports 32 and 64 bit types * We try to pick the best possible one */ -/**begin repeat1 - * #kind = t, gen, do# - */ #if NPY_BITSOF_@TYPE@ <= 32 -#define libdivide_@type@_@kind@ libdivide_s32_@kind@ +#define libdivide_@type@_t libdivide_s32_t +#define libdivide_@type@_gen libdivide_s32_gen +#define libdivide_@type@_do libdivide_s32_do #else -#define libdivide_@type@_@kind@ libdivide_s64_@kind@ +#define libdivide_@type@_t libdivide_s64_t +#define libdivide_@type@_gen libdivide_s64_gen +#define libdivide_@type@_do libdivide_s64_do #endif -/**end repeat1**/ NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const 
*steps, void *NPY_UNUSED(func)) { - BINARY_LOOP_BASE + BINARY_DEFS /* When the divisor is a constant, use libdivde for faster division */ if (steps[1] == 0) { @@ -868,14 +868,14 @@ NPY_NO_EXPORT void /* If divisor is 0, we need not compute anything*/ if (in2 == 0) { - BINARY_LOOP_ZERO { + BINARY_LOOP_SLIDING { npy_set_floatstatus_divbyzero(); *((@type@ *)op1) = 0; } } else { struct libdivide_@type@_t fast_d = libdivide_@type@_gen(in2); - BINARY_LOOP_FIXED { + BINARY_LOOP_SLIDING { const @type@ in1 = *(@type@ *)ip1; /* * FIXME: On x86 at least, dividing the smallest representable integer @@ -1412,7 +1412,7 @@ TIMEDELTA_dm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const NPY_NO_EXPORT void TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { - BINARY_LOOP_BASE + BINARY_DEFS /* When the divisor is a constant, use libdivde for faster division */ if (steps[1] == 0) { @@ -1420,14 +1420,14 @@ TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *s /* If divisor is 0, we need not compute anything */ if (in2 == 0) { - BINARY_LOOP_ZERO { + BINARY_LOOP_SLIDING { npy_set_floatstatus_divbyzero(); *((npy_timedelta *)op1) = NPY_DATETIME_NAT; } } else { struct libdivide_s64_t fast_d = libdivide_s64_gen(in2); - BINARY_LOOP_FIXED { + BINARY_LOOP_SLIDING { const npy_timedelta in1 = *(npy_timedelta *)ip1; if (in1 == NPY_DATETIME_NAT) { *((npy_timedelta *)op1) = NPY_DATETIME_NAT; @@ -1520,7 +1520,7 @@ NPY_NO_EXPORT void TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { /* NOTE: This code is similar to array floor divide*/ - BINARY_LOOP_BASE + BINARY_DEFS /* When the divisor is a constant, use libdivde for faster division */ if (steps[1] == 0) { @@ -1528,20 +1528,20 @@ TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp co /* If divisor is 0 or NAT, we need not compute anything */ if (in2 == 0) { - BINARY_LOOP_ZERO { + BINARY_LOOP_SLIDING { npy_set_floatstatus_divbyzero(); *((npy_int64 *)op1) = 0; } } else if (in2 == NPY_DATETIME_NAT) { - BINARY_LOOP_ZERO { + BINARY_LOOP_SLIDING { npy_set_floatstatus_invalid(); *((npy_int64 *)op1) = 0; } } else { struct libdivide_s64_t fast_d = libdivide_s64_gen(in2); - BINARY_LOOP_FIXED { + BINARY_LOOP_SLIDING { const npy_timedelta in1 = *(npy_timedelta *)ip1; if (in1 == NPY_DATETIME_NAT) { npy_set_floatstatus_invalid(); From 98257957b67835b63badd0932c481482c650e0d0 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sun, 22 Nov 2020 11:36:24 +0530 Subject: [PATCH 0086/1270] ENH: Removed unsed macro check --- numpy/core/src/umath/loops.c.src | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 29d9959b44f9..5e4c0de0456b 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -21,9 +21,7 @@ #include /* for memchr */ /* Use Libdivide for faster division */ -#ifndef USE_LEGACY_DIVISION #include "numpy/libdivide/libdivide.h" -#endif /* * cutoff blocksize for pairwise summation From 1f104fd565cce988cddb6120564b157f3f9ef240 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 23 Nov 2020 12:34:44 +0530 Subject: [PATCH 0087/1270] BUG: Added better 0 checks --- numpy/core/src/umath/loops.c.src | 38 ++++++++++++++++++++++++++++---- numpy/core/tests/test_umath.py | 30 +++++++++++++++++-------- 2 files changed, 55 insertions(+), 13 deletions(-) diff --git 
a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 5e4c0de0456b..c9f3b27b87ca 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -862,12 +862,21 @@ NPY_NO_EXPORT void /* When the divisor is a constant, use libdivde for faster division */ if (steps[1] == 0) { + /* If divisor is 0, set warning*/ + if (*(@type@ *)ip2 == 0) { + npy_set_floatstatus_divbyzero(); + } + + /* In case of empty array, just return*/ + if (n == 0) { + return; + } + const @type@ in2 = *(@type@ *)ip2; /* If divisor is 0, we need not compute anything*/ if (in2 == 0) { BINARY_LOOP_SLIDING { - npy_set_floatstatus_divbyzero(); *((@type@ *)op1) = 0; } } @@ -1410,16 +1419,26 @@ TIMEDELTA_dm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const NPY_NO_EXPORT void TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { + /* NOTE: This code is similar to array floor divide*/ BINARY_DEFS /* When the divisor is a constant, use libdivde for faster division */ if (steps[1] == 0) { + /* If divisor is 0, set warning*/ + if (*(npy_int64 *)ip2 == 0) { + npy_set_floatstatus_divbyzero(); + } + + /* In case of empty array, just return*/ + if (n == 0) { + return; + } + const npy_int64 in2 = *(npy_int64 *)ip2; /* If divisor is 0, we need not compute anything */ if (in2 == 0) { BINARY_LOOP_SLIDING { - npy_set_floatstatus_divbyzero(); *((npy_timedelta *)op1) = NPY_DATETIME_NAT; } } @@ -1522,18 +1541,29 @@ TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp co /* When the divisor is a constant, use libdivde for faster division */ if (steps[1] == 0) { + /* If divisor is 0 or NAT, set warning*/ + if (*(npy_timedelta *)ip2 == 0) { + npy_set_floatstatus_divbyzero(); + } + else if(*(npy_timedelta *)ip2 == NPY_DATETIME_NAT) { + npy_set_floatstatus_invalid(); + } + + /* In case of empty array, just return*/ + if (n == 0) { + return; + } + const npy_timedelta in2 = *(npy_timedelta *)ip2; /* If divisor is 0 or NAT, we need not compute anything */ if (in2 == 0) { BINARY_LOOP_SLIDING { - npy_set_floatstatus_divbyzero(); *((npy_int64 *)op1) = 0; } } else if (in2 == NPY_DATETIME_NAT) { BINARY_LOOP_SLIDING { - npy_set_floatstatus_invalid(); *((npy_int64 *)op1) = 0; } } diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 846968c118bb..215913da51eb 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -263,14 +263,20 @@ def test_division_int_boundary(self, input_dtype): div_a = a // divisor b = a.copy(); b //= divisor div_lst = [i // divisor for i in lst] - assert_(all(div_a == div_lst)) - assert_(all(div_a == b)) + + msg = "Integer arrays floor division check (//)" + assert all(div_a == div_lst), msg + + msg = "Integer arrays floor division check (//=)" + assert all(div_a == b), msg with np.errstate(divide='raise'): with pytest.raises(FloatingPointError): a // 0 with pytest.raises(FloatingPointError): a //= 0 + with pytest.raises(FloatingPointError): + np.array([], dtype=input_dtype) // 0 @pytest.mark.parametrize( "dividend,divisor,quotient", @@ -279,7 +285,8 @@ def test_division_int_boundary(self, input_dtype): (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12), (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12), (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1), - (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0), + (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), None), + (np.array([], dtype='timedelta64[Y]'), np.timedelta64('Nat','M'), 
None), (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')), (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')), (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')), @@ -287,15 +294,20 @@ def test_division_int_boundary(self, input_dtype): (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')), (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')), + (np.array([], dtype='timedelta64[Y]'), 0, None), ]) def test_division_int_timedelta(self, dividend, divisor, quotient): - # If either divisor is 0 or quotient is Nat, check for division by 0 - if divisor and (isinstance(quotient, int) or not np.isnat(quotient)): - assert_(dividend // divisor == quotient) + # If either divisor is 0 or quotient is None or Nat, check for division by 0 + if divisor and (isinstance(quotient, int) or + not (quotient is None or np.isnat(quotient))): + msg = "Timedelta floor division check" + assert dividend // divisor == quotient, msg + # Test for arrays as well - assert_(all( - np.array([dividend]*5) // divisor \ - == np.array([quotient]*5))) + msg = "Timedelta arrays floor division check" + dividend_array = np.array([dividend]*5) + quotient_array = np.array([quotient]*5) + assert all(dividend_array // divisor == quotient_array), msg else: with np.errstate(divide='raise', invalid='raise'): with pytest.raises(FloatingPointError): From 2fde590521fd88bde9e9df1c960e38df438bd040 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 23 Nov 2020 14:42:35 +0530 Subject: [PATCH 0088/1270] BENCH: Added floor divide benchmarks (#17727) --- benchmarks/benchmarks/bench_ufunc.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index c388da5b5adc..ef3ebe75f9bb 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -134,15 +134,20 @@ def time_less_than_scalar2(self, dtype): (self.d < 1) -class CustomScalarInt(Benchmark): - params = [10**size for size in range(1, 8)] - param_names = ['size'] - - def setup(self, size): - self.x = np.arange(size) - - def time_floor_divide(self, size): - self.x//8 +class CustomScalarFloorDivideInt(Benchmark): + params = ([np.int8, np.int16, np.int32, np.int64], [8, -8, 43, -43]) + param_names = ['dtype', 'divisors'] + max_value = 10**7 + min_value = -10**7 + + def setup(self, dtype, divisor): + iinfo = np.iinfo(dtype) + self.x = np.arange( + max(iinfo.min, self.min_value), + min(iinfo.max, self.max_value)) + + def time_floor_divide_int(self, dtpye, divisor): + self.x // divisor class Scalar(Benchmark): From 8912ffd9da549bb5a4dbb34eb9de10fd1c19ce43 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 23 Nov 2020 14:45:56 +0530 Subject: [PATCH 0089/1270] DOC: Improved floor division (#17727) --- doc/release/upcoming_changes/17727.performance.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release/upcoming_changes/17727.performance.rst b/doc/release/upcoming_changes/17727.performance.rst index c3a08bc8e04a..7b447a3b203d 100755 --- a/doc/release/upcoming_changes/17727.performance.rst +++ b/doc/release/upcoming_changes/17727.performance.rst @@ -1,7 +1,7 @@ Improved performance in integer division of NumPy arrays -------------------------------------------------------- -Integer division of NumPy arrays now uses libdivide when -the divisor is a constant. 
With the usage of libdivde and +Integer division of NumPy arrays now uses `libdivide ` +when the divisor is a constant. With the usage of libdivde and other minor optimizations, there is a large speedup. The ``//`` operator and ``np.floor_divide`` makes use of the new changes. From a5e12353c78e6be9a04a0bd2b2c92daa74875a64 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 23 Nov 2020 15:16:20 +0530 Subject: [PATCH 0090/1270] BENCH: Improve floor divide benchmarks (#17727) --- benchmarks/benchmarks/bench_ufunc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index ef3ebe75f9bb..13b7382a1708 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -135,7 +135,7 @@ def time_less_than_scalar2(self, dtype): class CustomScalarFloorDivideInt(Benchmark): - params = ([np.int8, np.int16, np.int32, np.int64], [8, -8, 43, -43]) + params = ([np.int8, np.int16, np.int32, np.int64], [8, -8, 43, -43, 0]) param_names = ['dtype', 'divisors'] max_value = 10**7 min_value = -10**7 @@ -144,7 +144,7 @@ def setup(self, dtype, divisor): iinfo = np.iinfo(dtype) self.x = np.arange( max(iinfo.min, self.min_value), - min(iinfo.max, self.max_value)) + min(iinfo.max, self.max_value), dtype=dtype) def time_floor_divide_int(self, dtpye, divisor): self.x // divisor From ca4ba20fabcae7dd0944c0374ed0e452c684d4ac Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 23 Nov 2020 21:24:04 +0530 Subject: [PATCH 0091/1270] BUG,TST: Fixed division by 0 status setting --- numpy/core/src/umath/loops.c.src | 28 +++++++--------------------- numpy/core/tests/test_umath.py | 13 +++++-------- 2 files changed, 12 insertions(+), 29 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index c9f3b27b87ca..6637c0e4e4f9 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -860,13 +860,8 @@ NPY_NO_EXPORT void { BINARY_DEFS - /* When the divisor is a constant, use libdivde for faster division */ + /* When the divisor is a constant, use libdivide for faster division */ if (steps[1] == 0) { - /* If divisor is 0, set warning*/ - if (*(@type@ *)ip2 == 0) { - npy_set_floatstatus_divbyzero(); - } - /* In case of empty array, just return*/ if (n == 0) { return; @@ -876,6 +871,7 @@ NPY_NO_EXPORT void /* If divisor is 0, we need not compute anything*/ if (in2 == 0) { + npy_set_floatstatus_divbyzero(); BINARY_LOOP_SLIDING { *((@type@ *)op1) = 0; } @@ -1422,13 +1418,8 @@ TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *s /* NOTE: This code is similar to array floor divide*/ BINARY_DEFS - /* When the divisor is a constant, use libdivde for faster division */ + /* When the divisor is a constant, use libdivide for faster division */ if (steps[1] == 0) { - /* If divisor is 0, set warning*/ - if (*(npy_int64 *)ip2 == 0) { - npy_set_floatstatus_divbyzero(); - } - /* In case of empty array, just return*/ if (n == 0) { return; @@ -1438,6 +1429,7 @@ TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *s /* If divisor is 0, we need not compute anything */ if (in2 == 0) { + npy_set_floatstatus_divbyzero(); BINARY_LOOP_SLIDING { *((npy_timedelta *)op1) = NPY_DATETIME_NAT; } @@ -1539,16 +1531,8 @@ TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp co /* NOTE: This code is similar to array floor divide*/ BINARY_DEFS - /* When the divisor is a constant, use 
libdivde for faster division */ + /* When the divisor is a constant, use libdivide for faster division */ if (steps[1] == 0) { - /* If divisor is 0 or NAT, set warning*/ - if (*(npy_timedelta *)ip2 == 0) { - npy_set_floatstatus_divbyzero(); - } - else if(*(npy_timedelta *)ip2 == NPY_DATETIME_NAT) { - npy_set_floatstatus_invalid(); - } - /* In case of empty array, just return*/ if (n == 0) { return; @@ -1558,11 +1542,13 @@ TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp co /* If divisor is 0 or NAT, we need not compute anything */ if (in2 == 0) { + npy_set_floatstatus_divbyzero(); BINARY_LOOP_SLIDING { *((npy_int64 *)op1) = 0; } } else if (in2 == NPY_DATETIME_NAT) { + npy_set_floatstatus_invalid(); BINARY_LOOP_SLIDING { *((npy_int64 *)op1) = 0; } diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 215913da51eb..2655192737da 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -275,8 +275,8 @@ def test_division_int_boundary(self, input_dtype): a // 0 with pytest.raises(FloatingPointError): a //= 0 - with pytest.raises(FloatingPointError): - np.array([], dtype=input_dtype) // 0 + + np.array([], dtype=input_dtype) // 0 @pytest.mark.parametrize( "dividend,divisor,quotient", @@ -285,8 +285,7 @@ def test_division_int_boundary(self, input_dtype): (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12), (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12), (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1), - (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), None), - (np.array([], dtype='timedelta64[Y]'), np.timedelta64('Nat','M'), None), + (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0), (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')), (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')), (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')), @@ -294,12 +293,10 @@ def test_division_int_boundary(self, input_dtype): (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')), (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')), - (np.array([], dtype='timedelta64[Y]'), 0, None), ]) def test_division_int_timedelta(self, dividend, divisor, quotient): - # If either divisor is 0 or quotient is None or Nat, check for division by 0 - if divisor and (isinstance(quotient, int) or - not (quotient is None or np.isnat(quotient))): + # If either divisor is 0 or quotient is Nat, check for division by 0 + if divisor and (isinstance(quotient, int) or not np.isnat(quotient)): msg = "Timedelta floor division check" assert dividend // divisor == quotient, msg From b8cb0a8fc9c8b40483f336fb7dc9ecc490c982b1 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 24 Nov 2020 09:11:16 +0000 Subject: [PATCH 0092/1270] MAINT: Add git rules to ignore all SIMD generated files --- .gitignore | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index d24605871e4b..f85c577647c4 100644 --- a/.gitignore +++ b/.gitignore @@ -131,20 +131,11 @@ numpy/core/include/numpy/config.h numpy/core/include/numpy/multiarray_api.txt numpy/core/include/numpy/ufunc_api.txt numpy/core/lib/ -numpy/core/src/_simd/_simd.dispatch.avx512_skx.c -numpy/core/src/_simd/_simd.dispatch.avx512f.c -numpy/core/src/_simd/_simd.dispatch.c -numpy/core/src/_simd/_simd.dispatch.fma3.avx2.c -numpy/core/src/_simd/_simd.dispatch.h -numpy/core/src/_simd/_simd.dispatch.sse42.c -numpy/core/src/_simd/_simd_data.inc -numpy/core/src/_simd/_simd_inc.h 
numpy/core/src/common/npy_binsearch.h numpy/core/src/common/npy_cpu_features.c numpy/core/src/common/npy_partition.h numpy/core/src/common/npy_sort.h numpy/core/src/common/templ_common.h -numpy/core/src/common/_cpu_dispatch.h numpy/core/src/multiarray/_multiarray_tests.c numpy/core/src/multiarray/arraytypes.c numpy/core/src/multiarray/einsum.c @@ -181,9 +172,6 @@ numpy/core/src/umath/simd.inc numpy/core/src/umath/struct_ufunc_test.c numpy/core/src/umath/test_rational.c numpy/core/src/umath/umath_tests.c -numpy/core/src/umath/_umath_tests.dispatch.avx2.c -numpy/core/src/umath/_umath_tests.dispatch.h -numpy/core/src/umath/_umath_tests.dispatch.sse41.c numpy/distutils/__config__.py numpy/linalg/umath_linalg.c doc/source/**/generated/ @@ -214,3 +202,19 @@ tools/swig/test/Tensor.py tools/swig/test/Vector.py tools/swig/test/Vector_wrap.cxx tools/swig/test/Array.py + +# SIMD generated files # +################################### +# the main config header, contains all the definitions and +# headers of instruction-sets +numpy/core/src/common/_cpu_dispatch.h +# config headers of dispatchable sources +*.dispatch.h +# wrapped sources of dispatched targets, e.g. *.dispatch.avx2.c +*.dispatch.*.c +# _simd module +numpy/core/src/_simd/_simd.dispatch.c +numpy/core/src/_simd/_simd_data.inc +numpy/core/src/_simd/_simd_inc.h +# umath module +numpy/core/src/umath/loops_unary_fp.dispatch.c From 9088ba0b204f2bba37b11b348a539ab84cd17ab1 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 23 Sep 2020 14:46:39 +0200 Subject: [PATCH 0093/1270] ENH: Added annotations for `ndarray`/`generic` comparison ops --- numpy/__init__.pyi | 25 +++++++++++++++++++++---- numpy/typing/_callable.py | 9 +++++++++ 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2366e9b75a13..ad37979ed64f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -47,6 +47,7 @@ from numpy.typing._callable import ( _FloatDivMod, _ComplexOp, _NumberOp, + _ComparisonOp, ) from typing import ( @@ -1013,12 +1014,8 @@ class _ArrayOrScalarCommon: def __repr__(self) -> str: ... def __copy__(self: _ArraySelf) -> _ArraySelf: ... def __deepcopy__(self: _ArraySelf, __memo: Optional[dict] = ...) -> _ArraySelf: ... - def __lt__(self, other): ... - def __le__(self, other): ... def __eq__(self, other): ... def __ne__(self, other): ... - def __gt__(self, other): ... - def __ge__(self, other): ... def astype( self: _ArraySelf, dtype: DTypeLike, @@ -1579,6 +1576,10 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container): def __iter__(self) -> Any: ... def __contains__(self, key) -> bool: ... def __index__(self) -> int: ... + def __lt__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... + def __le__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... + def __gt__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... + def __ge__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... def __matmul__(self, other: ArrayLike) -> Any: ... # NOTE: `ndarray` does not implement `__imatmul__` def __rmatmul__(self, other: ArrayLike) -> Any: ... @@ -1689,6 +1690,10 @@ class number(generic, Generic[_NBit_co]): # type: ignore __rpow__: _NumberOp __truediv__: _NumberOp __rtruediv__: _NumberOp + __lt__: _ComparisonOp[_NumberLike] + __le__: _ComparisonOp[_NumberLike] + __gt__: _ComparisonOp[_NumberLike] + __ge__: _ComparisonOp[_NumberLike] class bool_(generic): def __init__(self, __value: object = ...) -> None: ... 
@@ -1727,6 +1732,10 @@ class bool_(generic): __rmod__: _BoolMod __divmod__: _BoolDivMod __rdivmod__: _BoolDivMod + __lt__: _ComparisonOp[_NumberLike] + __le__: _ComparisonOp[_NumberLike] + __gt__: _ComparisonOp[_NumberLike] + __ge__: _ComparisonOp[_NumberLike] class object_(generic): def __init__(self, __value: object = ...) -> None: ... @@ -1755,6 +1764,10 @@ class datetime64(generic): @overload def __sub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ... def __rsub__(self, other: datetime64) -> timedelta64: ... + __lt__: _ComparisonOp[datetime64] + __le__: _ComparisonOp[datetime64] + __gt__: _ComparisonOp[datetime64] + __ge__: _ComparisonOp[datetime64] # Support for `__index__` was added in python 3.8 (bpo-20092) if sys.version_info >= (3, 8): @@ -1845,6 +1858,10 @@ class timedelta64(generic): def __rmod__(self, other: timedelta64) -> timedelta64: ... def __divmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... def __rdivmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... + __lt__: _ComparisonOp[Union[timedelta64, _IntLike, _BoolLike]] + __le__: _ComparisonOp[Union[timedelta64, _IntLike, _BoolLike]] + __gt__: _ComparisonOp[Union[timedelta64, _IntLike, _BoolLike]] + __ge__: _ComparisonOp[Union[timedelta64, _IntLike, _BoolLike]] class unsignedinteger(integer[_NBit_co]): # NOTE: `uint64 + signedinteger -> float64` diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 91b7a4ec21c0..c703df28a5b0 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -20,6 +20,7 @@ ) from numpy import ( + ndarray, generic, bool_, timedelta64, @@ -41,6 +42,7 @@ _NumberLike, ) from . import NBitBase +from ._array_like import ArrayLike if sys.version_info >= (3, 8): from typing import Protocol @@ -312,6 +314,12 @@ def __call__( class _NumberOp(Protocol): def __call__(self, __other: _NumberLike) -> number: ... + class _ComparisonOp(Protocol[_T]): + @overload + def __call__(self, __other: _T) -> bool_: ... + @overload + def __call__(self, __other: ArrayLike) -> Union[ndarray, bool_]: ... + else: _BoolOp = Any _BoolBitOp = Any @@ -334,3 +342,4 @@ def __call__(self, __other: _NumberLike) -> number: ... 
_FloatDivMod = Any _ComplexOp = Any _NumberOp = Any + _ComparisonOp = Any From e78b9f3f06677d20463a1fed9d8da37924633749 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 15 Nov 2020 20:06:58 +0100 Subject: [PATCH 0094/1270] TST: Added typing tests for comparison ops --- numpy/typing/tests/data/pass/comparisons.py | 247 ++++++++++++++++++ numpy/typing/tests/data/reveal/comparisons.py | 247 ++++++++++++++++++ 2 files changed, 494 insertions(+) create mode 100644 numpy/typing/tests/data/pass/comparisons.py create mode 100644 numpy/typing/tests/data/reveal/comparisons.py diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py new file mode 100644 index 000000000000..b298117a62af --- /dev/null +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -0,0 +1,247 @@ +import numpy as np + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool_() + +b = bool() +c = complex() +f = float() +i = int() + +AR = np.array([0], dtype=np.int64) +AR.setflags(write=False) + +SEQ = (0, 1, 2, 3, 4) + +# Time structures + +dt > dt + +td > td +td > i +td > i4 +td > i8 +td > AR +td > SEQ + +# boolean + +b_ > b +b_ > b_ +b_ > i +b_ > i8 +b_ > i4 +b_ > u8 +b_ > u4 +b_ > f +b_ > f8 +b_ > f4 +b_ > c +b_ > c16 +b_ > c8 +b_ > AR +b_ > SEQ + +# Complex + +c16 > c16 +c16 > f8 +c16 > i8 +c16 > c8 +c16 > f4 +c16 > i4 +c16 > b_ +c16 > b +c16 > c +c16 > f +c16 > i +c16 > AR +c16 > SEQ + +c16 > c16 +f8 > c16 +i8 > c16 +c8 > c16 +f4 > c16 +i4 > c16 +b_ > c16 +b > c16 +c > c16 +f > c16 +i > c16 +AR > c16 +SEQ > c16 + +c8 > c16 +c8 > f8 +c8 > i8 +c8 > c8 +c8 > f4 +c8 > i4 +c8 > b_ +c8 > b +c8 > c +c8 > f +c8 > i +c8 > AR +c8 > SEQ + +c16 > c8 +f8 > c8 +i8 > c8 +c8 > c8 +f4 > c8 +i4 > c8 +b_ > c8 +b > c8 +c > c8 +f > c8 +i > c8 +AR > c8 +SEQ > c8 + +# Float + +f8 > f8 +f8 > i8 +f8 > f4 +f8 > i4 +f8 > b_ +f8 > b +f8 > c +f8 > f +f8 > i +f8 > AR +f8 > SEQ + +f8 > f8 +i8 > f8 +f4 > f8 +i4 > f8 +b_ > f8 +b > f8 +c > f8 +f > f8 +i > f8 +AR > f8 +SEQ > f8 + +f4 > f8 +f4 > i8 +f4 > f4 +f4 > i4 +f4 > b_ +f4 > b +f4 > c +f4 > f +f4 > i +f4 > AR +f4 > SEQ + +f8 > f4 +i8 > f4 +f4 > f4 +i4 > f4 +b_ > f4 +b > f4 +c > f4 +f > f4 +i > f4 +AR > f4 +SEQ > f4 + +# Int + +i8 > i8 +i8 > u8 +i8 > i4 +i8 > u4 +i8 > b_ +i8 > b +i8 > c +i8 > f +i8 > i +i8 > AR +i8 > SEQ + +u8 > u8 +u8 > i4 +u8 > u4 +u8 > b_ +u8 > b +u8 > c +u8 > f +u8 > i +u8 > AR +u8 > SEQ + +i8 > i8 +u8 > i8 +i4 > i8 +u4 > i8 +b_ > i8 +b > i8 +c > i8 +f > i8 +i > i8 +AR > i8 +SEQ > i8 + +u8 > u8 +i4 > u8 +u4 > u8 +b_ > u8 +b > u8 +c > u8 +f > u8 +i > u8 +AR > u8 +SEQ > u8 + +i4 > i8 +i4 > i4 +i4 > i +i4 > b_ +i4 > b +i4 > AR +i4 > SEQ + +u4 > i8 +u4 > i4 +u4 > u8 +u4 > u4 +u4 > i +u4 > b_ +u4 > b +u4 > AR +u4 > SEQ + +i8 > i4 +i4 > i4 +i > i4 +b_ > i4 +b > i4 +AR > i4 +SEQ > i4 + +i8 > u4 +i4 > u4 +u8 > u4 +u4 > u4 +b_ > u4 +b > u4 +i > u4 +AR > u4 +SEQ > u4 diff --git a/numpy/typing/tests/data/reveal/comparisons.py b/numpy/typing/tests/data/reveal/comparisons.py new file mode 100644 index 000000000000..82d1fa6dee68 --- /dev/null +++ b/numpy/typing/tests/data/reveal/comparisons.py @@ -0,0 +1,247 @@ +import numpy as np + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool_() + +b = bool() +c = 
complex() +f = float() +i = int() + +AR = np.array([0], dtype=np.int64) +AR.setflags(write=False) + +SEQ = (0, 1, 2, 3, 4) + +# Time structures + +reveal_type(dt > dt) # E: numpy.bool_ + +reveal_type(td > td) # E: numpy.bool_ +reveal_type(td > i) # E: numpy.bool_ +reveal_type(td > i4) # E: numpy.bool_ +reveal_type(td > i8) # E: numpy.bool_ +reveal_type(td > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(td > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +# boolean + +reveal_type(b_ > b) # E: numpy.bool_ +reveal_type(b_ > b_) # E: numpy.bool_ +reveal_type(b_ > i) # E: numpy.bool_ +reveal_type(b_ > i8) # E: numpy.bool_ +reveal_type(b_ > i4) # E: numpy.bool_ +reveal_type(b_ > u8) # E: numpy.bool_ +reveal_type(b_ > u4) # E: numpy.bool_ +reveal_type(b_ > f) # E: numpy.bool_ +reveal_type(b_ > f8) # E: numpy.bool_ +reveal_type(b_ > f4) # E: numpy.bool_ +reveal_type(b_ > c) # E: numpy.bool_ +reveal_type(b_ > c16) # E: numpy.bool_ +reveal_type(b_ > c8) # E: numpy.bool_ +reveal_type(b_ > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(b_ > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +# Complex + +reveal_type(c16 > c16) # E: numpy.bool_ +reveal_type(c16 > f8) # E: numpy.bool_ +reveal_type(c16 > i8) # E: numpy.bool_ +reveal_type(c16 > c8) # E: numpy.bool_ +reveal_type(c16 > f4) # E: numpy.bool_ +reveal_type(c16 > i4) # E: numpy.bool_ +reveal_type(c16 > b_) # E: numpy.bool_ +reveal_type(c16 > b) # E: numpy.bool_ +reveal_type(c16 > c) # E: numpy.bool_ +reveal_type(c16 > f) # E: numpy.bool_ +reveal_type(c16 > i) # E: numpy.bool_ +reveal_type(c16 > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(c16 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(c16 > c16) # E: numpy.bool_ +reveal_type(f8 > c16) # E: numpy.bool_ +reveal_type(i8 > c16) # E: numpy.bool_ +reveal_type(c8 > c16) # E: numpy.bool_ +reveal_type(f4 > c16) # E: numpy.bool_ +reveal_type(i4 > c16) # E: numpy.bool_ +reveal_type(b_ > c16) # E: numpy.bool_ +reveal_type(b > c16) # E: numpy.bool_ +reveal_type(c > c16) # E: numpy.bool_ +reveal_type(f > c16) # E: numpy.bool_ +reveal_type(i > c16) # E: numpy.bool_ +reveal_type(AR > c16) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(SEQ > c16) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(c8 > c16) # E: numpy.bool_ +reveal_type(c8 > f8) # E: numpy.bool_ +reveal_type(c8 > i8) # E: numpy.bool_ +reveal_type(c8 > c8) # E: numpy.bool_ +reveal_type(c8 > f4) # E: numpy.bool_ +reveal_type(c8 > i4) # E: numpy.bool_ +reveal_type(c8 > b_) # E: numpy.bool_ +reveal_type(c8 > b) # E: numpy.bool_ +reveal_type(c8 > c) # E: numpy.bool_ +reveal_type(c8 > f) # E: numpy.bool_ +reveal_type(c8 > i) # E: numpy.bool_ +reveal_type(c8 > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(c8 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(c16 > c8) # E: numpy.bool_ +reveal_type(f8 > c8) # E: numpy.bool_ +reveal_type(i8 > c8) # E: numpy.bool_ +reveal_type(c8 > c8) # E: numpy.bool_ +reveal_type(f4 > c8) # E: numpy.bool_ +reveal_type(i4 > c8) # E: numpy.bool_ +reveal_type(b_ > c8) # E: numpy.bool_ +reveal_type(b > c8) # E: numpy.bool_ +reveal_type(c > c8) # E: numpy.bool_ +reveal_type(f > c8) # E: numpy.bool_ +reveal_type(i > c8) # E: numpy.bool_ +reveal_type(AR > c8) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(SEQ > c8) # E: Union[numpy.ndarray, numpy.bool_] + +# Float + +reveal_type(f8 > f8) # E: numpy.bool_ +reveal_type(f8 > i8) # E: numpy.bool_ +reveal_type(f8 > f4) # E: numpy.bool_ +reveal_type(f8 > i4) # E: numpy.bool_ +reveal_type(f8 > b_) # E: 
numpy.bool_ +reveal_type(f8 > b) # E: numpy.bool_ +reveal_type(f8 > c) # E: numpy.bool_ +reveal_type(f8 > f) # E: numpy.bool_ +reveal_type(f8 > i) # E: numpy.bool_ +reveal_type(f8 > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(f8 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(f8 > f8) # E: numpy.bool_ +reveal_type(i8 > f8) # E: numpy.bool_ +reveal_type(f4 > f8) # E: numpy.bool_ +reveal_type(i4 > f8) # E: numpy.bool_ +reveal_type(b_ > f8) # E: numpy.bool_ +reveal_type(b > f8) # E: numpy.bool_ +reveal_type(c > f8) # E: numpy.bool_ +reveal_type(f > f8) # E: numpy.bool_ +reveal_type(i > f8) # E: numpy.bool_ +reveal_type(AR > f8) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(SEQ > f8) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(f4 > f8) # E: numpy.bool_ +reveal_type(f4 > i8) # E: numpy.bool_ +reveal_type(f4 > f4) # E: numpy.bool_ +reveal_type(f4 > i4) # E: numpy.bool_ +reveal_type(f4 > b_) # E: numpy.bool_ +reveal_type(f4 > b) # E: numpy.bool_ +reveal_type(f4 > c) # E: numpy.bool_ +reveal_type(f4 > f) # E: numpy.bool_ +reveal_type(f4 > i) # E: numpy.bool_ +reveal_type(f4 > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(f4 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(f8 > f4) # E: numpy.bool_ +reveal_type(i8 > f4) # E: numpy.bool_ +reveal_type(f4 > f4) # E: numpy.bool_ +reveal_type(i4 > f4) # E: numpy.bool_ +reveal_type(b_ > f4) # E: numpy.bool_ +reveal_type(b > f4) # E: numpy.bool_ +reveal_type(c > f4) # E: numpy.bool_ +reveal_type(f > f4) # E: numpy.bool_ +reveal_type(i > f4) # E: numpy.bool_ +reveal_type(AR > f4) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(SEQ > f4) # E: Union[numpy.ndarray, numpy.bool_] + +# Int + +reveal_type(i8 > i8) # E: numpy.bool_ +reveal_type(i8 > u8) # E: numpy.bool_ +reveal_type(i8 > i4) # E: numpy.bool_ +reveal_type(i8 > u4) # E: numpy.bool_ +reveal_type(i8 > b_) # E: numpy.bool_ +reveal_type(i8 > b) # E: numpy.bool_ +reveal_type(i8 > c) # E: numpy.bool_ +reveal_type(i8 > f) # E: numpy.bool_ +reveal_type(i8 > i) # E: numpy.bool_ +reveal_type(i8 > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(i8 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(u8 > u8) # E: numpy.bool_ +reveal_type(u8 > i4) # E: numpy.bool_ +reveal_type(u8 > u4) # E: numpy.bool_ +reveal_type(u8 > b_) # E: numpy.bool_ +reveal_type(u8 > b) # E: numpy.bool_ +reveal_type(u8 > c) # E: numpy.bool_ +reveal_type(u8 > f) # E: numpy.bool_ +reveal_type(u8 > i) # E: numpy.bool_ +reveal_type(u8 > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(u8 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(i8 > i8) # E: numpy.bool_ +reveal_type(u8 > i8) # E: numpy.bool_ +reveal_type(i4 > i8) # E: numpy.bool_ +reveal_type(u4 > i8) # E: numpy.bool_ +reveal_type(b_ > i8) # E: numpy.bool_ +reveal_type(b > i8) # E: numpy.bool_ +reveal_type(c > i8) # E: numpy.bool_ +reveal_type(f > i8) # E: numpy.bool_ +reveal_type(i > i8) # E: numpy.bool_ +reveal_type(AR > i8) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(SEQ > i8) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(u8 > u8) # E: numpy.bool_ +reveal_type(i4 > u8) # E: numpy.bool_ +reveal_type(u4 > u8) # E: numpy.bool_ +reveal_type(b_ > u8) # E: numpy.bool_ +reveal_type(b > u8) # E: numpy.bool_ +reveal_type(c > u8) # E: numpy.bool_ +reveal_type(f > u8) # E: numpy.bool_ +reveal_type(i > u8) # E: numpy.bool_ +reveal_type(AR > u8) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(SEQ > u8) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(i4 > i8) # E: 
numpy.bool_ +reveal_type(i4 > i4) # E: numpy.bool_ +reveal_type(i4 > i) # E: numpy.bool_ +reveal_type(i4 > b_) # E: numpy.bool_ +reveal_type(i4 > b) # E: numpy.bool_ +reveal_type(i4 > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(i4 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(u4 > i8) # E: numpy.bool_ +reveal_type(u4 > i4) # E: numpy.bool_ +reveal_type(u4 > u8) # E: numpy.bool_ +reveal_type(u4 > u4) # E: numpy.bool_ +reveal_type(u4 > i) # E: numpy.bool_ +reveal_type(u4 > b_) # E: numpy.bool_ +reveal_type(u4 > b) # E: numpy.bool_ +reveal_type(u4 > AR) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(u4 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(i8 > i4) # E: numpy.bool_ +reveal_type(i4 > i4) # E: numpy.bool_ +reveal_type(i > i4) # E: numpy.bool_ +reveal_type(b_ > i4) # E: numpy.bool_ +reveal_type(b > i4) # E: numpy.bool_ +reveal_type(AR > i4) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(SEQ > i4) # E: Union[numpy.ndarray, numpy.bool_] + +reveal_type(i8 > u4) # E: numpy.bool_ +reveal_type(i4 > u4) # E: numpy.bool_ +reveal_type(u8 > u4) # E: numpy.bool_ +reveal_type(u4 > u4) # E: numpy.bool_ +reveal_type(b_ > u4) # E: numpy.bool_ +reveal_type(b > u4) # E: numpy.bool_ +reveal_type(i > u4) # E: numpy.bool_ +reveal_type(AR > u4) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(SEQ > u4) # E: Union[numpy.ndarray, numpy.bool_] From b17dd63a01585e001e6a75daf89ea0bbbdd8fce4 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 20 Nov 2020 16:25:52 -0600 Subject: [PATCH 0095/1270] BUG: Ignore fewer errors during array-coercion This changes it so that we only ignore attribute errors on looking up `__array__` and propagate errors when checking for sequences `len(obj)` if those errors are either RecursionError or MemoryError (we consider them unrecoverable). Also adds test for bad recursive array-like with sequence as reported in gh-17785. The test might be flaky/more complicated in which case it should probably just be deleted. --- numpy/core/src/multiarray/array_coercion.c | 18 +++++++++++-- numpy/core/src/multiarray/ctors.c | 4 +-- numpy/core/tests/test_array_coercion.py | 30 ++++++++++++++++++++++ 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index 53d891049e23..603e9d93be6f 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -979,14 +979,28 @@ PyArray_DiscoverDTypeAndShape_Recursive( * and to handle it recursively. That is, unless we have hit the * dimension limit. */ - npy_bool is_sequence = (PySequence_Check(obj) && PySequence_Size(obj) >= 0); + npy_bool is_sequence = PySequence_Check(obj); + if (is_sequence) { + is_sequence = PySequence_Size(obj) >= 0; + if (NPY_UNLIKELY(!is_sequence)) { + /* NOTE: This should likely just raise all errors */ + if (PyErr_ExceptionMatches(PyExc_RecursionError) || + PyErr_ExceptionMatches(PyExc_MemoryError)) { + /* + * Consider these unrecoverable errors, continuing execution + * might crash the interpreter. 
+ */ + return -1; + } + PyErr_Clear(); + } + } if (NPY_UNLIKELY(*flags & DISCOVER_TUPLES_AS_ELEMENTS) && PyTuple_Check(obj)) { is_sequence = NPY_FALSE; } if (curr_dims == max_dims || !is_sequence) { /* Clear any PySequence_Size error which would corrupts further calls */ - PyErr_Clear(); max_dims = handle_scalar( obj, curr_dims, &max_dims, out_descr, out_shape, fixed_DType, flags, NULL); diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index ff262369bff6..4ed3deab859c 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -2122,7 +2122,7 @@ PyArray_FromInterface(PyObject *origin) if (iface == NULL) { if (PyErr_Occurred()) { - PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ + return NULL; } return Py_NotImplemented; } @@ -2390,7 +2390,7 @@ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) array_meth = PyArray_LookupSpecial_OnInstance(op, "__array__"); if (array_meth == NULL) { if (PyErr_Occurred()) { - PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ + return NULL; } return Py_NotImplemented; } diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py index 78def936022d..b966ee7b096e 100644 --- a/numpy/core/tests/test_array_coercion.py +++ b/numpy/core/tests/test_array_coercion.py @@ -689,3 +689,33 @@ def test_too_large_array_error_paths(self): np.array(arr) with pytest.raises(MemoryError): np.array([arr]) + + @pytest.mark.parametrize("attribute", + ["__array_interface__", "__array__", "__array_struct__"]) + def test_bad_array_like_attributes(self, attribute): + # Check that errors during attribute retrieval are raised unless + # they are Attribute errors. + + class BadInterface: + def __getattr__(self, attr): + if attr == attribute: + raise RuntimeError + super().__getattr__(attr) + + with pytest.raises(RuntimeError): + np.array(BadInterface()) + + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_bad_length(self, error): + # RecursionError and MemoryError are considered "critical" in + # sequences. We could expand this more generally though. (NumPy 1.20) + class BadSequence: + def __len__(self): + raise error + def __getitem__(self): + # must have getitem to be a Sequence + return 1 + + with pytest.raises(error): + np.array(BadSequence()) + From aeb0dcfda0ac52f679f93be29f8ff7742d8be324 Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 25 Nov 2020 19:52:14 +0200 Subject: [PATCH 0096/1270] TST: use latest pypy37 not pypy36 --- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index d38ae0934b33..21d9227eb0af 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -184,7 +184,7 @@ jobs: fetch-depth: 0 - name: get_pypy run: | - wget -q https://downloads.python.org/pypy/pypy3.6-v7.3.2-linux64.tar.bz2 -O pypy.tar.bz2 + wget -q https://downloads.python.org/pypy/pypy3.7-v7.3.3-linux64.tar.bz2 -O pypy.tar.bz2 mkdir -p pypy3 (cd pypy3; tar --strip-components=1 -xf ../pypy.tar.bz2) pypy3/bin/pypy3 -mensurepip From 2eb0b12b75363f539870a09e155bf8e7a6ca8afa Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Thu, 26 Nov 2020 12:08:23 +0100 Subject: [PATCH 0097/1270] DOC: Clarify docs of np.resize(). 
- Iteration is done in C-order, not in memory storage order, and does not disregard strides during iteration (the implementation starts with calling `ravel()`). - This applies even when the new shape is larger, except that on top of that there's cycling in that case. The previous wording made it sound like "larger" was handled completely differently. --- numpy/core/fromnumeric.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index d65e26827751..efb052bc244d 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1375,7 +1375,7 @@ def resize(a, new_shape): reshaped_array : ndarray The new array is formed from the data in the old array, repeated if necessary to fill out the required number of elements. The - data are repeated in the order that they are stored in memory. + data are repeated iterating over the array in C-order. See Also -------- @@ -1392,11 +1392,11 @@ def resize(a, new_shape): Warning: This functionality does **not** consider axes separately, i.e. it does not apply interpolation/extrapolation. - It fills the return array with the required number of elements, taken - from `a` as they are laid out in memory, disregarding strides and axes. - (This is in case the new shape is smaller. For larger, see above.) - This functionality is therefore not suitable to resize images, - or data where each axis represents a separate and distinct entity. + It fills the return array with the required number of elements, iterating + over `a` in C-order, disregarding axes (and cycling back from the start if + the new shape is larger). This functionality is therefore not suitable to + resize images, or data where each axis represents a separate and distinct + entity. Examples -------- From d7a3e02bbe43c03a97804ad524145bc4e067a981 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 5 Nov 2020 18:50:52 +0100 Subject: [PATCH 0098/1270] ENH: Make `ndarray` generic w.r.t. its shape and dtype --- numpy/__init__.pyi | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2366e9b75a13..342f7a28e812 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -913,8 +913,6 @@ class dtype(Generic[_DTypeScalar]): @property def type(self) -> Type[generic]: ... -_DType = dtype # to avoid name conflicts with ndarray.dtype - class _flagsobj: aligned: bool updateifcopy: bool @@ -1441,10 +1439,16 @@ class _ArrayOrScalarCommon: keepdims: bool = ..., ) -> _NdArraySubClass: ... +_DType = TypeVar("_DType", bound=dtype[Any]) + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) + _BufferType = Union[ndarray, bytes, bytearray, memoryview] _Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"] -class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container): +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): @property def base(self) -> Optional[ndarray]: ... @property @@ -1469,8 +1473,6 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container): order: _OrderKACF = ..., ) -> _ArraySelf: ... @property - def dtype(self) -> _DType: ... - @property def ctypes(self) -> _ctypes: ... @property def shape(self) -> _Shape: ... @@ -1625,6 +1627,9 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container): def __iand__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... 
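
A small worked example of the np.resize() iteration-order point clarified in PATCH 0097
above, consistent with the description there (resize ravels the input in C-order and
cycles when the new shape is larger, rather than walking memory order):

    import numpy as np

    a = np.array([[1, 2], [3, 4]])
    # a.T is a view; its memory still holds 1, 2, 3, 4, but resize walks it in
    # C-order (row by row of the transposed view) and cycles to fill 5 slots.
    np.resize(a.T, (1, 5))   # -> array([[1, 3, 2, 4, 1]])
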
def __ixor__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... def __ior__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DType: ... # NOTE: while `np.generic` is not technically an instance of `ABCMeta`, # the `@abstractmethod` decorator is herein used to (forcefully) deny @@ -1644,8 +1649,6 @@ class generic(_ArrayOrScalarCommon): @property def base(self) -> None: ... @property - def dtype(self: _ScalarType) -> _DType[_ScalarType]: ... - @property def ndim(self) -> Literal[0]: ... @property def size(self) -> Literal[1]: ... @@ -1664,6 +1667,9 @@ class generic(_ArrayOrScalarCommon): self: _ScalarType, axis: Union[Literal[0], Tuple[()]] = ... ) -> _ScalarType: ... def transpose(self: _ScalarType, __axes: Tuple[()] = ...) -> _ScalarType: ... + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self: _ScalarType) -> dtype[_ScalarType]: ... class number(generic, Generic[_NBit_co]): # type: ignore @property From 11993ce8e465e3e0588bdbb1f66353f5a8577a77 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 5 Nov 2020 19:09:03 +0100 Subject: [PATCH 0099/1270] TST: Replace `numpy.ndarray` with `numpy.ndarray[Any, Any]` --- .../tests/data/reveal/array_constructors.py | 112 ++++---- numpy/typing/tests/data/reveal/fromnumeric.py | 252 +++++++++--------- .../typing/tests/data/reveal/ndarray_misc.py | 98 +++---- numpy/typing/tests/data/reveal/numeric.py | 62 ++--- 4 files changed, 262 insertions(+), 262 deletions(-) diff --git a/numpy/typing/tests/data/reveal/array_constructors.py b/numpy/typing/tests/data/reveal/array_constructors.py index 106174736085..04d5cd229e4b 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.py +++ b/numpy/typing/tests/data/reveal/array_constructors.py @@ -11,92 +11,92 @@ class SubClass(np.ndarray): ... def func(i: int, j: int, **kwargs: Any) -> SubClass: ... 
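
The expectation rewrites below follow from PATCH 0098: with `ndarray` now
`Generic[_ShapeType, _DType]`, mypy renders the class with both parameters, so bare
`numpy.ndarray` in the expected output becomes `numpy.ndarray[Any, Any]`. A brief sketch
of the parameterized annotation form these stubs describe (string-quoted, since runtime
subscription of `ndarray` is not assumed to work at this stage):

    from typing import Any
    import numpy as np

    x: "np.ndarray[Any, np.dtype[np.float64]]"
    reveal_type(x)  # expected: numpy.ndarray[Any, numpy.dtype[numpy.float64]]
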
-reveal_type(np.asarray(A)) # E: ndarray -reveal_type(np.asarray(B)) # E: ndarray -reveal_type(np.asarray(C)) # E: ndarray +reveal_type(np.asarray(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asarray(B)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asarray(C)) # E: numpy.ndarray[Any, Any] -reveal_type(np.asanyarray(A)) # E: ndarray +reveal_type(np.asanyarray(A)) # E: numpy.ndarray[Any, Any] reveal_type(np.asanyarray(B)) # E: SubClass -reveal_type(np.asanyarray(B, dtype=int)) # E: ndarray -reveal_type(np.asanyarray(C)) # E: ndarray +reveal_type(np.asanyarray(B, dtype=int)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asanyarray(C)) # E: numpy.ndarray[Any, Any] -reveal_type(np.ascontiguousarray(A)) # E: ndarray -reveal_type(np.ascontiguousarray(B)) # E: ndarray -reveal_type(np.ascontiguousarray(C)) # E: ndarray +reveal_type(np.ascontiguousarray(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ascontiguousarray(B)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ascontiguousarray(C)) # E: numpy.ndarray[Any, Any] -reveal_type(np.asfortranarray(A)) # E: ndarray -reveal_type(np.asfortranarray(B)) # E: ndarray -reveal_type(np.asfortranarray(C)) # E: ndarray +reveal_type(np.asfortranarray(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asfortranarray(B)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asfortranarray(C)) # E: numpy.ndarray[Any, Any] -reveal_type(np.require(A)) # E: ndarray +reveal_type(np.require(A)) # E: numpy.ndarray[Any, Any] reveal_type(np.require(B)) # E: SubClass reveal_type(np.require(B, requirements=None)) # E: SubClass -reveal_type(np.require(B, dtype=int)) # E: ndarray -reveal_type(np.require(B, requirements="E")) # E: ndarray -reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: ndarray -reveal_type(np.require(B, requirements={"F", "E"})) # E: ndarray +reveal_type(np.require(B, dtype=int)) # E: numpy.ndarray[Any, Any] +reveal_type(np.require(B, requirements="E")) # E: numpy.ndarray[Any, Any] +reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: numpy.ndarray[Any, Any] +reveal_type(np.require(B, requirements={"F", "E"})) # E: numpy.ndarray[Any, Any] reveal_type(np.require(B, requirements=["C", "OWNDATA"])) # E: SubClass reveal_type(np.require(B, requirements="W")) # E: SubClass reveal_type(np.require(B, requirements="A")) # E: SubClass -reveal_type(np.require(C)) # E: ndarray +reveal_type(np.require(C)) # E: numpy.ndarray[Any, Any] -reveal_type(np.linspace(0, 10)) # E: numpy.ndarray -reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray, numpy.inexact[Any]] -reveal_type(np.logspace(0, 10)) # E: numpy.ndarray -reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray +reveal_type(np.linspace(0, 10)) # E: numpy.ndarray[Any, Any] +reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray[Any, Any], numpy.inexact[Any]] +reveal_type(np.logspace(0, 10)) # E: numpy.ndarray[Any, Any] +reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray[Any, Any] -reveal_type(np.zeros_like(A)) # E: numpy.ndarray -reveal_type(np.zeros_like(C)) # E: numpy.ndarray +reveal_type(np.zeros_like(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.zeros_like(C)) # E: numpy.ndarray[Any, Any] reveal_type(np.zeros_like(B)) # E: SubClass -reveal_type(np.zeros_like(B, dtype=np.int64)) # E: numpy.ndarray +reveal_type(np.zeros_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, Any] -reveal_type(np.ones_like(A)) # E: numpy.ndarray -reveal_type(np.ones_like(C)) # E: numpy.ndarray +reveal_type(np.ones_like(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ones_like(C)) # E: 
numpy.ndarray[Any, Any] reveal_type(np.ones_like(B)) # E: SubClass -reveal_type(np.ones_like(B, dtype=np.int64)) # E: numpy.ndarray +reveal_type(np.ones_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, Any] -reveal_type(np.empty_like(A)) # E: numpy.ndarray -reveal_type(np.empty_like(C)) # E: numpy.ndarray +reveal_type(np.empty_like(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.empty_like(C)) # E: numpy.ndarray[Any, Any] reveal_type(np.empty_like(B)) # E: SubClass -reveal_type(np.empty_like(B, dtype=np.int64)) # E: numpy.ndarray +reveal_type(np.empty_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, Any] -reveal_type(np.full_like(A, i8)) # E: numpy.ndarray -reveal_type(np.full_like(C, i8)) # E: numpy.ndarray +reveal_type(np.full_like(A, i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.full_like(C, i8)) # E: numpy.ndarray[Any, Any] reveal_type(np.full_like(B, i8)) # E: SubClass -reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: numpy.ndarray +reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: numpy.ndarray[Any, Any] -reveal_type(np.ones(1)) # E: numpy.ndarray -reveal_type(np.ones([1, 1, 1])) # E: numpy.ndarray +reveal_type(np.ones(1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ones([1, 1, 1])) # E: numpy.ndarray[Any, Any] -reveal_type(np.full(1, i8)) # E: numpy.ndarray -reveal_type(np.full([1, 1, 1], i8)) # E: numpy.ndarray +reveal_type(np.full(1, i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.full([1, 1, 1], i8)) # E: numpy.ndarray[Any, Any] -reveal_type(np.indices([1, 2, 3])) # E: numpy.ndarray -reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[numpy.ndarray] +reveal_type(np.indices([1, 2, 3])) # E: numpy.ndarray[Any, Any] +reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[numpy.ndarray[Any, Any]] reveal_type(np.fromfunction(func, (3, 5))) # E: SubClass -reveal_type(np.identity(10)) # E: numpy.ndarray +reveal_type(np.identity(10)) # E: numpy.ndarray[Any, Any] -reveal_type(np.atleast_1d(A)) # E: numpy.ndarray -reveal_type(np.atleast_1d(C)) # E: numpy.ndarray -reveal_type(np.atleast_1d(A, A)) # E: list[numpy.ndarray] -reveal_type(np.atleast_1d(A, C)) # E: list[numpy.ndarray] -reveal_type(np.atleast_1d(C, C)) # E: list[numpy.ndarray] +reveal_type(np.atleast_1d(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.atleast_1d(C)) # E: numpy.ndarray[Any, Any] +reveal_type(np.atleast_1d(A, A)) # E: list[numpy.ndarray[Any, Any]] +reveal_type(np.atleast_1d(A, C)) # E: list[numpy.ndarray[Any, Any]] +reveal_type(np.atleast_1d(C, C)) # E: list[numpy.ndarray[Any, Any]] -reveal_type(np.atleast_2d(A)) # E: numpy.ndarray +reveal_type(np.atleast_2d(A)) # E: numpy.ndarray[Any, Any] -reveal_type(np.atleast_3d(A)) # E: numpy.ndarray +reveal_type(np.atleast_3d(A)) # E: numpy.ndarray[Any, Any] -reveal_type(np.vstack([A, A])) # E: numpy.ndarray -reveal_type(np.vstack([A, C])) # E: numpy.ndarray -reveal_type(np.vstack([C, C])) # E: numpy.ndarray +reveal_type(np.vstack([A, A])) # E: numpy.ndarray[Any, Any] +reveal_type(np.vstack([A, C])) # E: numpy.ndarray[Any, Any] +reveal_type(np.vstack([C, C])) # E: numpy.ndarray[Any, Any] -reveal_type(np.hstack([A, A])) # E: numpy.ndarray +reveal_type(np.hstack([A, A])) # E: numpy.ndarray[Any, Any] -reveal_type(np.stack([A, A])) # E: numpy.ndarray -reveal_type(np.stack([A, A], axis=0)) # E: numpy.ndarray +reveal_type(np.stack([A, A])) # E: numpy.ndarray[Any, Any] +reveal_type(np.stack([A, A], axis=0)) # E: numpy.ndarray[Any, Any] reveal_type(np.stack([A, A], out=B)) # E: SubClass -reveal_type(np.block([[A, A], [A, A]])) # E: numpy.ndarray 
-reveal_type(np.block(C)) # E: numpy.ndarray +reveal_type(np.block([[A, A], [A, A]])) # E: numpy.ndarray[Any, Any] +reveal_type(np.block(C)) # E: numpy.ndarray[Any, Any] diff --git a/numpy/typing/tests/data/reveal/fromnumeric.py b/numpy/typing/tests/data/reveal/fromnumeric.py index 75865c285f36..2972fa1afaab 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.py +++ b/numpy/typing/tests/data/reveal/fromnumeric.py @@ -24,104 +24,104 @@ np.take(B, 0) # E: Union[numpy.generic, datetime.datetime, datetime.timedelta] ) reveal_type( - np.take( # E: Union[Union[numpy.generic, datetime.datetime, datetime.timedelta], numpy.ndarray] + np.take( # E: Union[Union[numpy.generic, datetime.datetime, datetime.timedelta], numpy.ndarray[Any, Any]] A, [0] ) ) reveal_type( - np.take( # E: Union[Union[numpy.generic, datetime.datetime, datetime.timedelta], numpy.ndarray] + np.take( # E: Union[Union[numpy.generic, datetime.datetime, datetime.timedelta], numpy.ndarray[Any, Any]] B, [0] ) ) -reveal_type(np.reshape(a, 1)) # E: numpy.ndarray -reveal_type(np.reshape(b, 1)) # E: numpy.ndarray -reveal_type(np.reshape(c, 1)) # E: numpy.ndarray -reveal_type(np.reshape(A, 1)) # E: numpy.ndarray -reveal_type(np.reshape(B, 1)) # E: numpy.ndarray +reveal_type(np.reshape(a, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.reshape(b, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.reshape(c, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.reshape(A, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.reshape(B, 1)) # E: numpy.ndarray[Any, Any] reveal_type(np.choose(a, [True, True])) # E: numpy.bool_ -reveal_type(np.choose(A, [True, True])) # E: numpy.ndarray +reveal_type(np.choose(A, [True, True])) # E: numpy.ndarray[Any, Any] -reveal_type(np.repeat(a, 1)) # E: numpy.ndarray -reveal_type(np.repeat(b, 1)) # E: numpy.ndarray -reveal_type(np.repeat(c, 1)) # E: numpy.ndarray -reveal_type(np.repeat(A, 1)) # E: numpy.ndarray -reveal_type(np.repeat(B, 1)) # E: numpy.ndarray +reveal_type(np.repeat(a, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.repeat(b, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.repeat(c, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.repeat(A, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.repeat(B, 1)) # E: numpy.ndarray[Any, Any] # TODO: Add tests for np.put() -reveal_type(np.swapaxes(A, 0, 0)) # E: numpy.ndarray -reveal_type(np.swapaxes(B, 0, 0)) # E: numpy.ndarray +reveal_type(np.swapaxes(A, 0, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.swapaxes(B, 0, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.transpose(a)) # E: numpy.ndarray -reveal_type(np.transpose(b)) # E: numpy.ndarray -reveal_type(np.transpose(c)) # E: numpy.ndarray -reveal_type(np.transpose(A)) # E: numpy.ndarray -reveal_type(np.transpose(B)) # E: numpy.ndarray +reveal_type(np.transpose(a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.transpose(b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.transpose(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.transpose(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.transpose(B)) # E: numpy.ndarray[Any, Any] -reveal_type(np.partition(a, 0, axis=None)) # E: numpy.ndarray -reveal_type(np.partition(b, 0, axis=None)) # E: numpy.ndarray -reveal_type(np.partition(c, 0, axis=None)) # E: numpy.ndarray -reveal_type(np.partition(A, 0)) # E: numpy.ndarray -reveal_type(np.partition(B, 0)) # E: numpy.ndarray +reveal_type(np.partition(a, 0, axis=None)) # E: numpy.ndarray[Any, Any] +reveal_type(np.partition(b, 0, axis=None)) # E: numpy.ndarray[Any, Any] +reveal_type(np.partition(c, 0, axis=None)) # 
E: numpy.ndarray[Any, Any] +reveal_type(np.partition(A, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.partition(B, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.argpartition(a, 0)) # E: numpy.integer[Any] reveal_type(np.argpartition(b, 0)) # E: numpy.integer[Any] -reveal_type(np.argpartition(c, 0)) # E: numpy.ndarray -reveal_type(np.argpartition(A, 0)) # E: numpy.ndarray -reveal_type(np.argpartition(B, 0)) # E: numpy.ndarray +reveal_type(np.argpartition(c, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.argpartition(A, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.argpartition(B, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.sort(A, 0)) # E: numpy.ndarray -reveal_type(np.sort(B, 0)) # E: numpy.ndarray +reveal_type(np.sort(A, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.sort(B, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.argsort(A, 0)) # E: numpy.ndarray -reveal_type(np.argsort(B, 0)) # E: numpy.ndarray +reveal_type(np.argsort(A, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.argsort(B, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.argmax(A)) # E: numpy.integer[Any] reveal_type(np.argmax(B)) # E: numpy.integer[Any] -reveal_type(np.argmax(A, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray] -reveal_type(np.argmax(B, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray] +reveal_type(np.argmax(A, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray[Any, Any]] +reveal_type(np.argmax(B, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray[Any, Any]] reveal_type(np.argmin(A)) # E: numpy.integer[Any] reveal_type(np.argmin(B)) # E: numpy.integer[Any] -reveal_type(np.argmin(A, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray] -reveal_type(np.argmin(B, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray] +reveal_type(np.argmin(A, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray[Any, Any]] +reveal_type(np.argmin(B, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray[Any, Any]] reveal_type(np.searchsorted(A[0], 0)) # E: numpy.integer[Any] reveal_type(np.searchsorted(B[0], 0)) # E: numpy.integer[Any] -reveal_type(np.searchsorted(A[0], [0])) # E: numpy.ndarray -reveal_type(np.searchsorted(B[0], [0])) # E: numpy.ndarray +reveal_type(np.searchsorted(A[0], [0])) # E: numpy.ndarray[Any, Any] +reveal_type(np.searchsorted(B[0], [0])) # E: numpy.ndarray[Any, Any] -reveal_type(np.resize(a, (5, 5))) # E: numpy.ndarray -reveal_type(np.resize(b, (5, 5))) # E: numpy.ndarray -reveal_type(np.resize(c, (5, 5))) # E: numpy.ndarray -reveal_type(np.resize(A, (5, 5))) # E: numpy.ndarray -reveal_type(np.resize(B, (5, 5))) # E: numpy.ndarray +reveal_type(np.resize(a, (5, 5))) # E: numpy.ndarray[Any, Any] +reveal_type(np.resize(b, (5, 5))) # E: numpy.ndarray[Any, Any] +reveal_type(np.resize(c, (5, 5))) # E: numpy.ndarray[Any, Any] +reveal_type(np.resize(A, (5, 5))) # E: numpy.ndarray[Any, Any] +reveal_type(np.resize(B, (5, 5))) # E: numpy.ndarray[Any, Any] reveal_type(np.squeeze(a)) # E: numpy.bool_ reveal_type(np.squeeze(b)) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(np.squeeze(c)) # E: numpy.ndarray -reveal_type(np.squeeze(A)) # E: numpy.ndarray -reveal_type(np.squeeze(B)) # E: numpy.ndarray +reveal_type(np.squeeze(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.squeeze(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.squeeze(B)) # E: numpy.ndarray[Any, Any] -reveal_type(np.diagonal(A)) # E: numpy.ndarray -reveal_type(np.diagonal(B)) # E: numpy.ndarray +reveal_type(np.diagonal(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.diagonal(B)) # E: numpy.ndarray[Any, Any] 
-reveal_type(np.trace(A)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.trace(B)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(np.trace(A)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.trace(B)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.ravel(a)) # E: numpy.ndarray -reveal_type(np.ravel(b)) # E: numpy.ndarray -reveal_type(np.ravel(c)) # E: numpy.ndarray -reveal_type(np.ravel(A)) # E: numpy.ndarray -reveal_type(np.ravel(B)) # E: numpy.ndarray +reveal_type(np.ravel(a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ravel(b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ravel(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ravel(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ravel(B)) # E: numpy.ndarray[Any, Any] -reveal_type(np.nonzero(a)) # E: tuple[numpy.ndarray] -reveal_type(np.nonzero(b)) # E: tuple[numpy.ndarray] -reveal_type(np.nonzero(c)) # E: tuple[numpy.ndarray] -reveal_type(np.nonzero(A)) # E: tuple[numpy.ndarray] -reveal_type(np.nonzero(B)) # E: tuple[numpy.ndarray] +reveal_type(np.nonzero(a)) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(np.nonzero(b)) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(np.nonzero(c)) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(np.nonzero(A)) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(np.nonzero(B)) # E: tuple[numpy.ndarray[Any, Any]] reveal_type(np.shape(a)) # E: tuple[builtins.int] reveal_type(np.shape(b)) # E: tuple[builtins.int] @@ -129,99 +129,99 @@ reveal_type(np.shape(A)) # E: tuple[builtins.int] reveal_type(np.shape(B)) # E: tuple[builtins.int] -reveal_type(np.compress([True], a)) # E: numpy.ndarray -reveal_type(np.compress([True], b)) # E: numpy.ndarray -reveal_type(np.compress([True], c)) # E: numpy.ndarray -reveal_type(np.compress([True], A)) # E: numpy.ndarray -reveal_type(np.compress([True], B)) # E: numpy.ndarray +reveal_type(np.compress([True], a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.compress([True], b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.compress([True], c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.compress([True], A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.compress([True], B)) # E: numpy.ndarray[Any, Any] reveal_type(np.clip(a, 0, 1.0)) # E: numpy.number[Any] reveal_type(np.clip(b, -1, 1)) # E: numpy.floating[numpy.typing._32Bit] reveal_type(np.clip(c, 0, 1)) # E: numpy.number[Any] -reveal_type(np.clip(A, 0, 1)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.clip(B, 0, 1)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(np.clip(A, 0, 1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.clip(B, 0, 1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.sum(a)) # E: numpy.number[Any] reveal_type(np.sum(b)) # E: numpy.floating[numpy.typing._32Bit] reveal_type(np.sum(c)) # E: numpy.number[Any] reveal_type(np.sum(A)) # E: numpy.number[Any] reveal_type(np.sum(B)) # E: numpy.number[Any] -reveal_type(np.sum(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.sum(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(np.sum(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.sum(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.all(a)) # E: numpy.bool_ reveal_type(np.all(b)) # E: numpy.bool_ reveal_type(np.all(c)) # E: numpy.bool_ reveal_type(np.all(A)) # E: numpy.bool_ reveal_type(np.all(B)) # E: numpy.bool_ -reveal_type(np.all(A, axis=0)) # E: 
Union[numpy.bool_, numpy.ndarray] -reveal_type(np.all(B, axis=0)) # E: Union[numpy.bool_, numpy.ndarray] -reveal_type(np.all(A, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray] -reveal_type(np.all(B, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray] +reveal_type(np.all(A, axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.all(B, axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.all(A, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.all(B, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] reveal_type(np.any(a)) # E: numpy.bool_ reveal_type(np.any(b)) # E: numpy.bool_ reveal_type(np.any(c)) # E: numpy.bool_ reveal_type(np.any(A)) # E: numpy.bool_ reveal_type(np.any(B)) # E: numpy.bool_ -reveal_type(np.any(A, axis=0)) # E: Union[numpy.bool_, numpy.ndarray] -reveal_type(np.any(B, axis=0)) # E: Union[numpy.bool_, numpy.ndarray] -reveal_type(np.any(A, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray] -reveal_type(np.any(B, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray] +reveal_type(np.any(A, axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.any(B, axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.any(A, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.any(B, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(np.cumsum(a)) # E: numpy.ndarray -reveal_type(np.cumsum(b)) # E: numpy.ndarray -reveal_type(np.cumsum(c)) # E: numpy.ndarray -reveal_type(np.cumsum(A)) # E: numpy.ndarray -reveal_type(np.cumsum(B)) # E: numpy.ndarray +reveal_type(np.cumsum(a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumsum(b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumsum(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumsum(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumsum(B)) # E: numpy.ndarray[Any, Any] reveal_type(np.ptp(a)) # E: numpy.number[Any] reveal_type(np.ptp(b)) # E: numpy.floating[numpy.typing._32Bit] reveal_type(np.ptp(c)) # E: numpy.number[Any] reveal_type(np.ptp(A)) # E: numpy.number[Any] reveal_type(np.ptp(B)) # E: numpy.number[Any] -reveal_type(np.ptp(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.ptp(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.ptp(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.ptp(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(np.ptp(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.ptp(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.ptp(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.ptp(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.amax(a)) # E: numpy.number[Any] reveal_type(np.amax(b)) # E: numpy.floating[numpy.typing._32Bit] reveal_type(np.amax(c)) # E: numpy.number[Any] reveal_type(np.amax(A)) # E: numpy.number[Any] reveal_type(np.amax(B)) # E: numpy.number[Any] -reveal_type(np.amax(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.amax(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.amax(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.amax(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(np.amax(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] 
+reveal_type(np.amax(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.amax(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.amax(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.amin(a)) # E: numpy.number[Any] reveal_type(np.amin(b)) # E: numpy.floating[numpy.typing._32Bit] reveal_type(np.amin(c)) # E: numpy.number[Any] reveal_type(np.amin(A)) # E: numpy.number[Any] reveal_type(np.amin(B)) # E: numpy.number[Any] -reveal_type(np.amin(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.amin(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.amin(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.amin(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(np.amin(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.amin(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.amin(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.amin(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.prod(a)) # E: numpy.number[Any] reveal_type(np.prod(b)) # E: numpy.floating[numpy.typing._32Bit] reveal_type(np.prod(c)) # E: numpy.number[Any] reveal_type(np.prod(A)) # E: numpy.number[Any] reveal_type(np.prod(B)) # E: numpy.number[Any] -reveal_type(np.prod(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.prod(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.prod(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.prod(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.prod(b, out=d)) # E: numpy.ndarray -reveal_type(np.prod(B, out=d)) # E: numpy.ndarray - -reveal_type(np.cumprod(a)) # E: numpy.ndarray -reveal_type(np.cumprod(b)) # E: numpy.ndarray -reveal_type(np.cumprod(c)) # E: numpy.ndarray -reveal_type(np.cumprod(A)) # E: numpy.ndarray -reveal_type(np.cumprod(B)) # E: numpy.ndarray +reveal_type(np.prod(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.prod(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.prod(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.prod(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.prod(b, out=d)) # E: numpy.ndarray[Any, Any] +reveal_type(np.prod(B, out=d)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.cumprod(a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumprod(b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumprod(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumprod(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumprod(B)) # E: numpy.ndarray[Any, Any] reveal_type(np.ndim(a)) # E: int reveal_type(np.ndim(b)) # E: int @@ -238,41 +238,41 @@ reveal_type(np.around(a)) # E: numpy.number[Any] reveal_type(np.around(b)) # E: numpy.floating[numpy.typing._32Bit] reveal_type(np.around(c)) # E: numpy.number[Any] -reveal_type(np.around(A)) # E: numpy.ndarray -reveal_type(np.around(B)) # E: numpy.ndarray +reveal_type(np.around(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.around(B)) # E: numpy.ndarray[Any, Any] reveal_type(np.mean(a)) # E: numpy.number[Any] reveal_type(np.mean(b)) # E: numpy.number[Any] reveal_type(np.mean(c)) # E: numpy.number[Any] reveal_type(np.mean(A)) # E: 
numpy.number[Any] reveal_type(np.mean(B)) # E: numpy.number[Any] -reveal_type(np.mean(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.mean(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.mean(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.mean(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.mean(b, out=d)) # E: numpy.ndarray -reveal_type(np.mean(B, out=d)) # E: numpy.ndarray +reveal_type(np.mean(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.mean(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.mean(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.mean(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.mean(b, out=d)) # E: numpy.ndarray[Any, Any] +reveal_type(np.mean(B, out=d)) # E: numpy.ndarray[Any, Any] reveal_type(np.std(a)) # E: numpy.number[Any] reveal_type(np.std(b)) # E: numpy.number[Any] reveal_type(np.std(c)) # E: numpy.number[Any] reveal_type(np.std(A)) # E: numpy.number[Any] reveal_type(np.std(B)) # E: numpy.number[Any] -reveal_type(np.std(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.std(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.std(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.std(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.std(b, out=d)) # E: numpy.ndarray -reveal_type(np.std(B, out=d)) # E: numpy.ndarray +reveal_type(np.std(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.std(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.std(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.std(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.std(b, out=d)) # E: numpy.ndarray[Any, Any] +reveal_type(np.std(B, out=d)) # E: numpy.ndarray[Any, Any] reveal_type(np.var(a)) # E: numpy.number[Any] reveal_type(np.var(b)) # E: numpy.number[Any] reveal_type(np.var(c)) # E: numpy.number[Any] reveal_type(np.var(A)) # E: numpy.number[Any] reveal_type(np.var(B)) # E: numpy.number[Any] -reveal_type(np.var(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.var(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.var(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.var(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(np.var(b, out=d)) # E: numpy.ndarray -reveal_type(np.var(B, out=d)) # E: numpy.ndarray +reveal_type(np.var(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.var(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.var(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.var(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.var(b, out=d)) # E: numpy.ndarray[Any, Any] +reveal_type(np.var(B, out=d)) # E: numpy.ndarray[Any, Any] diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py index 826c8aaa6d12..3e640b3ba8cd 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.py +++ b/numpy/typing/tests/data/reveal/ndarray_misc.py @@ -16,135 +16,135 @@ class SubClass(np.ndarray): 
... reveal_type(f8.all()) # E: numpy.bool_ reveal_type(A.all()) # E: numpy.bool_ -reveal_type(A.all(axis=0)) # E: Union[numpy.bool_, numpy.ndarray] -reveal_type(A.all(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray] +reveal_type(A.all(axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(A.all(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] reveal_type(A.all(out=B)) # E: SubClass reveal_type(f8.any()) # E: numpy.bool_ reveal_type(A.any()) # E: numpy.bool_ -reveal_type(A.any(axis=0)) # E: Union[numpy.bool_, numpy.ndarray] -reveal_type(A.any(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray] +reveal_type(A.any(axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(A.any(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] reveal_type(A.any(out=B)) # E: SubClass reveal_type(f8.argmax()) # E: numpy.signedinteger[Any] reveal_type(A.argmax()) # E: numpy.signedinteger[Any] -reveal_type(A.argmax(axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray] +reveal_type(A.argmax(axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]] reveal_type(A.argmax(out=B)) # E: SubClass reveal_type(f8.argmin()) # E: numpy.signedinteger[Any] reveal_type(A.argmin()) # E: numpy.signedinteger[Any] -reveal_type(A.argmin(axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray] +reveal_type(A.argmin(axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]] reveal_type(A.argmin(out=B)) # E: SubClass -reveal_type(f8.argsort()) # E: numpy.ndarray -reveal_type(A.argsort()) # E: numpy.ndarray +reveal_type(f8.argsort()) # E: numpy.ndarray[Any, Any] +reveal_type(A.argsort()) # E: numpy.ndarray[Any, Any] -reveal_type(f8.astype(np.int64).choose([()])) # E: numpy.ndarray -reveal_type(A.choose([0])) # E: numpy.ndarray +reveal_type(f8.astype(np.int64).choose([()])) # E: numpy.ndarray[Any, Any] +reveal_type(A.choose([0])) # E: numpy.ndarray[Any, Any] reveal_type(A.choose([0], out=B)) # E: SubClass -reveal_type(f8.clip(1)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.clip(1)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.clip(None, 1)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(f8.clip(1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.clip(1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.clip(None, 1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.clip(1, out=B)) # E: SubClass reveal_type(A.clip(None, 1, out=B)) # E: SubClass -reveal_type(f8.compress([0])) # E: numpy.ndarray -reveal_type(A.compress([0])) # E: numpy.ndarray +reveal_type(f8.compress([0])) # E: numpy.ndarray[Any, Any] +reveal_type(A.compress([0])) # E: numpy.ndarray[Any, Any] reveal_type(A.compress([0], out=B)) # E: SubClass reveal_type(f8.conj()) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(A.conj()) # E: numpy.ndarray +reveal_type(A.conj()) # E: numpy.ndarray[Any, Any] reveal_type(B.conj()) # E: SubClass reveal_type(f8.conjugate()) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(A.conjugate()) # E: numpy.ndarray +reveal_type(A.conjugate()) # E: numpy.ndarray[Any, Any] reveal_type(B.conjugate()) # E: SubClass -reveal_type(f8.cumprod()) # E: numpy.ndarray -reveal_type(A.cumprod()) # E: numpy.ndarray +reveal_type(f8.cumprod()) # E: numpy.ndarray[Any, Any] +reveal_type(A.cumprod()) # E: numpy.ndarray[Any, Any] reveal_type(A.cumprod(out=B)) # E: SubClass -reveal_type(f8.cumsum()) # E: numpy.ndarray -reveal_type(A.cumsum()) # E: 
numpy.ndarray +reveal_type(f8.cumsum()) # E: numpy.ndarray[Any, Any] +reveal_type(A.cumsum()) # E: numpy.ndarray[Any, Any] reveal_type(A.cumsum(out=B)) # E: SubClass reveal_type(f8.max()) # E: numpy.number[Any] reveal_type(A.max()) # E: numpy.number[Any] -reveal_type(A.max(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.max(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.max(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.max(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.max(out=B)) # E: SubClass reveal_type(f8.mean()) # E: numpy.number[Any] reveal_type(A.mean()) # E: numpy.number[Any] -reveal_type(A.mean(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.mean(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.mean(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.mean(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.mean(out=B)) # E: SubClass reveal_type(f8.min()) # E: numpy.number[Any] reveal_type(A.min()) # E: numpy.number[Any] -reveal_type(A.min(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.min(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.min(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.min(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.min(out=B)) # E: SubClass reveal_type(f8.newbyteorder()) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(A.newbyteorder()) # E: numpy.ndarray +reveal_type(A.newbyteorder()) # E: numpy.ndarray[Any, Any] reveal_type(B.newbyteorder('|')) # E: SubClass reveal_type(f8.prod()) # E: numpy.number[Any] reveal_type(A.prod()) # E: numpy.number[Any] -reveal_type(A.prod(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.prod(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.prod(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.prod(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.prod(out=B)) # E: SubClass reveal_type(f8.ptp()) # E: numpy.number[Any] reveal_type(A.ptp()) # E: numpy.number[Any] -reveal_type(A.ptp(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.ptp(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.ptp(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.ptp(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.ptp(out=B)) # E: SubClass reveal_type(f8.round()) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(A.round()) # E: numpy.ndarray +reveal_type(A.round()) # E: numpy.ndarray[Any, Any] reveal_type(A.round(out=B)) # E: SubClass -reveal_type(f8.repeat(1)) # E: numpy.ndarray -reveal_type(A.repeat(1)) # E: numpy.ndarray -reveal_type(B.repeat(1)) # E: numpy.ndarray +reveal_type(f8.repeat(1)) # E: numpy.ndarray[Any, Any] +reveal_type(A.repeat(1)) # E: numpy.ndarray[Any, Any] +reveal_type(B.repeat(1)) # E: numpy.ndarray[Any, Any] reveal_type(f8.std()) # E: numpy.number[Any] reveal_type(A.std()) # E: numpy.number[Any] -reveal_type(A.std(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.std(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.std(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.std(keepdims=True)) # E: 
Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.std(out=B)) # E: SubClass reveal_type(f8.sum()) # E: numpy.number[Any] reveal_type(A.sum()) # E: numpy.number[Any] -reveal_type(A.sum(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.sum(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.sum(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.sum(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.sum(out=B)) # E: SubClass reveal_type(f8.take(0)) # E: numpy.generic reveal_type(A.take(0)) # E: numpy.generic -reveal_type(A.take([0])) # E: numpy.ndarray +reveal_type(A.take([0])) # E: numpy.ndarray[Any, Any] reveal_type(A.take(0, out=B)) # E: SubClass reveal_type(A.take([0], out=B)) # E: SubClass reveal_type(f8.var()) # E: numpy.number[Any] reveal_type(A.var()) # E: numpy.number[Any] -reveal_type(A.var(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray] -reveal_type(A.var(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.var(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.var(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.var(out=B)) # E: SubClass -reveal_type(A.argpartition([0])) # E: numpy.ndarray +reveal_type(A.argpartition([0])) # E: numpy.ndarray[Any, Any] -reveal_type(A.diagonal()) # E: numpy.ndarray +reveal_type(A.diagonal()) # E: numpy.ndarray[Any, Any] -reveal_type(A.dot(1)) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.dot(1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.dot(1, out=B)) # E: SubClass -reveal_type(A.nonzero()) # E: tuple[numpy.ndarray] +reveal_type(A.nonzero()) # E: tuple[numpy.ndarray[Any, Any]] -reveal_type(A.searchsorted([1])) # E: numpy.ndarray +reveal_type(A.searchsorted([1])) # E: numpy.ndarray[Any, Any] -reveal_type(A.trace()) # E: Union[numpy.number[Any], numpy.ndarray] +reveal_type(A.trace()) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.trace(out=B)) # E: SubClass diff --git a/numpy/typing/tests/data/reveal/numeric.py b/numpy/typing/tests/data/reveal/numeric.py index 5cbfa4ac765c..78e5c1d6120c 100644 --- a/numpy/typing/tests/data/reveal/numeric.py +++ b/numpy/typing/tests/data/reveal/numeric.py @@ -20,53 +20,53 @@ class SubClass(np.ndarray): reveal_type(np.count_nonzero(i8)) # E: int reveal_type(np.count_nonzero(A)) # E: int reveal_type(np.count_nonzero(B)) # E: int -reveal_type(np.count_nonzero(A, keepdims=True)) # E: Union[numpy.signedinteger[Any], numpy.ndarray] -reveal_type(np.count_nonzero(A, axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray] +reveal_type(np.count_nonzero(A, keepdims=True)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]] +reveal_type(np.count_nonzero(A, axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]] reveal_type(np.isfortran(i8)) # E: bool reveal_type(np.isfortran(A)) # E: bool -reveal_type(np.argwhere(i8)) # E: numpy.ndarray -reveal_type(np.argwhere(A)) # E: numpy.ndarray +reveal_type(np.argwhere(i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.argwhere(A)) # E: numpy.ndarray[Any, Any] -reveal_type(np.flatnonzero(i8)) # E: numpy.ndarray -reveal_type(np.flatnonzero(A)) # E: numpy.ndarray +reveal_type(np.flatnonzero(i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.flatnonzero(A)) # E: numpy.ndarray[Any, Any] -reveal_type(np.correlate(B, A, mode="valid")) # E: numpy.ndarray -reveal_type(np.correlate(A, A, mode="same")) # 
E: numpy.ndarray +reveal_type(np.correlate(B, A, mode="valid")) # E: numpy.ndarray[Any, Any] +reveal_type(np.correlate(A, A, mode="same")) # E: numpy.ndarray[Any, Any] -reveal_type(np.convolve(B, A, mode="valid")) # E: numpy.ndarray -reveal_type(np.convolve(A, A, mode="same")) # E: numpy.ndarray +reveal_type(np.convolve(B, A, mode="valid")) # E: numpy.ndarray[Any, Any] +reveal_type(np.convolve(A, A, mode="same")) # E: numpy.ndarray[Any, Any] -reveal_type(np.outer(i8, A)) # E: numpy.ndarray -reveal_type(np.outer(B, A)) # E: numpy.ndarray -reveal_type(np.outer(A, A)) # E: numpy.ndarray +reveal_type(np.outer(i8, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.outer(B, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.outer(A, A)) # E: numpy.ndarray[Any, Any] reveal_type(np.outer(A, A, out=C)) # E: SubClass -reveal_type(np.tensordot(B, A)) # E: numpy.ndarray -reveal_type(np.tensordot(A, A)) # E: numpy.ndarray -reveal_type(np.tensordot(A, A, axes=0)) # E: numpy.ndarray -reveal_type(np.tensordot(A, A, axes=(0, 1))) # E: numpy.ndarray +reveal_type(np.tensordot(B, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.tensordot(A, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.tensordot(A, A, axes=0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.tensordot(A, A, axes=(0, 1))) # E: numpy.ndarray[Any, Any] reveal_type(np.isscalar(i8)) # E: bool reveal_type(np.isscalar(A)) # E: bool reveal_type(np.isscalar(B)) # E: bool -reveal_type(np.roll(A, 1)) # E: numpy.ndarray -reveal_type(np.roll(A, (1, 2))) # E: numpy.ndarray -reveal_type(np.roll(B, 1)) # E: numpy.ndarray +reveal_type(np.roll(A, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.roll(A, (1, 2))) # E: numpy.ndarray[Any, Any] +reveal_type(np.roll(B, 1)) # E: numpy.ndarray[Any, Any] -reveal_type(np.rollaxis(A, 0, 1)) # E: numpy.ndarray +reveal_type(np.rollaxis(A, 0, 1)) # E: numpy.ndarray[Any, Any] -reveal_type(np.moveaxis(A, 0, 1)) # E: numpy.ndarray -reveal_type(np.moveaxis(A, (0, 1), (1, 2))) # E: numpy.ndarray +reveal_type(np.moveaxis(A, 0, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.moveaxis(A, (0, 1), (1, 2))) # E: numpy.ndarray[Any, Any] -reveal_type(np.cross(B, A)) # E: numpy.ndarray -reveal_type(np.cross(A, A)) # E: numpy.ndarray +reveal_type(np.cross(B, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cross(A, A)) # E: numpy.ndarray[Any, Any] -reveal_type(np.indices([0, 1, 2])) # E: numpy.ndarray -reveal_type(np.indices([0, 1, 2], sparse=False)) # E: numpy.ndarray -reveal_type(np.indices([0, 1, 2], sparse=True)) # E: tuple[numpy.ndarray] +reveal_type(np.indices([0, 1, 2])) # E: numpy.ndarray[Any, Any] +reveal_type(np.indices([0, 1, 2], sparse=False)) # E: numpy.ndarray[Any, Any] +reveal_type(np.indices([0, 1, 2], sparse=True)) # E: tuple[numpy.ndarray[Any, Any]] reveal_type(np.binary_repr(1)) # E: str @@ -76,9 +76,9 @@ class SubClass(np.ndarray): reveal_type(np.allclose(B, A)) # E: bool reveal_type(np.allclose(A, A)) # E: bool -reveal_type(np.isclose(i8, A)) # E: Union[numpy.bool_, numpy.ndarray] -reveal_type(np.isclose(B, A)) # E: Union[numpy.bool_, numpy.ndarray] -reveal_type(np.isclose(A, A)) # E: Union[numpy.bool_, numpy.ndarray] +reveal_type(np.isclose(i8, A)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.isclose(B, A)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.isclose(A, A)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] reveal_type(np.array_equal(i8, A)) # E: bool reveal_type(np.array_equal(B, A)) # E: bool From a37b3a82f1abc3e73287883acfd2b06f88a09f13 Mon Sep 17 00:00:00 2001 From: 
Charles Harris Date: Fri, 27 Nov 2020 07:55:36 -0700 Subject: [PATCH 0100/1270] MAINT: Update master after 1.20.x branch. - Update setup.py - Update pavement.py - Add 1.21.0-note.rst - Update npyconfig.h - Clear release/upcoming_changes --- .../upcoming_changes/13516.performance.rst | 47 ------------ .../upcoming_changes/14882.deprecation.rst | 30 -------- .../upcoming_changes/15121.new_function.rst | 6 -- .../upcoming_changes/15666.improvement.rst | 8 -- .../upcoming_changes/15759.improvement.rst | 4 - .../upcoming_changes/15852.new_feature.rst | 24 ------ .../upcoming_changes/15886.deprecation.rst | 7 -- .../upcoming_changes/15900.deprecation.rst | 16 ---- .../upcoming_changes/15997.improvement.rst | 12 --- .../upcoming_changes/16056.deprecation.rst | 13 ---- .../upcoming_changes/16134.compatibility.rst | 8 -- .../upcoming_changes/16134.improvement.rst | 6 -- .../upcoming_changes/16156.deprecation.rst | 5 -- doc/release/upcoming_changes/16161.change.rst | 44 ----------- .../upcoming_changes/16200.compatibility.rst | 76 ------------------- .../upcoming_changes/16232.deprecation.rst | 6 -- .../upcoming_changes/16350.compatibility.rst | 8 -- .../upcoming_changes/16476.new_feature.rst | 9 --- .../upcoming_changes/16515.new_feature.rst | 8 -- .../upcoming_changes/16519.improvement.rst | 4 - .../upcoming_changes/16554.compatibility.rst | 10 --- .../upcoming_changes/16554.deprecation.rst | 8 -- .../upcoming_changes/16554.expired.rst | 5 -- .../upcoming_changes/16558.new_feature.rst | 9 --- .../upcoming_changes/16570.compatibility.rst | 4 - .../upcoming_changes/16589.compatibility.rst | 8 -- .../upcoming_changes/16592.compatibility.rst | 13 ---- .../upcoming_changes/16594.new_feature.rst | 4 - .../upcoming_changes/16650.compatibility.rst | 16 ---- .../upcoming_changes/16675.improvement.rst | 4 - .../upcoming_changes/16710.improvement.rst | 3 - .../upcoming_changes/16730.improvement.rst | 9 --- .../upcoming_changes/16815.compatibility.rst | 8 -- doc/release/upcoming_changes/16841.change.rst | 19 ----- doc/release/upcoming_changes/16938.c_api.rst | 19 ----- .../upcoming_changes/16986.improvement.rst | 7 -- .../upcoming_changes/17010.compatibility.rst | 24 ------ .../upcoming_changes/17029.compatibility.rst | 14 ---- .../upcoming_changes/17067.expired.rst | 8 -- .../upcoming_changes/17068.compatibility.rst | 4 - .../upcoming_changes/17116.expired.rst | 2 - .../upcoming_changes/17123.new_feature.rst | 12 --- .../upcoming_changes/17195.improvement.rst | 5 -- .../upcoming_changes/17219.new_feature.rst | 12 --- .../upcoming_changes/17233.deprecation.rst | 4 - .../upcoming_changes/17241.compatibility.rst | 6 -- .../upcoming_changes/17284.new_feature.rst | 6 -- .../upcoming_changes/17344.new_feature.rst | 3 - .../upcoming_changes/17394.new_function.rst | 6 -- .../upcoming_changes/17456.new_feature.rst | 5 -- .../upcoming_changes/17535.new_function.rst | 15 ---- .../upcoming_changes/17577.compatibility.rst | 6 -- .../upcoming_changes/17580.compatibility.rst | 4 - doc/release/upcoming_changes/17596.future.rst | 30 -------- .../upcoming_changes/17706.compatibility.rst | 10 --- doc/source/release.rst | 1 + doc/source/release/1.21.0-notes.rst | 6 ++ numpy/core/include/numpy/numpyconfig.h | 1 + pavement.py | 2 +- setup.py | 2 +- 60 files changed, 10 insertions(+), 665 deletions(-) delete mode 100644 doc/release/upcoming_changes/13516.performance.rst delete mode 100644 doc/release/upcoming_changes/14882.deprecation.rst delete mode 100644 doc/release/upcoming_changes/15121.new_function.rst delete mode 100644 
doc/release/upcoming_changes/15666.improvement.rst delete mode 100644 doc/release/upcoming_changes/15759.improvement.rst delete mode 100644 doc/release/upcoming_changes/15852.new_feature.rst delete mode 100644 doc/release/upcoming_changes/15886.deprecation.rst delete mode 100644 doc/release/upcoming_changes/15900.deprecation.rst delete mode 100644 doc/release/upcoming_changes/15997.improvement.rst delete mode 100644 doc/release/upcoming_changes/16056.deprecation.rst delete mode 100644 doc/release/upcoming_changes/16134.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16134.improvement.rst delete mode 100644 doc/release/upcoming_changes/16156.deprecation.rst delete mode 100644 doc/release/upcoming_changes/16161.change.rst delete mode 100644 doc/release/upcoming_changes/16200.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16232.deprecation.rst delete mode 100644 doc/release/upcoming_changes/16350.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16476.new_feature.rst delete mode 100644 doc/release/upcoming_changes/16515.new_feature.rst delete mode 100644 doc/release/upcoming_changes/16519.improvement.rst delete mode 100644 doc/release/upcoming_changes/16554.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16554.deprecation.rst delete mode 100644 doc/release/upcoming_changes/16554.expired.rst delete mode 100644 doc/release/upcoming_changes/16558.new_feature.rst delete mode 100644 doc/release/upcoming_changes/16570.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16589.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16592.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16594.new_feature.rst delete mode 100644 doc/release/upcoming_changes/16650.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16675.improvement.rst delete mode 100644 doc/release/upcoming_changes/16710.improvement.rst delete mode 100644 doc/release/upcoming_changes/16730.improvement.rst delete mode 100644 doc/release/upcoming_changes/16815.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16841.change.rst delete mode 100644 doc/release/upcoming_changes/16938.c_api.rst delete mode 100644 doc/release/upcoming_changes/16986.improvement.rst delete mode 100644 doc/release/upcoming_changes/17010.compatibility.rst delete mode 100644 doc/release/upcoming_changes/17029.compatibility.rst delete mode 100644 doc/release/upcoming_changes/17067.expired.rst delete mode 100644 doc/release/upcoming_changes/17068.compatibility.rst delete mode 100644 doc/release/upcoming_changes/17116.expired.rst delete mode 100644 doc/release/upcoming_changes/17123.new_feature.rst delete mode 100644 doc/release/upcoming_changes/17195.improvement.rst delete mode 100644 doc/release/upcoming_changes/17219.new_feature.rst delete mode 100644 doc/release/upcoming_changes/17233.deprecation.rst delete mode 100644 doc/release/upcoming_changes/17241.compatibility.rst delete mode 100644 doc/release/upcoming_changes/17284.new_feature.rst delete mode 100644 doc/release/upcoming_changes/17344.new_feature.rst delete mode 100644 doc/release/upcoming_changes/17394.new_function.rst delete mode 100644 doc/release/upcoming_changes/17456.new_feature.rst delete mode 100644 doc/release/upcoming_changes/17535.new_function.rst delete mode 100644 doc/release/upcoming_changes/17577.compatibility.rst delete mode 100644 doc/release/upcoming_changes/17580.compatibility.rst delete mode 100644 doc/release/upcoming_changes/17596.future.rst 
delete mode 100644 doc/release/upcoming_changes/17706.compatibility.rst create mode 100644 doc/source/release/1.21.0-notes.rst diff --git a/doc/release/upcoming_changes/13516.performance.rst b/doc/release/upcoming_changes/13516.performance.rst deleted file mode 100644 index 1b32b61d746c..000000000000 --- a/doc/release/upcoming_changes/13516.performance.rst +++ /dev/null @@ -1,47 +0,0 @@ -Enable multi-platform SIMD compiler optimizations -------------------------------------------------- - -A series of improvements for NumPy infrastructure to pave the way to -**NEP-38**, that can be summarized as follow: - -- **New Build Arguments** : - - - ``--cpu-baseline`` to specify the minimal set of required - optimizations, default value is ``min`` which provides the minimum - CPU features that can safely run on a wide range of users - platforms. - - - ``--cpu-dispatch`` to specify the dispatched set of additional - optimizations, default value is ``max -xop -fma4`` which enables - all CPU features, except for AMD legacy features. - - - ``--disable-optimization`` to explicitly disable the whole new - improvements, It also adds a new **C** compiler #definition - called ``NPY_DISABLE_OPTIMIZATION`` which it can be used as - guard for any SIMD code. - -- **Advanced CPU dispatcher**: A flexible cross-architecture CPU dispatcher built - on the top of Python/Numpy distutils, support all common compilers with a wide range of CPU features. - - The new dispatcher requires a special file extension ``*.dispatch.c`` to mark the dispatch-able - **C** sources. These sources have the ability to be compiled multiple times so that each compilation process - represents certain CPU features and provides different #definitions and flags that affect the code paths. - -- **New auto-generated C header ``core/src/common/_cpu_dispatch.h``** - This header is generated by the distutils module 'ccompiler_opt', and contains all the #definitions - and headers of instruction sets, that had been configured through command arguments '--cpu-baseline' and '--cpu-dispatch'. - -- **New C header ``core/src/common/npy_cpu_dispatch.h``** - - This header contains all utilities that required for the whole CPU dispatching process, - it also can be considered as a bridge linking the new infrastructure work with NumPy CPU runtime detection. - -- **Add new attributes to NumPy umath module(Python level)** - - - ``__cpu_baseline__`` a list contains the minimal set of required optimizations that supported - by the compiler and platform according to the specified values to command argument '--cpu-baseline'. - - - ``__cpu_dispatch__`` a list contains the dispatched set of additional optimizations that supported by the compiler - and platform according to the specified values to command argument '--cpu-dispatch'. - -- **Print the supported CPU features during the run of PytestTester** diff --git a/doc/release/upcoming_changes/14882.deprecation.rst b/doc/release/upcoming_changes/14882.deprecation.rst deleted file mode 100644 index db3b39d4c02b..000000000000 --- a/doc/release/upcoming_changes/14882.deprecation.rst +++ /dev/null @@ -1,30 +0,0 @@ -Using the aliases of builtin types like ``np.int`` is deprecated ----------------------------------------------------------------- - -For a long time, ``np.int`` has been an alias of the builtin ``int``. This is -repeatedly a cause of confusion for newcomers, and is also simply not useful. - -These aliases have been deprecated. 
The table below shows the full list of -deprecated aliases, along with their exact meaning. Replacing uses of items in -the first column with the contents of the second column will work identically -and silence the deprecation warning. - -In many cases, it may have been intended to use the types from the third column. -Be aware that use of these types may result in subtle but desirable behavior -changes. - -================== ================================= ================================================================== -Deprecated name Identical to Possibly intended numpy type -================== ================================= ================================================================== -``numpy.bool`` ``bool`` `numpy.bool_` -``numpy.int`` ``int`` `numpy.int_` (default int dtype), `numpy.cint` (C ``int``) -``numpy.float`` ``float`` `numpy.float_`, `numpy.double` (equivalent) -``numpy.complex`` ``complex`` `numpy.complex_`, `numpy.cdouble` (equivalent) -``numpy.object`` ``object`` `numpy.object_` -``numpy.str`` ``str`` `numpy.str_` -``numpy.long`` ``int`` (``long`` on Python 2) `numpy.int_` (C ``long``), `numpy.longlong` (largest integer type) -``numpy.unicode`` ``str`` (``unicode`` on Python 2) `numpy.unicode_` -================== ================================= ================================================================== - -Note that for technical reasons these deprecation warnings will only be emitted -on Python 3.7 and above. diff --git a/doc/release/upcoming_changes/15121.new_function.rst b/doc/release/upcoming_changes/15121.new_function.rst deleted file mode 100644 index 004fec1b07e8..000000000000 --- a/doc/release/upcoming_changes/15121.new_function.rst +++ /dev/null @@ -1,6 +0,0 @@ -The random.Generator class has a new ``permuted`` function. ------------------------------------------------------------ -The new function differs from ``shuffle`` and ``permutation`` in that the -subarrays indexed by an axis are permuted rather than the axis being treated as -a separate 1-D array for every combination of the other indexes. For example, -it is now possible to permute the rows or columns of a 2-D array. diff --git a/doc/release/upcoming_changes/15666.improvement.rst b/doc/release/upcoming_changes/15666.improvement.rst deleted file mode 100644 index c42d709527a6..000000000000 --- a/doc/release/upcoming_changes/15666.improvement.rst +++ /dev/null @@ -1,8 +0,0 @@ -Improved string representation for polynomials (__str__) --------------------------------------------------------- - -The string representation (``__str__``) of all six polynomial types in -`numpy.polynomial` has been updated to give the polynomial as a mathematical -expression instead of an array of coefficients. Two package-wide formats for -the polynomial expressions are available - one using Unicode characters for -superscripts and subscripts, and another using only ASCII characters. diff --git a/doc/release/upcoming_changes/15759.improvement.rst b/doc/release/upcoming_changes/15759.improvement.rst deleted file mode 100644 index 0a1b255f7e7c..000000000000 --- a/doc/release/upcoming_changes/15759.improvement.rst +++ /dev/null @@ -1,4 +0,0 @@ -Remove the Accelerate library as a candidate LAPACK library ------------------------------------------------------------ -Apple no longer supports Accelerate. Remove it. 
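A minimal sketch of the new ``Generator.permuted`` method described in the 15121 note above; the generator, the array contents and the variable names here are illustrative only:

.. code:: python

    import numpy as np

    rng = np.random.default_rng()
    x = np.arange(12).reshape(3, 4)

    # Unlike rng.shuffle, each slice along the given axis is permuted
    # independently: the rows stay in place, only the values inside
    # each row are shuffled.
    y = rng.permuted(x, axis=1)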
- diff --git a/doc/release/upcoming_changes/15852.new_feature.rst b/doc/release/upcoming_changes/15852.new_feature.rst deleted file mode 100644 index 12965e57b155..000000000000 --- a/doc/release/upcoming_changes/15852.new_feature.rst +++ /dev/null @@ -1,24 +0,0 @@ -``where`` keyword argument for ``numpy.all`` and ``numpy.any`` functions ------------------------------------------------------------------------- -The keyword argument ``where`` is added and allows to only consider specified -elements or subaxes from an array in the Boolean evaluation of ``all`` and -``any``. This new keyword is available to the functions ``all`` and ``any`` -both via ``numpy`` directly or in the methods of ``numpy.ndarray``. - -Any broadcastable Boolean array or a scalar can be set as ``where``. It -defaults to ``True`` to evaluate the functions for all elements in an array if -``where`` is not set by the user. Examples are given in the documentation of -the functions. - - -``where`` keyword argument for ``numpy`` functions ``mean``, ``std``, ``var`` ------------------------------------------------------------------------------ -The keyword argument ``where`` is added and allows to limit the scope in the -caluclation of ``mean``, ``std`` and ``var`` to only a subset of elements. It -is available both via ``numpy`` directly or in the methods of -``numpy.ndarray``. - -Any broadcastable Boolean array or a scalar can be set as ``where``. It -defaults to ``True`` to evaluate the functions for all elements in an array if -``where`` is not set by the user. Examples are given in the documentation of -the functions. diff --git a/doc/release/upcoming_changes/15886.deprecation.rst b/doc/release/upcoming_changes/15886.deprecation.rst deleted file mode 100644 index 050817e6693a..000000000000 --- a/doc/release/upcoming_changes/15886.deprecation.rst +++ /dev/null @@ -1,7 +0,0 @@ -Passing ``shape=None`` to functions with a non-optional shape argument is deprecated ------------------------------------------------------------------------------------- -Previously, this was an alias for passing ``shape=()``. -This deprecation is emitted by `PyArray_IntpConverter` in the C API. If your -API is intended to support passing `None`, then you should check for `None` -prior to invoking the converter, so as to be able to distinguish `None` and -``()``. diff --git a/doc/release/upcoming_changes/15900.deprecation.rst b/doc/release/upcoming_changes/15900.deprecation.rst deleted file mode 100644 index 22be711d0268..000000000000 --- a/doc/release/upcoming_changes/15900.deprecation.rst +++ /dev/null @@ -1,16 +0,0 @@ -Indexing errors will be reported even when index result is empty ----------------------------------------------------------------- -In the future, NumPy will raise an IndexError when an -integer array index contains out of bound values even if a non-indexed -dimension is of length 0. This will now emit a DeprecationWarning. -This can happen when the array is previously empty, or an empty -slice is involved:: - - arr1 = np.zeros((5, 0)) - arr1[[20]] - arr2 = np.zeros((5, 5)) - arr2[[20], :0] - -Previously the non-empty index ``[20]`` was not checked for correctness. -It will now be checked causing a deprecation warning which will be turned -into an error. This also applies to assignments. 
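A short sketch of the ``where`` keyword described in the 15852 note above, assuming an illustrative mask; only the selected elements take part in the reduction:

.. code:: python

    import numpy as np

    a = np.array([[2.0, -1.0], [4.0, 6.0]])
    mask = np.array([[True, False], [True, True]])

    np.mean(a, where=mask)     # 4.0 -- only 2.0, 4.0 and 6.0 are averaged
    np.all(a > 0, where=mask)  # True -- the -1.0 entry is not considered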
diff --git a/doc/release/upcoming_changes/15997.improvement.rst b/doc/release/upcoming_changes/15997.improvement.rst deleted file mode 100644 index 9b5feacb83a3..000000000000 --- a/doc/release/upcoming_changes/15997.improvement.rst +++ /dev/null @@ -1,12 +0,0 @@ -Object arrays containing multi-line objects have a more readable ``repr`` -------------------------------------------------------------------------- -If elements of an object array have a ``repr`` containing new lines, then the -wrapped lines will be aligned by column. Notably, this improves the ``repr`` of -nested arrays:: - - >>> np.array([np.eye(2), np.eye(3)], dtype=object) - array([array([[1., 0.], - [0., 1.]]), - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]])], dtype=object) diff --git a/doc/release/upcoming_changes/16056.deprecation.rst b/doc/release/upcoming_changes/16056.deprecation.rst deleted file mode 100644 index bd57cd226654..000000000000 --- a/doc/release/upcoming_changes/16056.deprecation.rst +++ /dev/null @@ -1,13 +0,0 @@ -Inexact matches for mode and searchside are deprecated ------------------------------------------------------- -Inexact and case insensitive matches for mode and searchside were -valid inputs earlier and will give a DeprecationWarning now. -For example, below are some example usages which are now deprecated and will -give a DeprecationWarning:: - - import numpy as np - arr = np.array([[3, 6, 6], [4, 5, 1]]) - # mode: inexact match - np.ravel_multi_index(arr, (7, 6), mode="clap") # should be "clip" - # searchside: inexact match - np.searchsorted(arr[0], 4, side='random') # should be "right" diff --git a/doc/release/upcoming_changes/16134.compatibility.rst b/doc/release/upcoming_changes/16134.compatibility.rst deleted file mode 100644 index 6ef1c8992674..000000000000 --- a/doc/release/upcoming_changes/16134.compatibility.rst +++ /dev/null @@ -1,8 +0,0 @@ -Same kind casting in concatenate with ``axis=None`` ---------------------------------------------------- -When `~numpy.concatenate` is called with ``axis=None``, -the flattened arrays were cast with ``unsafe``. Any other axis -choice uses "same kind". That different default -has been deprecated and "same kind" casting will be used -instead. The new ``casting`` keyword argument -can be used to retain the old behaviour. diff --git a/doc/release/upcoming_changes/16134.improvement.rst b/doc/release/upcoming_changes/16134.improvement.rst deleted file mode 100644 index 0699f44bd99e..000000000000 --- a/doc/release/upcoming_changes/16134.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Concatenate supports providing an output dtype ----------------------------------------------- -Support was added to `~numpy.concatenate` to provide -an output ``dtype`` and ``casting`` using keyword -arguments. The ``dtype`` argument cannot be provided -in conjunction with the ``out`` one. diff --git a/doc/release/upcoming_changes/16156.deprecation.rst b/doc/release/upcoming_changes/16156.deprecation.rst deleted file mode 100644 index 153cc4428486..000000000000 --- a/doc/release/upcoming_changes/16156.deprecation.rst +++ /dev/null @@ -1,5 +0,0 @@ -Deprecation of `numpy.dual` ---------------------------- -The module `numpy.dual` is deprecated. Instead of importing functions -from `numpy.dual`, the functions should be imported directly from NumPy -or SciPy. 
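As a hedged illustration of the `numpy.dual` deprecation above, the direct imports suggested by the note could look like this; the particular functions are chosen only as examples:

.. code:: python

    # Previously (now deprecated):
    #     from numpy.dual import inv, fft
    # Import directly from NumPy (or the SciPy equivalents) instead:
    from numpy.linalg import inv
    from numpy.fft import fft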
diff --git a/doc/release/upcoming_changes/16161.change.rst b/doc/release/upcoming_changes/16161.change.rst deleted file mode 100644 index 7eafdfda008b..000000000000 --- a/doc/release/upcoming_changes/16161.change.rst +++ /dev/null @@ -1,44 +0,0 @@ -Changed behavior of ``divmod(1., 0.)`` and related functions ------------------------------------------------------------- -The changes also assure that different compiler versions have the same behavior -for nan or inf usages in these operations. This was previously compiler -dependent, we now force the invalid and divide by zero flags, making the -results the same across compilers. For example, gcc-5, gcc-8, or gcc-9 now -result in the same behavior. The changes are tabulated below: - -.. list-table:: Summary of New Behavior - :widths: auto - :header-rows: 1 - - * - Operator - - Old Warning - - New Warning - - Old Result - - New Result - - Works on MacOS - * - np.divmod(1.0, 0.0) - - Invalid - - Invalid and Dividebyzero - - nan, nan - - inf, nan - - Yes - * - np.fmod(1.0, 0.0) - - Invalid - - Invalid - - nan - - nan - - No? Yes - * - np.floor_divide(1.0, 0.0) - - Invalid - - Dividebyzero - - nan - - inf - - Yes - * - np.remainder(1.0, 0.0) - - Invalid - - Invalid - - nan - - nan - - Yes - - diff --git a/doc/release/upcoming_changes/16200.compatibility.rst b/doc/release/upcoming_changes/16200.compatibility.rst deleted file mode 100644 index 42c423eefdb1..000000000000 --- a/doc/release/upcoming_changes/16200.compatibility.rst +++ /dev/null @@ -1,76 +0,0 @@ -NumPy Scalars are cast when assigned to arrays ----------------------------------------------- - -When creating or assigning to arrays, in all relevant cases NumPy -scalars will now be cast identically to NumPy arrays. In particular -this changes the behaviour in some cases which previously raised an -error:: - - np.array([np.float64(np.nan)], dtype=np.int64) - -will succeed and return an undefined result (usually the smallest possible -integer). This also affects assignments:: - - arr[0] = np.float64(np.nan) - -At this time, NumPy retains the behaviour for:: - - np.array(np.float64(np.nan), dtype=np.int64) - -The above changes do not affect Python scalars:: - - np.array([float("NaN")], dtype=np.int64) - -remains unaffected (``np.nan`` is a Python ``float``, not a NumPy one). -Unlike signed integers, unsigned integers do not retain this special case, -since they always behaved more like casting. -The following code stops raising an error:: - - np.array([np.float64(np.nan)], dtype=np.uint64) - -To avoid backward compatibility issues, at this time assignment from -``datetime64`` scalar to strings of too short length remains supported. -This means that ``np.asarray(np.datetime64("2020-10-10"), dtype="S5")`` -succeeds now, when it failed before. In the long term this may be -deprecated or the unsafe cast may be allowed generally to make assignment -of arrays and scalars behave consistently. - - -Array coercion changes when Strings and other types are mixed -------------------------------------------------------------- - -When stringss and other types are mixed, such as:: - - np.array(["string", np.float64(3.)], dtype="S") - -The results will change, which may lead to string dtypes with longer strings -in some cases. In particularly, if ``dtype="S"`` is not provided any numerical -value will lead to a string results long enough to hold all possible numerical -values. (e.g. "S32" for floats). Note that you should always provide -``dtype="S"`` when converting non-strings to strings. 
- -If ``dtype="S"`` is provided the results will be largely identical to before, -but NumPy scalars (not a Python float like ``1.0``), will still enforce -a uniform string length:: - - np.array([np.float64(3.)], dtype="S") # gives "S32" - np.array([3.0], dtype="S") # gives "S3" - -while previously the first version gave the same result as the second. - - -Array coercion restructure --------------------------- - -Array coercion has been restructured. In general, this should not affect -users. In extremely rare corner cases where array-likes are nested:: - - np.array([array_like1]) - -things will now be more consistent with:: - - np.array([np.array(array_like1)]) - -which could potentially change output subtly for badly defined array-likes. -We are not aware of any such case where the results were not clearly -incorrect previously. diff --git a/doc/release/upcoming_changes/16232.deprecation.rst b/doc/release/upcoming_changes/16232.deprecation.rst deleted file mode 100644 index d1ac7f044c2e..000000000000 --- a/doc/release/upcoming_changes/16232.deprecation.rst +++ /dev/null @@ -1,6 +0,0 @@ -``outer`` and ``ufunc.outer`` deprecated for matrix ---------------------------------------------------- -``np.matrix`` use with `~numpy.outer` or generic ufunc outer -calls such as ``numpy.add.outer``. Previously, matrix was -converted to an array here. This will not be done in the future -requiring a manual conversion to arrays. diff --git a/doc/release/upcoming_changes/16350.compatibility.rst b/doc/release/upcoming_changes/16350.compatibility.rst deleted file mode 100644 index 67673a6b12f0..000000000000 --- a/doc/release/upcoming_changes/16350.compatibility.rst +++ /dev/null @@ -1,8 +0,0 @@ -Writing to the result of `numpy.broadcast_arrays` will export readonly buffers ------------------------------------------------------------------------------- - -In NumPy 1.17 `numpy.broadcast_arrays` started warning when the resulting array -was written to. This warning was skipped when the array was used through the -buffer interface (e.g. ``memoryview(arr)``). The same thing will now occur for the -two protocols ``__array_interface__``, and ``__array_struct__`` returning read-only -buffers instead of giving a warning. \ No newline at end of file diff --git a/doc/release/upcoming_changes/16476.new_feature.rst b/doc/release/upcoming_changes/16476.new_feature.rst deleted file mode 100644 index acfe0bd725ca..000000000000 --- a/doc/release/upcoming_changes/16476.new_feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -``norm=backward``, ``forward`` keyword options for ``numpy.fft`` functions --------------------------------------------------------------------------- -The keyword argument option ``norm=backward`` is added as an alias for ``None`` -and acts as the default option; using it has the direct transforms unscaled -and the inverse transforms scaled by ``1/n``. - -Using the new keyword argument option ``norm=forward`` has the direct -transforms scaled by ``1/n`` and the inverse transforms unscaled (i.e. exactly -opposite to the default option ``norm=backward``). diff --git a/doc/release/upcoming_changes/16515.new_feature.rst b/doc/release/upcoming_changes/16515.new_feature.rst deleted file mode 100644 index 5b3803429d4a..000000000000 --- a/doc/release/upcoming_changes/16515.new_feature.rst +++ /dev/null @@ -1,8 +0,0 @@ -NumPy is now typed ------------------- -Type annotations have been added for large parts of NumPy. There is -also a new `numpy.typing` module that contains useful types for -end-users. 
The currently available types are - -- ``ArrayLike``: for objects that can be coerced to an array -- ``DtypeLike``: for objects that can be coerced to a dtype diff --git a/doc/release/upcoming_changes/16519.improvement.rst b/doc/release/upcoming_changes/16519.improvement.rst deleted file mode 100644 index 50c4305e54f6..000000000000 --- a/doc/release/upcoming_changes/16519.improvement.rst +++ /dev/null @@ -1,4 +0,0 @@ -Thread-safe f2py callback functions ------------------------------------ - -Callback functions in f2py are now threadsafe. diff --git a/doc/release/upcoming_changes/16554.compatibility.rst b/doc/release/upcoming_changes/16554.compatibility.rst deleted file mode 100644 index a9f3f07e0ed5..000000000000 --- a/doc/release/upcoming_changes/16554.compatibility.rst +++ /dev/null @@ -1,10 +0,0 @@ -Numeric-style type names have been removed from type dictionaries ------------------------------------------------------------------ - -To stay in sync with the deprecation for ``np.dtype("Complex64")`` -and other numeric-style (capital case) types. These were removed -from ``np.sctypeDict`` and ``np.typeDict``. You should use -the lower case versions instead. Note that ``"Complex64"`` -corresponds to ``"complex128"`` and ``"Complex32"`` corresponds -to ``"complex64"``. The numpy style (new) versions, denote the full -size and not the size of the real/imaginary part. diff --git a/doc/release/upcoming_changes/16554.deprecation.rst b/doc/release/upcoming_changes/16554.deprecation.rst deleted file mode 100644 index 5602d2d53d32..000000000000 --- a/doc/release/upcoming_changes/16554.deprecation.rst +++ /dev/null @@ -1,8 +0,0 @@ -Further Numeric Style types Deprecated --------------------------------------- - -The remaining numeric-style type codes ``Bytes0``, ``Str0``, -``Uint32``, ``Uint64``, and ``Datetime64`` -have been deprecated. The lower-case variants should be used -instead. For bytes and string ``"S"`` and ``"U"`` -are further alternatives. diff --git a/doc/release/upcoming_changes/16554.expired.rst b/doc/release/upcoming_changes/16554.expired.rst deleted file mode 100644 index 9b5718f8abb4..000000000000 --- a/doc/release/upcoming_changes/16554.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -* The deprecation of numeric style type-codes ``np.dtype("Complex64")`` - (with upper case spelling), is expired. ``"Complex64"`` corresponded to - ``"complex128"`` and ``"Complex32"`` corresponded to ``"complex64"``. -* The deprecation of ``np.sctypeNA`` and ``np.typeNA`` is expired. Both - have been removed from the public API. Use ``np.typeDict`` instead. diff --git a/doc/release/upcoming_changes/16558.new_feature.rst b/doc/release/upcoming_changes/16558.new_feature.rst deleted file mode 100644 index 9bd508e83a78..000000000000 --- a/doc/release/upcoming_changes/16558.new_feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -``numpy.typing`` is accessible at runtime ------------------------------------------ -The types in ``numpy.typing`` can now be imported at runtime. Code -like the following will now work: - -.. 
code:: python - - from numpy.typing import ArrayLike - x: ArrayLike = [1, 2, 3, 4] diff --git a/doc/release/upcoming_changes/16570.compatibility.rst b/doc/release/upcoming_changes/16570.compatibility.rst deleted file mode 100644 index 5efd1218bb57..000000000000 --- a/doc/release/upcoming_changes/16570.compatibility.rst +++ /dev/null @@ -1,4 +0,0 @@ -The ``operator.concat`` function now raises TypeError for array arguments -------------------------------------------------------------------------- -The previous behavior was to fall back to addition and add the two arrays, -which was thought to be unexpected behavior for a concatenation function. diff --git a/doc/release/upcoming_changes/16589.compatibility.rst b/doc/release/upcoming_changes/16589.compatibility.rst deleted file mode 100644 index d65fb43f646d..000000000000 --- a/doc/release/upcoming_changes/16589.compatibility.rst +++ /dev/null @@ -1,8 +0,0 @@ -``nickname`` attribute removed from ABCPolyBase ------------------------------------------------ - -An abstract property ``nickname`` has been removed from ``ABCPolyBase`` as it -was no longer used in the derived convenience classes. -This may affect users who have derived classes from ``ABCPolyBase`` and -overridden the methods for representation and display, e.g. ``__str__``, -``__repr__``, ``_repr_latex``, etc. diff --git a/doc/release/upcoming_changes/16592.compatibility.rst b/doc/release/upcoming_changes/16592.compatibility.rst deleted file mode 100644 index 289e768fce57..000000000000 --- a/doc/release/upcoming_changes/16592.compatibility.rst +++ /dev/null @@ -1,13 +0,0 @@ -float->timedelta and uint64->timedelta promotion will raise a TypeError ------------------------------------------------------------------------ -Float and timedelta promotion consistently raises a TypeError. -``np.promote_types("float32", "m8")`` aligns with -``np.promote_types("m8", "float32")`` now and both raise a TypeError. -Previously, ``np.promote_types("float32", "m8")`` returned ``"m8"`` which -was considered a bug. - -Uint64 and timedelta promotion consistently raises a TypeError. -``np.promote_types("uint64", "m8")`` aligns with -``np.promote_types("m8", "uint64")`` now and both raise a TypeError. -Previously, ``np.promote_types("uint64", "m8")`` returned ``"m8"`` which -was considered a bug. diff --git a/doc/release/upcoming_changes/16594.new_feature.rst b/doc/release/upcoming_changes/16594.new_feature.rst deleted file mode 100644 index 5d67bfad7876..000000000000 --- a/doc/release/upcoming_changes/16594.new_feature.rst +++ /dev/null @@ -1,4 +0,0 @@ -New``__f2py_numpy_version__`` attribute for f2py generated modules. -------------------------------------------------------------------- -Because f2py is released together with NumPy, ``__f2py_numpy_version__`` -provides a way to track the version f2py used to generate the module. diff --git a/doc/release/upcoming_changes/16650.compatibility.rst b/doc/release/upcoming_changes/16650.compatibility.rst deleted file mode 100644 index 6532323552c4..000000000000 --- a/doc/release/upcoming_changes/16650.compatibility.rst +++ /dev/null @@ -1,16 +0,0 @@ -`numpy.genfromtxt` now correctly unpacks structured arrays ----------------------------------------------------------- -Previously, `numpy.genfromtxt` failed to unpack if it was called with -``unpack=True`` and a structured datatype was passed to the ``dtype`` argument -(or ``dtype=None`` was passed and a structured datatype was inferred). 
-For example:: - - >>> data = StringIO("21 58.0\n35 72.0") - >>> np.genfromtxt(data, dtype=None, unpack=True) - array([(21, 58.), (35, 72.)], dtype=[('f0', '>> np.genfromtxt(data, dtype=None, unpack=True) - [array([21, 35]), array([58., 72.])] diff --git a/doc/release/upcoming_changes/16675.improvement.rst b/doc/release/upcoming_changes/16675.improvement.rst deleted file mode 100644 index bc70d7e0f0b6..000000000000 --- a/doc/release/upcoming_changes/16675.improvement.rst +++ /dev/null @@ -1,4 +0,0 @@ -`numpy.core.records.fromfile` now supports file-like objects ------------------------------------------------------------- -`numpy.rec.fromfile` can now use file-like objects, for instance -:py:class:`io.BytesIO` diff --git a/doc/release/upcoming_changes/16710.improvement.rst b/doc/release/upcoming_changes/16710.improvement.rst deleted file mode 100644 index 4194c36a8098..000000000000 --- a/doc/release/upcoming_changes/16710.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -RPATH support on AIX added to distutils ---------------------------------------- -This allows SciPy to be built on AIX. diff --git a/doc/release/upcoming_changes/16730.improvement.rst b/doc/release/upcoming_changes/16730.improvement.rst deleted file mode 100644 index 44ec03cafbea..000000000000 --- a/doc/release/upcoming_changes/16730.improvement.rst +++ /dev/null @@ -1,9 +0,0 @@ -Use f90 compiler specified in command line args --------------------------------------------------------- - -The compiler command selection for Fortran Portland Group Compiler is changed in `numpy.distutils.fcompiler`. -This only affects the linking command. -This forces the use of the executable provided by the command line option (if provided) -instead of the pgfortran executable. -If no executable is provided to the command line option it defaults to the pgf90 executable, -wich is an alias for pgfortran according to the PGI documentation. diff --git a/doc/release/upcoming_changes/16815.compatibility.rst b/doc/release/upcoming_changes/16815.compatibility.rst deleted file mode 100644 index ceaf12a5726a..000000000000 --- a/doc/release/upcoming_changes/16815.compatibility.rst +++ /dev/null @@ -1,8 +0,0 @@ -``mgrid``, ``r_``, etc. consistently return correct outputs for non-default precision input -------------------------------------------------------------------------------------------- -Previously, ``np.mgrid[np.float32(0.1):np.float32(0.35):np.float32(0.1),]`` -and ``np.r_[0:10:np.complex64(3j)]`` failed to return meaningful output. -This bug potentially affects `~numpy.mgrid`, `~numpy.ogrid`, `~numpy.r_`, -and `~numpy.c_` when an input with dtype other than the default -``float64`` and ``complex128`` and equivalent Python types were used. -The methods have been fixed to handle varying precision correctly. diff --git a/doc/release/upcoming_changes/16841.change.rst b/doc/release/upcoming_changes/16841.change.rst deleted file mode 100644 index 3ddd240785fc..000000000000 --- a/doc/release/upcoming_changes/16841.change.rst +++ /dev/null @@ -1,19 +0,0 @@ -``np.linspace`` on integers now use floor ------------------------------------------ -When using a `int` dtype in `numpy.linspace`, previously float values would -be rounded towards zero. Now `numpy.floor` is used instead, which rounds toward -``-inf``. This changes the results for negative values. 
For example, the -following would previously give:: - - >>> np.linspace(-3, 1, 8, dtype=int) - array([-3, -2, -1, -1, 0, 0, 0, 1]) - -and now results in:: - - >>> np.linspace(-3, 1, 8, dtype=int) - array([-3, -3, -2, -2, -1, -1, 0, 1]) - -The former result can still be obtained with:: - - >>> np.linspace(-3, 1, 8).astype(int) - array([-3, -2, -1, -1, 0, 0, 0, 1]) diff --git a/doc/release/upcoming_changes/16938.c_api.rst b/doc/release/upcoming_changes/16938.c_api.rst deleted file mode 100644 index aff72c8e5375..000000000000 --- a/doc/release/upcoming_changes/16938.c_api.rst +++ /dev/null @@ -1,19 +0,0 @@ -Size of ``np.ndarray`` and ``np.void_`` changed ------------------------------------------------ -The size of the ``PyArrayObject`` and ``PyVoidScalarObject`` -structures have changed. The following header definition has been -removed:: - - #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) - -since the size must not be considered a compile time constant: it will -change for different runtime versions of NumPy. - -The most likely relevant use are potential subclasses written in C which -will have to be recompiled and should be updated. Please see the -documentation for :c:type:`PyArrayObject` for more details and contact -the NumPy developers if you are affected by this change. - -NumPy will attempt to give a graceful error but a program expecting a -fixed structure size may have undefined behaviour and likely crash. - diff --git a/doc/release/upcoming_changes/16986.improvement.rst b/doc/release/upcoming_changes/16986.improvement.rst deleted file mode 100644 index 391322d9db99..000000000000 --- a/doc/release/upcoming_changes/16986.improvement.rst +++ /dev/null @@ -1,7 +0,0 @@ -Add NumPy declarations for Cython 3.0 and later ------------------------------------------------ - -The pxd declarations for Cython 3.0 were improved to avoid using deprecated -NumPy C-API features. Extension modules built with Cython 3.0+ that use NumPy -can now set the C macro ``NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION`` to avoid -C compiler warnings about deprecated API usage. diff --git a/doc/release/upcoming_changes/17010.compatibility.rst b/doc/release/upcoming_changes/17010.compatibility.rst deleted file mode 100644 index 72ca0a963818..000000000000 --- a/doc/release/upcoming_changes/17010.compatibility.rst +++ /dev/null @@ -1,24 +0,0 @@ -Boolean array indices with mismatching shapes now properly give ``IndexError`` ------------------------------------------------------------------------------- - -Previously, if a boolean array index matched the size of the indexed array but -not the shape, it was incorrectly allowed in some cases. In other cases, it -gave an error, but the error was incorrectly a ``ValueError`` with a message -about broadcasting instead of the correct ``IndexError``. - -For example, the following used to incorrectly give ``ValueError: operands -could not be broadcast together with shapes (2,2) (1,4)``: - -.. code:: python - - np.empty((2, 2))[np.array([[True, False, False, False]])] - -And the following used to incorrectly return ``array([], dtype=float64)``: - -.. code:: python - - np.empty((2, 2))[np.array([[False, False, False, False]])] - -Both now correctly give ``IndexError: boolean index did not match indexed -array along dimension 0; dimension is 2 but corresponding boolean dimension is -1``. 
diff --git a/doc/release/upcoming_changes/17029.compatibility.rst b/doc/release/upcoming_changes/17029.compatibility.rst deleted file mode 100644 index 69069ce18cb2..000000000000 --- a/doc/release/upcoming_changes/17029.compatibility.rst +++ /dev/null @@ -1,14 +0,0 @@ -Casting errors interrupt Iteration ----------------------------------- -When iterating while casting values, an error may stop the iteration -earlier than before. In any case, a failed casting operation always -returned undefined, partial results. Those may now be even more -undefined and partial. -For users of the ``NpyIter`` C-API such cast errors will now -cause the `iternext()` function to return 0 and thus abort -iteration. -Currently, there is no API to detect such an error directly. -It is necessary to check ``PyErr_Occurred()``, which -may be problematic in combination with ``NpyIter_Reset``. -These issues always existed, but new API could be added -if required by users. diff --git a/doc/release/upcoming_changes/17067.expired.rst b/doc/release/upcoming_changes/17067.expired.rst deleted file mode 100644 index a1065d2c3173..000000000000 --- a/doc/release/upcoming_changes/17067.expired.rst +++ /dev/null @@ -1,8 +0,0 @@ -Financial functions removed ---------------------------- -In accordance with NEP 32, the financial functions are removed -from NumPy 1.20. The functions that have been removed are ``fv``, -``ipmt``, ``irr``, ``mirr``, ``nper``, ``npv``, ``pmt``, ``ppmt``, -``pv``, and ``rate``. These functions are available in the -`numpy_financial `_ -library. diff --git a/doc/release/upcoming_changes/17068.compatibility.rst b/doc/release/upcoming_changes/17068.compatibility.rst deleted file mode 100644 index 7aa4e58aeccd..000000000000 --- a/doc/release/upcoming_changes/17068.compatibility.rst +++ /dev/null @@ -1,4 +0,0 @@ -f2py generated code may return unicode instead of byte strings --------------------------------------------------------------- -Some byte strings previously returned by f2py generated code may now be unicode -strings. This results from the ongoing Python2 -> Python3 cleanup. diff --git a/doc/release/upcoming_changes/17116.expired.rst b/doc/release/upcoming_changes/17116.expired.rst deleted file mode 100644 index d8a3a43d5e85..000000000000 --- a/doc/release/upcoming_changes/17116.expired.rst +++ /dev/null @@ -1,2 +0,0 @@ -* The 14-year deprecation of ``np.ctypeslib.ctypes_load_library`` is expired. - Use :func:`~numpy.ctypeslib.load_library` instead, which is identical. diff --git a/doc/release/upcoming_changes/17123.new_feature.rst b/doc/release/upcoming_changes/17123.new_feature.rst deleted file mode 100644 index e2aca3c55ebb..000000000000 --- a/doc/release/upcoming_changes/17123.new_feature.rst +++ /dev/null @@ -1,12 +0,0 @@ -``mypy`` tests can be run via runtests.py ------------------------------------------ -Currently running mypy with the NumPy stubs configured requires -either: - -* Installing NumPy -* Adding the source directory to MYPYPATH and linking to the mypy.ini - -Both options are somewhat inconvenient, so add a ``--mypy`` option to runtests -that handles setting things up for you. This will also be useful in the future -for any typing codegen since it will ensure the project is built before type -checking. 
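A sketch of the replacement for the removed financial functions described in the 17067 note above; the numeric arguments are made up for illustration:

.. code:: python

    # The former np.pmt now lives in the external numpy_financial package.
    import numpy_financial as npf

    # Periodic payment for 120 monthly periods at 5%/12 per period,
    # with a present value of -100_000 (illustrative figures only).
    payment = npf.pmt(rate=0.05 / 12, nper=10 * 12, pv=-100_000)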
diff --git a/doc/release/upcoming_changes/17195.improvement.rst b/doc/release/upcoming_changes/17195.improvement.rst deleted file mode 100644 index d5f31dd3f673..000000000000 --- a/doc/release/upcoming_changes/17195.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -Make the window functions exactly symmetric -------------------------------------------- -Make sure the window functions provided by NumPy are symmetric. There were -previously small deviations from symmetry due to numerical precision that are -now avoided by better arrangement of the computation. diff --git a/doc/release/upcoming_changes/17219.new_feature.rst b/doc/release/upcoming_changes/17219.new_feature.rst deleted file mode 100644 index 23f0296ae795..000000000000 --- a/doc/release/upcoming_changes/17219.new_feature.rst +++ /dev/null @@ -1,12 +0,0 @@ -Negation of user-defined BLAS/LAPACK detection order ----------------------------------------------------- -`~numpy.distutils` allows negation of libraries when determining BLAS/LAPACK -libraries. -This may be used to remove an item from the library resolution phase, i.e. -to disallow NetLIB libraries one could do: - -.. code:: bash - - NPY_BLAS_ORDER='^blas' NPY_LAPACK_ORDER='^lapack' python setup.py build - -which will use any of the accelerated libraries instead. diff --git a/doc/release/upcoming_changes/17233.deprecation.rst b/doc/release/upcoming_changes/17233.deprecation.rst deleted file mode 100644 index 7615b85c4f3b..000000000000 --- a/doc/release/upcoming_changes/17233.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -The ``ndincr`` method of ``ndindex`` is deprecated --------------------------------------------------- -The documentation has warned against using this function since NumPy 1.8. -Use ``next(it)`` instead of ``it.ndincr()``. diff --git a/doc/release/upcoming_changes/17241.compatibility.rst b/doc/release/upcoming_changes/17241.compatibility.rst deleted file mode 100644 index 671f73d1e579..000000000000 --- a/doc/release/upcoming_changes/17241.compatibility.rst +++ /dev/null @@ -1,6 +0,0 @@ -The first element of the ``__array_interface__["data"]`` tuple must be an integer ----------------------------------------------------------------------------------- -This has been the documented interface for many years, but there was still -code that would accept a byte string representation of the pointer address. -That code has been removed, passing the address as a byte string will now -raise an error. diff --git a/doc/release/upcoming_changes/17284.new_feature.rst b/doc/release/upcoming_changes/17284.new_feature.rst deleted file mode 100644 index 9fb59e9424cb..000000000000 --- a/doc/release/upcoming_changes/17284.new_feature.rst +++ /dev/null @@ -1,6 +0,0 @@ -Allow passing optimizations arguments to asv build --------------------------------------------------- -It is now possible to pass ``-j``, ``--cpu-baseline``, ``--cpu-dispatch`` and -``--disable-optimization`` flags to ASV build when the ``--bench-compare`` -argument is used. - diff --git a/doc/release/upcoming_changes/17344.new_feature.rst b/doc/release/upcoming_changes/17344.new_feature.rst deleted file mode 100644 index 225bb0d5a700..000000000000 --- a/doc/release/upcoming_changes/17344.new_feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -The NVIDIA HPC SDK nvfortran compiler is now supported ------------------------------------------------------- -Support for the nvfortran compiler, a version of pgfortran, has been added. 
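A small sketch of the ``ndindex.ndincr`` deprecation described in the 17233 note above, using the iterator protocol that the note recommends; the shape is illustrative:

.. code:: python

    import numpy as np

    it = np.ndindex(2, 3)
    first = next(it)    # (0, 0) -- replaces the deprecated it.ndincr()
    second = next(it)   # (0, 1)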
diff --git a/doc/release/upcoming_changes/17394.new_function.rst b/doc/release/upcoming_changes/17394.new_function.rst deleted file mode 100644 index 25e1ab220328..000000000000 --- a/doc/release/upcoming_changes/17394.new_function.rst +++ /dev/null @@ -1,6 +0,0 @@ -``sliding_window_view`` provides a sliding window view for numpy arrays ------------------------------------------------------------------------ -`numpy.lib.stride_tricks.sliding_window_view` constructs views on numpy -arrays that offer a sliding or moving window access to the array. This allows -for the simple implementation of certain algorithms, such as running means. - diff --git a/doc/release/upcoming_changes/17456.new_feature.rst b/doc/release/upcoming_changes/17456.new_feature.rst deleted file mode 100644 index 7ab014e77df9..000000000000 --- a/doc/release/upcoming_changes/17456.new_feature.rst +++ /dev/null @@ -1,5 +0,0 @@ -``dtype`` option for `cov` and `corrcoef` ----------------------------------------------------- -The ``dtype`` option is now available for `numpy.cov` and `numpy.corrcoef`. -It specifies which data-type the returned result should have. -By default the functions still return a `numpy.float64` result. diff --git a/doc/release/upcoming_changes/17535.new_function.rst b/doc/release/upcoming_changes/17535.new_function.rst deleted file mode 100644 index 5be5c4c51751..000000000000 --- a/doc/release/upcoming_changes/17535.new_function.rst +++ /dev/null @@ -1,15 +0,0 @@ -`numpy.broadcast_shapes` is a new user-facing function ------------------------------------------------------- -`~numpy.broadcast_shapes` gets the resulting shape from -broadcasting the given shape tuples against each other. - -.. code:: python - - >>> np.broadcast_shapes((1, 2), (3, 1)) - (3, 2) - - >>> np.broadcast_shapes(2, (3, 1)) - (3, 2) - - >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) - (5, 6, 7) diff --git a/doc/release/upcoming_changes/17577.compatibility.rst b/doc/release/upcoming_changes/17577.compatibility.rst deleted file mode 100644 index d08805607323..000000000000 --- a/doc/release/upcoming_changes/17577.compatibility.rst +++ /dev/null @@ -1,6 +0,0 @@ -poly1d respects the dtype of all-zero argument ----------------------------------------------- -Previously, constructing an instance of ``poly1d`` with all-zero -coefficients would cast the coefficients to ``np.float64``. -This affected the output dtype of methods which construct -``poly1d`` instances internally, such as ``np.polymul``. \ No newline at end of file diff --git a/doc/release/upcoming_changes/17580.compatibility.rst b/doc/release/upcoming_changes/17580.compatibility.rst deleted file mode 100644 index b8e1849afcbe..000000000000 --- a/doc/release/upcoming_changes/17580.compatibility.rst +++ /dev/null @@ -1,4 +0,0 @@ -The numpy.i file for swig is Python 3 only. -------------------------------------------- -Uses of Python 2.7 C-API functions have been updated to Python 3 only. Users -who need the old version should take it from an older version of NumPy. diff --git a/doc/release/upcoming_changes/17596.future.rst b/doc/release/upcoming_changes/17596.future.rst deleted file mode 100644 index 168556891e8f..000000000000 --- a/doc/release/upcoming_changes/17596.future.rst +++ /dev/null @@ -1,30 +0,0 @@ -Arrays cannot be using subarray dtypes --------------------------------------- -Array creation and casting using ``np.array(arr, dtype)`` -and ``arr.astype(dtype)`` will use different logic when ``dtype`` -is a subarray dtype such as ``np.dtype("(2)i,")``. 
- -For such a ``dtype`` the following behaviour is true:: - - res = np.array(arr, dtype) - - res.dtype is not dtype - res.dtype is dtype.base - res.shape == arr.shape + dtype.shape - -But ``res`` is filled using the logic:: - - res = np.empty(arr.shape + dtype.shape, dtype=dtype.base) - res[...] = arr - -which uses incorrect broadcasting (and often leads to an error). -In the future, this will instead cast each element individually, -leading to the same result as:: - - res = np.array(arr, dtype=np.dtype(["f", dtype]))["f"] - -Which can normally be used to opt-in to the new behaviour. - -This change does not affect ``np.array(list, dtype="(2)i,")`` unless the -``list`` itself includes at least one array. In particular, the behaviour -is unchanged for a list of tuples. diff --git a/doc/release/upcoming_changes/17706.compatibility.rst b/doc/release/upcoming_changes/17706.compatibility.rst deleted file mode 100644 index 41239dd3cb07..000000000000 --- a/doc/release/upcoming_changes/17706.compatibility.rst +++ /dev/null @@ -1,10 +0,0 @@ -Void dtype discovery in ``np.array`` ------------------------------------- -In calls using ``np.array(..., dtype="V")``, ``arr.astype("V")``, -and similar a TypeError will now be correctly raised unless all -elements have the identical void length. An example for this is:: - - np.array([b"1", b"12"], dtype="V") - -Which previously returned an array with dtype ``"V2"`` which -cannot represent ``b"1"`` faithfully. diff --git a/doc/source/release.rst b/doc/source/release.rst index 3ef1b06bd34a..29199fb83140 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release Notes .. toctree:: :maxdepth: 3 + 1.21.0 1.20.0 1.19.4 1.19.3 diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst new file mode 100644 index 000000000000..5fda1f631804 --- /dev/null +++ b/doc/source/release/1.21.0-notes.rst @@ -0,0 +1,6 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 1.21.0 Release Notes +========================== + diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h index e39303123822..a1b1de0ef14c 100644 --- a/numpy/core/include/numpy/numpyconfig.h +++ b/numpy/core/include/numpy/numpyconfig.h @@ -42,5 +42,6 @@ #define NPY_1_18_API_VERSION 0x00000008 #define NPY_1_19_API_VERSION 0x00000008 #define NPY_1_20_API_VERSION 0x0000000e +#define NPY_1_21_API_VERSION 0x0000000e #endif diff --git a/pavement.py b/pavement.py index ca1d307f7db3..373354432452 100644 --- a/pavement.py +++ b/pavement.py @@ -37,7 +37,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/1.18.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/1.21.0-notes.rst' #------------------------------------------------------- diff --git a/setup.py b/setup.py index 93db8c8fa10d..4d0ded9253ba 100755 --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ """ MAJOR = 1 -MINOR = 20 +MINOR = 21 MICRO = 0 ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) From fd821c8dc5e39879e684467e4326083822e7a796 Mon Sep 17 00:00:00 2001 From: David Stansby Date: Fri, 27 Nov 2020 20:41:50 +0000 Subject: [PATCH 0101/1270] Make it clearer that np.interp input must be monotonically increasing --- numpy/lib/function_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 984f3086e5b7..696fe617b4be 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1290,7 +1290,7 @@ def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): @array_function_dispatch(_interp_dispatcher) def interp(x, xp, fp, left=None, right=None, period=None): """ - One-dimensional linear interpolation. + One-dimensional linear interpolation for monotonically increasing sample points. Returns the one-dimensional piecewise linear interpolant to a function with given discrete data points (`xp`, `fp`), evaluated at `x`. @@ -1337,8 +1337,8 @@ def interp(x, xp, fp, left=None, right=None, period=None): -------- scipy.interpolate - Notes - ----- + Warnings + -------- The x-coordinate sequence is expected to be increasing, but this is not explicitly enforced. However, if the sequence `xp` is non-increasing, interpolation results are meaningless. From b1a61b7642506035f54252f4fd9769c8cac5c2e1 Mon Sep 17 00:00:00 2001 From: Hollow Man Date: Sun, 29 Nov 2020 02:03:31 +0800 Subject: [PATCH 0102/1270] DOC: fix typo in glossary.rst Responsiblity -> Responsibility --- doc/source/glossary.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst index 17071c8f175f..57e3bcf9229d 100644 --- a/doc/source/glossary.rst +++ b/doc/source/glossary.rst @@ -502,7 +502,7 @@ Glossary Some NumPy routines always return views, some always return copies, some may return one or the other, and for some the choice can be specified. - Responsiblity for managing views and copies falls to the programmer. + Responsibility for managing views and copies falls to the programmer. :func:`numpy.shares_memory` will check whether ``b`` is a view of ``a``, but an exact answer isn't always feasible, as the documentation page explains. 
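To make the requirement documented in patch 0101 concrete, here is a minimal sketch of `numpy.interp` with monotonically increasing sample points (the sample values are made up for illustration):

.. code:: python

    import numpy as np

    xp = [1.0, 2.0, 3.0]           # sample x-coordinates, monotonically increasing
    fp = [10.0, 20.0, 30.0]        # sample y-coordinates
    print(np.interp(1.5, xp, fp))  # 15.0, the piecewise-linear interpolant at x=1.5

    # NumPy does not check monotonicity: a non-increasing xp does not raise,
    # it silently produces meaningless results -- hence the Warnings section.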
From b999edc3335c994b6dd0de887e6d475232177791 Mon Sep 17 00:00:00 2001 From: mattip Date: Sat, 28 Nov 2020 20:23:55 +0200 Subject: [PATCH 0103/1270] BLD: adapt to pypy37 --- numpy/core/src/multiarray/compiled_base.c | 2 +- test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 061db2250d1c..d9f48748b89b 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1420,7 +1420,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyObject *obj; PyObject *str; - #if (PY_VERSION_HEX >= 0x030700A2) + #if PY_VERSION_HEX >= 0x030700A2 && (! defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM > 0x07030300) const char *docstr; #else char *docstr; diff --git a/test_requirements.txt b/test_requirements.txt index 33d87b3a987e..2ca10be5d16d 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -5,7 +5,7 @@ hypothesis==5.41.3 pytest==6.0.2 pytz==2020.4 pytest-cov==2.10.1 -pickle5; python_version == '3.7' +pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy' # for numpy.random.test.test_extending cffi From 252fc19eb92d7575967d2dce0c9a1e0480417f48 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 29 Nov 2020 00:18:04 +0200 Subject: [PATCH 0104/1270] STY: fix C coding style --- numpy/core/src/multiarray/compiled_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index d9f48748b89b..8ab59201523d 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1420,7 +1420,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyObject *obj; PyObject *str; - #if PY_VERSION_HEX >= 0x030700A2 && (! defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM > 0x07030300) + #if PY_VERSION_HEX >= 0x030700A2 && (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM > 0x07030300) const char *docstr; #else char *docstr; From dd9b2cbc4a7db7ad6db175a24500362b70ca9f32 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 29 Nov 2020 01:26:17 +0200 Subject: [PATCH 0105/1270] BLD, TST: use python-version not PYTHON_VERSION --- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 21d9227eb0af..ee43561bfc9b 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -36,7 +36,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v2 with: - python-version: ${{ env.PYTHON_VERSION }} + python-version: ${{ matrix.python-version }} - uses: ./.github/actions debug: From f351728e9c4963f81fd145e9e8f6e07d9c3e2f95 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 29 Nov 2020 13:33:58 +0000 Subject: [PATCH 0106/1270] DOC: update the release howto for oldest-supported-numpy [ci skip] --- doc/HOWTO_RELEASE.rst.txt | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt index 5a9c4d5052b4..9dbee320c97a 100644 --- a/doc/HOWTO_RELEASE.rst.txt +++ b/doc/HOWTO_RELEASE.rst.txt @@ -460,6 +460,15 @@ The scipy.org should be a PR at https://github.com/scipy/scipy.org. The file that needs modification is ``www/index.rst``. Search for ``News``. 
+Update oldest-supported-numpy +----------------------------- +If this release is the first one to support a new Python version, or the first +to provide wheels for a new platform or PyPy version, the version pinnings +in https://github.com/scipy/oldest-supported-numpy should be updated. +Either submit a PR with changes to ``setup.cfg`` there, or open an issue with +info on needed changes. + + Announce to the lists --------------------- The release should be announced on the mailing lists of From 3a07bd6ad039f5f2be58c92d7843a112e9f95244 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 29 Nov 2020 19:11:11 +0000 Subject: [PATCH 0107/1270] MAINT: clean up a spurious warning in numpy/typing/setup.py Warning was: could not resolve pattern in 'numpy/typing': '*.pyi' --- numpy/typing/setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/typing/setup.py b/numpy/typing/setup.py index 694a756dc5ab..c444e769fb6d 100644 --- a/numpy/typing/setup.py +++ b/numpy/typing/setup.py @@ -3,7 +3,6 @@ def configuration(parent_package='', top_path=None): config = Configuration('typing', parent_package, top_path) config.add_subpackage('tests') config.add_data_dir('tests/data') - config.add_data_files('*.pyi') return config From 207e07968721f3380f7c71a87909440b6260d0a6 Mon Sep 17 00:00:00 2001 From: sfolje0 Date: Sun, 29 Nov 2020 21:17:58 +0100 Subject: [PATCH 0108/1270] DOC: fixed typo in np-indexing.png explaining [-2:] slice in user guide In the NumPy User Guide's tutorial for absolute beginners, i.e. in the document doc/source/user/absolute_beginners.rst, there was a typo in the image np-indexing.png visualizing slices in the **Indexing and slicing** section. Namely, in the first image of that section, the last visualized array (under the `data[-2:]` command) showed `data[:]` or `data[0:3]` (i.e. array([1,2,3])) rather than the intended slice. It should be a picture of `data[-2:]` (i.e. array([2,3])). This commit fixes that by adding an additional picture of array([2,3]) under the `data[-2:]` command and changing the command above the last array([1,2,3]) picture to the title `data`, preserving the idea behind slice indexing.
--- doc/source/user/images/np_indexing.png | Bin 64363 -> 148808 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/doc/source/user/images/np_indexing.png b/doc/source/user/images/np_indexing.png index 4303ec35b18b5ddc8ff65b32c2b969646ae7cef6..863b2d46f66af8a53a49f8fcea72448bbf60e994 100644 GIT binary patch literal 148808 (base85-encoded binary data for the updated np_indexing.png image omitted)
zMM9A)Bu0{$XeiLQWj9^DkqLrtQ9%^R`viocxL;-+{FzM}MUn%QK7J`P2Pm7AP?PD! z<_-Sa$1Ki%9*hmYu4TIS=5|-f`CuX^Rmhb%4NyzjZQR0e$HSshX!yj(o2;zuP@nwG zS=+y7j|*s^PSszAO50!bS;?q#-kb0%)w9|eF0OZKKg-wiav7ozpj5SyVFdDrm=Wa2 zJqBqEGa_*aDGkISQ~(jKVzuNbgW5PIeVMv&E<3_#ff(8L{dD(#zrp1CGj^VAiSheR z?C^!jObzX9SkCe;->d&X2zoV@*1>)6~Sfv*A$^m3Mj? zE~x$gczVBNmIxB)^Fg5Rt?k#Br1ZcaC+2DnrcTzU*D(IpbmR2x98Sl+8`1&I@8{U4 zPVI6Llds}!Q`ea0D6WQaOI~U`(n1I=-;BmMIi6VSw(gi;aes`5*z)Y-piKq31M*u}kE~fGcs*l0Dv>d5Ja{X%Ww){L z*rbqIEGF4=kTc&}yzhY8lb~7)-hbF|cwLB`_KwL%)E1wBhfqu8iwNg0-P*g~At+8{ zxy%fQIq}bMkYl;=zCApO@Ov=!J#+9?a<@uUD8oN)9|QF$fy#FV#ZQPIV= zbrE7Xj5C%#UB|`)ui@0t54F<+7gD5ozg~@s@4DLl#&5vABJ`&hC7h`h@8b^zthGP( zRQ31OpUcl&yS=>W{IlK*6%^Muq(Q~NG{YpX)f*pD591@B|9n(X|MQUU(1Ca8BZkl3 z@oKVGw;g`JMNdx;vcMDR7|Ab(^yHDNbAhISsLo?^rO67gH4}SU|7X)(dpO7ml_XE7XF{_;w z6b59l(H+uO-_;sWqu9*K9r9!48+?%MX?U%{-kvmcOE7uM(l|F(aPImPR(kgx0)YUp zgWG4urZ_XU+wx+vj_58cU7-qG%agVErM!CX3mdNga;o9V->Ky2KOmR{3rtU0Z3dt{ zD(vT|2At}m{_4gr$n?%}L9t?e|4ej|9&C$QomzR3Ze%hZoTQ$Yp!G&6i(6Ux zaB^wfJ7h@IkIaIZw#uj5>hwg$7J=LIOOYIWq4ztzh5AKS&3lk~dezzOi#`sh`+Ix| zs3+@XzAUev+WsQ^v0-3fph!jR(FsU?X`7k4zmGRY|Dy#+T+~3Ae*dqgDp2Nor;#M6 z-5#>V#zhpowqamkNSRoHqlYrTOcJXpfk#UP>{9bbe;Q4P1l|Cw;7EKP*2fd%g;H>X zUZ<*N6LGUJ^!`n~Y0qJAeYxQ#;$d@L=#Xdr^!5p|p(eQ@JurOH$9Wo)rBO$d`X_(I zA8KaIxGA-VY9N$63GAR&R6!M(T(Zy0u*r}~Qavln8@jdT<~VEaD-8cz$sn6*xkWU! zsjubFoD8wwaW|2oUM9)k!W12tIT4BClkL0YCIV)>l-k66F*B1LOm_#km)Pa9x4DY% z

P-zHj7Fj>6^3_Vd2u(!`*y4c;OT7(L>oDZ&%E)Qe1thhA=Qo;AB3^P7Hz(?u*Um4n;VX=`N@7blDs=Km0Xf-85*H{ z#R1TBs6Na##hYM@m*m}fWvV-4Sg@3OcE+$7)XA@geH}JA^a@YE{=PyIi|2~D=N4^5 z+-$B(6rKvNyLF5IB`tPGV&%ohVSza3qi3(#C%{h`HyW59Eq|L1eQMX&(5Kchy(LC% zciGz`F)9^5Z)!KxS`HpTUTat!^j!dec}y(3AaKkAbICCW1V*aAT-=O>^3jw8&C6}u zld@Ru#kXf-g^kC~z?=+z=&QfJMf~-tAhfD;k34Km`_8xo+q8oAuqFk8H)9@ly&*fv zgyr1b`Jt3&bx&_*8l_mpZm`4^NmF~Np8%c2mE?+tC4pQ$X2+w?sCNRy!o!M zRASZFZXHP5ZI53)87M)oWyz zpc{%3_d)ghwoNyP5Q5@tc$_?zn|PKc(uu_8llCHs?vWD8iFKMjadTw>{ZM_4>b)LRt;^T86mE+LSL z2?4!CeKtV_LAV6WVb18D0;md%7REHoXMayN*MrV1KQ4{mxCrzgxWzi4TIy)S)2z^r z@{(DYdVJo*%5xsXU-Ev(%ipM>efHF-NqF4oA%+KFDEPHha_s^d%@Pz(o~^?qKKJVv z!#H?SYXsk>08dfjYF9Ft)G?ixRYVNZf-!LT=CNT_;)7aNC>Y@t;ofb$cZhsh+${9g zg7SO7S2un`h==QFn&eLcMb*6MHJ)j35HHlyq}!D*8x=#7LSa~q;(2UpDPo^b4*VGQ zPwN04FMvgN@c0Ojp}4VCA2AfvM~of>iHlSZj}V3E5ui0+x*CJ$7^gJso-(?B#fY$d zKx`zZr!^kLa5&hS6!5n~Ni^|f{A9oi>%2v=(s}L0XyckIx9(icT^hSr8PMj!oKC6~ zAg;{14T{EMDu`tef0RbXK*M9d;$Jr7|(Cbn2ZRBNKX6|*J<2K@l0%kL|M`|of z)S~xB<+N%7Vl5Dd9gR=8Lf9|%9d#3j=S~@g)niIAItJnpG>b9N-A3IKx?~8Y2W=F+ z#`tQ)E`APYid=~dnJza|SQ9&9X4gBEZ8E^CrqHrWRdS( z$z+|By_9!+=~Fa67K7P%{)3cJ4m7ncZ`k;!!NV)8^POw1B(I1qjiQ3x>41QZvsXG@ zC5_G??9!j~J~!CJ1wsQxy%xH2Kg|SqKeq^R?}2k5`skQ1c|eL{UO4eg^``{TD5$`} zeF}4!OJAaorC~?Kp%%l*HY(FLQ*zf8PKQ6(ltm{$hwyQEi;mm~6*x9w0!1{T1HiaYX7Cg$o-zuBBNvFHg@XZ&3qHax?)<(E z=BXP%tMvr>0hXxHlkY8c?vQ=(JJ&m@Iy_%}AYEihAZ&kDF}CA;_SKDL_k%U7=XojZ z^AI=b2V1Cwj_b5+3KU0SrdUQw=c)*oz`f25T{H-bUwSY&#; zVSOa{kyy%ht?q;~)SkBTp6slMNocu7?PpE2ikTRZ*21G()2-3t)o7*eztKuMNEWUp zHt6T<-3DZ$YK%)h2_bB!)JNXN*G@%O{R^c0U~cEp0rxtTU>-^F&s&oy6leqLg;wj; zVY{`~pMh=Erkg!Vy#oBkHtQ4Du2TE!=F(`IquFqoQHfFjaI;M4ea<;p3~Gihp(8%5 zDt@0isOGq*;_no36g4__^e-W1YS`mLsdEti>2N9Xp1@*f!KCW;ww_T8CADbeWlJ6~ z#G$2*>etXPCE9B~y4>jMLobWW(cscZ*$1nx4o^DPdkfFhb`FVpZpg&A)T}F7-Vvz= z)r2;!N1r#h8T&)TWkL|~T?oAxbcq4!Hyf-98rC!5SH$EX{sa>`*$oeuvUP401YDfv zB=N#-oRA8nxDBjdk7>SnEcJfNB`9+eNhcrRb)6*C5;0QQa?#K1D!(GZeT)PMH}ky3 zR`38hBYZUSj?&?F*hr~w2fJcqvTw^em7o14e$J9#K#mb)%m-Y7aWbP?dXkMlgBl=! 
z7as_&KV`t?(rRJ&M|o=TqLYtqDle)n_ydm9yCyL>fE8})8n94gycQLfDcAO2ek=y{ z;w=)hdWe5SXlK@m4Vi4zdH=r>lt4>-~NlbO$KiD)X0g%emN^~i_YFmRmp6*m3i^ctQ#cgnCJ4;W$M+s*x ze|j_*WRFAyEVX_a`=CAGz4K0*3I0Q&cuB^f%%LMyJ{i?+N&`6Eja~_n{vb%LhEOsL zxWDJBw!wR!kQp932;mEK>0sCXbF};k`};)G+sAZ+_YC2Th+&-2$y5Vy;^!|&noa~x zVvlv0BlbKV*xcqN>P4IzRY_*MG+$ur#!&awTzM}hF2A3X8Sch5&~;J&TTXH4Ci)<4 z>?e%@Ql?CNpiV8HK)?hff0WYhEptW>kfs(K?uMiBpQ$1ttjJklfywJ0EWkEAXipJ^ z3gR{D~+X8v7-x z^RhUrken=jYx#B~?igstik9U!xa?kBk};8t4U=0up?+=>SGeKUjilbk_b}VetJ*)% zkasM*PJJO_|1?;=%(_)@Hg$0uGgTD5G`4r`M#rXAaQ^YS70Y$k1;L}SIRsQ*x7~DQ z1aAP->GOnH7tX$4zYf#E&XKOl&;~387gIaGz#KZdqaBff5I%OK_%4B9(jlmhA?C3q zh1}4TyYyv_8NPG=g6b}Kz}&NwPuh?cv;P`yvYVV3^!C{YkB6+|h$R?#< z97*-TdNqi#tD0OtLFh+6PiSuR>Rj_KdCwyg=(9)?AGpK^g)tKpw9KV@$i%J9b6k|c z$`?x4)7DanV;h2NnuX{L(OFZj&d>CJv;gPP6`Lv_pOjd&LKqDNnhYS|HX^Nmp9Ir1 zyXDk&d=zMe!nTzGjlGd<;_-Kn#c#vpYLJb$aoi{;pzVUdX+<2lA) z#O;RA_4Ezry^7vH%*X6G`hMh-l=dW&>m#vo59Ap26-)ydA77#cRR?%kH1;kOVv`>D zzocQ9Uo#$QLfCWv6+9xdm=20)ugz6+_Y5&F5b4_TVyPhZ3 z!gUi~@2SVONUd{+DVzmHBGn^4hC&pHYx5B>M!iAYnwo{-7$0A%9nUsSjG^{ut;fL> zwo!c+jAfc^V_=RhUv`A6RO<9gEeFJi0+95kXe~3^tGl2*P;I1Wf96*zCiLQddF zq$wJk(C@Ye9-Q~Hx9M%f(qE}t8zZT3DqQ^z=a^m zbt!$}K5I}Y#6ykiXGoXGOE3ssR2qn}sBH~SB;>>S=B6!g^S$w|l>%01%ea%v&m%!A z=UZ63lEQ>=-K=1h8=`krjd^MG zHB=5(us?ueGIa$!NQFtglALG{%j56MZmz97^4e7`X&I(Ok@o~vz*J0m-6L)l=}ysD z=T;|AgbcyGPi0Zb6AR4(=u_FxAWC!qZV26Z8PV*5g+{Ax+fNH<&6$WEjQD>ZTf)M4 zJOYch#*+EtD_#v;pp{~2Z{G|RwaeKE$JN^e%7$Xp;2YIal9aA zK9G-ALee8E#`!3NBx?Jdu$cj5h`m+{!7WEPy2~veeg5snnM`XNSYb;s8$vI7ehGmh zE|Y-mDJoxhUpI=Hr^fn;3m09q)&xp09|S}ecF8gAh+D(EzsxDw=U7-GqsQox}8of@5Is2{8Glyg_d-u?YG$O@_D~^+zNb9NbI(W zsS0uvC0}oAn+VCVTF`(FUC=N(3K$mJFyJ`6pRayy51La^dg=21!B@=310AE+)Df;d zQ@!K*nHK`^;#;GC#{gVH`>p2{-kpBN=R+UX2%luF5?_8T6FFw9c72>j#B+c~#n9+F z)XT0u7bL3cCo6`sKf-xky>X|vlhSu`?a5fA^qj)DjE^#vl5{1d+=H*7ML=U*Mwf`$I(G9j{0~?R{tX;j=?ycI2s26h8&Oz^d*eJ?%=>?~>5q1^8?Q~=zuS`p+ zoWJTdY{XSk5mAH@Fp?v1Abx74yxDA1@HsB94USO{IvoN^>&xD0;wu~iXfJ(fq zgg|DC;_gub{s7&7eeJ*ike^r1-WkAi>|2EGJes@eb=d*?$pdT*@h^R0aF+Nu_5e-q zZ*?H{e_X=oT&QwEf6&AHK_MdqFkX<+Ju+`Hke~X~VDdH3VO%o#iqtML1+ig^z^CdA z$MZ~3Hx#3`HB@z<(u$7!V>Mj?d@muuNgPV!0OV-H8Cinm+%>&xdFK)y|DMS# z_qN{o)N#*7Gfs8wUrpM*s=ngi1tL?v5vATu!Q*PF?Q_`gx4{ulCSByTcj^%AyBl<_ z=4wFsZQyy6E%a^3GEN&(ZX4Q=`5^=X@H6;&zen6gbxh5vp)4F_a)9(Qe01&0a2p#q zLy(&+q(*P_>SoE7z595>*Why`$ErdvpmG1?CX>N+*kTS3Wf_W;d(XsgF)wh#OKv|$ zUAn!&Djh7*&5HsD%be*KhIQG%R#4Gku$hL73o!B#*4zZN~W-SY4s6Il1Z*(6<29}640w{6QS?e z@W%_?@-2^Kgy2r36hP}iNuqNA!(Ch$TD>j|U!1K~D2fg+RxW;%(_qUHU|We=pWtNG zYME-P8!EM|rc=#PVH}}sA74W*LEGpkyoEeNPP24kA=WQQheZ^%?iu`9x*dV1C0&Hgyjo6hovg8=}T}Ev=2}JH}sC5$(MU3BvyC z?)A7rlH&Q|ud0AFNXrNjED!S8+4goRCS6|svj_FH|; z0p~vej|a8pt2kf8R)tV9)R3XVy(=ch|D`SEXYV7rb!Mlsuz}|%^fc3hs*mmHIFL9R zgFURztS5tUZ?GLG(Pn}7_GIDR6|u3+j+sn!RckGi!X20}5?{*~``#X^Nl(UruuTh8 zDUB`AhCHl{BL8f6Z41x158GaA-ff^rm(qhA#6h;C0QPI(XNv$~(27Ni!b zk^;q;J;8<^a6H4rVjVydMxc*!m4;c^y4C*07@g02R9yd-937_u2)lj5-S9VeM3gMKp=78re07zjeJ;IEz3L<9)m^o zt$;sErvvm7MssCkuGULd=U7m^Wgs+wze>^i`ByjmF0k^K0KIPC;N43tCMR28?LMEq z+7Yk3Ft)h7G=Gq?(AtI(oGSh4*#s%>!SFqYGHWqfg)~SQurGq?4rQI|0Mqm~gZCb< zUC72YC4__MNh9g-3UB7-Ivm_IxkCt2rZpiDSzTx|6W?-s(!m>Kog5jBVBTJcvDMni z&S$L~-kUb;Ywt!YHkMOc&PF>u#M0ab7%&pPRmduxAMoKM3waf-40KSSJyRssA>xPY zs18C2ScK0ZSoebc9&f3C+fl&2H0+r(hJbS#to^2NU|XTleXws^;dTP}%lm}o)=9_n zVj*s^6lzc(L|+}ulNrMD>kX0F4i@0+@f(;pub`|LOhP^5SFXl-HsIC)3NH+ID!_$H zV=wOoTltpjAMY0i6Tmd^pbWwFP(}jiMBNevpLOuKB>=80j_88WY$G=UW*F*^9~0Mh zVj_eQ&7-oBm}9&ZR+{;TdBWT#Ab$+gm3oWZT9;L8Z7d8WP)VSZMP@h(eXUK9v>oC$ zFOrGAuX6#ZWA|QZ%yfue513nJY3+d;gYX8_W+J9flwP;TSQvs{e^_w14&O62S#q-` 
zollzbioH_GX}^ftU%AYPvqUEG42+Ypj7iI9BwXZ#nkt&=d5?b5>0RB#Lz`FV@- z3GJH}+>qtEWU~4jDEf*9Vk4`cEMfvGSuS>4eTP{}P7mdjoNhnv+4WhuboatPqHZKc ztkB%v1bTi()1?r3B!7GIkZfG7(9LosL9BgdH>Vo^#(kF#)<&Ei0^y8>TuqjfkVaj* zr8o+={1n0*tAR*ti&mlGoAX}+5?tqpFen`3Otq&6$FjtmO8N;wW@!*A@$D88oSx&i zLmSEyM&QDQ5quUrlLX|euOm7RKMpx)DDK|{=93H>GMKu0q^st5a2 z6#go3bysudby`a@pm3s7cJH2A=szplOs~}o9>mMix#@-Wek7mcQWc>FJ z2ExV;i@JnteV%P^KB;n}^8g_1Dr9Aj$GT)CUhEAr(8)XO@zcZccF(N!sAvJvd2wq! z-G!@hxey)e$;CrC>s`KCG5=avUu|w9Zg$s2#B#hm)_Nml?IroehmNJ=okUx)KNs(R z#nNrLQdDjfigvOaq~?yP#|p5*gRghL+07)6+y`H@j>xV9Hr51cd({@^x| z(A%6JHAHl8^+QmA#@7&Elb*k1jH{6YUU|F5rLc@UyrDY||G1xQX~ay0bJqc!6|tS` zV98Iux3rpsS(Bz{Wwm=CTAMTLM8}tWnu$?Wmo-20)%knLCLW1z-+79RI~`M9(#Gu7&?)mWCj7mY?O;+Yr$316PrtCw(x z2-dk|OOU-*Qnf)Q@yhbs$J#dlv^l-;epSY>Yo z_vqyZg-#;%CJTC$us-lHd*8ST;xJVhgvFVGtIsf&Idrwnn=}3GA1c{uQ2swG;n)9W2^a?1mS*liyQ7Ve zMX0&;T>gZOMLJc{AjM>HtS9us+!Jw-me%G;x$_9>79NYq`sv7rmU;o^~jJ zKr$bo5CP88SVUf^9+%)q-c_FV(#@7oI9P^yBuu{L95=P#Hj)frQi?dP2?;2$P><6Ac^mAHwWpb+ZHFQV*-1pCq8PFeCvR$}SuwkK+_jc!8KR86@H1nq6l*WKow+-=+^IB8F!rDi< ztZQq^wi*l~{Gf=WXZ{~KGgzcRa%siB)@%s7btFPnyrtk=iG4vOepJZx?lok}Mqy(1 zZJdJhtI1PWb-)MJ0s*1k&I}qZy&F+i`gAge{yVa+^iX{8@O~5xyHXS?_-(p;CQTUH zATkHC77-{4=0!)11|w<)U4?U-6k=a+@YEyBRk>d=I(DV2PerKZT#V(u{~<%}*c z-15rK6qy!T%oo1s<^FADu@bdpLTtux3-lFJn>n~RW&ai$90D9uziq1|cOa9gpJOAq ze7nI9rsiBmR3aK~WT>hup;sX}@@>UI?jP3tMhB0G=6mi}b_VD0w&rHzni=4Tn*Rzr zr(Lz{0oDywQ!sCk7jGQyXQ<>@RSoZW>U;(8)Hptp7YW3j9Nojpth?E8I1%0$T%%Pt zv=sJuC;=E6;=+jM1PP#^hV0!sAJhiFs96;rMQkwCveYDB-0?AMz1PKuu}0g#!mgfB zvA?nN&U%(*!-GOTD{BWdsfI{QZL-2ibmWRy`54go;Q~Zl4674&9Cl9=nFgsn(+O}= zr`n|T8|tBYUAEAnxW|>+HC;#E=LgnFu4Qy{{{TnERTpey@RuILKmHhR$} z2(<2|SMz#%xnNJ%m6i-S;wQonF;o8(;RT_@XCS5JD+WXB}t{wToyB=GFzuxmjoE z39M|lQcmD)01Hc)gLa}&z>+V zk#yma+%1SkLc{fvULU@Ed#tf=BGrA$QKW1JWrsB_E;eriygz@rD_|m(V>Z^>U$m90 zgl-nszJegfw|vXG2Yf+~RMQ5v`o(ee_uFgGoZ6WTqrr{C4?l8vQeq9*Zv)t$;-5gW zK_LaNAa+1wAm5n!L`54q8YF0|1`;-^Ghm#Yn!K>>9>pdG{FvGd^(ti<-k_8~!n-$# z_6Gvj9x2F2kzK>6@2c6FNm+cdMTmaw}@V$m}6P& zjIt}GWA9k;?)R~2T`d#$;poilL1|YZ257&Zhop{n>{ab6$Tq9ynF14VLnKmH@mFRF zV9T;b$fnq359*w18lte)TPfD~3_Fx%ZAJ!cd+AhyvL_uPvRyQU*rmt4_G6G$*3JuT z#06A8^S3S3HCd2k!0-F~cwF0hRJoh>LPZz5 zd5wOF33j^`x50T~ncIL~z2S_o0t$#4nSYql2AIeldnqBNn9dujAH=mry-ROq zj49FVIy|K4aG%T1-~gXuniBloK8^8OIH1$RhYpL~eS4KHS@|`Z*?_v-p}%_Y8@f{0h}*ROt#Y!&^AQa zga-{?7fN8GlxVZ)xcydAriiq`e&a90GABe8_2n^1ls`J_v_JFL)~qh&Y+Jk!odute z@-QsWMcC?kfhp$mJAJoy5|#3C<*Bwvs%H?^3?4+wCzZf9Tp*dlJSnD{*PhZu7L1Z{ zNg_nPBY?5E(!(D_9RZOlb=a0eHlo+K@T^uMB4xPjQa;{u+FxYMH0gts$}s#xMVLG4 z*n<&k@|VU&Pq;DnmT^ai0!}P`=+`%*AvlK9Pp6B!AQSbx4^SVCJ7^=AOu!adHmX1^ zFGQ@;6ltMGG0sYq8Y0f>b||j(4avu|H!TmV&r*TBq8J+Tg0>a02#9=|1`*t*C4NiI z@DTvN{5g?aX3ZM2saf3%Iu4vpOOjurVtu2*NI#So@JY>aL_ebKPq!Gp z)q2@oJZntJzBhrp`0M6?F;%w}|tzMbWj7dYRF8WHs>ndNuL) zs=M0s>r)P%C$G-y{{Qwqa!Ld(;lnqH75O$FP!rU2BUOY!EG5^flqbl`*4w3q+}e2I zKJoHKRZEe=3u7^1pegFIA>W{o>xI-S9!S0N5*x5#3Q?%a!B33R>5>KPQgRO(`3FK= zpa<}8ic81L1BXKoD6LX!jl#N7Xq?inOiC-Lt9>C}j2)?NOkaQ=H+p;0jQ~=v4aw@x zQ{IPm#4eSyNQp&bLB}A38BXSHNU?e|d3TarV7)_9;Rc#vPK;-8HS!0r%?fpIb=YD8 z-yRn%hF?Nx3RJjc3ZNYwFdfXWZ3}N>6N@Rp=mKQ@hDGA!qqFVO)^j|m&s;vfKm%!E z1uhfM$s77sh}jlG*h=nLo@Venf?2QBz>0G{6JGn4Z@YS7r%_T&j3;aOZ-X!agtlfoJ{YDpVu8*EcJ z8f=kD+ff-X8aDyIr|9DMI|uPAHtpxsK9XA15%lN#oC0`YMQpWH2V-V;x@3!LfRDH` zVngDk46BI>?^FJ)--2Lq?^7@=sIYwgr{qfAywQO;noI3`MTEKRAX&g}bto*Fdlm5t z9O4x@ILy4O)B6LT1?0AN>$zf)WdTDQVg`V$#v?llXiPH2g5=!-z5F~RIBac8$y zqTR9d$~rb~sgFk~O!F)*|I;+hBpuD|)WjDuwyD`sA_1B49Zr67J@cb7W25?7|4fl} zSiH7^?_#YI%~=9(!0)7mT)}SUy9<88c1xY^s2G-2Y!i@rD0yA5?i2zIxKPMs&lgbD zxu$CbsZAvMbv3n#biZ&wWP7{LpL%L1m><@ZPq`}Zo(|L1{HGG%KKqEy(1ogm5$U^n 
zFPBK&VPdMNgleou`8hsx01f&y)GGg*;ty=LoD78^qhlD>gmgzG^q|~ZRpt|E!!t1IDb(HjsC*1dbPTv z=7h1Bzn}@W6}urCHMLMYW!|C`AWjS_+cI(F1mEjoZ#~Evh-*=7x>`A@QRYrL9lSXI zC8lj@STE@Cz#pHznZG{er$G&FLv)D3`SnqM);<_k6OVTwOJfZ-Q9u%_+8*!vt6u7T zep?j$L0ZkN9vgR0j^S33{XrlaRX(pnf?JsT2$p!Zxah+4ZWhw`UTTspP;gL#t;JzF z?3At8e=ZzVrR+(3;+!2m_Bx8iW3^pQk7DDLxp5OpvHAhJ0I~xZVHmC0-92sXJjz;l z|MZ583`F^=mmPSPF(j%$R`z31S?{;8UA9Wf)ScQ#mBBpgi^|${&5#ndZtVzg^3`Oy zfD~O^AVL;r<0fJ()3LsyTWGDyYeU>OgX5L--8EVZkC=czue90unV+=Pna`J9JTD{S zG7{@t#*pB;ktg;kq2b}hw3q*A0p}wM6M0U-wvcJS>nn>`Q}wq!#Y1Xa_agf|8X;yH zEZ-o06I~5jDltO!b25UGr7qpRXp5A-JbIvOLVabB*;}&HK6F^2|JjC&0iKy_gR%7& zHSR&)%P@}aTX9;Fr}tN0Fq+@Z`%SymHX;zA=Cji`FiaV+B_T*ynRKMi|3 z+#lrt#)Wp3;-SDzFG@wp0ga6WfXP0%_}$Xjm16G87E=uT*CbV~%}^MU<&a{cgtsRW z#?`{*d4&yspS1X}wMZ5d`T@5t?>W12=PEVJzb6{>2g6}RCADzordj>|WOZJl-;<|z z1%w3+HZd5|h69?1EX>E&gJ?(Olw)f#$jN8*@AbFe~B zHA}egj&@<|h}$UDl#x6TbE8CMAWQ20i|w$=XRAk%a%H@G>gTJhu;rNQYeS<`y7q(8 zYLq0P@EYi+whGB6Jz`JH367iKr!-L^=MsHc-(}31G}teH%Bb?fum+oI16y8@tV-%& zR)vRk^8If&xHo~W>tA2IeXIi&D9RAozW;J%V@3k9#(#Xhmr*@B|3gLiO`U?aFn~T`#Hdhgc+pDcUVA4WrS{i5Y4a2=mQc4Lyc15= zi{;3oV1C)w?UuDt?xoxDAt^}7IhMhJjEWU&TS4~TynHrBg=E9G>uqLYXcH92(w;sZ z*zHzU6I5^|%m3LVs5mWgOdRUlkGnjtQ)NX%br$P4t<@Fvnyk{Frfy-$*1wAlLT@N< zO|xg*ef+lTHEx|_W@T^dl31|RZ$+NZn$V3qmhQ@0YV}cYR+j3J`5ZpMh%575v5Oep zz50y3+-~cU5X!;~J*WLCPeg;YkCgrwZ|@!tW&Zw;Z|7Q-ZS9h@CT&QO5IW%AvTa*P zM?y?R4xy-m~KFUK>fGh}Vs`+N*Pyl;~!N$mT~zE{5`dR(Xu zu)G??b?nKlQb;WR?z5(C&(SKgj$U5zSbBKE4^YwATZCh|V?HF-IM>91r2`pLT)K`Z zs)ECM(!I?E{UGif0q2^NnVH7?P$ak%Ho#9cRi!TYle+b27VlYjqrZ0sA+;5{EY%7?z)9E@VpulA581eT_{J8jiFn5RJ~rDb>1tl2DTJiU7W4T}Z0E&l?aevL<*{i&?^8k8I; zqG8bw%z@%Nh7#h(g)55`f}0B;g|1g}aHMRRsgXS5zQ4D1AL2XQKk<7>be(TH)|C7> z5F2)%_Sb>`{iH)zQtXKS8>7yG8J@NK-&X~AmM{yW3>+ZA`6Kb)Klx5146S&!bgHLL zH6fA3n9j|WYtp?uhx+`NMtHw|P%xdQrPG<0h(+mIihAfZ7mDg+cbVMNrxQx)yU&c- z0fiWEY9CRwqYIs(f&^a2AWYzx{xVG7ba* z@2(~+-Xr(Pe5LiC1`b6*3+pAmyXN}vmFSa-Z*$gJ|ELoW7-kefHPV|Ny5V#B1`|0h9V(k9eAk%!?hHrS z6Tl2Pg6_=Lb^>}31`SP1TWvvl>Hj7vL(KJ^s@2wgv4_RwE6?Rl ze{SmQ9|K#iLhmTzg-RcR>#?Kg*&9u}?c0rf9quWs1~CtsGk#*0LGnS2^OChsZb!`I zVRy5ik{)8u)_+K7VrH~&XZEhH2$H;u9nsM_*(R4te28b(KI7QElYv5Kw?pEXCPYbh ztzULgfa~S-~8yOuQ+_7 zsBvPp24&}oB7?x~^F-k-7oc!4QEwOh8gtjsl(ktI(fsnYSrD>j^yc2ZG~t|~)7e-~ z)WOILlG{Ziactk@3sIRjdKyZ_x~D()#oCIC+tR&ZlG1l5TGmi^R;Oam?jVE5684H^ z$8X6x$!0xNo$kL`*$1e!$O-YgEz?`&v;?nW*T|(2Q5a8qVd{J1fnH>Pn*`*b%a}*( z1T9^}rONZ21qbHl5=aZaU;S?f&q|L0_v|adc)7En z1kGqVdHpez`n|YkSt57-aLFUYv)~$kZa(6$)ymFw4eGdj)b@>t_1l9&PBw&T2kg74 ze8Ej7wBCN5@YMOn{wm2%OUs+r%r|vtIGDX+IyAp_b=KsV=H@mwZ~kV#|l|9jhbugK+VEh zuUlt?9Gef0)~aqmib#!{T;r}29bVPk(7Gj7x`&LSU5BPkZZe+ z)5+$~_dxV+du=;fu z>^jMgoQWh$On$*S!*&__rkeytrWiDOu!@uON88n>=l^iAv3B`Vm)W(;PRUS#@L8R% zPIf(+YDuQPj_|4`XnVYg30j-oAdEl%PZH5~Sr*p5GBw(~a6d-vansq-RxYbKcJCs} zdx}r>y}_sYwmrM2(h$@W)Van(Q0Nqmf8Ix7b(%yph!_f1dYJrRGG?aqa8Ys|UMaqV zI`yQB1Cr&0n!%Yb>YGQrj6OaDCzNX$9Ove1ZT9;{GYyuy%%5vVrVXQJ^~|UGY1&c% z8}N6K?cseC;%!B|PJo7D~Eim9y>JiHQi#ge_6E{B-jjr31yC~JoMdqYyz*59N zZa*x$D=}_R)(xU}L?ZK;`nFdM&i=I<3Lz)Ffg8AuE-!&~7-gt+ciQCK4?b2rSA})J zuE}?IU}NQQLKAi++65G_;h~x5pZ8SLif!`Ru^b2Vq9kR5J;vA8zojfITo4;_$>%3= z)h@@Yt>oZN|Q{ zGrQ>PUmsctt%I`cGlhG+rkW)VWxML;zxPnjY+mOI#s=g|%ZYK1|4AdrUpLIH^?JA) z93j;9O8RP)!svUgU96vTx)Ph21(9AsF$>YEZA12H%#eV0pRn0I?t_DyzqT5Z`0)ik z^5lGR8Vmv!g{TWPt#3%8c>FU__@HlDJlVhBNts+D4)~iuzrwd{3HW>Iz1iU}wmS85 zn`zf1%z{lh@ov*>61eFGVIHzFom< zhw|I}s+38-kIq@ev@93j9lTl7!Rc1M?k##bS)3y0$R|Wi{FAJdyu+@wLYx~|p0pF* zr-yYAOP+|@(&2EAE2`ty;js8JF(U}gYS>iVXi?bbx5ni5Su3%Nh}|^N$*w+{|WLoc^uJCht zQv>ioBZ8-+2s=%yNg4cjffJwTDGxhOk}tb|3uU40mKK=>_}{u#VLjEvuwQ)5nAUvp 
zF&5KybERFcj8um=YS5LRdWh}y=0sDcqWmh>`**InV;kw4Pl<*OIo!SH+R>J(Jht|9 z(K-2$wqFj%JCKlOP-*AD=%19m9iEwQBiXP{)^&U~Mzr3Wdkd}Wsh@mt(}dnua(L+L z2g+h5d1_sXv9H6*K-&m26WZyvw@mWh*mzJP|2oG9qKwYg1s7bsErr~^Ta6OIl_Tf3 zXL70;OU_Z#M|*wHE0F6ZUx<IBl^TC{vj!jN52O(d-O-V-T|G6Y&17=Wnp<=SWgRJQ zUl6|+1EI`=hiiI95+|i|et~bA`aYsxUE6C8DfW*^HDP$xY|6&dXPL@3t|`}HP$LBC z=7wRX_=P;qaIX22vSnIHx5bnC5A-cXWQns1<)e@ex@@uy`LR6T&rhG&<=6E~{3NJ2{OrX+<;cUU~F{l(o84pImbOD#yz>u0Yds#k6FN-(4<84e+$x zivHwU&tR;2{me4Z%AHWv{j^$czJF}9LY9v-AK^^R_O#q7unrspZB9PMyA!N@9UKBz z+8T(C)!HuzY_T*D?Oxk9K35rN_|u%vwE%p^GJG4HF?V3O!^j|$viU*-zuV8Q?_*sr zzacQ5;keZNa?l#eCXRXE+)JfsXwTTr9yhCM!SHwh;>XPK<&%)$BG{yKj;eQ`BfpbP z7*AQ@@3{Hv4pt<(-4EY%XvaWmoA_sRw5BRW9#OHgDB&%L+cK0SPs4~8LU&KY!gA1V ziCoCbiLsg?{l)4|d5GuL zu3PU_`?8syrs7!!m53&x@%U1+gA0}&yIQp)drFd%!fbZ_v4nHyHp#lWe*1oTgix}l z!~6kecv9Rr5MRa7 zW5#t)yLCar_y&nZMi(o0vD4GI=>kH!9<=V4AhG#1uiZ~Qr6JZm9e5=297H0+ls@G% z)nxkTv6&dzMlYsBoxX-5yW!qDXo$S!Dq8!qr2{y8yET%Cc))I3v77BVPnU3V<+kF0 zvq~DDy}P8}{r1l>f-G(N8uX=3Qqgz!h7IrTKslfTh;vpF8jki%6%A><0(CHICurc* z5(IC<6B%L6RN|*;X;=T*Y64fey2Bt~je0WLBj$!-iNRQJ7Kapu#9`hpYjfMK!#&^q ztSWCPQ!EPo(szrK+S&{A-(-wcHq`w-f1bSfW0kmgzoO1)sn1;)l`Q-|zMz_&G@K zo@}lkwtsijeVImn!L(4%id?4&y`a@o;~|r}p?PXE=PI#(n!oSxnpr8cBn6<4BbXpL zdpY466tS$_ca>K&d=R5*SfyEg>GlOPz=Kg@V=}G#>e)5B{SO})jPM+Y7+P(BGN9dX zEW<8lb4~a&D;A|j40-##(w*PevCtjMuzc!V#5cY*)Cph(19?pdv*|D>sDkFtv#M@E z>M*FCHxtI2KP9|Gw`Zl|0LD-_7>db216y;Vd&Gv&tV_8d7liaa>JfaEJco!Bp|KgW zh(J7qn0Xi$79a)-1o9qE-hf5p25B3=dlO6>-kI`B_tZJ=#Mrhp*gC7c0sTODp*dLO z^>o`auV?bxH-CX^l5W9hyO?}%U1kRAl;zG91o!iqJd~bE*mVRsyMXgU<(25Gr*@Y8 z>U`-RA3ax!#}U*+@trz~ed1xasS7$h=70ZUX}8=c>2q8_ou$l3VxJfq|WO8kZo@ z=O094^h&deI3;eQWM9m-RQpi^wA<h@Z`}{9l^i%x#L>Kz(NoYY34K^>8Tf=!TL=IV+PjpDJ)e=RQ9}X;#)C2f zaoRhEsQm>>?D=L$I(!G5@%U={?VqfS&1cHT*u#bG>-;mSZX~`8FVR+bKg0+*zr2jR zE`DY=7NBK0g}LKkO?Xk>D@p|3QWy*lW1f+q>2eUm2#EQc%-WrzFd*oVs3hF$_Ds86~O?k1kXvEyhuy9CPygz=9 zTL$%XlH-;J)3EbDJ|9halG*vWCFlxWJShhxs{>&AjM77P0d<_*XV)awo=QnOcFoEV z;MGB`0pcx|&Kssx{O?y7z9UZqJbWnV#c+J~8n3i_$*G@GOO9j@={MMdQs8;U#w^Uo zD(RKPI45cA@fM+b%ALKXP;x-i?g(@vX!{0Z=b(|~#Lhm{DP_^ZG0q#Vk8$2+3MF+{ zLnf%8z!t!DnmbS;&=iXePCul9>B2Yf+7xjKa_ho9`);{mtr39-y9BSBVln& z=)S{{zAv2Yxu$dPtW!EV$kuAezJT?eC}XX}_e7B!=2RVeeddyDZJy{6sDyo^WkWzE z2*L2Cu~P{(oC(GntwZBvPSt|r{Cnso4e|jk{jw*tRc@OPbqVeSBZpj4i`{4emH7my zli4YMZuZpe*nwE*qIPS}i<}4TYgU3InJ3^#rk(CES3VbK?=YRVMisT-A+20_H(SL` z)#c?@S(p8+$26-+gOPEuU^JjU*OLs2i1)6B1vTnM!VfDS?qT_bTrQq}(Gg z2lyZJkqO(kjU`)y&eA8lK6jRGm!H^o`QeAc-PY~bEdAU-&_k#c!yo&pYd&Gs%ID9z z%1sfr9n-@e_t(tSS3D`_tl*p983dUToBvYC&>q^OuS}Bx^<|ZHW8!--Tj2kwa(`vXA6hF9SpAhh1T!~Ul0oezSdAKz zL7ujK*7d(SH^u*E#Mr+tU@&51=DBlcmfyeOpw+ES-eR^n zr-;D3XlB{$L^JlvX`ANM;lLyPoIiJ(nU^uPb4v0C%Fyu+UG3kWO!4GQ3*9`aHu$4Q zw*W1PifP)01{HZXMXqBYMK>g)9mkE==AlAxNO$^}lZ7|vn#%@BG}ibnmx8UNm@B^z znM^JD3I0L5+|qk#03xKkzhYuFR<;%zBzDK0Z{B8sMEA zAzaG*Q57kNddOresHxw6U=(T($RZrjWy)>Kx8y+g`RraarDc{_@vcP%nGZlHWWZrjy^kL<3r>XC*<05tf#vuTz>da4}}) z=wF7qBfDp{(pp1&R7dslo25Rh(f$4GpW#+|(OJo}Otd5y4Hj58P0_w8Pn3tbdmL;$ zQ(N7|K29$hl{y*uG3FZWa~p*_eZs1Q1bKNHX7k2ZA>yva57~$ptVOZJ=$7Gfj^4ue zDKgonnW187gn3NF4%02dUzeh>YtI)g2+O8fO-o)%nIGt#mqcClynPq?R3Dgs>q-Lo z=Itn=`IMyMi`Ahf@WFkQBP9C)6!DIBjv2dZZVQn!G|7Thyar( zDrK01>{cWxw%KrK8?&0c^3r!9&yJPm;G;$j*h-U?n_hcfYx6jD`$CVo|BawX$SW_L=BXD*llNxr-hPj5@j&Cri-9k;J)uWf9d7?AdM3-^yU zKKD5FiXb&ctbCqlndA()4p7(L&c4L#M!PdCi<{3|M<-+>p0TxW2v*^56EG4}Nfx0I zG3d^u%SF8idk~9K*V&7SYkF5IXV-b)m3D2(?q$W(He?aln3S+pbrQBRh?gm#!fGJ}_OiOupxD|ob3s>8Rbv(Ng5jF^+6{$a=b;rqO8 z2~pmZw08mYs|}W*v2F1u-PzUBh8yKzRw3snzZ_}J#O%_!dxp*^Y7Ez=`nK;|_S`Ol z7w#I`p=cUvmQUYLVT10(H8ljd|^(JSDS2kfWN`g)eiP 
zn)fO%YE1#-x{(*>>UVN)LbEs#Uh>Y?74KgBt>jD@Erq&;!Df=1pw`hCItnYsbOr~& zxU}`Knv_B{ZA86ZpidwWn5EEIK+Y}iCVAe`g_njvVKg|DbA8^Zmpdk0Qj~KGS_+DY zfQV&1T+l6DO25)JCe=HgPA5{X*IC9-23UYZ_a={lUhbN{Le*$4wV%$Uzd7wEQo05< zdbV>`in=wul$;LEQ@8?7gg{fWYF+E;#o4UR7TueC=%%F9L_PqY;D6eKb!*V=7Z z#L9*rCtmeG$u`3p*&A*Efb4Dkf*a9dPGC% z1%!L{pdOys57ESnt9tpwC56GDeOgKKLIPyM#CAYM@UB(CJ}RUgkvX=o zW+bOztu*KZvQ&b{^*c%Sf1V`Cr;X*BZKH%idt=CT0W@P5`8J0uY_eNpK#T9|kLOPR zQ3|J!ior0aRq!<=~Z1SMiVRM-zCepQnBu&Z570H4;8OKnQLqRXxz&TKclWBHs((A&-({j zELq+nH*x+b+(h#`&g%19xSt64Bbg2Ci!vWeN&8!jfwbS62QpMppVV)IadMkb*K`x0|E?=qC=_Ruz$ z9Dge`o`N0#+#yJ3-YH1|4=W+8+@OT=u*Ozq-+{gjbgbb{44!;$Cc7$0olr66o#+yr zAz+6q-=u;pYoihn!Sta4#|ghH3&)HFLdJ)-+cm^^af%XqDo@m2bpsWOKUmcSJo3ZR z5(maeL7lH%5JM(MxjXdulaO%9o{W#+Er}!9T9)3qD+ywzF`Ra}PuscDZo5)FZ7+DQ zsUaatI?EIjDZgBU-St@JXtJ8G?VF)_HUHUX?p4ZlnqpgXk7edyJ|aBuxPEy3C_gb? zMSw6(oI$zq)AO7At@=uFwlCdD{|u=assJ~_u+FrL+pyx13{(C3A69GvF9%xdSRCz> zc`lQ0qS*{XtAv`(uRZ`yFUno>TNbE28d7lXpA6Tciv+B@3F%?}~`X@EZ_bX{gvGV%ftc2U9bDJl7K-&M*vIhRV{CN>2K z!9IZ}TJpqVgL|G-Q%oFva&B&sknKUM7E)pqFef5pkl@4S=Zhn-T5*-cnNXU%yVX6XYRIR*w(i6hwq3Vwy=* zTo1kKO4`&1K!t%kpqGWqe)^EBQ2~0#6h$}QG^|?%80|pZ?SM#q8joPtHiM0HVuUY@ z&GL-7B!>z~QG$QVILv&dN$%$=mC#Q6JLS9XV+`e?(w)ULOy){(-G!;Q)?0y^E%mRA zF>!fbQh`yDUF&DkqiYgmPe&vrhmnr}9<* zJP-TR1{dq`0G|%2i)e;8)dlRCVhEt;^Ml-MSyB{jf`K`>RVY44-EZ4B?btRnd}*wj zB?@{;dSkv7Qz~3?Be$q}n7kEy`O$uEKfRO<$ng>>iO#-N|C$Y1(4rY{k})%hQ+1~% zc*P9S66boPhE0)u7?BhK@zUN0tf>e5p+?-J3&0o#=%W7LaWXUNB&Q3$pC`us$=3(| z9n8x}ui&GO|2^LLH1~T#BmLecwb%+ri$n2xcCi@eHso4)Qy!ypIs^tif~YQOIV)gI zEm%PybG;6yM#`D^Z^?b#ps1e9ej|UuTTU01Aa*@r(rmKu7cwf6rnugBXEF-E*9ufK zLz6yz-Zs}fUn6QFJ(dKI#&^=0(kA}zHJ2w?A$B$Cy7o{BH?Nx8HNJ^+^?3g|*!Hi8 zaaoCu4JBhCGTx%pfZm-BRTB@KNRf4HGISl0O^1FBl8i4%Ah?M~$$~OMp0CgCjn}%E z7}7_mCDPnAbspQo*G~%e#u+nD*Fc;D8FvNoTlb_9Yk{QR!Zcc^0Le_ z{Y;c{pTpyABal*V+b1%nYX}stM^{_$Q49Vcm_z?7szCN8h$8}CF13z<4xzWVI(K#K+QoDVZx7X5xDMF{w7e<1+? 
z{qKhOf3+gQYgeK*IhVt2;W;f-X3U*l@Q*pxni#w}btV62Hb+|E?WktzKhC7%9W%@g zaP5((=#pBi-N(&&Gww-cccsfbAG1c~}!i&n0y!&d#7| zn!vpA9XbtmD2-`<5fA-Zga0L+ZWME%`-Q=bJ;i5F%B0plA7l4Lwiq#NPcVl0L9=+b z+7`XIdS!>X?PoJOs4}A=WNqcqRZ(Y{Q!nv*KUg06-TfhO~C;Dt}c3J>~REccjvr6XuGH-X;vc^hhqT9hejP<1+Pv zycFsV$oH}yFw6NzUGQj{&_1VV<8{qt5PrLYSKCEY@{}UpvLmjG7>sEAD+TFlLs~>J za`bU>%}0~-9d4hpV-DA6(KvTU=akwGS&+r8C~BtBTVrq43+Swpazh;MAR+jp7dp!1jVxt}aF(YA=G=o>XCnOn4U0EoCGY9J zGO}ZyIrB6-v6e@C|CWiB|Byc1H~2}jA@H63wUNPA+PHt z!on;Lb2FIj^{^}pE-+PzsO_|koZBUTGHu-km2dC6b?V;{;9G6CC%ZXgAJ=a^L6*NX zBmYlisbk0w$vO$|okI8=as2=MjeHMz>?H2;=ejl-U{Ox&;p4R}+NkMr zm!-#lOpAyUty)hW>F5|3r>wuv4aKuhZYrF5S$4(0aK4ov1!NhAlq39>Gqp_Rn50lL zliipbvKi1UZKc?Nr(i?KC@31V%{O2JpQY+y`Il~#OscNx(_p6iOKV+LUOSUb_G;=A zf9mqSQ_`_|)m1gQJWHzXs36M^#MT1&Y@Avfc&qnmIZX4fO50o+6Dd<&<&ko5l(}JS zaKmpjoZ--^@8Z9+clgniyEB-VPjO2|Ubc+t3akvV2bZ zXWms|k1OPy^wwy5?72kzV84a?a&o-@B10mOfp-0hgw~9xuiOCFQrMRek28nTm%y-S)VoBx^_o`(mq+WUloU~a-Wfe~?)b1L<&V(%ankgED3IFB7(hwH!V#Nf z%laF-Mu8PxLFoUe^Jp}RcO?GHyA8rA2LgVecZt4x{Hbbd>}F;W^wXrFlCc$Z zUs~S=&Ed(9q#5!Vp62S`|!)3Mi7|P69^5Bx0srU|ZjyY0nTzwj`~9@S6ua9E4k-S`#2UE?F+r=?XSEz2^|Er}Gw!+2Vb zKawF%$S)Uh#>Uebo~jm!ukr4epSWi+0@?-7Lpj(W3@gnw_3i2<(l}UJ&c71^nv~QJ z5+D~g_ADp}I+3Jv_mtfEve)R^Co7A3DTqyZ{)}_iTsADviZN&4#44(Xf}uF-`2E;p z>eQ?F)y`E^=2w?B0@`A>s?q>34J>cYW(n!y@k9B~FZRvf3Srsr4=)=C+Fly~Z{>#ezBeB~}@M00Z zZyanh0YM3ZYkM?^fS_gq89uol@zbp;d!~9@#}%s0%XI z1T+-%IDdW@RD`_jEr)e14}QN<;|eryQiq$YfnG6ll1%rptZ?_`brZ%_h`=POit(#w zSf%XHO}{N`oN?XVz_FJftm%ez7GH;@am12K!jTUzx?-q@(70nK&}mp8rK!0_>OM(5 zR5rfjx21cMp%7@-Wp`nm=z zl-`J>oqGf>MDze%tR7MB^(oGA4_@F?J?I;y;!N${k)97*D(j8OZ|3@}`9!p&{B4EK zD&)Dk0K8zyMQU0MwH~_)&s&5zU;tRQIIsI)E77XpaAL-y4rflK!;8jE3$y_$A+l10 z1C6LEY453Cyew20eDH;)h0orG{S-@{upf3XXw=a7ZuUz-gGH}`+c;yteBB9}hQ>rd z?@1b@YFYMy&Ln;t)CR7w08#q(!y5sSFlmIfalxIpy)U}j`?~Q7N*a$!KAe>HbHLps z|M;YUgd_y%OE29wZ}O_C>%kmp8<&M;3S_Hfwm|=EC~_av;p<*vA1EmX+p$xw%7!f^ z=rgE+Z(cJA@NLaOL%!A1?J*WBouV&F^~gWoS!uE@M*Iimr`ltRhyqFsTI7Rsrs7o$ z)g$0yqkQhY_$6gsAbY*`iJj|P0rFWNuH-K;Gh;HhUUO;Do%rAWEWSvEwIRRW??b->dj^c=@H+pPOXd(J=JvR6 zo`?T}AXc8E2m1k2~p0MYWu=~dUkff&FN)q~kV>5P+;1(FR}uXT}SjuCwZj*;o7h-JHn+BtB>jG z@4eEax~Kte;%&}7p#KEDt7Tc!BAF^C(a%pYim!60>3T~u)x~gec8=>_4`*hU_#1*u zbRukN5TD;Bl?I3)@c(i3BLISROPqw4-NOdE2pXeR4}n8-t?ZU-5wMW|#JugwwQ^Hm zy!7}&$A`d7J{;WdZpYRT^EBuw{#N4hiC^|XGL)F>x7paA;gHF!@n_ljD2;V3Z$BnR z{g^aX$Ue)}NS**yX-oxQ2vyMbSKh&1Kxsq^N)x!8NDX?(G1Sh{D5wz7t@gSatJVgw z%mctsa~TWD1*CR&zJT{V2wcM%Om0_kO|L0RCfL?kV!m#OG}w6^L4C^`ejkgn42PZaKW zEfJp`p}cT4^1E_OkAOMjYr>7zDrOaEFao5q9>_{nr;!?Zw*u&O&Vj%NeR5LIgR9p} z;A>Wgk>7}AJbC+_29~rz3%S9YX~vs1T*W6JCVf!r7%1ddX$l+nYQ!%AArragc+_>j z7bSmqSg>eiBBGUV`VADEYBkRA7R}fVU=`VTe$pimG*%G?t=D?xurXPo!XBW$IL`Ly z#65bZn#ae3DsDYc7}YD-p^H=tMjMex@GjsLk`V*^kbN9eU}r9P+atWA)NE6RW>nc%Z6s$-nSFRxW|$)Q_Y$Ze`A8 z3lUz_;J>OFo(`Px+dNg z9Mm#wv9qF&Y}DQ|>20mVZAsJ>_6Dggem9=#LEbA~(n%i6T|KeBGqsAOkr~aINZ;Z@kUyHxgq$P7bCZ& zU1yxhS2%XtX-HbtzLP)^?kr!GMT|r`Z--w|MU;#r+?~TLinzHAzj#h1$7A@v;y7(Q z44qdS7D2#Q<1|&ES*7whYEj&l=gmys4S(#S)id!|84qV!a0*THH_=Pph@rD$2Owbi zmXjxzFoU4{J#qf$TEJ%nVUmD8isCI49n1q7lV^|26|-Jku44mTgQ^l$&Oz{wWS0)= zI$t?)%tydRt7Q5Q034M}V*wbe0L}(?pP%DS>^TZ@G`?#L;HCfO1!53m>5&uet9z{K zKK5CW2AV$legFO)jXoy$h9zcv-$5)lNoG)ifI*}`buh&}=aFjD*>hc&<(1gfY5xVb zeKD?oiu3SH=0UzMl64j~nYA0I#pe#O$KfCZC94rFFUCxbXu1f%u+JU?xvh|li}THY ziZhoUEoX1tl|)TXscP`3o!VL{@z4Be>ZkHMv-+1+BC|is5RjdMugJ94qFwn%5Wk|2 zAGY__yJ2~yvC#{_1<^oNnJ=NtHsqH}coKN7F!EObxp=&WTf8feYovB`bsYQ$VDV+R zXZgIXpdA4mBjRq^nR4Z;YzbH=MgE{Kz7(x@ApY2AUjEyoi!m||V|Q5VD-U`k-ru5r zu=&8H_VQl7J1kEt#H!;fh6JKj$);d5!j*)Tnw#Q648&9sCN>v%xPp3MJW>^Dn(b(S zuG*6=!Qz-8!cYgVco>044_$Z|@g*VQgNRUtsej;+(pT%Y 
zQN85Z?~;!JKsMtK#K4O`L6#@^T{oNu=E3@-7TvBH1C-Nt$1Z}!{R9ES20QG>&9@K3 zk>`z%BaS@BiQ^I3b}6_p1&_#B@;o*0+!cKbJR2nOGt~A{O;cll`itRQnud>)LuFgP ztZ~&X$^Z6ZzOQQND+g*KdQ2qS$--72L>%sipegK+Om6!2>d9(FOvjhWgt-OcRi7E; znb*NLE*i9PKvyAbks1tLMNdGu2;h-73@F&X2vBo~JC5uQa1tKzd@<`>reCAw>`#Oi z*RoPKV}nPs1EJ?ENb5ppn5SK)*&;7@cd z0w^Zddc0z5c*ut0t9rGn9W;FF8V~mxc<)#)(*vKl9dj5rRrDrOEi3_DTiK)LecSOA ziusnkUoFQs$|&__^Q1yx#H#Sy5rnv3Fy|iQsv2`dQgA`hXDi3Ubx^-Of=>94`fVb= z?duJJ-`ETunf3Z1l6ZIH6G6$6j}OUxzdvHfub&jxw(wNrxj+yJP{nE;ixXxQFz_>( z#D*UX@xsF(W%YQ(Jpu@baQ8tx;g3>RqTGkvvaJ^J<6Pg`A!?$&A*wu@Icz3Js zGP|1bMUH}G1gMy1w@$$_6$=R-p%#h&X`~9k6UPk#&L@lq+41JzVoxS;nyOt-8wb(x z6>eAh?IiC_CPO>p5T2QR8Q&tnOPcq5MfRtShdO?#=Yg5p=;*Jub6NomlXfJ&_!bM? ztRxbC8V<9w7!Y;eEYSqJ?igc&$7?7{dkXq^7`=0mY=r#M?u(n`^iG`%&$_kR{+0{ zJ-BG0nQTgtte+TcMC~!Yh^Hde?kZPJcwt^jjOZfr)mQGc$$*8Z3NAI{rVOV*sQ{!v zBC;A!1HRbI)31IS&uPEpQY^*)xkPOC)xfusqt<119^|prdUZPP4Ckulge|AkHLHgIEq6UojZ+zYHI{&JNByy8 z9Cbeahq3elfLA$chi_vw0=DsgYWR1t2wk^tf8(6mB^aPGo$a>By`KDr=ar_^*DHzc zwq2P%f9N&oQ9x{6%Dnow(`Khf_dn)7*Rgwg=&ruvlwOF9X-UZGXxhYpPs?_iXZ-Z{ z^o<_ZOztnF1RV+AnYj(xe^s&F?P!cVZeU0j<3PhNv`6MC?-__{?21TI^x2_K=C)JZ zC(8ruX~mcHDy1AfM-yL}sEv-5I zj!xB0rwI;__kKul>;?Z*wKD#Qg50fTS$t>4fDIug6-u|mEL{;j9+i18l~QWwTE`Pf zM()l??l))7gGGZEL_~qeyj+hTOwfFVYl>W+nF%XxzVHr^+3$ z`WxH_%?np2d4EmGNQM;YFU!eZjsl>?JweV0Qadqx-08E z+jPyyNq6;mmLdjrNxis!7i?Q}wBb=-Kp8G{Ty$^@@FXQ2z31Avd0;+9*a(a zylnmC;*I;b*VsHlHxELmLC+!2Bp61GUYWqLJ#;2B4!t^X{WgF=YjvCe)DjJ z!GX$ASv;FmBd7~<#6PQAHJnBpI$UEBvJtz$Z5iaZpDm+(WcCPinJZFmN;{-#fy)p% z611+DA2XadWJ9y7H)EZPX=HVU=ESe+S++@fUZUkMdgtyygD(@T!7d@y$icRdx9579 z9RlL3oDmsS1iU0}4Q@!jYQrRB;2-`!)-VM8z5_fW%a-|mv%n;pT>^Il^O9dAg%Z*> z?^n*HkyR*Cyd~yHkOgtqL>(@Wj{04Qx4}N`cIEQ_W|#VyE)Oy)?&aMl7A!)AGQ=La ziGMSvhijF6EJsF{$P*i4_qoduTWNf(^W!#rFRepy%S7HQqrlg*c!fP+J>y1+90#hV zLXvN-gh%L)DDu_)(Z+G_=|>}SS-nk%en0oIcjX1*uBfIJbt@h+48ACyKD4nitxnmS z`g{}H+g+c%XcD8_d~PMc5aDgy5Q*1e7Sg2K%hdlcE;LYDP%lg^G^&sTQ~(&NKln{w zAf%r)>$A{Q8lc|Q5^%vu(CvU-F&jh}MQPQc-S*`kfrr^kqAwGeIX~IYdsbKcsn_RE z_?UOjjD&F^vgtLhKUy-K%?BdtR)mGW>7rM~Wa-FL>s+~GQ7(EF0FiSjS8+QD9^oEZ za~$Q*K@B5T;-V~+9d^uo7YdL>y+9zz5S1it!6hAEH1V1Oe2PuO1^(_EOy;qh&tKIH z5=QD2YOGoWTVosKJ>;{s!)&T;yI9|GB(HNETSR;@xW$mrdrEPJK|v#)^5Vx{K76y3 zS+_z)3YpFSeg-AR0d@uW14Q&h!Ar-EpMi^bb8$UfN@=)kax9fis=^_U=zvWJg^c?^ zJ+h@D6&B)pM9Bv00*z`k?k(C^s1`}Y#V-r?HqEMOi_OSvAS3NlZ!Qgp&EZWw(=FaB zzWd^f4dWQm7@PC-B>&lwlg#$=32|4M2iwQ?H!m2w;xyDHl0_>-IHVF2nXAu+KakNk zNOc}(#h_idZ2*tPy)-VpGGts2yCXJ!u?Oa6$a$U`nMDd->^Io_8*F$#EQ%#F3<=ig zPl&gpeTp;V#>w_%&-ag1Y#DcbA{pT=Kz1vM`TILXSx)BNl8l;qM{3yX_eUICTCcNt zP#sfc!k&n2`{p&6cq+E=*flns29eYVIBpa0831-T0yAVlLY$CP&Nlo4-QeY`mKHC{4ntiITew$}+*e*24uOhUk7DdP({Gz2!h%u)0doY8CHKx4+?2+Tz0A<+koQ*qyB!uTD;1#lr)m&&^d z>3J&QKrda@?#&Pm{9-knL6v(8VKEsfLqW&J?icS~R;6TORIEm7+fDsq_c7={XoJS4 z4Oj0(Eh|~F$mE9JjX2rfrPo4!N^qBkF8b_;0Y2t~D_^%?;?W9iPPyd} zkr>=6>92{=LQt6R;NhkXJ>%N?AcMsMAIuI+tbX=*I6(14R1t_AaA>@QOZcEU-p0v)w^ob&yI9OOs2$|{@8HVk3Gh-&lX)j zBYRqY|BgY}?!CfkTTt1T5aNjxgb+nlOE>q|kgSS!eMmgm@Kb`Tm@mL&`-9_kv1AIC zC*as@7L;Lx3$Px=2LCE}4>r`+0^X-j^*$+FTO!)UVGg45M&eOeUd1kk&`$yHc>;lw zrj#hVlp>KvZ}r>_+64Sn-X=KsSO3<_ zL5SBd=^L-Q7af_He|c5Tqu6Vg4vg&lOFxPM3(QQB6Ps?&(~taf+--rwPoVv#%Qog41LU*?)TT5#u@kj|lg#k`*t zjyUFtWlE&M{$f%imwW+U1HkBONcw@N6PaN~3;H`?*So`u8Q0I+D)&H)RygTmjohKE z6gs)>SJN+k?jJ7Q0)zv_qaR`7_bxsfI@8i@6GNjyQ?x7LOYr+qfqmhV&j+2TgO8ktq7RS(<0WG?DRC4pUH3l$ZAZ?o#D_-B{!C$iaPt?RyR(v z9=1fO3VJ!6%pk^~AIK(z=HTT5BntoPePunKp1s|ugoyUa1i&xGE>8&_Gj)#dz z7vi!Z(Z1tR$;;qhWs_kOkE01oid^NEDJv zf!Q%wIUa>INlBY}EYUh?5pc&K=vCPe^8q5kbx&Iz!l}edECy*G(7lTg;-h7~ucMY< z5$lYrtty05Yn0|#qBh-d+K3nweD&W5tqoY=Sv583ALC~2*%cMJ`lVA=+mVT1Z0atM 
zP(>KCN7fy@nuIuR_drsD$N2QNhlUeI2%U+|1!Am2B!CpuHu=YPreH31Oy!azPKbr&0t*4~*QQa~7vhL}`LBI=nbCf7pBZjeD7xtOX%(`9HQ{=U{y>bWbn zXt;udCo(GK1g?k6l5p*V_9Lmoq%V#LsRqi?$YZx6XhhQEIbk-~sAKNOZD9XMF9aXI z1TPNQ9?Yz8+sEHzm&=Y8af%(M7s$%rROdw1TyX0>`zHGPy=AUvzU(B6pl6x5UDivb zvUv^iXD_ErNSs$DrH=7}+cI@J<|EO#cOg@*;5Y?6g(W-Q&Hyjkg=0Q=QI`a#pd_-D zKliAG_Bs0fU4@yo?aJ+GS7e_gL}#F&|BJRSk89%W+U{nnqGH{sqNdcnY()gzAT6|Y zLByqXLr8G}l}%Y?%d}R(iU@T90X4N1s8tM#f~=z;Ae&K80t84AA!39;0wf`mneW__ z1o2tk=l$OAd;jatD$LA%mg`*SI_FlmsmrhUPp6vbDh)DQFTC>rcN(_8_CRL+=3OVT z?zhXQxd=R*#XCY6?W`QSlF-KSXvLFYqZX=p=i0vN7cU}ob4~%`ldX4;vmRcN#@6gP+VDNl_jbx#4cnA|tl8aBjVW6sF*WBQld(B4TQ~^{F3g*< zy}GjupNe26h0;8NYFpztoZDkl7J&2Vu@h-pf=2&d0|BV8$!>?6Yc0qOMJ^3YG_k}F zv)#Fv!<;A$A6+FeF~Nr}lZtiQ7?RA0dwuT}HkKJByfM*ShRvS!ofymh=DXL=7T7p< zI~v}kXeM;$$i;{TD-v&A39cq70v|0X)wXAuW%rG~xdn6#mZ&7yQsU z<8#$hI#S-E)J2J#7OVJnVht4W>f>+DEq-Ocu$_g2L*Yp(ZQts_S;MbZiO8+JS986Bu|?P{-YU6}XIOIMOt?Ff za3wl0p#^D5N4rGordlpM1?5T{*0ucRHA1G0EZMuL?Uvtl0)$CUc=U!-r8Ud0^A|1kL`+_l^>gL@EShVUz}Bg$;iLTq$0>*i+z+!&`%%kc430)1f7 zsJ?~{WulodpixyGD&r|QB z!8{D33%hjW2Ja=MPQ(|@-$M?h;}3_pSCcvHE#5CKw5p2Zoh8NyY~zdEI!;NpU_YuQ5QN{>h6~;<)ii{B?_kwz7(rjEyFR zhkwZP$zzZolZE4x%5Xd80pz656ozzcz(E=?)bzm>h~}2+%SM)Y*bKwPB68)%EaCk8 zxIeGT`XpE(!iP0vB;#x>7E2>RnKD{|SX1s)Obv04wksV4RH^nyAl_6V=roGOYDOSO zU^7K=pH&wv>b9ybZTy{kZsO1BLmhXY_}W@5Y4lX`q(e)GX`uB$IVcB!^fwS0FF6)F z-jy%i+5e)Lt^D@ZjisC#;eFOCBYr&TVW$5cE)a=_gz+~ zmg!?fR$db-&SR>c{F_Fy&CB-6o=AY+oRZ&i7C&9Nmk{(K@)n}O10DcpEJM)|a1-kB z5z!!M2!0QTDUi{r$yBj%MC^`d3RmLCee%9E+2it6N%3bnjXx#SSJ*alBpz1QMmMLm zkN*9>eq)Oc?kcT@Hac`BhWZP8)j@5=Sn3D3m;0kIyO3En@qO}vM1*js5iaN#aLd5b z+6lf&9RtTR9ay5y^t&)-_P(ZMC!HU(<$q>kRfY5@}W`qN&xOIeIu*JHg=Ap>6mL&ao6HLLOS9A39$`a=S#|&sSGVjlGZITvMjJ>t>=gV0OI{>w-eA8EsLsNg;P6G zTnHuxXw-_Z4isBPbPL0)l$C$td>>bczoAX84Z_EWB)#to{oR)$aP;PXDa#ecIq~YS zd50_~?}%TSd+5F(pJ;Y)D8A>v>b2BNIPvlu2YR7DKB+XfBe82`VDe*|YV*X-B*%a{ zf!U;oST>%QU8C44L7$Zcp+J&%aW5%;R~rhv;D93XIv&R>b0kG#J4%uA=MyOhQ^jAN_!PFa78FLnQ=$UPr|9tV=$Sl5 zBEuz_HG497gSqqT%#^u%}uSUuz8*?hQkY0~$mI_YF2Mwz3Ql z0|(TRv(WX~%FjI3QDfhh@@(Rf-aw{%v3d@Ew`Jv3qqF+I=+Y?{(UAfBBU}8Bu>7Cg zyM6h3^TJ@)+)qD+j)W8?9&P2r7yB`@{}AD`+gR3xt^g4(s*BwH7s2M95Ik4Lr}eor%2C{6Uh>iRz-EHKezNpj%wpHwGBdYH|yEm+oBHKF~** z&YQ)usWvv}t||N`3Zp$fOoD?tU%|SRo+z?-M8vG!o=BepGen ztqy&myymRzgPNkR0(yWswv4@~4&WW805{(hj8`yClDCtPcU*eQ=_-*~$(XTNgq1(T zo7Cb;pkDk+B)-F35s{~!XoU)k^SCrW4&3U0tNlTKMb?h8i3N&`{%*#ye4nh13zDfY z9QECpe=t&AJdzhvca6IJ_q`&BKO!7DEnA_D3#MjF)_-T#&f>yxp*YaWhCfw99Ke?z zK_~Ra*^R;C(Fw`;B1l2q8pGC}Q0IcU4*3)AoavW13_*6;!#jUOoMKXDQ#d<&dH1~n=eVVtHDF+m5>^r@CY=j?d34^Og56_?8I}f}6C27a*LKWT$O`5(Ylbpww zm*`Awx*8*fa!Th{cgfYk^KifV3%;t*!92igOVN3Fk&$BnQGAPO3Waio80v6V4H;$+ zbV&^vc94dBroD8L45_n7vd47b)NcKN%EtILUWtBbM##&W318d44& z`iW$q=TWV26swqZ3Vi1>dvr^cgw0xCMqIYYa=usaP1U18VY-KzRp6isE7Q~oIh|ay zql?LBsx^Q(Q4aM)In;ybHdyrqdFSou+>D~L7rPH|sPNJ>eUs%EF0#_Jz9+IZV_s23p0)kP}F zTU^-3tpx*mhUvOEcb+HmN}(jjE)5?7_ZbizU}8_qsh-_{1)vNcZn77#w>DT`&vOqI zWIp{aFODPlG_a%k@j&yB@db9*$tA-tF$CBmpy}~cn}e=f&3Y5_m;N0u<;Mz&s%JcK z^RI{`3e{Sdq1iTm2Y0>u#RJ12nk{TlKp{HC@NTyQ-~24qK&qqd1+cD zNyf1#Yq4{k1#USa)eU zC_OlI#(kjy50~dN0QmEYsC?^f^{dZ6?4XNi4 z`vyuq<*fC;Ap6=sd>;6E4a$M{DsNCqtR;mMj~c*Zv2XrYn@{A>@&Hr_(>R;afUAPD zmE4daXqXaNLu#Mo;AKv^7pz?eof0r*=T$R=x&-tulFMk!O-g2fDn<5g=yVd!10ltsV7VJ~4A3(bplF^*6IZ!Z#UY3bnR?3kfkExg)}f z@;g$7&!$_@Qf4OgxAMwvU=?~;(%r}~Cooe^D;GIc7PRO&rQ`-yrx#G$QPB`Z09D>D zJPHIjIT%he()aIPJ6mr9#%nD=<*y=z_ULaS#P z>$rfrDZI?g_6-p$^!$eIvm@?MUl=A1L`H>U@1=gbZMZc03}@}Oo4+TxtCT95l3Xy& zY{F7MGl8^eiD^8wkjgb^UP4-F%!XTnOM*|K_IBc(UmW4UN(jb`nJl(k6)~rC)6bnN zH_BAyL+X_(bvYf>WjS-A6jLNt-;9;r?+>6H{tf&KoF=T(;>)HMPYA{`4E)m%7MzIeO 
z{Sf-kRgnFMQ=5sRaHhctZ@H6G&{eKVDPMwh-CCQi0lveG?VSy!yGr0I0z;xJ z0_r$h=>22?iv;>G)TiLJM9o(hVg(p^Tuzs5H!3i<$;bPkzhngolco(fK{y6oQXAB& zx`#iff=U!Ud^pM0IV)K)&n@1b6 zKO25_TPP!PFI3uK5fOHY_57)3qwhZb1T<-Jy|hhI_tp97zpr1awHj6obIJ*l$LE%i z68tIJ%2b2SFC=&ee>?~iW#JSsQ$?DHf88?&&` z|8ZYO36u=d8dY&C@c=u?OOzxM12-dGjf*-JrEsDEvm&2q$yQK!gF}FnY98)5!Vrd_ z?TB=R!35AVI#`A0;Iqw=c#P3X+oK&J(-K(f4Oqnd>v195?EXIdQ>%CTM8}_oEBR)C zqzh!Y`Q?UpwybSUNgqs&{ecfU!+gOvAEd|bivT4TFTG+Ok;ZAXt1cQ7BSS?e^TY;8bw){>&c4eQ=nC(2MpkeB{Ng9S-O5vl{psFY?UL5#@J=pP-~W~h$W zv}Dr(L(pQ@uDIdI5CN5m|M%+Zy@Ms+btlAy_{3~*3m6>7+;M(On7HD3!N+cori`yB zdVjbLn+sf6k%?7EO^|H z?HFTm_%GHe!|wXY4rd}0C=+hnuoy<%4}hL!-Xjk?RAj%oq2Y0mDeoI+v)74!bHyiQ zjSxl{G&do=)+fomxDS<^se;8ONQCJGc#uBV8jb#ln#dFaKtvkHx6pi(M&o!1{OwKF zP|9j#F%4;Y5A3t2E-^9jPnorE|HbAKwbFOX?#Yw9EKYqogNSdj9ro5g;9s+o7Rbj7 zSq5gr*0sI)(>kw<0{DmCpN`ZUv}F+oh>RRds3C<~$uiC7cglbyrV*X6g>Ftnu#GjW z6J4irTg(?|zo76fcIT-kVB zguO@NlAl^SfA*7=pAoS-f5F;`KE6*KH&JvIBApw#1VsTDD=0!^1(9T`Vi!smEvN#b zWtf^Tqh;mMV8IAA#$PC@&=PcF;mBaZ%02d)Z~RM2(&Hm9Dq{6oTRTLvXH5DqVa2Y* z5M|WpQNzGzKBNJYN2d5X(dD|MMegj}CA^)+k_U*ppz|E z(i}TV^|T^GnU9pT2oQ9R_(tb6s1hkNNAkBX3^sbnaQA2394(g2 zPh3C4TK$HziIV^q!xu#K1x{FYiD|~nUV^WjPq<=tkN;q^=5;XNQ0?%7&90F+%-}GB&|g;PzP3`71z8PilrWxdutAi{_1cZ z=ZiM-S5~v{rKb}s-&9xp7myEgZx{J4yKkPm?}w%hTYRVF-`F`Z9z?4G8qTo`$-&(y zF?>w!trJXzVFy1HK#=f!P+OwM$q*$G$Me(3Tr~!x<-L_ef_7MgHbH2bnLYZvaK37F z?wG}bDM^t7XNbmK*#dq{`{ab2@k#0o;=wRW)C51@^3sYT-}77DF2&96JT2ln$*r8k zwA`1L=5rt5B2>J7q*ME%4iIRiXJP9Qye0lXJQEqR6OFL+=F@F5x~{~1nO`H0M5^>r z7A^w-uALnLp=h10@3JV7Qy{K$*>2Nj!%oRw~aiQigI5gHD=CQAUZA<5w z^4yN4zxoXwE~p7woZkG+7_5(7>^)mm3Bn`ImdZ`B8S-mBckew-)s$TMc9mnIv`MB(U(X`PHws;$ya5gjT)N}m%xoSAx zdG*6NiMzsG`*QiP?4)K7pIA9imGFRR2TEVM&rjqL;c3s1j>V1I)U*MO0DVXk&XB=s zXs+3uqve_vL^c65t`3%P!PID2GSW*32enyvi#N zR9W30);k3O)agv@I;Osn9%^sGk8^cg0G`i5rJjQD&@^l{mgo=vf*v8Mq0324ECa=0W7w}! 
zBh%TE=fOtNFSwo5YP^68*QF`@qVijJuYS|8V;6$tRavt0Am=fkM@^7N)ViIw3hT&8 znoq^nxVswN2N@cmUid_k%oIFQ-Xd;di8xI}0R7m_0pm~otD08Sz zrvt$Palgz^P<`doU4n4j$*~=baJ^yP^AB%`D@T$31|QAbMCKh5ByRt?!O<)iJ$E-$ z6fS>Zk(?StTl1SJs7#-!K@;+l>_5W^!HE8>o;c=Em;{$FBpga*j>*C%93s`L$yZbA zufo(v8V!!q*BpkyL8S;Jgxhs!4W{DCr zGfe+ZW?)Y9hU{SY*UmBkTd^0gp@9N*=w-jmnUC~xROuCw zZ1(w;e09k$ z1f(144D~5ZVg7E|=6o^K(niS*q@=^iZPk#Y`NCNnPj}|_{uO`aN8cZ3cSdJ?cWdJ| zx3-*LUuYj5n}YkDRu()xxlBdMUfjHCloi_tbwK&Dj>BZA*PMnxx8*|bgm6>f(#v=Y z_SCyjzeiPXSF;Y(o7?hjDB&Z+N4RK^?-r2^lTFIE$4O4p@l!|bLPG8XN?XUIU4+_S z^UNd05+SvmDfPNcC}#b{F^V}FXw;ac4s{ksEO58;J@onhhcmZsBi=oW*=gYx5ul26 z=yW;5ON+HKJxM)|-|j0tab`#=SZ6j(0b0Ie9h=odDvaIOv^+Hz)Fsj_1nVwf9g5v`i^CA%7N~IRylKK2{O;3rr4DPULls z>BMtrwVIZ$1vIXFBelVI(XubP${Oj)1o0mCc88@?-#EP$6H0Tkdea!M%eSiZvkq++ zdft^U*l^&euE@ehLs%@BvqQ)(ThIPD`2c0r<^NnR zq+o_4vgO9%*~p;d&otv9aE}Oq6+mbwF-x;*GXK72Pn4?U)d5FosviuWiSmQLpV@-9)==LQJMUg#6y!ht#Qot=>uw!Z{=%F2+3^4>LLUc&@iC&*h>BW zKQzNNn@KrOUy~XyAp8MCS}Xo}6aWJ4OTb*i{&I*+`=e^MA4H`)zeXIv&zdZEoEjN3 z`&N0eHf2zMvhm=&!*hj9MR{mm79h75{9 z@lJf<_ngzJig{-##SsVtSNJ1wQELATRC1}=F!zAs>ru_X9T5qdSv1R?B14ju;aTol zO{h9Ml4JhA5&7fOa4OI50$^>M9k0HqR)9i5tA%F`g%> z*{rJlHdFlK_n%t2VMENfOW@(TkY|@u*Hw}4dEEFoU}<8%*RhM3^WVZCq@bBYR5eXW zMYH7j;FIX-T~JB^_ruenKGD#DZ^mJ{0xh@Y(vN1!boy)n*1Sx%iIK6#-|qh#Fr7E5ntG22$nS0^cN)i%n+pfs=2&wn%AwYGX7}r?vY87tw z7Vmy>F$Z=XgbxAVCv%1#<-1hZ0%n|{ash_4%m+!^6x@GBhgLHKWetiV$W0SaSV6TQ zO{5~GKj2MSvz+ca!pa4ww`#%vE7ANjWz6^_j>q_<@h7pT@yc6Io2~Nkv-l~|A1a9` z)}3Ebn8|=I9s}VbkX)lSnw1rZhBV-F!z~!WFvk&2IPy_&XLP`^q58jmxc)~y4Y2dd z5!z~yCu_+Z^_vm`k&f1T{iDuBr;p({j!$3K)<|YwHjdkUo;p63D{PtxRWqFiV2|%r z$ffr79vSVzxPPmPN1PqlKbZHk3bLRVL?L^4(;b#WG--TWAkV>~nMef>B~FK?(L;Sf zw^kIxZ-IUnIePeGm&kYsDqrF9EnFmqMg?#N_5c zJ?1J;=MY4FMA8x&v46ppqixmrq!mv&Y+E0er~IzG-bL((!Ig?O z_=z(X%u-Pdnni^d7yuZv41M-ZGy3n0Sq)i~{fet;(s&I55I|v3BlLdXf3!E7qT>RLK6D#)?j2aS9lKPX|dsU*kD-I%IokHHcehC6;wLa6QA%uXIcF-Qt z37-cQ{l_^-Rfvjyn#I+;Te=YI>7wr6;KUpZ3GtJ(Qmp~`M{eo*c-Vha&(7$O8`yPW z!X8W#jDCzAq4v8805<}zs0(%Q$o?t)avxCZc=Z`ol!U8iKpM|exaxTYcY1|9|8Ox% z1mKv>SIKpTi>Ym^KR=bsz;3I_+zmWQ1|BKxI4!i<-CAn{vFhdjXrT?2luosjx3myx`3@p8E;a!_x#iYK7tl#FWa zQ6FjSlFQ2ddFHl7dpXeY2qkNS)fZf-ZC(%@p-6j$QQZkq)ziu_ML`nCJ=RQa@UgA- zQCGiECWby2-eq^vQh0G7tb;&IdO+MzR(*VKlecpJU_-k7iM_S)xo((B{{8YIUZcl4 zIMboGH1Zr6)c4XHY5PANsmmK4#~-`93#0O#yIL`~9bx1~E^L8NsninNNdLIugZ&Nn z*K6c%on&Kqc~r$2LxY0CDxU3HVHx~(YU|IjOe$6d z{71O9mgF=^s2hVZ-a`R7`T61D@-_)R!BCT6EUkK{O_qlmmAYB{JBM> zkc*PpJ}L0q^5sPnUVo2)?%J^?fy2(;md>AH#q@lo+@X#=ZDGl-J=sO5E)%tE9xRmz z)_jNGpeEKpAx(>bSxE^(goUK-YwGe-u<)AjutOhcEu0+*OvQdO8W_DeO_09M#(Crc zQORd1ZLK!@oNx{^A)MXSXQ~3^&XXl(i+PRe-LL#o$yYP7GYhsajL4c{Kk-t}f^~PX z7X~>g3#^Vxjk>#mZItoU9Xv(5Y`?>HrTtm{IlWEL5+4a#YK#n zfU6rcTHVI};8NiDQXp<-G2Kr*3#Oygt!Dl^C|*C3U>}Pqvq5TGlTdZ7!A!k9%sY(d zli#10>H}^Tpk^1p8ept>CiIBhS{o1&A~An_nUuQpLK5|gsW=f+?^f0?Z5ER@$z338$S}TYwe93}utS?pJ zROGCNYh-^7%#w8Y1?OU}*KY2_RP9Ql-S0%~r5qGQ;@U^d3m~2ydoNS*q@~3bRWO4k z;%_FMWO9UmNhH`cHcO>Cwph(f(BGOMrk)U{EL?Om{1O~zT#Fy2YB-qsHH=r~Fn~w1 zaF;B1S#9my?zs8APQ5b6IMW3Ny=dsQ^@0ZH=Bh7WZ9g<1S|08#K^7tDONT7vcafb} zZd#r>Pi@Ke%;7qOnl|oM4>w>07UW@zW8xQ*;0q6JB3HI{F2B0S_4Hv5*D~uYZthx- z;+R4i5Mro_w<5oF554ygQ0|IY0G$CfHQyw^7Vmd7*gMuJH*q%OQp}8JTWY7O!dM5& zhzm|-fuwpLJIBZR6-PN!xwl^^|jo#_kt3%X3>K`YZ~TeI7}L&75B zPKq%Kj`(Bk$blzad!as>GiZ^7dBlsGgk4(96j6D0hJ}CkLQXpm*DGC+I8AnJ-oZZg ztd4Y1PnWRohC)sStvA_T9bUu#1x*|PD#obf{SUp*(YB6 zVv5iE#e!)*clUCNeGru7n%O1)XwdKRuoOsoboAHm zp`*(wY%7?4Kl&~Y@kc0nvDH90UL|{nmx6Ix;!>8w^`f{bvYYQPvD8!iO?~}-AGgA3 zi&;_43};!DYHU&)hv9cuY#a5$GvGROKwnd&KJ=T`oHxB)<2QN1tKcjTNkl9-VY#9s zX3A|d?WgjBIdSW=Wji}!-Kd39{m#d=5Q??#r-z5;Tw40`2adutW 
z1XP~#SZm?km_@>=+2gPIZfmtRu{zh&F0vl&y38`r*5=c>T>Wdz*xf|`uCe@cA4+xx z1SekUS)UUc*)uQk!eXbFzzeTZ&$(UBS+HiGY|rhkfK)!> zdovCUM;#bE*e+J4Q#?eT7Lq*PWF|R(DB!L$&M4|J9J)yHI!8`7pu$NDWZF>_U}-8) z!T}t%4uvsOT$sj*SwqY2GuY>W0B3@PhfOtWbWb9AyXIx-cB1feb!PS+WAAOprdb#5 zlFlx9m)#(ZJg;A0=^mA%-!WP3oi|`=WwVEzdC6IPZ`;p#9is}pLI7^3aCWY{tjW}V z1JDAfORN5<6>QRY2DL=vVu;hSK1k+T?Xl}(dqD}#tj9{aZ_8MIz1qMDJRc%~T}F&2 zK}Qc%`pCDsmHc(IbMYaLs9{TopkKpxVC$jdgShW*B}_I9o@EOLK$JKQ6X1Cb&)DN5{h>#p?;J@=clJIwEH+EDVB#|)BU^=APAINEO-kgoRYaUgP(16&y@VCIP*CdOIui2k|FYn#3^Nj3G72mpt z9us~m{RCSX!t%tZ9J2=(>yHt;gw0+7V z(0RA`x&Cj2tg+6S*WzmQO~{sy+|q*fY&)Cc{QdOW@ElVqcvSe1EFi>D>rwDqgz*vC zdsR^U&O`0tbKQxeMVq|)V7i9vfzy%?I2d_L&A$*r==m1 zgnZpA^{H~t)9VUzX#JQzM|eS#lZiT&kge+0cbbKSp}#YIPw({$ z;oA2T10sq zR&BJ5uScbuh$)ot&x0>N0=tiavE%IlYVlJlictBA&LdI(b`>*^W@d*IlBOJ?6B0EV zp0x|v>T#-sdFlar9o(1Bt_s*cs>2I9uc2qR>R)nAysOxZ#WAf3r|m2qi;Dx)nMvDS zKK$g?PtPp3Do!rKep2!(F%kDJX1BG#`f+%JxSGgv8J`1^^Lt8{k*} z{%__GP~C?x!LkwBLlNUSPwN>{`aOh45Y-8h*HJ4QOCQiCnF=s~zwjVM*R?dmXz8UA z>52;5th|Y@*RaX9OgsP7vc?U)#i1Z@3vmCqXM_Ip4_Y_v>ap5>T{YBP#b=h6^@!JY zL;1-YJQEP&pnGIQCc`6ZM9Z+M%cdshqBqj&(rbIaq-P5d)rL{T8~H1^a~z1AXduI1 zhsu9oz#(ri0D@_{JuvRG5!@Iwy`8LbNETKlN}Z=dCx4%)PokKX#=h@fTStz?F|cL#M4|Veckn3uF{$G=gCMYrni% zK&rh%Z3>G;&099!py@ytza=73DuFEqU}{Z7&}rcrfCKcTi?qm0G^qQ&jO67{g_Y{m zQm=}iA_~LE+p7X2oE{!(c_qD-u5f?TuBbREj*BjZT6l-o@uXR{T%*~%W}~Rgz6H~=asDi|v~)_|uFb&(zsZxYTGcJD zOLc0g2mmQC<3*xb+GBeJWs#oi0>rZ_kVK)$SQh*H|JZxdfL4s|-=o*0y9&e*J;+!h z%4vu7yy$^1(5VZWT{+ZH}$&|EcRlcj$n*LvvK%t(P=THh|y( zPN6V~K(|!sIT)>hXsEvqp#oO2Mh)y9v)$tT=>B88eoH}gqmBC^f%JELU{plg24>ND z`^5U6j6&t}Osw~;vT9Gga6P3ygIn_)(Df(GkRIPuROUe^1mYz9p*8D7@z1c1;K~uT z4%(Unam`foSQ@jC(>pN1^b<6kFJGb$!C0RoOao&^?Wjhz3tj8jbS;3Y&M)%6Gk8!k zhUjmOtl^(dl=2VRzQj4^)rISOj;X}U(b+-SvF@K*}Gs<1yzEZ%k-jn7e$enWT<{H(d()_BJ;1yuI@f@Fr z-dD7?04X65fLsddNyA6_5c9#(VWlfQBsFsO1RiEsu3!WPN?UFM6n8*Y44?&@$Z}KY z&;XV@fYjOmlCEH0bU>>zerkWa-*`&hp=?uQmoPgrpvuD>oI zq>b%p-!;~%BY92JMgFPYL159aBlzj~?V-8=Q3Q4X2UtoUAQ30Q2Z9OI-qnV))0%`) z39mgI&Q$cmW5_crG}FUUFF7P;4ert52Tq#zRrs!6wNFs~5KSy?3**EbHi_ z-ay>N^`ODplw-vpmv=aOp(!TtMrjaI|g#aHy*Qpf}(Jp@)@@%2CSG^$f*{DqzJR8~+jLA*tk$>he zPkfr$)X;kiWhmO6H**EWZ~SdTAFV4)uG0j)LD=8Dh2#Pr1gI*LxLLH@~F#H%H!Abg`*Mj+*) z!KEs0BTPRa$fl=l-8oA{fQBGFPlbgEo7EZRl4-2`%&HAbFJ*Qdm-i-qbd$xfT*(wZ z8!EnM8Xa*GQUf>Z*d5vJGxX}Bb2r%$E!OI=-1R{ah$mufi(}bzbp{leT=j4P)GjiZ z521B|tXNzbT!v(XiO*vYTOFdJ_pX5XjvOeE^ptA=TepJVUD>f!g3u&-X6<6EbY1Rv z!hxUxxkFIDx2EX7xYgiwce%mlmu02dtUPb~$_=cwL22KmWLy0LBvYY!zk`RSH3g_x z7u-+}5K}UQxHvdrMDoxPWh1tv%R&aAN<^S78y4B4FDyj!OaR*Ep$0Yl`DRpD`8M{fnE@|8b z7Kw11wx#?ZrYAJj`YMF~QMDef8BFC%z@atOI?eWpIE@TVwQfr-M^WyY?7vtyv>&lg z7vK&s{`o#-;8%X3mwSRzjnH#;L%tjj)&*HW(hkRpVuqKd*#6IVA!v|<1%zEX9-q=o z>jt4Ilp#Ps+Pe_215DpnP;0{bUqP9p@;}d3oa${46sP9h+NmG4W3lEQgtcrIfCqD^ z`0R&EB%LwPnpCnO^fNNFwd^+XOBNhlsBnVUf zG1JkMQ2opF@MiyUF15_|kYLsFv;5Px^;lK=UTpsohrYsD?ap->?Ms^bB%#YJ=l+T4M_urvbX{$$F}!oimh8qqC)d0gj{;9e175m5YcMQ^ez|DE?A3ZYEnSjD zgWu<9yLvM2$TzqK6bbzXFHtCCu``BwUCrVjW+8Jft%Hgo8^8e1>Z zF?}zDyblCF2-7SAm;sLdOc_$my6CQ5ZH6e!^;%_tGu>9yAR*Wj0i*fg1{(GmGBzTa zpaQ9(Z{e_`UX$lC_xr5b>?*vgBpyGd^JRl@oS4l%EK-m23 zzufp^NBO`9)C~HKBwt-YI!R+VBM1o^WQ1Y`cw#364GC#6G!fp_!uq!s`q*KmP$(nQHq|sMXlH~yFFMoipdXN4wyFWinyQ;z zrruA3-H$}F5ZRfSMN2^G0!ajq zgX$8ojHXK3Wmqv(lwd!M8td*05ow-xRG$`I>Tgf_@di!u9o+IQA zyy8cELS?JFa`W%#$|oHYufb*iu!Dz(3m_Y7uCd|6juYLt#&Zx&f$T}NiVilxXu?8* zd9e$=e5%NK1)QG7pq&~wL25}LP%-6_f%hM&{dnH~OM2X~YsS9(@TEJuVeLG_V;xi3 zh84SszUIh_YPB$LV#p2yX-s4Qx5g?$#%Y(UdJMjzVC6eV45@bN6*(ZZ3!=JNWGdVY z!IhI@@+O(fPT|VTVa3!NdK*Y2NEj%-%+u^~G4B5_r!X{$y&P@#L};X5BfsVM(21Qd 
z#`pg8CoJg1>5Xq>m-;q+FfP(Fh~_Y3ZZDWt87KT$G5yx+)dr_+1swV9Ml<}ofl$Fl8l%H>ZqUZXAf_37DnG0<=e=WWu_jP$ug`E}hVTHV?OH%J5QA=9Y zVH43BU1ms*&cMJlbTl@Y3iXpZ-w$QebhJ+hN->`1ZVkg;F09uXM%^r^(&=m2ulXJwS-ja$a;$VU)lRhhDma+N&mRb4Dz7jilKS9?R{=X^YFJ)@Rmk zbSfJAWDZX*DLi(Jv0?vrCGYEZso8_O#!`1~Z?Gx)VcU(5BW#`KUFpgvqKDL@VtdrH z!r2ncS%R^6=C`cLI|>N>n$Clc->XqR+4*aqAF7z3l+b_8JZ#r>9?h*L66cYh3t#Xw zVQIW_&0DsI-YRDnGw0zmMb1keowQm26@dY|lI;e^VWrMnv^6V(6l{W*GG<>z2&YuD zVduZvs?+d3(YI@?>}m6g)1j;c{?zN~cXon~pCYh48`u7QUAtDtl>b05Gyf%*Juu{# zWV{&m=4m3(KP=mcAR)|a6PqUPYjE1fKy}uMgq1|PslOG;XrHs$UkG=YEsJKX)t-<}{SWkBL=^Tv-sc4O>rgC^3eU)vDwcp=wzf zITH_ja)q=igDq1_Lvprh!yS14a;mlmM2{!?fHaNsSHCJwAmj zn7@d*eh;ixAF;y@)j2>i$^2%KyUDg<_yOAVLK6lDdO5cshHRy&A~4MzsFgw;=w)!= zJ-r5?h(>`BZrm-qTp-FM*8B6Wn|%;|nu9I99=G%AA?qmx2WJDI*tzZKil;v>{VC_{ z`EaLc;vze_bbxpBrYdh!zQPYQ)P8bNpSutL78a>4%nLM=i1YinJm*I(Z_qYsOIRr) zKU2-=JW;EMeL~InmzO{=o*-#dkU6f3l*5v;?8B0rMzwksDkjDK!cXS;!QtQ-8*aFv zG|B*399q%NfgTD+rf&e-k*Q?*(x@P`{(ETi8P<+Zldo8*4^C~#(EA!K`9}k7h&K5H zLJfB;&e|it{+(AlD+QYc?RFrs|IM`!9W&WIV8f&G)VaTHb8xG#{eJA4eDld7$8s!_ zdA(!0Xj7gbdoOk&p!#A7?zZi|g5L>1%SFdq0ztN%+S0)cKu6r6ak78BE}{Zm7eO@r zh=#1vRIvW|iMnqU(@?@;Ps6B^Lae4$LKF3>5|w1lo-UR_swZ3%PgAh8(L?6sK%cQ+ zDW9WKnSj{`(JP)4S=TtICH=jk>%AvF16*O}jZwdsQ6(x}1;DWeg2PJTyylKI`TdXs z$}PG0?Zc+a;<;OSoWbL#xMP;xaB+1ToWk^e2W=4f*lkBk!epB0$i1{PYfYZ|_txQo zl3lq{#%q1t6R*LGF0}`U*y!83uF1KPucg6&8w?~fD$Fyy4vTj_QTV|Ie`~s(1bYDR z?%+pIlA}?3XoN%t?B)b2L5saoaB%y92PX{|!__*d>g|q`h!$)So_n~9Fz)9f+^78j zKo-*E;?LY z{mrlUkAJ!-^=pecv)^q>y2`No?!_bX+kYw)9{hs6-x|vveBoknn>1|XGdg=VsgHYo zd*OtO?>iD}lWN!Xd)t_N3B_4hBP_$sFuM^s&3aEV<|TD3KjFV1d&0c9iX;mg=cHFs2DM!Q+cAjRNuJ$E+3^p55c|>i+lpP}oh#LHk zqHgy${2{@qsncO2oNi;1^l)od92WVQnW_(46PTxPSjsh^vPw*7wAs_69@1QS;YJLl z{ClEf+dE{ckt_)MA_q5J3_Y+mZ=!tV{-z!?r{=n2_Y!@2^+lrk357P28DP?fdVINB z=YnLo1kOriXpy5z-Te*3LjbU-98_#|JrzWVXM;LpBFyK&u6T48X~WUnc@~!IA2y2y z=pw+YH|Lv*^op5+-(d-)6TO~SQ@y};9jFKksf<#L^&%`EzYSo4j&Ml1M5}u^3}zaF ztMvK`go5^9`w$aS$JvS?kQF_emr-Z#XL((sS>WUxafK5aab7)f8bdqTv+`M-bK2w7 zpdVJZMvjje-?*{nu;~JET9CMK`vq(z(E z%qv9+B@uS}{DluFE4B|`FxIjXieiBVxRktuG9rFvTqMB+q`71V8Tbe zb-Ahw+(iu({ltD$UxH%EJayCfXukRwW^u9|MLEKbaVNXaCt z&C|iclj%{CjAxo{Q#G({>fp2AZ9({#*G1R%giyhv`u>Rvv$@QL<->q+_{lB zhe74FFf)A6of}DT*2{eEy*7Autm3Ecfuq$IS(FLtykbjTlwqI6z=7g$ER?3xrgTPh znFLdcQAledXjtrb4@Y&Q$eHct1_;qhx0=aYW6l9sIAjN^`X3rY544S;p0^uAzAKxc zLo(bLO4c-n$ooX|FAY>VMX<_nd!``1akY+DI1AeE4(9?N2;P*ZJO(8v)F=v%UO^5V zqF~P!wukD*UTw0piomw8@-vOaMo~^IE&A` z`vo8Z4fl=i6hZIE*t!$m@tTbOQy%j#46qZWn2arY)&wvGohxKO>DW6HP1R#d126bg zRc@JN$M`$?+&7U`<8Qi`d{)?Ts>ySxtJ(Fg+vyE?_D;(@PX~+c%xPwKNM3~ad4*Rl zaEEK3l*VI{&gq&put%zf%236i{PexXhA>!SddG zw?2U|KGIKdXpDfLvZ%iS@;>KyYU^eoJmox=%co(=%vQzL4lrcYmhL5&>~&PP>kDZU z4I|E>GIPe83pqYQ3$$S85?Y{TYchpB8cj4!D+y3X{2%7tG_0wsYXk1^SZgVzt%?IG zwNLy?5}n+?MSO? 
zX!oq?I+FzjnYTv5v)gaaqIeuiS$Dv%jTsBG;akJ?YY5R0)-d!6dcR0rD*-xEbUB5tFyRPhk3U$`Nd*z7-KOpa7JPDP_-r_1i4bQDFyFg>Pq>uW>OB+LQC#clzz8 z=h$`uH(i|HU7NF&+~#hsT!Nj-*Nn(wRn0tg%F8go9iZT+!h`XMtB}>Ad>4X*4QZ43=42`_2J4`ESmXy<9IKk{ zxdP_Ba1y3)I~Pz*%p(wZAJZ-ze|=FBEvOz>W7ABN9sw#s>kS6eU3fvkY}DGyP>H^g zD%&QYJZL&X`2OH9{$k{D6pTEYnm8vn<*Y1#5w%xCH;LBLR?qtx2D>9XZj;@v^Ww-wnn;qXap5k|K0J;Uh zDvJpj2ewT?zwSh>qwZd5M)f?_rS`pCdln)w6QR4(-uycfE5Es81}yyQ+LjWcFFOTg z?<)DW*+rl4uCQX1nM=AAejZ&*x^@IozN5dHGM8IxsI-Cce*Q z2+&>XPvw1O+D6sUv+Ra!0r9g7DgOqhhF(q%f|V1Qg|vpk&1cq@??ZKBP?oDl^dSaY z#zpr>9znAh({X>d+D^^M+f`2Ae;Ft#9l|h*od-eO_w)&5YG&sCRLu^4G;Vja=;+}?!2V_NsBWky0OX>HUNTZe|;AsTHA{O7C3^t)|wu@kD-uw~A){|{HeMfhn zn$Bod6)2fs@p68XJ_;(D95L4#isXK^fRo$JfsgfP6nYLegC4Zd7XCGs@REDV7dm@VEpkxs~3%~9?xF@fFgWJ~3kq(xp z8MaNe^DU?2mgE$6qLvqeU9}S)_93KwWpMZmNgZ6sNmxUlMtRm3T^vo2Y5`6$tCyQQ z37*NYJ2m1m&V(FU)G?e+lIS3ZsGa~n+1 zYtD0`tc|h*+_5AO_02(b{;>h5@~E;Hb~Y_cL~AJVR^Y3 zwV%d`4I!o`4izN+<-$BvivTPnp(7^i5BTqoIhzQ7l%%4`1BCair000JmQ)^k#dJD1 z4_1}?pSzW_^A=XbLVoK5@;M}t{&hi+5ACU9^#Y3>*+EaUd!-i;?1XAv`PI{iMqpAKz9@~R84Cj zHD#5UBOq(PwsU*NRF8Rn+ju82OWWCg`B$(zZM3sU7VRuDqDyLGe7fy=S-?5>y=LRO zQ!2`5b5+rMfE?H^toMvR*vqPd%jRmK;q3lF{HsmX*akbqqN(1AaNW7mJma4pbTzF=PocjUf%{x zU|L#=e5joRnVWC9FahvNm!3qFktPwA4eQL7mbA`FqLfM}h;h@!WJZ@#n8Uc= z)McOrK$sT`XApGlY>7Z2P~=wGx~4>;+Ic2#F$|z_kkL2EPC1s_rmLiQBD#Iu1 zH+}`mG-w1I#ETHF-lES?*-)T=gY6pNp#{{n%uww}M;WzNu?sC=Il~*yhA@v%9pIe| z-j=+X8wE17N|6UU%mw+6XpJ`)mL4MlQlwCp!8~D0lJlZbSy4HvvfIDpHC?S+%=8?L z>(>bP1Qc~s#sNgbK!wG|OSoYjePA6o$s|X|_WMq3MpbjIu4)!^n?wD$l&G=Md<1pI zr;LM+pNR$A9neLOr0ALs;b#DFbLmAPUzg0->n43jh-uoNjM{_=P&`9bW{TLbLfHKE zXel%GVmF@j%nz51*+Fs&b6TkP!+{=W92)##`)%3vSyLH*>5x0g62?_>K}uV?#m0r) z`THbES^3j~L)E8v8uy^`nKcL9$qNMA{eG1|yCi^6B@SaSLbH=-h{oBFlk&uTIwy4_ zJEmTBpp#O7zo@Hhswv@M3sVgnX{;4P7$BL-EC8?Wd>YNt|Zj4BZhc)BAV2rM*$eRO1)jA|2K$DHEmZ@>3w zn>T|yXUU%?F6{T9H(FP4WSh^eAHnMc$BTMV&ZuxSVSQ(tlPA$@sq4}WMHXHVA7t#(#H+~xG zFHjx*0@Mr2XWPNQol(NKZhz;_;zUz0GHH89^TCuZZ{GL>rG;x&qNv51${FZoy1N&d zd!vr3WT>Ocp*yM=3x9ODj&rbSP%$!eQ7CWIi(YUN%zl(~K;|vX)Rp}c316#`z%m?3 zeG_pA(zX`uG3PR3E?kPrez@DW^L+aS2*`{j!*n(#biHptGCQoXblP0XG%3HI) zF47+QgMV!Kvbo}l1?Y33M2Hk)M(9S z=Ft#B`V)=PWvS1(e1G@Mv>n|lH9a+SMM0a`CX;r_FW?NB6kY+q=%*&JA0_ZV2H!&^ zl*Du@$fAWlIG+(^*04=&hVDj6E|(WVqY@T!4J;(_=UHfX04`Ty^kA76UOaJA{gv|2 zsd=JaGCT2FkI`pGcHL^TifrVkIJGbnO*=|0-mS1Xz)in=aDHSWW{dCjQ%w;}^#C`~ zHh_y=44^ZP(l>Zyz?(7hsTjd!q{Vaj#_lt<4J@F)|1a< z0XJ^=Fp=NFG0gkLu|-+t9NjQca!b7s4HJzb_sxw8+tod`yntIFeW$i&+kGNm6^{H< z77>bt2CNZkLfQod4q#8dE!G8zo5Wf3K5fIzjKP{jS(X5eHW&uZ~0-`H?F^t+h4c&|?ecvt?*TCID zT^_1}(clrz?`GH|Wa=3T>%xiJlTrS490#w54I8Sxy;g78ch!8sJl~4f92VR>H#qCb zt=-*V*+utfxzL}Lp#E$_u2kTYC7qvSzwiWaQUY^z zcM8bwfl8z8ko?}!ww0q!8^!L9+5f&pPr`>m{POKha7haBqeq$~C+qLvcQB$6dcE@a z+cbl27u?>VM>B%+7Jn2u$tbSpC%icM88w&2H}xk662);4*5Ar#78Xh z>QnSxkC##EE5b&qAO2m~!aey2wZi==&DLKv6Mq{59?RmYrxj4zXXLtA*CW_;(k>n) z;^6CmisW}Np7=e$%}`bd6AkONm(U6QLpID4oC*_-GrZ-QT$FCtZ^!v2nqKDN253fuI2QegfXyj@jz2BT%Q3p_P8GH@{2G@-4M=i^kOrbrnbOLUc zNKg<6gtnrU=*w}i@97L@(3gZ?f98yeDmC$Z&(tQe(REF4WvoX-Ec*nfE6liepEzXB z!6W$fYufDY=ZVn48k>BhZA78ODdTkQX)FJZb=_c1Lak=E?|8=&W{%;@0{s~33}92) z0QwKd;)b9jGkp(64_sWd=yiDG^KIXlrfS@zSZP;T}ZlQQ|V(--MuP4$CgXWbPn$9BNyfxD^>Gbgd_7)oClPC;2fdDuSP3WM^^FQw^!rW{b zzp5$2urP47RKZ8okI2uGZ?=kAf-+6o!-#K{Gut$;r&6ADzKyic9I53iqH-nW+1Do9bEq+YZwZ3>9BnK}HIil4 zfQt2hKs^dZiYeiUh>8ZyY;@gVUHi+>=Tn&Vq1y-mXvlnZA6bGjG!EuUA*q~Rn2SQU zx62isP+&3$1-AUY+!G|+jorc~l@Oc1*+9GL^Wk-ks^RdF zj57~EzO{YBmrruc;s#&^eQH9imhbedF7oQ_mY;7r3LB!FzhK7#Lx&kCe$Z}(Z@Zmt z_W#LjXl^m!RSKB|_6uTk8o(I+qtYNhJnBnhTM#bSqL>1I+XXdxGdPRYILg6KszqaW zP2Hv*d#HV3L&C|7X%Y5+n9Pb*Czx$7iC(_HVsbXWuO>Htcj2^id!v#9?@#Nx@4NDr 
z7Ni-?pK_<|*yL^~mQXhwaq8)7j#6n|toaOtj_P(VL{6kcVz8UhZCujutVgjFnkrLI zQ)L+N8FhUQ0;obKEYZVOG11WMmEMivuaDZf#QmcM;XSj@&0Eqa+cd5*s=8!ZOU&2q z9j5+smI(A?TRZ8oE&Byc_wAC4K>7jf(LbE)%hgw}fFz@%7W* zCgdoff#^9MXPCy*T#IbA2u>Lg*Y$FLa2*vvN`d?vXv}CLLAW2RGfB;e&Ryuzm;K@P zp0Dvqw)Ghn(K8xmW0u^w@DxU66UfE!#GS?G8+{bP*G-)!<|~ZC^0zMq`4oN+KWnd4 zKTM?Tg?dd~P&I47fCa$Hj-`85&@BPP_Bow0@^)@PO83bK-$Jv$NjheX4qc}`O;EIU z1egjXMoJV+Oki{Ceu_M&O-xL@Lw|Q}9TWPp7&y_H@U6EeTfJ{ox9P#PYj^!~E~Ewr zhAJh=TaT<=b@k4|1K;;Jgeds(w%qVlbM3enMdA;?xgV7@hda6Vjpo5#IEgHXCk-%x zN23P-!J_XUqyrAXA*OIr@1USe3!M-c3P)ZJC}w!+PMa=f(DOymF(-0}pyTMdeFrKa zR={%~7}_i_HjXySo&SETO1fmv&a39KxV1PlbK_)`-!K@bm$|^-2FEH+squdXcn2K# zS-KkzdD_rVOl<5eM{(|o)?hZG8#>WHGa70@&m2Mo(c`{Xl&}lY%{W2_@DUCg#Ck*O zL3G-(2&`^zcnP`F0=$+P9bar~wNG=5lkc-?{Fd&Y-I|}#LVLaCWAgS~@8Ve{ADjCq zal_e}kJJ3u!uT)7f_+f8&Cm;efO^4j2hiCeBxAtB3Dgr)I2eP*pTO9I4{dJ9q3D#0 za5L;~%i(<`*xKcPZ7B_(;Hr*L(%N^oY&{~G=6dZruFmx)V4yc9H(YHCq!78kWB*R4 zEfY5E&0oF!o?V`GfGK?1aJ6c$bb+DQ32MW`lnwT4t3%?#32BHfhFpLmclUW4JtsIid%aL#a7aT_4OIY?1LgsqIFaE4z1;A@^tINT!T19& zhw1`7YQr#fZF|ZNO>m)Gr$UL`aUg&-xTuaSEs3ki zNbI;5xODukaw4}qC(UQ3${)s+3yEGx`D*nkA7n)+Q3S1HVL6ekVFVM|Y z*b7MbU}!JsTX2w$kaj2pKzTfM7yOB=Uks{k*J-9Xj5YKjnP}b$MpJ7-&C=WJwnPEQjJQAm!#qoP45PehRAa@?-kO!JFgE6tH}MbT0Iq5a%_u6GYJw}jQs zubO|g=(kA7|AGb^_}0aiO`-0&OXDl$Nv!d(iTS<5??5>DCjIy4`@gyF=yHurxa#T8d`zEfs3F*) zEP2@0(2ZNehOeRb2ER7;YDP_c!G6mfMaH?-CW?v%qX3-~vnQ5{DY zH6xJWjuOc)>FismQ}k>Hnkqt<3V^+#QDtaqi=me?oAQ2Mpx5*L0bi_2G2eul@q26H zXP((Su^_0b&h2>K;>gN|NN(=46F$qV;l7K;PUo9lHKa$n>lri%;1h`iBJPypMUPN` zvp~vgT4XmrP830laT?;W_~m*OZlmB6jNB8YeLHR-%jMk) z&FIX!3S^e1Lo8Z;kG{Y_fXvn(z}B!>pFfnl|${v)}Sn9Z+yS3bdA+6LDeDo7Ft#J-AQp2G*8l}w`4rx3MU zBIE;ntQ$TAIKVJrMoofLgFbjkz$b%{Je1>~N0dMK+e$Qu-T^7lGYj<0ZtNTTEKjiT zk9+3r`1P38wD6k@na^3)w8}Eebzsk=eJ2+GK`!%lhPbSs!CLq73>H#&k{DFP7~L`5 zLP}-Q#KiiO!5xa&2ny@k^O{6#u&i6jWH)O`3z>kFkOL%%6LLWlh=B_lkpMA$VS+)9 z57~lkr#dPgLgn=UHJhRG^487L84pVQP-R9h4yaPY=b;a5tFr8+DXpPI7aAczA&Y<# z5r*kZ>7h#(4qot@CGnDTzlh&-N35Au}2o4xtMs$M8%I)J+^shY``IE>+AySrBIF1%4a#k9jE|0{hdWP+; z%BvBCweFQx1K3g5P0XwOByt|sUY|ZY88hh=s`ga}0bbJ0p^ee`kih7sy3OSAFKs`= zQo^hQof5!8BPedIE>R~+=qn0kvvldRZJ51|*?V^qvwF_UI?KFu_Bvj-25iAzC-MLz z#mOTjDSY`{nN*&4p#P>GUYsK7ev2M37Pcza&R-ud>Qwhzs2YL z*Y>nDexj8(PEHuT-aRNbfIrJ9IG-n#|8fo%F9oni-4GUK3V%C{m=GTd6Y?YXcWqn{ zQ<}W33=TSw%i+#fL|1MSbL`vYmaA1211q#GN%N(!WY7Q79zf%pXp&Y;=*W9CnnjI4 z^I0gj&{;-8dmOcn;m*^0q3cn9R0`6f7bVUdo5bu=j0D7+b3r!}saRj}Z=Nl)VO z<=&Ui^PW%5w=U}mve1aHI_T*s6xd6UQ!B8`>AA<2A-n|hqo?Q41TGrx%xFZ;>792qb&B9g$L1Qafb^y3 z+k%!Im2OH*TzcEY?2l`KioFLDH}OOVC90A}w@eF(-|+zak}dnbokpwitUbQvKEPr# z$*}27dEGRU$^-bOlFQJxWVEN@g{ z&L|Mt3M!N5N-lq|&GDT?A-Es*=W~lur;k)JA|1SDu2o6bTmZUS4H3SC>Tp;;5?UG` z=us`E6FmTG+fp_Rj@3c@aOhG1Ifk_~r5&x%TGDNQKI0~WX`u#j)X(O|O&M3+#ji>o z!!H=0Q}}$PsxC$Few=r}IEN>LW=0K6=~1?7$2)Ga^}>Mc|so zd6u|NudPP-YMZ-~cc^byRE*kKvI!4&zUR8?E2)gThcJ}YND}HhGR*NV@u95?n$Oth zD|W6<2!f>)IzMBEEs@p1@q5mxj)A7Cf`t$>bf0HKcK+g)Lj->MmV1t}BcvXT49Wp* z?gjEHf^MB?#v{7l{`(V01|_x>q{Nm=E3w@$MvyahMz6$%#hV8xHE$*Ryis2;DxFn* z8D0;OYmpw}{2e(9s6NoAeXG*(6H3`;>1d3=PK9ia_KFqMVu-65^795%p*IcXDhM-5 zq-zAEd*Y9jbUG@Uoy!9+nKC$Ffb8e@u;R{X|>YR?-=B zZSaX+t?e>9Vs4aG#+wfEZr?3_caQEW^|jk-o8cm()!OXX0eZDIkT;_2Nf^5vpS-*1 z_S(~l(UteysRG?rM1=INM^r@8_FI#7mQVMbftre*npuesz^eU8pLxNBM5l{-_jp~l z=xoRq%k?Xz8y^A*RfMSz4GsZ{tt*Bl7T%BfXS^6rj|0Lm!ITf~`QD$>veKw#jrG#4 zXKafrPM@Dz7qajN&iy7ygk_i|W-j=Nl&Fdu70-h8cGQ2T|mLkxS?+CH!3x3 zd$c}hfQ{uBNw&xRNwCqyt^E1_Y_9-(mmuf8{v<*XJpF^NtRj*WBE2`1Yas4t6~n>* zOf9tm{)_T1nNaa7%#U<)Y8TK@cQMCcMsH2GGIZtVMkM@(fa4+IH^|3G67B)eU7{DG zf##wkkfxcQ(_^hKQMdBG-q*Usa(l&p(m()Olk*?0YrZmDSG=u33RgFcxPlyHR*Nbf 
zW_Na?6odWY@DU$CR&?_`P;~0h2Sc=mj0i!-E`|o5iSFqa(42DM@pdqw1Z9AhE#0@- z)RDOPQq=eDn?Fr!^K;phbG%3O8sqBh4wrzRjm=Ek!DYz424)A3W|%(T~hcr4b2B1GIvtk7WxI6fIRmMU7P9Ezi}u3fP{Y1Ni>~91K~6s z^e>=t0zqzZ0xNaI1x$py)qFUttOC*)4TaM(zdefZ54;#Bawdvf{T>R|{QRebHn8z~ zUds3+zbqoVz`h%``jtjr!lW#t*ueBE!JTcN`*hy7e%5uUADKUvF!PnM~zEoIF&T16^a;0njK+9CY-#vTA7&8P3Dr6p7pv%#L_t8K24C#(Xx1kes zoG#HJ@S;-zk~%tB66lCiBGFDn$ZZ2x`^^aMc!yAxC}N+a8Qor%Gj{&$*@4Mj-}{_x zY|^T{HhGTz(09(!k0uhwnusi7U-y*bPWNZ|8#su zQq&(;5r3DRLvUGxT+f4lAv;G?CZQli^lCUbFJ$L9`Pa(7e!{u7GtMA8*T@h)Lb7xD z#3YNV`^Zo3qipT>GJ2>c%Q5K>D{4w1 zp$QJ8q)U2Pgi@;UtdS*BXDgvlmLs5*vqzJ}wWI{;RRrR|n8YJ@pfz607dtzI*3ev}*3c7SK1%1w?w_@n6P z6Vck{_s?D938|_987~G4(sW$m`bTzgwR?E__pFu)Hc`DX*qS82v?^OnCE_NJr@*~p zkX0dQ#~o@87PSa@shA{z^e^a|wj9#ihwP%MS^X++`pS7gew%n<(y)8efYZkh%K|n- z=YU38Dh&N}=!fx4q$JT6#}b+_W2AkOM$b$w`wAGnnah+y6O5vXw>XtQ8Ns7K7NF-E zc~N(SfaGKGiE27xGD4xp0wiJeSwF&t)4j*S-x#AlxAZ0lB&G5$@ob0fz zt6%JjlhcHs;(cdG^3$%tirMAzd+G)OHJy>~rjY%p?a~bBaMWb?OoXGx10j`e3+OO- z`tAZ(A1He1t9PP9Yme2X z-R;aWf5tb>aw}p^;#?!19II=-$LgHGJ`+&5*VR|G@5%1jzGUNGmztX=h#U~vbSG8S z$LhADo&W)!So=G8D}IYAO(~iXq^@%Zo;rV?R#U~kzXu)QMOS_6uaH-5cDUCZRt1X& z9NV6q?6=)@cQ$4xj_x;!?NY!M9-OqRnF3`h_ zGiy12ubl3=u-oXS$#_%)4W$pKX8g9{O+JS`5Ucd6EG?xdfL~NA`RfhIN^IcOU0B0Nxo#f8t{{7|snD$hKMnmXD&74$-hR0K%=QDs z-Y*|08^p#tMJujvP;5cGAvsM!ied-4u+6l^#Vw?&$lv#{YePlZpn|ik_kQY~2#-;& z5s`d(4QgFBkEKXqQg-~E0!hT#eND#)!JbGtZ0<%_gK4{D_l%b^OT8*h%K$W>`>xRW zt1Rr`KOS|OH{XB8k@(HELZ4+~XE3L!!pr*qGL!?b0ASC*)@Nq=yT^QGI`q;L*!vdW z`jD}mJ>!Zh+tJ)SZRD;}>7ZhFj~Dy)jA>kI8&!3R70;IlpIyOWt({U6eluVJ=w2m0 z8FX0YmHVZie+ysJPiYgdfV>Q!ZA`WcSInw(ITE^zPeVD{cR zHUYmTzxrUTY5C+opkj`}wZzAb)sd2u4fAxfI#82MdXeBL<-(*n<34)bHaLf8hR@Q=MF81i&|rpq|=ith$i;=Mzks4+%W6W|P}>il^yS=uCHh zL8d#Q0+Dku4|R7_=EeHtKOf-jIPrLeEbJXwSkBj>+aqGl@&T4BB0(EzGh(z*Q^@V} z#ECxhyz<*CNvZIOoAEL>u@WHETnF^hK_Y6% zQ3n@}onp4B34-OdN8jYdal*fNe(&@-Y3}P&!`NH2{!7Z7ozr%^`}Ewm@eWR5cXkI{qgxLviLiNM0q6Ew8_&6Ey|4?>ZHHP?iV6K(S?Gk!8qOyS zr<9XR0&bX>_$>cluQ?y+bpMK}C?odI?9?Npj5wi9A~2_@vhKk)=hL z{eypJUPc8K%d;Kt=#;1M$1<~>q{y7?E!-n^w3|>fhzpOQz>Xj}L6A{38* zpG4{V@y{T^Zb-qJL6n48!r)Y*tcg>`43~RAL#I;T(1FVcCGUW)qH;=41s(ne;6d~! 
zFQ>uCJYq-hwoirZW<+(r>#nO zb|)_>l~~trF-vgn95&EszqO8_zM}4z#is-zn$HRI(P%fVC9mrG%g2E610?<7F;jOQ zc%4AgM;VPvA1w}R$!7n!xn6!cZC`YhaZGE))_o5{2imfU$FU<=SP2>2)JG}u6m z;^&Rf>&9&qcBAK1v1bg#Cn|_YKFXWiiS&nP_TW+Av(T~V9;MMj>1k-s4u1)I#;%SY zls=rorXY4bXzzt5HVLm46#IUndJUVob#KCF`(vlv&;-8A2%Q_HdZUR!xPa3Q_-rDl z8^JG%3`p=HlQh0ik7)3+0J1dricMxQbCfr6PXh`tcZtNT@%C2`5PEB~n%ti<#?9rk4wskU8x*VXZl#@*UQ^ZnW7=oUz6VdYO!yr61=s3HBa zOBiJ@S|BeS*eWEp!Dk|aa$1-C5iZ)WX40p!z*AOA_3pR-T!1g!R%gRH{qWwBIs4a6 z^&33WDTU#vfAGC&Y}#PpThU@S5CQ1YohsK}F#p7ncgRJR!G*_CbR6{LtFE-UH1+X|+yL zr^E%p7RnK*S%~kQtM_9ARn%b(7r4T|Rc)TaM+89=b$Z`9gciAIQxDR1cwInQLjw}S zpNqbBU^E@{r6qH!IHb6m{qwzOa6hdLOvjyRLa+=_cIu zCodp!jx06jgxMc(dgr}Ts0_&SIt=wM3S}HDis7h(M??s$*OPu<+Ih)JFe5^b=C4+u zq2sw&Y?7OiX?4R~=fm5+aM-eMXIu|&f|-lmh2^{NTs&~i-*yU`8Ldg{ay@kK$;HU3 z)+W*Nd_}L>}p-Od!X21P}=M}lpr-#{l?J+ax-UlO zjvE*X&!0JCOOK?xj7m*)bM$UEsW8Fg%@aa=7j!zB%(ixS>An=1292sH>GV;lvmTuY zn&A`bzPPvxjO(b^K(Z-IZrEvHFs8FXL8&ysmkIZWk;9m7FffD4Q2^imAsKG~Ul|oT zr~fx56o!^_>fGNp8n6KL5tETX7ai^Xe;Z4{{@bGnLr)7&XscJ(LLc-0(AdH7&`c{_ z(?R+F%!I*9kB8)}=qlDQOmNciE*n37>+~qgRo#uZo_0B~yVIt6e3$!4NLak>SD%6% z4BTg62P6D{w}YX3pE`01U&ai!yEhEF(O(oxhtok{dazn7EbJHkvx(r=MJzuLM@5s7 zZ*}#@*Y6m2KIQskU4L8pzcn>5JjOsWK1Y{~hl6GVYQ0LhnI1+JCPq=_n;hOQF3}zw zC5R(h+r+Dl&lNa^BdRhXD{}QL)zpkA6h-ApPDbwr0c0M)L<9AP9S9o zc{+}AeP5u?pWg7zhL6P!|7Mah{c%;$%JiQx@)us|JJV??nsL`}}QS$y1 zP$22vgO~$NPy>Sd(nj+F>CqZg^%?wELUSW0e7L}&<#i8{N^!)$pz$%@x@YPI2{a8C}`JV&HG{_6oy915R@*e2YB5P>4|mBTg?1B*|54Rgh@|Z}DYqB=~Sp zK_PUbR;Qz;hmnT;!EOMr6*^y9j?v2c<-!|XHy_TMU36ZB3GwLcZ+C{u+axE?uXNyr zjc#f*YL{hJ*!n$8RUbFGanbYiu5CuegCTr{g?J$1(3N) z(Bg)J9etV_Bn~oro?PpQ7L`S(rd3B0jc+d7eE(fHzVX}TpeVy?vt#d{8>M*Rp%NFC zFS9Wii}QP|4WNY1jEj!GH0u<3s5oqip?m83(Mo*}v)AGU-{db9*q)6sr~)&H0Wnv1 zxHj)*k7}qtkSChPAF1}J;Ct9_nUy~w!2I4wb=_X6~KO(_X2y#!*g_YjA5tR^|k#`oVZ6V?d8p*l^@d#JFvJN{>Mx1z0QCkiM%{cH^nzX$KqBDx>F_q4c950I+;NGcq7qnr{(uhwy>;v|E=5x1m^A=*n35*dA8o5nlH|+Heej2=OW%Tk~ z)i)=n7KS!_%p~2}c=H7qme0|uhSk-E!NDx_kS}ZH{O{t6=Fjl-aa0_o!t*GCN%|_y zshY%YN^L@&TYPri!)&||r=T%~GbheCjF|z0IV;ML)g$PIXUACd`p-kQB+maP%l)O0 zouIw0JJqp&w;XBtI>OG#T791#kwblMme}tf?Tfvcy5fn&U|abZ@#Wmj(-cKk+*wFlvXV(h3*EROjF~TUTBlS&DB3jxLO^JWni?^Rsq( zcnmykr$pDkGSqy}-Kz03{vd2w{NM$B4&p^u+Z#$7F5^P??_6PkcLH^>N5abfe!1 zuoYRD|2~nLp%>N}j;_;@L60mZ<`E_rr=ZtIanvP-WACee`gL>Q7WVYq@J-_mEWOJY zPO4p*`DmfpJMIgQ!i>Dt?BdBCU!M8HWliN$i`ZaccX%YVXh8^P^R98!Cd%FTbV&i( ze8PwD?fa_`jY|}j5&p-hFAifFz@mU^UfRQ2)EsgRXg$3&t!J?suGA-86SWPDZ*0iN z8lhPbQkr=MV8f3?`R#@nDkI;s+jC8T{$b_-r}=kQzQW3<))m``E!IF3hXvOj-dmZ+Uc&fS&#N-ex}I52(#X7_b1)aLJ}FnS6^3Y0yae-;SX@M|gv7Fs8nR zId*)B-|EPF^ky6L0vbR7HM|RDQF}E(K{M~nvyS$+R58*)FSGeLQ&U=L?-3LS-iUy@X|mLk zk&&>TQUw~c93IN``pVXej0RA)ktq;WTW>>#@5lD0HS|)c?9{zUDnk9 zP?^U3uB>@i82Ez-ejX2+(tmZxkXQWqWeHkQH=2!&R2%XpNon=V`l)L`M6Rtp zM~h|Lf`OUIo*Tt&?)0>N{#zJ%u0taWE$ZrZc&wb4kz%_7q-m4TFy1S7ZY=o#IyDea zmHc=3r^Q9N!s%onI>}XZCix%2wP*44x$-!%L@X;RMWWe_jZJF|kUULUXENZ{eIbvY2 z>IqE{GQwc=^+S^cDlr-Fq&-|G{VLR+dPU5!2NE^3*vMzO73lt+0iCaJFyH=?G^hJ8 z(lfs$=yJ<^izW4X>vHD$78O{Rn;Wdlr7o6(waB{sQ9|IG3#o-{>Q*(9VFOBHrLN(O zNQ+eFc*Lk6RBeYWUYkRHi^jd7cDy4Dw3EZ_rLC!thD@2$0@_wJUiXSxZm^0vSwQL} zOOzKYdaEfyz|_w0uOvRighE{FbjY@V+=_B)HkG~iy9Mi-=aV*rNNDC&XHX1N6i_P> zx%jF5-w{`OB|1$gQ=5F#*%x^`Q^sfi5PhxZ-jtS?tj_;SM~gO0jU1V>j87ji$SJKv zeM9nIc|MAsG}s|{%JrA+h7p%;TDR-D$*9wjZ|>ymQc27)Cu${sFfz+)re)Brfd=*? 
zscxjgL@BcWrpu%i`42!+`Kr}oD?sHELJD!3>9q87_>zCcsOuu;_MSY*A35mZ!}BUB z>ia|9n*Gn_6>Zoo>!UJ3G^A6+$%B>mXnrsBlt9NrOl{JV3mq^^U5KRbdXSN46)vGr z06{YH4+#=e7hS*MToB<{Tu?3JZ1%ego06w7?^i>k?0N%n62*QuTDNEz1YkH|z^qC_ zPypF;YdZ=RmH#u{9V}Bw;m2HLqb-lx!j}OJv224G2fF2$q(+gj?llmy*4c-HGj>bh zUHWQI{gS!(wV5DBx{4wArQoqF*-lt(W3={kWUKgsyUw-3R4X$1mS2j0jI>m!;UJZP zDN_#)>VrgTFn$L1nXlsi13ex_J?4D5U9iuI8`;z?i0BInX4pz<)qOl=HstfqIo`n6 zr6b zqWGl<3d0O*GwdiO4*-Y56cwn|r>G1UwC}Q!)kz0y1WB~0miyI=*gm!4or$KVxm6)I zZ?5a^CL`uuxjyQb?C~q#H)h`rAh)uYgqp0LAvt-o(e=Q++^^?}vp;eF!;Fzat-_hY zUz6h#GokPz1m&yMi`$sz04M7jI#?(_>Cm>^HrHxmrOxnnzI}A-2A_yYML!$Pmv7&N z=*qf&F0QUBA^OqY%TQ^PTN~>fn^7{IO?yGUBKonj!2Jo%6MVT1D74dfic6f((mvE9 z{@gW&N5vFbi?zvi+};cH(=Yc&{pb>+vIFKG$J@wP;$so7VM>0P<*D6=`Kh&mXFCQL z54xDm)#PgET5-diBOnh=?084NOpIn3?^iUwAb5^FLs_Q#tGy?jI}k~yZARtRWn!FC zm`@SwH1MS?3SCyM&=q{;WwjOw{P3B(l>GG_f9@`{>i+|BAHCMke+ti=w==W8C}NHm z3icv*QRuW~&4JX_Npx=r2)^xdTlb=Vxs>5_;<8;gBkEPOg!T-Jjat2#&0AFhA=!P7mctBbBK=XJTu=Yb zD`GUqoQ`T;NmV@R? zQ3mvomI-1{2hEBLv}Of}KXC7$)W88Wuo<_@Po3Udxw42vg!QgP+6I*BWYeCQuU-i{ zNv+rJj-ks8x_AAu1qA=>LSpK_t0Y7@QD@v{3%MiaLVl$(u+;V41)ed^INB33yTxYh zR#!Srt$Jm%LGW~F8YqHQL>!;tIdCK?vP67=iP=Nx9CCzQNhfe>s3BjO66iV+u)pF^ z(TMg1u)Hdq11eY-m@DTtGK4(;Bft8Ft3ZHw-eqHD zLU(E)fel7wXDoIgwq%{8-x+~3f=-UC51|1Sv^deL((Wp?10wR&_Xy)hucWk+JuXs| zom%O6U|o_BOEdUs*^5sURAVTrK3U=Zi#M~R?a-QkD$vM zq1>PrSCdxuR?m(Rb%d;aWJTO(1}q?vH+F6bLnz$+*!SIXnK&1QJHdtCJX3Ws{&Sb_ zlQE|u7kX?7&NMl`cq*9dR#bSc@Stz{AmOCST<+3b~rf3Z?Rnej+Kl z7WVCml|?_bJ$q^tIg;u1^t%mjTJ*nezc6&sb=h}sFb{Z+%zu5c&py95=XrB~?3ZV4 zic9#a*4%XZn)7#Kp41m!TrQuP(0HePL!r7?@XS2knnAhlJMCDu#MG1Q8>sQ*GG?L% z982}}w-oD+xhV*Wzak|RW?@_D+G=OosE7lqh$F|Tf(bCqR^XN@2c&X=pR5O^r z8vFQi7Q2=BFz51!;Qnd+#p;LhC8cL=9#-Benk9SgU6aAs5cG*c(ynY@xO>V^YW0EU z%OkO|>eG(T$$(n!)vGPV|&iUi*OVy`g$1^P*a+C6KfgVW}8^E^KD*-=0CvF&dJxun`z z))YB)dFcH23fzu-HQ-6w$qI8$ zzRKu}KFU#3chITLnu^laHPnmofWE~5eQk!TWe%pQ`h2WnW7>E{xq!2lKGKJd3n*@v z`HfMhOJ7wCDW&wzIFFcXF>Fh;4rTakv<`f(^%t>D`P<)_mEF;rKE5Cj!@5mR+Fp8e z<@$H=0m>|9S`X%H6dVJr_GLZo^G_mWrC1duW03;QvVoTUnIY659@Ds=)7Ext#a9%o z0<2@YJq!mV4i2}lYVaqjw4j=cp%Z=_4MLN0<`d>V`*u48Q=oAxat0Vh&_-#jv2g3s z%CXyz#?RrfwHFScTD-v?HOa89{A8mQ^L~3t;d}>^Tb7OrY?gfl*D@D_;m68RpQ7i+*zmA`P%i2 zgK6UtrxT@nkMiSUe{;TbVmft;-`ni^S?Q9Ll@zOraYoj(coMx_7)ISHC4fyq<{MLA zQ*Ru53sa+IOF&U!utFHkV7ZhLf)ClYHhWN!?u31P*zU92OY9kAJu^k>m=Or(irjdj z0HPj-qTZ{bSSWpRDz0eSi}O_MrF1kl9|zGG7bxsC{2jl$gtYW=HE8m44#Lon=S z<(11Ljmf>Rj>6->-k47YlsL!bHQL?tKB_+Xj6E;+{myl4Qgo_2cSDQcn1U^Fj8%89 z&MsbhWMs*|nm26kp1Sp_2Cf}0mSU7|GTL(-H4)!*EUc<+Q22U!cn&E{;WXxl&ZkQ# zD6Bq*=&kJ6mYl_3gN<7S?>Z4vHS}*E1CB6dx=CCU4#NpgLsX5YTZa%<_ zJ04fg$~r}&OSF{Y%bZ8N`pl_*Li^J14~A3PloLeQ<&kR4jBIg5yYrUtQJ!D;g`(ms zzZU$&bTpluddU64${b5+PY^ThbNAmgJkVG5yxZ_i-kPaC%C2J7b&2(J>u-;R705tQ zWX$N@ebD=>I8a%51Vf-bb{M`a?86CE3LOC@kl{fDwA8F^m?yzad-AL2zzXr@aZw4` zJ1RZbi*Ytpwl&VYNZZ{e;W4zT00DD0gc0oxa>t2?KXv&5A5h#^=LhK=Q-F*_kF}b# zDbJ}4Rr>k&+GM5L`qu1=Qc)z`2&ZSa*nwenZp8o7-kXOtk#+B)ZQJVC(n@!$ zpd!$Ys0a~J0Ttqe6Cx@q%9K`AM1%;4$dqbF6p%PEBWXdv38FG75UNEnB14oZGDblN zbAS*+rmC|mfo_R>hUeVhxzBm-ZU4a{q-xjRYk1eY)?WMAcMc6rg&HZcI(rL#SMM43 zHw#)dRC$xhMu#CJi?Qd|!n5#WXjTx)M+Im-r`VF;R4b93=`gyp1tl`^+wa;CeTNK+ z$NEGOXP;OkMGJ8c>)YoWK)b1nHV`sWK>!!ldu1N`mA@6a0VP3%=SNt`owH>`bL z;P$+>z4_;JE8!0|Sb2W^+gXOs5-(r!`uqfGt+$(rzvS?G>9kDS@9GVf6eTA;?GEpQ zRqE84r)MeYkoo;(QIs+vrLmoks?VZ#r_~9eLS&$xmeeC_sNBOC2mG>4Z@B3DYuzl0 z8Xs_zz#HUCsw{VD$DmpH9L#Dq6I2e9bf-C}1nS_K6OAB}pU#^c1`nK10N(Er#8BSL zL;J-C|Eui;4xB--uY(Dg&cy-X2c$C={{dIZB^O%Oxv1rzNPN^Ms`tD~m=DXFA*SvcM{1#V> z%O9gj+t)ScF$q|0wj>bwq>FikWZ_FNAtOa$qJE%~CQwxvY>T~B2DBw;RWPi^O?o+> zD;RqnzoDGEq-`^E-}8>7ni?ccs-qQ{4!< 
zKOlw^fSv_Hxn3ONkSu~(6>)MULD;)w_h4p#fE*PcsL5)E!@9HYFTzq)n?)#6$a*j? zl%f{2!uiKYeioAU8^|oHi08+D=7F*jkeM3UtqFlz0rOEsG&43tIKRhWIFLhD!f;?; zAv?G=VWzb2$NBYu{=-Km`mdTS{-C`3l-and^LmZRF^{4{ zYzRU50JM6DYiYi?p;ZC*s{=)t{sL?+anJ_jRmNKZb)0xd$6|om4QF#b=BuZ!n8tNS z53X;1Dub`IF(|zp?!qwN1OTK)c3f8IT&!X=8%Q4u^ig|u&kW_{gLb-L^xT`o3Wi@X z$@YOijb`Z!YALFW#V_*GO0h%NOHLYFF2CVHpD6CD_kTm4Pi&$DG_Di-X+Epvoa|W` zBi(9%KJ((799o5SU=?r_cs1B-;3RGWIt@A=2&gvV6b=w2W|OAUGUxm-iq=G2kccD{ zp25AzDdQ<)>JK_rbN{RD)-;t?f_t@6uo<`E#V@+4zra3s`@o}VG0hizMJ{$$UG(X& z>BUBypbZ=j1vJk8F*|`S+TZPfr$zANj=_7r1KlYop8&cZ#CHHN3~&N1Wy7_ABCz2S z^NCfJYfw)b%ltNWMEWOEPB#zqA;Lx{>cxZk8i{M<)Z?c~ca^Y`@VXd?D*b3elGt_gm*VIgndWmb%f&$*R*v7@sRzPb!aE(biub(vj_7NNO}#I4b%)^Vn4(kMEux( z5>_a50GrjXkrc#2j=T-S3jAn-DW>YL0Vcg~w=OPNS@rVO(;D+ny{?mx++Wz;IvtdE zb0N-CEk-Z49fT1!uaE7j5DjqwHxY+BuGTrXyP{*+*UVYqF=QT4X&cQU_->2AcQ)s} zrgVa$d(<-amVmZbUI{xWQG?xzPGP~T39$6PVIcAAr~^rn`0WB@I2h?vpL1G zw-{lW)6u5F@Z+-S)DcYqAqQ*HKw}-~G^DF~*)(J7^2I^5nHbp6*3^pj&aCqc}D9>deTIF9majmCqpy^s~m z%1U-a+D~J_92>{B(2~kxW``DLbw|klcwx)u54Njw@}=xWW+T7CK|3jUy>DDXKDoR7 z_8D7E520jWPnptKn<()t)Q@8ltWp_LwnLYMuH0d3MAjtm&7l;sr7>pr@yN1^{xYxy z!_b<|Ap0T#&5?qjNU0XTCe~LPqMp$pR#pMyWAuAS(Y;tQaQ+;yt0ROh+5HwW3&U?A zO!b@&Wwu(JxGwRY1)1OTqh*t`puXsGce(x&ILppO{yi!UG2ATO*cJMF=fqPl+sQ9zm!?sG}F48$ZysI zXvbgRMKUDaB0Ri`V_yih40MIAN4QTHyY9z&^k}xS%aZweU*hg?1}j<`_4H=8hnHm6 z3#jc9;B+YPpOmRGGxJ$$dA}AvHm44sB;8NMqCnH9fw?naTcjKe61wg>vuwutpS?i4 zoo2qDC+xO+!>#DPW5CcFA-)6+hZ5LAwJ%JyO=H2tJn5v(o9(fA$PogMfH^{%0MJpu z5oVKoYhsPj^b4#Nc39sWxBwnh3kH7L$kvao93<;!q_h3|*2?S1x16C*7 z6mRTcrePeX6Luqf@D!M#zyY-qJ~YF+V3J!uQ#9OW`~>1AIyF z?tP?=RIG(zFa`GFI1H(2AuuNdaE1`_r&LWNSeeC}l8^nPs6Y#)p0=m#zbJw)FF>}% zl;5m{SrWjoISV{yF|Wsf(|B!c4u0OF4a&VJraj^5`F^{`KE#e2pm`Axbxsh3vVyRt zGXY}5o|RfxfGp!n@P{WZ(%3D9#jbYW_8~ZLvYwlG;PBO&I3!pyrwr0_D?bwD zwB7!lgT!)Ob1R2L`o*=?E8q$fw1XiE&ba{#HwzBC_MA85(g2>vrp`*{3Rp3C2juvq zf7TVqt<v?+_Qf_fI<@e#C@SUpOwklrsPd5zd%xl zsj!~+e9i~D=;ei?4M+7{@Mk2(Gmboc=NpsfJ6)%owv;qW3G>7-OTj9 zVr$=rBGbtQP&9gjB(S~Sin#eYRIb?6Tfc@0Z-27}k>HJcw`%EGAB$!=M%=3*2hz9> zKLG&@Apm3CNgf0(z+7a3jg#5EK#TzXu9SE(Ae~QM9@XHOWCo@CrfhVv(hz7QYiBG9 zA2h^lCI(Nb9R5XrhtjLU++Es>{evkrvxVS)d8Z|AugNfhEVaW7vsAns65`~nQJnIUKGLhja3mFI%_297Qj%4WSHL*bZ z;*hCyEv0N}dqgpWFSs0EXJ=f%4I!niA-g%6FV|1uyj_iLN+6flz7cy10sN1l|CyWn zr&k-+f2~=IkoY)$NbbsHa%V9DVGYa)WGfT9a2XB!HOzwtxoJNH5xWBy;-yA%KTNsn z8A6$=>S0QP*RO7mwB(xAYhb&P>+nzVo2wsSN;Y}-cqq{nwZ9F%IM}1%8X%08Qn~Q zwa|Esv^@ZyUVGnkYm{TtJ+Li7?azGiv4k79<8t$;U{?Z&=rlJ$sEulaeTb4EKlizV z$2gWBb`n}^;ne=6gO-G3ErWIi;OH!1Ce-?pCfgT_nJ&YZ(zjO{1dael1R!aFfjVQs zR!mcEum=8bsf_=KgO~i;y8fxJs4=RtXX~L_;>$gyZ_>0M5Z4MM-ghbW*`oD-mY|)o`1j8x#e?-jsA#){VG~a)mz6_8U3mc_?M2++^ zBM-yrx;-iNm?8jRiJ9FlF`(E-Mwt9EDK9SY?FBVuz_AXr?h0SMSx~eKG_EHuP(4>y zz<_6!8Cxxmxq42uy=TIYk)fu~l6roRknR$g2X+ZS{C*z8;~@Xwp^gTwfZu=9mmeZs zSU4QAwUU(8SV-_NY7g2-%S;Lp%dtrU;7RBM0j9Ne4PMx0jB%26(yc_m)j=eHPDfm2 zhUOsRjD!qTF;LfKkgi)(4-!Jh=W#wVZ@s6nY3P$-**LB1nReB`jF50VNaA6tta6szI4atUi<%L?ts|akGTi z$RUd)-JM<>dgpxfQdB|}RdtWh~&uVO!00}msD26iF+$uVyHQjZA| zfB~@(p5G!Rb_S@NOqX`R71j-6e`)FzY}G^#?*A`wG9N=DV9yy4ENQ}P7aEm(%W$Ww zyR9xi%jm+N#kl@`r;BY8GnY-6oU$nHle+gKD9bDSHThG7-u8SQ)j#-?lsww+F98-x z?;i_A<&mAjtJW7;A3WL&GO24+j=IuNhRkMZfXWXbG-w0TasS>l5HHJwrU^UI5et=7 zk;fRc93fPW1-?K!)>v^N7Ap6PUy!cHLMJMBui+j_v+JXYPZT@gDm=8S|KvK(Z2d~x5Pg5^^4nrv(3J9~xD+IIrFko{vpD`XU{SZ^iED+~6as{M zTYC}(?x>|@!rlbVFf1L*X?5%; zzrT5}%f`OXfc6J2whaqeE$C-0d%JA*C_SCv@Y)76cURBMRT*q&;>)*}*^}4At#4v{ z{4~@({1Bj|uBj40?Mo%ONlF|KsP$@LWD39mm_=#Al2Dj%5P)MId8AZivy7pz6Jmj` zmoKR|Sr`e9sx(WdW_lebFQgh}2jKTd?91vq>{dS`#W8S`d)NN6z~w3no_tOgb5E`@ zG||Kr8QZRz$OdOjTJ+$%22+2iHA?%9TlV~MFDTsX6gi_piKYlkrWkkjvOoq&)HuNH 
z;bDd){y$ugJ3G10P!$nKhe^qy5|CB_lTWCD!GU;KmWj#7-Y5YBsMs43k^3Wyy9own zu&LLTcwkfikhJ`W`|DSQ;XhK+&y~fTB*Mw?$9I${RuL=JP>5a;Rkd~8Y6tR?5P7fjn|quwazu$T7t7h zlF#R|7j~*#)Y@g=w$ji+dj4xhu$F|rzAo*|>N*2s#%$^NubS;30?IVOATMZ~0|eT2 z*=!GICZK6#K1CVxiv#}`(wdBJ@}dY$(+CR5V@IKw!Jr>1*0Bd=h=Uy60jUH8;~j0@ zW6>8g^Hh+O()krD3&Yi8Dh@UE?o> zRTk)UkuCIRL|@xqVR&vGdvkvuqc?!xWqG6E;2a%;@IT!b2G3}G;_&PbuV)}(1*HKo zk&zUDx&wOVkAj^d{j~46+#{=+lc*2^Dj{k<^Q7|)I%Wzqfv3caiwBZxX0l;dzKrum zPRaaZA!+x2QMXK4kvPVhTVp10HHl!H;r@1EnwGV-_&;#{6*^BZiAAxX&ZFG z5zq!bT3l5#5IX?z>4>W|8>z zC%hvsc>BccUWP||qp7VCYMrR%x}!a;Fk(15*?fv{2b!xRS)JKOY|OwdlEPd}Bk}*6 zyy^2TGP1}37+FxhP>73hs?CYGaXjeN1FbWE;1jM2=Ojv7xR`T_0g1+{YIH<2G&Ds1 zvrv)04E*DoG?}|Jl@{YC>f$Ad5f_X|Vo-(ZkE^Y9$54o}CTdR;8?d`s)F$R}G}l;5 ztWH9Hy@Sko!jT?#10ofmrZqe>wjBqv&r-JGorP@neR~1y8j&&QcPYzu^K%OE!I(ls zbG4X{P5m<$u8I6<_T_BRk5jn5xmhg3`?KGg`=mkJJ%nsN3r1T%eR=}ZdpKy%6Z+r^ zkjF(*p!fEdy=(aqG2XMzXV#-H&lTBP69^XFX1U83>MYb&4OCx{d+jpl%UmNvBbn`) znVnTw>}6Nhgf&--22fra;vix`q>RK=`(eoNgPIZ!Xm@O$Ha|3xn+z-EV~;**s`{s zJLnjJ^@k(CNd|o@e&K@ZLp7C`#2l22Zdy|^O&#s)?*71P?Blb5AH*^!8G|`f3LFD) zmo$As2chN2p>{T8YYVcle}R$19>mBgZ&n0BLkIWkr5(7L?eV+-+{@Uk1l&rhsB0a2_eN3 zjI7NoK^^P!!<94!EKVZocFX3p({x1X6g<}_*`1uCYP!Csr<%?&30c(P`{WqgZ##t4 zHJu048yz=qA(p7%KhKEyb)}Z7i%N(l<&m3BIpqoAun@N(1njZBTA&0JTsmMl3J{0; zeqBc28U>-v%9?}|@u>6@Xg>5>Trsm5l+6s<+55j)-pvT0=Qe&G(Md>e2v2~UYMt&l z?x7wNqasqol&N0406ORf6HhcT#rMzg=Ll_oSu&UhLZ?X@t=q7=8;RSJk~I?X%S^DG zwhOLA#y zECQ%zW`8gfHhGvPis+qJ4=|?}_<~bcrEkf~f|iu};ffyJb<|KYAkKDP}i$O5s$?q)WDWVyXy z4PuYV*5%L_<+$H`BhaOrr=YwRbX1lO&!!J&xr|N*3wU7gDaU~gc#7G8lrxtBv>5Rv z{{AHcw>7A=Ucerako-I-pPUz{$N(@Q2T7|`9Y_eH5!j$IR5}1S4)58092A6dF#-Md z!&og^LCiR@F!(RVn>p8Y=VfQ&Q=HT(^Il6GQDWyQWB>;TDmIrE;0Qr9c68?j!SygftAPaI zf}}xh9|P5AgkZdy>+uHc)U&T6fiK@U)4Pcsf8?NEb$a^2xvz$}fE%Sok>j(jX%-ii zFZ&B?N6b3+)_LgpJOO53IE%vQd2gR1z{c(^(<6T=J3exSlwUq?P#FICfn-Sf?iJpS z_iMgK*xF@qQ|U$5XEq_EHCB6C$)*=dLkp=b#5z6T1GUKcq4bS~4sOj)K0N)r`IYaT zrlu$KW5pLUUB0DmeER5NnH~k}!U1K)Csr<|Mym%{_i}$&kfqH*jIZV7cjS z^vl5KfAi7eJi+lG=8a2!l74aF%=cWq>y5&KrXyZG@neFogyVdlWn}4Sbr0Ucio*wK zuKRR0;g-@;`l3jBTrmLJw80nVk!mtZ1Yip{mU8~~k~4VJs%=-sHZ7nxxl^Mz{HI!c zhVo_ZZ(-n9IxmX#)~nVP+LJPxS6q@rIv?8DxLT!?`F7BV6B0nxruY^vEk;~- zf4Zg27NK!Sks!yB0oAp5wWcUGt*3JDOS?IIX8g?44idf(60%vZPC=V#FJ6xGiNEcz zaNOVop$TG0EcCkR6wVnJLFolm(hL+S))Yc$6HzGr8?jZdqf2`fYMsYPOk9Zh_HW8E zDWP2d!69Mv$mG$GIN=&luyw`vmnOZ6-u*iTf^gR7AYE;8+RsDp>;6T}nxeJr9!2RH zK!P$4)gzHUQh3lJjdjZ%e882i0xVK_}2qEi)XQ{XALdz&^m{+o+f)E?LKhys_P2f3#UdU z>4^Os9a2!<+4H}?yZkn^?j=!wrkT-%ppSuD$GxxoX}EDkCQ9yelU;{W_TLuGWW^O- z)^fMk9$>mMD40+v^dHC?e7g3QTRoq?fAYR0{zA#KY?g)Yv!GH?+3{;rg6nyGaCpz) zoXD2QQ+&xKluO7I+1)QoTJW19kpo39f_<-AwzfXfQ`sDpyI}oKU2Wlwn?$Xz-hGZ~ z!WXtT4oTgh;Nm`kO)$$bij=2QhjnTC^6A6Jf?ernvsIti>HZvaV$rcZD$lRZw6@mD ze1mcC>T=$> zjAI9;e<(dHH|cGXW|ymAVgz8`)SWJihQ1#agPHit(&udaPz_+JVatx@zu0eY<#{|?zUDvH8og4D5!92mG%`K{<+*0 zi3AR|is;a8n)BwwuEYH;s|E4-Cp7U^BCR&jre?n>oaw_?9m@Z*7-#rr@>fKDnSMsv zo%s@vGe

jyoVtG>TbBN;Zf;<1&00Vym8E@!RlE<*%SF63_JaU)U^iP36AnMip;G z1C5Tm#7A882<3mZ@rGd6;7So*$Q8lJ4|Dj`TS!cpVU~GEr=GrirID`XT&u!!>`;wF zg095tmTh`~Lh?bxdCVx5(L)mEk#sS37cH&_MTL&rlNZ~((bNFDNw4M# zrb<2p_0ifrOdk(_UvY)V>I5hj2lm9h7;H=%KHRUajG%#bYvIvkT7_gzz-PD^+6w!n z06y~>O|O0CTyLV>@Tr{t;SgwW@M30x>|2d`UL1`& z;*FkKl&+#%fCiFtV8R`BHzMBm^}S53=Cr4^k|r;3Jo%S;2lg%hPlq*ww}{-o;RB&mPO4?4QGUub{VTd)g}YKbE&90Rkn4ma!vd zr3(316#8NdiyMlzq3!?5a(maTl5HTkVy4h^vzkLGP-zC>oy6J#H~(@w5vhN8M+CxGfeJ>eYE9 zlJU-l_0g;GLT#oF`jq9|TA&*?_WQ8zOu|k+{I|=G7pG|wW zLsJgujos|?(O6Q1==uCw;SUbANL1|}iXR4Vfrg5&V3^He@cGWw>z6)qA#8#^ROnPi}cU{!dE8o}XQcLHhp7hf&{ARFmKjVz)B54N1crYqe|6+BRn4u(GHuq>(3LB7&tw-c zXs5PKzi0KT(!$&H!uN6mt^@k2GdiUr@{}BB8sHM|R?m6T_M^r}3#a(p%dHO!5^ioP z`w+(obeMYmJ=x;Z!)oE1`}jRnZ(0Y_ zM?aAH`l~?`qpw5;wW z@a03NiH*vR1Lh>Fg0pQkmi!udw1XBxT+Z=uF`9p3)4_lULz3Uq*DkxD*QD#MaD-vo zvf||na3HPr^1db7;yszR zg_Uh$^@ChF7J&xk%YrOj<}8Koh{6qkQxb@k)i!lF(3HML91b=#25#q*Yt&Lx^ltDk z4IdFIM6#q6=SD8rahqSh+3-IESNx7|Rd35<7InydqVrzNlwftT!`DRqirA-hV^zCW z3GQID^-e2`-kf+%9fm{gfhono?b$bm4+Qo5Kah18T>SQ4_6b1M0?ITnKrv-1zLB=D z^EMQF*Lz<`;Jk5ZW2}qF`xaL%YSR|a+&cV?5wkBFW_){^1y@H?zznLUhWRuGg_i7s zv~LbPimYZ3u>`;~e$V}99>qIgdF z<_X5xI~6!aHOvT|vbo27u`2B`UYvnW=N0xDIdrYwx%h&d0#fTKnaK`VE-pIi8E6xw zU~*jS1%hJ-_6MI%zI$p%;#u9#oAve6e_HzV&ZPXzKYM2mmn`V2d}Z&Kb2fYiK^caN z=v!61!@nxF;(po7V7QEab;TB%Cl_WIv}xzeELc5|e)#>a2Q4dn4%g%je_K`{E6vyg zi|@77K{uwaZZmAjxk~JZI`(EWdwDfnobyHoePc+wR(N<_K6(67u$#8yQ*~QL3@BRo z@=FAml>ts_9o*_S?4#j-2(CO7?Zm$@e0-okcK@F8i2yhEjKZNcEB6QWi5*hI8X{HW zJL5P_j?VTDA1KckY+9b{@Sc<`H`}V+jwNi6vIPQe<~2>Blq7J?FPb%TC(MB^-6I?* zC)=0f(FKX3)oqhBbl<6U&f2g@E})>w*K2+`YhO)`c-wu)J^6hG*Qe>9ygQ-3jM1SP zueGZ#GpB{G;3o_;5=%c=azi{x`4j|3h*dMf7F{(d2f zz7JUa=B>+EpM$@J-Si*7wMHlHVuaH)yAj1^=$FWQf|;|R?x23?m{wbua$;$wQ0sNV zW2n`ZS6#fkPHkwDz##d=;DDYRQJ=)^lx80(fuFN>=#S)Nlt4keHiaZtUqR@ zSn!?XI3t+O(Ku-CiFgLU#}txX=SfMHj=l}cHmM{viECT5QaLBmSax3%Pn0d^oh= z=546%Bd)?MO<|J7eXI5H_(FE_x;eRP5vRTCQZR;ZV$HD&Na^_}optmIdF;@{8?@T@I6t1@Nbz31aE&=h+T%0sjaL6okKJ5sOJ-y0c;_eCwCo zkp6N#Nx~B|y*5HATd&w;Hyt4l%=oTMAoL8dsc@Np_HSPxwO4o;A7;dWQ$#@-igt%U zm5^3?$XMQdc6x2Q`;>EfaF$JeRA!#*G~L=-%Vpggznb(Ua@>M7XjTJGvA9IQGr}9i zzNr=YsY{M_BvN{VI5|hNDGBozoVQrg$d^l^px>jPz3Y=0u&%Js4T$UTMfvKA5KU#; zKCOj*YnoA-o0>+=1v}Qwg`(U2=Q79nWWT7|CYO7H$k1XBp!%P1w7J_9+7-Cx0mu&Z z7%U1LXGm9~^eHu_imo+(@Oe*w|M*H=dRIsGj?Oh$3r zwNqd0T|Mpai5bI}Nc^Lu-mmO7WXB(k703A%^*<1AC&kQ9FHNZ=v|9KMn3Jpvt6hAZ zf2W;EmTb})K6F$~?8OyusjEj}Z}Du+&ha-^;vVXDWZk%UL z(R8^aD1TrqwwzgT`AVACclSFLY!5;C;@-tiMLVNc__|s3RX82$zDFP$Wc9DUq|y8c zB~Z!OK@nd(0=ft-m?D+K_BR=d-R8wlO>;7s-#?j37-t^;#wGKe_3heDgcRjkbK!i& ziOr9z#8Z?0u`JRDF}lb{UvsJLNSvavvngC=bNbaqmIo|0?GtN>`xh(-J(m$Yb$rN@ zNI8>2!^g}3LMSlf9a5}3lT%#g;_yd4$1Sfk+~0k=0O!xkez{h3^+W$HV%ko*%n+lK z@^%TEU@)NATH)KQj#lGv#`pKmtBm=mJYejSsoKgizrWh)l04{vPQxoXIcx%F6h;u= z;^q9IR3q$dfa{cVYq~TeVpxLaXZ}f-%ERRn&8Q0G=OV*tmt3tBIUNR0)v+RF5y=wz z@#(U^@Oi0OUkOThpn&J~G1=H*8NGU<}W;JMDzTao9qR20(P zjs~M8d`~z%`hLBdoT}J}Jv){kj~k^#s55oHHzSc?Ld}Pu>$JTljU0S}uyCIq!TNVD zB4?UdA1OxrqT9vo58(3gijC7|NGG|;E1-DK_`4nw($g2UZItQZc}Rjsg|k7~DWY7> z3Lb_U#K$jjscNIq@pf{Nzu$ZP;?>ots2%Nl@Yc0AlUZR+KB^vjsLt2OBs;zD*P-3F zgYyIVpjZ8b7=l=&qYywaaDo~_W)?A{vSwEJue%Xv#7oF~zWW0r@$qQW@Hn$lZYeo@ zwiM2nJX)tnOi-N2`N)YZ*J_6{pZk4`frK#!+Ko4e{KvXKGpS3Gt5`ubVBZTXDlkZ; zSiO~nw|Cs`yiz#-1npebX)VSCNw8lDDRot0k;q0Kz~-Qv5{*%xmjuP0U>AC_$)~0A zIl5D)d7HQG5q0lsTuDT~kNeS3UMDzN6U)6<7 zPH|7zhHg-OH>PDJKEC|m&gyBT^Yi2cTU%0}Uls$|aYx}{-r=WG6A;OjOO!=(j8twI zcXC)xbrv@yxh~l^5+A25*IX+R$b~m-u7zU;ml`yRi6#p=zfYT58LF|*rlQcI%8#1^ zt#n%RXLU;R>-ln}X=nt*^)7WRf$RCU?j6C{(EVq}hRk7eu3TLqiUIS*FkGmBKA48MdM)5jwuxgUasn(9uV`&A9rDfV< 
zw?A>(!24x+uAK8?v?w3W!@O?ON1D1KKY*}0HK{)8F~TAz*49>oy7i9(#J$W`j%{at z)t(%=$p-r}+H+iQ`OjlX4^vxG9=5FZ33Qf&e5ekx)5IH- zyz&&qlBdm04`fbO67ZN@0_zva^9A!=JHx__8yvse&TeO_yY9^l{XtH$#mN3F0y|MObh<~q%}eq#3CXD2-@3jUfRH*`h|#UwBagg2uJK^%~y zwjo?j@jg|?(}*Y0q_?yA<6f-@*+~eIsA5%?%CNVGWY1#=6oNuybFxHXu4mhhvg4e zoqMi5IU+YLT7AA;b1%fRZZwt~?$)b$*sCva3oGB2Z4D*hJ3ko+oOGNsJCo1y8q)jK z(&hXZv71%Q#@HjpHWrP6($u!J4P>l6VQ_|dN1rTm&itsGe(=8E7FxNBzJok?XjI0Q zvmM!8(^o&{uA+bsWpN81zUkWFQrVGj_ueY$%8Pgst*)h-GupqV4H+aYI}Hb7kW8BgQih^iaBMq9gW9tX{Tzy5Up#Eu4MG$)mu$G~+6Q z6f4rIXBk&>#J8>omF{o+{YA}uxUYgUd2FY_p&HBF_v_^uf=p;}Y7E9eDAa%C9wE{R z&gr|m@A`g^bNG+^v^x8{kU>J%-5Eaepr=Ywox_dA0!qcV{gah^(99PEVOi$?>3s(M z?oMJyH}>D2xts8-oV~IU_oVwvaoUV5J4H^%1h$RYo1HjdE}8$P!R&heL(N{V{1Yfz zTT4l!CoTnt9?SV-t$xN~;N?dHGBx>R{g6io?~2&{B?$U_sYD-H&0r0W;Xa&E0=tez-yh+Z z@q0`e%Q?qT4iznXxH!9jWpCwd9OZc?(38_N&{0`xCck&HRbtTFiXn)Ev{{PR@-q>1 z3CgXqCVJRz+ajO}SglOb^hs-$6WP2y zp%mX3KK^hr|EEs=q&J_0rzrB9%zedOzf|tQAk`>h)E2K*o*7!Y;g5L}+p2G`(d^Uj zH4+uKEz#!?=gUbp7g3As7q?FuMN(n2cb+u z^;PT)G_@9~o>3$@u~rr0W3;Dqt7S zn<)}L6R7ARR3Ccrh(X%>gw&czeGBj}w^M&wnx8nSV)MHJx!q3qc>J{=rTLrl{L>19 zGbj_vY4beUrk~`mmB2cs^g^SzkdOp>q^mkw_Q(}-qbt#5ZbktxQlpS-s8Z9~&W+|| zb!O5H<$>gqxa^cTPRpVcdH9z(utD%yDtXzP6iOboh`HE)^^(s_0t52j+{cc(^(@`S zzW*|1%+lLbg4eUXb^CsmlWeZYA(D&HbBaSJM~T<&cEg;(L zN!>@gfRZ&`)Vg!o<(CqAXY+Pv76;46IAr`au$(C(GnX>o3D*>%G%>`$QRieUryodF z^)e;gUND|4p$%g7#adlVq3Diwxj?H{XG_V!wC(%t58_53(uxVY2`Uj0)V z>#~c5V3X1~x#8A=VAvgFAQW4xc+!zXNpI;*Ytuu;KO%py%1--UN9t~6-8|YN4|E*) z&fF4TETB|;+w-K%OFEq(2y0n6`%NQqwm@*37*gYPe2c$)HkjB$XZB06*^!phiUSUJ zw|$*a4Ffr&EYDDMx-PV%ZJW2V0zX?41Xn-YDi!U*Jdg*!> zrDnAVW#<7-l;cO>+hj+!(d5uOoKK+dh+nX3bP`eJ);Z_#XEtAyew8-8%BD#lK`bA@ zC#G=|#;xQ?&)odSY@?HqNQd<2z&Q%dT>eyOoS;5tsWr*#!}cRWsy<5H6TgpYpJ4aNV2j*Z!lfL$HN^tvicxr*36c#x z)@>}xv4O}rHymbHl_ALOO3Az`PECfk)5P{|+%e@8<>6rnWP+YLx*DSc6;FBuOLwi= zLA%z2>3eD7Tg3(pk>kzRV;^)LceRxd+sOh0ik5$~xZNLG#JQqafnx63e)RNf;(o~2 zzk0=zcKfwCtAmJEL#IWh6n#g^+D=pX`tLsEd665&@bpdARcx>HsED_fN?6^yP2nZ3 z2JfQ9Kk3t!{WUduz zH(FMMa??$z6n4&3EQQMm56^TyzD9tIQJ(sHiu3M}fyxuo%boysNEecI<~vsFGt z%OM{r8D;*E$J(0QxYobGTZf-jd*SyYYS+Fe&B>2#E-D%DZ~e4ps8{0P8z8|*Yy3SB z-i#&$!8a3MHyNV!Cp>$W1kK29O%gY(rdD-1I$bIFIHhoaCa*Ss;k{$|z@Y^p?z0qA zy3FSG72!+xnp?atrE!*YJ|yxW(oKYvMEmG+w%+w*#%?(i&T6E*wUwn=E-Oy65Odlh z6VHgv0}qWU>B-ZQkn<`R7vad*|)z@Dx zcP@$THn`}NZiAo7mi*Nj8)sCcE+;Lj0wx#CiZ=2zMX|!)quhy&Y4wv-e@f-$Ic+^t ztyz;LG&zc1Ud$xt%C{+`6lxX}VL5j*;v9u?2&rrD(DvK4m}J(JSFHW#)l7%llZ>Loj;ExVN?VM-sp+^iIqf0ON?4{rx=E>gdb*~5 z=6;;LGrUW#e*rokt@A@L_DG?+a~Xoo*=O&D5S3n?KJTTziu(*ct<<$!4b%^mo7Jt< zmv3HRjI70$vkRAQ&QV{hF!)1_;hew@lk%g*_O;XeJ)@?tX=B!0O_7A$2{39$nO}7% zBz&S2Y{cZSaMe>Cg$qhLc;@-#&h)iQ@~zgDEp!LPvir2WvSCVM&B{-|uc-Ou#7caA zs~kQ~6~$xtcr+n5Tb%Cg;_!A)<<0BU&>vyim8}VX>dd`(b}rcGJnXI(9A!sPq~Z(+ zs{IPXGwONgnm2_OGoCNJx?-|Qj$N-ALDIGK9cgZcag&nu7(Dif^kdtecsU7Ld?;r{=RvEH5CejkHY)|>CSBVKxl<412xmi9Mg1tF zShCgk#6+%VtbA|6L+EzQvyaA-xS@uPZ)R@Ff?PQrC`G9Z-w1R4>xi94AMoXnJSc)u zUwt*IWponZNKX&?>js6zVXo{ybeI^*7!X<9N1pC23Rn4%gO@W+1_O>E70m$6x8c&y zDwO4ai@1l8DSuNd{}yroGcV$Qi@4==5z2?c^LvepYIEC;;7q=_#9lGV; zKR?Ib9q>qMY%WxbtAML1PndJ?ok5oe|IO-Ey$yNKw*33AhyL?9pM!Bi{^Q~x{3Ho4Qq;LImm)2KG zkwbGk($CvxMi}SNxHfLQI|X+*aOk~8_<5>Vvv%?CC)ZaTod+nk@3xxm*qE~Z_jCUX DKUS{H literal 64363 zcmb@t2UOEr_bsZRA~sZ1L`qZ?R76C2jf#SZ5ETLGDoTgYLysM$Do8I85TaD+JyELE z0HL=C0YV^@BoNZ`IOiD7H^%?F_uX;d`wa#RcGg~duDRx3d+h|?-!;_TzvslBZQHi( z*SmG&{k|j~tj;{Q0YcPNPpvO%YdCuu5L5R*{(~JScnM$K>j?ir4#w zX##=ZS~||0#F1i)m;KTg78cgwKTgGV(VF@VjEq%Md*=*aV72_@l~r84e1(o5e;xG^ z7TfW7+cvtr-i>Py{6+~A*W9F4_wBsOQ-6|whK)Uk&#>JZ$oni5Olk%A*(wU`Ep8t%|3dF5OIMC)2E1Jfp$zOIM#kz%7 
zG`IZuLI{>=c(m#7xvDs!z|Ob$n}Nt~@KZ?F;lKQCzJOW$$g=u2GT~TmG2rR?`=3E_ zxTZD6)TKloA)gj|=EtA;l?<;-7y{w>28%|%{Aaq&g2$6IXH4a{`Q`TIs0Fs;KcG~R z!`Rn+Pk~k+Cs=OdH}5jv?sfkI_l>xfVlxWi6kCden^wniA(yr~>gO$VI^M~%SF)#= z9`g8O9x1Mx<{;PQz`CTnLVf-YlJtj)m37(UGAQE|Rm3+|+Mh95%6$H^tlHyyekJk8}j*i?DI3i(QPs5O5gzh%qxhOaNP;Kf`+SGtoC zm$Bm3zJ;jd$4aGV2jL^!91qbiLd|&O6UqCE`~x!6|2SaYkDhN|e%HPHj=*XUsmaPx z`3D>GN%YsKYHPuPkzSTMO49C})Ks?hZ-fl$FU{$V7{Rz*R)!|tts*(*{^K`T9Ryb=EhO<0f_^ zfFSFPcDox+eiRg|&dsY~(L8}LVj(rBYIhY^%iX9O0e3*=>7Mqu&$H}rbAPx`2|lVi zASPyGVcw!O&Cdlk3YTBH$n<^yNrxk>6}h8m&JiG}+aRS>5jgW_y1r(*1VaPv48jt^ z3Jc*}0OrxDG4|2lX-4#>2iLjDC|+5}sab?v#?ceYQwF$-FpvJ#9fefvv8);~at+(s zvJcHC?#XxBRW#8RQnpNCv$Fb+c6p^wDS|-#@>?o7WHlnzJ0kY6-ev>@@jk*GCmb4( z8*k-(GU#dlEfzx!$2%7IVk@st=U&>JJ%3YrinkinZzmzIo<_ z`?<>%w=Ocm418N4IbJb4xE_3=-LolaVyg%sRGgb(eNDrBuO)`k(Fx60uDOh@96gB^ z6#u~TzUjMB>mhWFC~}Fb6QKa_fg?ahu0eM?%zD<4d!TfQ!(|F`<>7J#=_9ElHaEEv zR1TQ&&TLl5TCWwY$MT+@e8!c_5b{x(hNjtys()*$DfCm(ijZ9D)?!H z^~`m2zmtb}1lV({*x-+*rD+B(Hfgl(n-3ED*A}nWVYEyTEgJ_8aLFhwIt{PM!aD1B z4C`wA{6n%S!#gMK4J>M*o7_K?$E}(WI&2!(R_*9jF&E06{;k#PDGcYm9vT@TR_;pPs~nhXz^(et;`({& zPH#s#;~&E81h-5}na}t;A9c&u&Eek%{F|jr=~@Z3T8WYkX$C$jYO9cHW~$8v7(B4o z-B{5`Nz6#)VlG7mcJ1hgCQE&>X;0_MgKJ#q#jP%h)@7p!U`e&lH5krTz5-@8Zg(vk z=1t_rJM(hsA`pqe&64U^O|+^W57$F25J&3jK0vJl0tL7QGFx0~u$mH_61JTFQ)z%T z#I-GM)6s=O5nYXyr zlnqM2XGSDkLjI@DgZaD)qs?l~BDtk#{T+co{XcCvkpFirN1DB${HNGID}pKhMM*!* zR+ds03b}h_^UP9)pLJz59|-QY+RY`m8KK^v9yPx#Mi#9n_jL<$Q<2?z=5yNc^Yi}c zY?)wg6)}gFrb^eSt4my{J*~Ew_JmuQ6X-WGSSv>-MyjP@p?@IAQW7a;5zG04(wC7& zJGYh-FMS-)>tusSnt#3!!r|lU760+S7VFlHqbY=ljXStA{M-$*#7^x{R#*Q`W-~Q9 z|D-4(AoU?`bi?&RP2jGv2OBor#5apeX`E!>!wAc$7ONf2%9hW$;MV4I&xcTqQ&Kgd z_3dd}XPl;?g}Jf4p{==QscI=Ej9U!AYrpU}!X0%MQJ!m20`I67xeL^@cm4O4L+5|f za@74VS`Pacf84IDC#IKa%yxHQ>tGpi+o#Qr(KA)BDiTQP30i_Q8Vh|*JTY* zeZ3PmdGM{4!-gB$|4YkpKKRLntsZ|E;$t+iF4kGCWw&Iv)yE7Z#Zt&`KG+}*Q57m0|+ z<#vf;`q_V~>*n>=+hjvT?5+_{fsAVN!9Uv+vsjGGH>kZP)BTEQxXhK7dVOYTsXaI? z5tq0<4fcO>`|}(7FSoyQGK7;}<7A$`iwv)Gb5}a(n z-9!Z_Qp(xOuT)X1$#AI!?p0ZK{aqm;YMv1FYqE#{S6BPD_kOpKxTtt9O>6?=uOz3f z$~`^oU>nK&sq2%7Z_7^HvuniE!^^yH(1y^18-K$km(w~~Psu5?-G02HiEr%i(G%wn zU%$7Pci{Ec>u45-GyX^x6Vr1qWlyO1a?Pj!o}gn-Q{sMJY8>}rN`}G z%x>N-+UoK57x89I%+a3dlI?vnW+Nr|S6G#(zrPR51s%2|dNuJ8ZrQ7BmKNL!1BFihj-F&^Z)(um0oy_yX5q1+}?Qds! zKY4<=FGa}?p&K0349h<*iwLs+Mfb=wd^cB=?@gDr-zpNBW3&-ZzdP(KK%4xJ|ndb5NtY~j|AB^mZ z;a8sernGCz&(FfT6V7oTLq~o|wsR6({w4KcLM7UqhxDkMx|$ppmz~bV)GD-aoA$z? 
z9T;YG-nCKRhSxIi*YB;UhvTl8w5^?ZmcP>7hAO>Bx@)+V+OxGx1=0?~SUU{W!-+cq ztM7}cdavEb1YWpE&)3%eHxoa_b-orb4?8GbBcagrh{O89K_J5E>wZ|I( zwiaj(Fz*;S=Uh->em<+2VxICB>{GW?=8#y)1D+s>LbwqC8;`gPMs(_J8*}#Q$ zd{f+xf7800r$Up=7yBJj81`rj>6@Cc4z-lan3kgVk^&WSgM~8x;itKox6H~6YkJK~ zKm?DFn?m`KmOAv;Jz4f0ME@ho7N$>zhA1I40omq7KY2t*0k{>3{AJ&X^hIU{ zRggNW%)N@36NeLzaI8h4rrSV9&@YJ>Oz$m!;vBs4_I}M#Xn|De1~Q}oN!lQhivuUg zSS3-;5{m?Ar~5FwS;$t9(d^#1MT~@hondWWQyM8W{Kiq>`&g8$kET4Q3oQVP27Ap4 z`5Ef-9e3oR8I&o!bswuH!1cGa;yK!<=J{k785yUOnrqpx9_fz+}&&`$aEhF3yc^$Pvpc5C1W=vmX|pX zGGKPzK^IHc^O=p-FB^t%XgjPm96h&IyxfWug2>tUA~S-*WDNzUwfXzWqz(Pgt8<={ zp*+Jb$&Z2%w$77mqKk|>D-u@sEw;pa661WoTF(j&%@g|bWV{!2D(hH%!i z4@hX%njR{8u?J-2D%Av!#nv%H7j&d5T!IUdhhhR_uYe^vAuF@N8)8GqP!efPUP%jj z*nSrEw^L#N5Y-j+qKc$sXb(u61}zeqg*}lNORnlHjp6gIQC^*YOh~e%=(Y^e;*Yd$ z@YS6w?Io`3&)T&5(`qqaYuPmkMd+!$V~FKu0i>&D5N)Oxip4rZiapL=mW37>#Imqn zu_qMR;pmeH(`_gtC~3HqNX$sIjw&T>fJYK8t=~ZWM$iJT_;M0TLh5OD24ia^0Wcnj zqshh5UQ^AT=E^(mXIG572t&F4VKT|iG{smba)X3mL@i=${Z+5~KMkt7Q(KBkWrlj* zYVu&xo&o*RQXFZuaKNVct=bY5#8T8Biz$e)uN;fEp42}sW&@YIC5nNqk}OrPXvHU~ zn{~GV4C6VJBHPwQmLw+Fm*X@vL474a2dhl5!bolPDOju#e`g2m_HQ6!M@eN$Hxc}n zIKktWNk`-1!ooJD#n-M#Apxfz#;u=8;AknSW4?U-k{&U_$u>fsVVB}(P;UsNmGli} zRl)RrSTB(_qaq7knMtgVLo8CAHnUTo+_AorQ8xyXzkrCRi@1pxcFStpsY zI@&5SNe7#Q@ghTZUg9nAd#bfI_q4vjU9Jb^Dw0V6#06xniPhgmi?>-y8c#Uk=wAw2BPI3WV#6Yp$?zB{TOA*MI! z8#WfH$w*_d3ICg^EOXZ$7ym-419V^ znu@IVsRG1oLK7u6b5Zv&)7Lx5YJKQ&`ewOwdwM`~GJG7W;Gssa2(;zA+Dn#50M!Uy zKBYC3Y(^8XiL*%o@2CZInbK@^M>dQ6IKQSABJrY%1SjJ{dlj}gF-Y@cdY6&9C#TSW z46j!xXK)x)vZQ7fbkJrFqq#yWPIOUGLtHxMllYNyGPWd|7hKEojAZ5glNOx1O%`@B zcb=S*ZgUAu8^jKi!(rFF90yIte3r|Kz5Y_xif?(?VY*j7KL}f!(uUiJJ-A`MIM_A+8!$Vct$?LKN0_R} z(aQ#13ebjIjU6XBH4xlyepIx7?iiJ7Iaf*&g0$9(=h->*E09BwCFICO47SQf%xF+b zOavpsBd-djpD|=`kc5rL9{$jf_pZryPHd)LHWN7#7cU9&du`CWcCYVnLo5U)u5wAs zibo{`IE^pSJf*dMxU+);q!ha9dRV^ zF8@PY`z57UxGUKh@hMLfle+*(Xz6GKtp>kobu9UpX=ZY2|?tSS(y~9`}ewy=v!GW61s)GH+;`dQ#Y26yM;%+91mMWQ# zkqWY2Uj-et+`w$yW5ub!{qM8B@A8D)m-Ijih}k?e`250oO}9z~PF#e$2i-j!@r@{< z1+f_o;nB{xdmtiEOoqDfGFcR3YV-gxRwA&l!5^L|zQEZ@}{k1Xto>Y$u8X5ww$^y!AXhReRiWW~KUJZK2%a=cMVz>DCBa;-DIY0Sn zU5|~{6pBn?`(VS7qfm#va|x&QDvm+LAF`9`c`J?)vN%}dDC7EQ=> z&#_Kf78~c%p&b&*e#v&uS%6bDyKb;(nqAIg(U*@D-wX58aU!LPtmW3McBl1#41rksdt;dC4gxbvjP-;UC$?Vs5!2S8%<^8JU4oFf@ zW0xau2k1`dJ(}o-OJ)f8$OfT>LgZh(0%QkRb{-BnM*}c*@hDWX)Zuv8$j7gkMge&< zL?9)=l;LMUE(*Gcv}|e3A|4ghnK^iOo2|OVU>F-8;W419t97gn2cjC@!8vFs5<50# zKmr$`LNBy86*J*?48JeOwJEm#0P{|vwq{x}Gc9W5G0TDXTs~%~Wx~&V7JxZTGV9Ue zFv zg^iZu`FxC!bE~B+l{)w7tQM!gamBluMg2T?1Ur4%O^JOk+aPw;YKHTpsuRjq8-zst zz*M93<(@F4XT2rni@>{*LCaJFsBt90ZcT2QA5rdlae?u9^eA7^7Ka-~B}=E;o{ zLHmNzSVyEU0_hL5$6bI~n+76jZS*z>Pf3XGzL|@scer#x;opN#pGM0sz=^mR!|@B> zsmIrmVjFJ(PS4#n^9y?bL#QM;fJz8|pX3UWgP@?j3mM;; z7t7V%JpP%yh&xb>UWbhB=eJzHe4N61%CI^;@GOKm;D=>|{$N?I0?bSh@Js6{dM{_9 zzv*+{*Z`}t9l9Nt9Rba|ce6U85I{-Va{SCDXW_840paIBJ9qi6*;2D>_q(g7FN|O8 zx#n`9A=Ii;WHyfp%YI1&=e6lX=GUVsT6}1 z;~i^g+>q z$!89zVMa@<6=29C2{fW9wwUo^1vb5h<3Te=PDu^>o@u97e-DS0fgVqGX*JPO?P696 z5tp!7q6!nbOISrdB-nQ4Ti0>Z{QxIlAL#>wd#9sU%pnGkUxpSEY8_q)9K4w0Fcq4z z(@Gy04@_>Ye(NgXlfld|r*p(pA*Zj|Co4JcnHG^rvQ}m)hi#L45WnJW$1$0Ym?`Zh zP_NSLH>Kvkvf~CTK;8nZ9Bj9dS;;^sd*~}Ow47jm^K@_ez5U?cn1Q=xS7%~(x#S<7 z*Jf#*YlKxK4=B>f43u@8;UIKBe@A5~e`=OgeIe_!7y9YBwN;TPkA6V5zMcBiuUxJ(ih`bq)2D)^*lwV^IvTQ_~cs*OR|ci)3Kl5Q_iROg*Xo>pqGHI z1msA_S@&~*!t-4XK)d~+S*LJc&!3W8P3tZ-mMr&e$@U7`NR?=VxWq{-5!&-$9eZL5 zIeW;MphBnX7_uZmR^rA$k=AE=Fp*2VgIXq)yCI3~x}gl*kXn;#WM?j^2PpJc04 zKt(yo7nMV4K_44VeF6_x-~YwI4(oF;QR-<0_4e;}5?zbrVLp2HzxqDAa^s|&lX6=L*KAdcS$Z3ol|Rk zz?iu<5O4+Fc8tG&AHmXWr5(;Zc`CvD5xMdwkA6&bChBN#~)WUBzGg<=?`ss*F0opITR_ 
z=Tl;B>e0Se&miwv)2L?mWLZ%4bk#Jo)YR;^(paVVZzQT%jzA2l*DN_U{yS5rjWkty zRU~pRA`pZnyN*WI(@czD6#s4G2QW~bB|%35%mA2FmfY!d)vN64Z{-UKL3RB$`n1$< zN|!i$SdY8AKCv$=Z^LM&{5X%tFOGr~e;?JVx}`56EW~+>IoBqopZkUx5sThzVv-Z1 z_gV?=X&~NwxjP!=47HUCEQ{Sg%1=JZUYb*xU`Hblp8U=D&`|%|*K0L8-OG}g*|O{I zc0_OWZ}kFx#xa|i7ePI$WOd=Q&#BfF$_*Cgmx58%g7Rs&Y1oRrz0F)JCd~Bvt=7PGq9>cG4is^3M-ZCczh^=_dKFwOBtA4eH`HdN$APe`^O zZ`_RSr!tdv+=CHdya412L7F$zE0^#&1UhBdC1;Zo+5x+;;##xDvOi-}RSQQ8)W5?Y zgqSok$FbxpA<~5Rx)JR>t6&kfR=LwyG{=3TnvqMv?%V0^cuo&7@6aM`$)^a62faP* zv0*u0gt75sfC){a#a|UIb;d&IBx2YoMP-EDE?KR~$uc?&KXap$7@hCcL?fEZ{RD3Z zX9rEXQHz5vyG#UBS^mnQhIzT=xI+x(n{l1@|CXQl=7*)n-b@E@PWe7KO-pKR?U*Sr z1^OF$Sup(c$Vah$hFP_G2o1Q|U($=3b?L@?3XYG|Rkhs_eGySQ+$q=!Hd>n;xd2cN z{5)g&%xK7GtU|G{OZ_}%Nd?|fNszD1-T|5D<5Xg2&UKm|`z^7;xuvxhuwU(!Pu`D- zvb0Mu1?x(6T$0$ONwgmm={|)|3=UdRW;Cj)w};D>gfP?R_}3dxoE$fcoCyqeC}=@P z%hx}pC9GG@Z72w?n;sXZULMfe^M1XC@~ZaJ^7JlE0a*<#pE8k!`&!`PJD;Mv4ZhoQ za#OyCe5YVes23-EAYlhb_{qf7mgN!nSkbu9>RFC;cMGtFf4@gSzl~pzQsJ>u51;!X zM@J!d5pb`_UKg?D>xXk_?lRusm_yA~;ekpcArNQp&E8HfieC!<>2;=2-Qp&L_ugRa zuhAw$g(Gc4$!88lU>avYJK3|pXi8WTBw4vhimt0u`tUCvcfXlq5qi5$m}-H^%^tk| zcXyPf#-`JrsfRrsO@3P(yJAD%*Dh7TPrs&@S;HBr9h}hdmyGBKJfEpEKKtcdm{RkR z-Wz&10y)YXEfi64kzXZ_&@%o!)`2m&e1s;2st}sZK|VL?C$(LklvF*PF=T9?s*_Ib zFmWX?QYsQMZ|0X>qDPwFalOO2cC@Rs@zS_L=89Rr*4JIs#Wih6tyClRDzC89ReH~9 zXqut~hLgws;Rv;46U-(ZTgKok3L9$B{;EThnYONepFIX8N+qFdY<=B2w-{Xz0VU}(!h3b&Wy79r8^pL!@H%%KuqNhIyK8f4j55MH$?_CGbdW`P}h4W$pV-x~E` zr%#3CEC}Py)K7-D0KE%p<0PX$URK`A40k=Nf3+PtVVrlWezO(Yt3sQgyd1j!^2Pj+ zRjP&h?_)IGmMe2b{kOmpw9^S5pP~Ep0x7r-okd#~kdC5U*L@gZ|-gpJuJy(=-|9P9L5-YnrLrv?SkB#Yy}|K`(ZbAFD@+(AjhM z2oj-3*Io{##(cm_kXz_^7fF2B#1Oz&=`@&9jDUEt?yU0E$p!e_?sSnNS{p^A>VWxR zD=a*8UZb+|bX`1Sx|)(Te7I*Ht*G`Vy<|`L>%P#1eB;;%q_chUJN8%I*xUd-Fjb59KB)gRo(ae#^vq4-&n zAbzQjWAnq&xcsHE-%gRcrd5;kt`q1xHZ}%0&WzT9A zjTPU7b;e}}fQCLtjpf~!@%q&+4VFHzk8_Q;!DeF5eYV$H=*`PIvjR6SPX+}i_fo!D z^woF6z9S3re_Qf`4b~7@p_I1M9M|R-`sXXq& zSjZRNhGxQx`#>?j0OQh6*zdbDSyg@LIHTBh+^b~F^`-sMb@cl$lRL~t;T8)va6s?# z-y-K<9&*wgEoN^7e`TK4*KUW#vu{WCA5XwgGtatV6?(Ni0psH~`kP-+C7r{{5q}SofLLD!PmAHzGgxa{qvugFEE}1h z-UGlU&3L9e08}^=c;Tf zS?lllaYfN@&oUIJ0)4 zuH<8oVouKSkC@lI1x;wCGvlYTw;2F>sSpyaDs^t4e&7xx!yk2+EhFk+7%Wh+clOmy z9(+-{V?W~8$GJK7i@$4HG*EBV<(JrR_6GkufFM});mg%kE%V+;a%{%_rG zn;)4Rqw3VzAFX@uGJDfLZXv(lXZ$p$KoS&AO%c?z z=SzEYB=+D9{k0m?aD3wc(QRYStHU~kf*0&Ui196jgFisyLhk4r8bk=?ZjVNvYT9_b z%S`kl3Df|5!_i>P68HYN8^UY9xg~FA>o0q<%Sxus)b&o6361x>Rq!tBh`bR7lIjd-i|Hdp~qf1q&G zx^HKJai-UZsQFB^nlAfe_hXjj$Sp)o=E>aij447^O0seVa20vZXOqk;;z@u|Wk0ea zD&*@c&rZ(yoJ)^CUZtH}5spIU*`Fwu{q5&#%Q^0T*`}qna$1;A;ph=CmN!`Yamvd* z)mrBa9iIgJboN12{azf{Q?mX3xJ<46Zsqf%oNwP`@b)^PbtP9$ow7sKOg#8-Zr zZM*9X(k@h>7m%ov5nV`lk&bz2$uFhLk!s4&gxOcFFx!e0hUii?eb60@uST(}C;IY* zR22DFe%Oc77Jalkfyv3~2<+~QpNb6Uz!#8)U1OsHm4`NuRWA?Hv-zDvJ^>Z1AKy)9 zqDABdT{}Xr0bKA`yk z3rX?QcD!qzQ7QT>mpSDsGbgJSyJAsI+Y#`q=4daHPw0e=XQO@O_#4a8l~A(9yQf(8Yny#x z(V2>d9poLe$7HLFqkX49PNZk@AAvSC6{KL2d*wO65t0s_$)Y7my@kW&yO)FQ7@Z8M zvw9foML?`cMSURSp?0#5-^{+|aj&++y?fl&T@9()UqwjD%sF)X#^a`Qnr~|&+lt&W zD$4=dhZ5F_t<&XSj!KDos-nsO)Y}ovICBK(W~a*;SdLZ1yg@oHn~@UdFFuc#aYY*Oscs zuGc@fN@2>mzz}Gr7E8es0;+nTc~trNW)LqeW@H7n@pJs?uOP`DG%r<4!!nSs4n!S# zA~=-4s^F?Y^*)~rc!PuzFS`V3K+TJxF*GsMS3&vNe|aa$pW$ddxW&3PDwVx@pAN}{ zMA;9~L+wWzhjkHeoR8yfJY##NI+xde6UH8+fzdPrro{=r_;81;=Q`5=n4 zqM&W*tu+#2vQVPMcBt!(_4pxUAJb+x7VTOClwi-W2rfds8&elLbx1^c;#I^&z%Lse z%1%*~I1PUk!}!@Qbh^)%fM>e06#QiZUuZYBdo6@%l882)Kkqs z#qOga4zuOgTTdj$U>v%-h?Hic z!iEa;fx&iD?3{-J=-IIpsowxKWNs``G)bfFmJtX`7on)s8nl>4*iw)@jLCy`dBK`=Yy}tR|9|-5ZMn)(WBTL^fTbA`Q*2FbpvaD z=K0vG39I%C$w*AWWs?9rXUx6N%!hb@Qx3A0RP$O(Gug?*KGHgQmU+{j8GK3H 
zQGzxfgB1SN`hrIngoH!WLYHid5;0$4GH3PG3N+nnj`|%vQU~X^eioJPAwmQ?>Nv~* zji8D)JchqGW!Omc4Mx)j!3`&HTsbt$!^^KgIFPqK5&n*&cjbEYI@0LsXH)Dh(!hJx z5xV?YE%gA}##)hApOzL{^a&jZIOP%VrBOQxyyyE<_j`1%S2VELaD{5j=}PVILe$RpXhjoC%`%D~)gnX-jSkDhJ<6tm_^=uaNunTi9Jk z#Eu)=dnY+vG8L_qVLkKT+xB*KxIs|(#yc(`rYq{H-Ai)0=v=g`pav{BLaM7VC0Fsu zl>b%bvj|Dr=q@edn(17Q&Kw@t8`8;RO;@s@Cua0km_jE)Xm2pl#)8 zbK?gK^d@iEKiIjnmU8vr`raW}NzT2tp(j&M2xk68>F%_%Rz%LEKJZ4QDJZpW3_eBb zko9cJ*gqi>z7jess8X47jWOI8kDoI3#0gE97?eAgO47_%gHPlk506WK834bOEX32w z0SAVDjWHCi(H{<2s*+t9nwt$|fp^)s+XYUNreYYInBhOntO}veP#P^`4+S_pTMy(d zZK`|UP(wOJ`%F`nVG(0;6kzon{xKo+ivp<*DTk2N5X(s2_MfFbVjt$ufACGpkxc$gp)el90E6)7`5unl+b(NxXzVGDw}Xl^;P>`2MT4W4rYLx~FQ92;t0WN@a- zT;x|lqXqiyJE@;SL<90L%QDAPvd8U4r|FaXFo%^%N^^FJ2)`6`2D~WhKzacwz$tQOoJa4jb0WKZx069Y6zjIo^+tV+^)6LcQclK;*aIEI$6-I0Y0K<6 zSOGBB?mpAtPEs2hcsX{EQ}JCHb^4j4c-vTe>0jJ*{umq(tgaTGDUwJKNt5DSgU6b* ze~&C#*&xD2;st6!^05>-2T`G*y^0Q#_m_l0t7h*9kxE7xy$59G54>6IDGS0=ICX2^ za=$?zHoKVACm^FnQAv@nQekDnq=;$0ur~)QUI@(*%$#^LO#Nq$4Fikdl^-QE@B{*xDpWY|kRAbYPSOXVOd zD5a4pM4i^oGn?4hBeM4d(3MO{hC=%Y?e;+INtW;uI={kFi+#Lwg(y35k|R78k)qGI zm31{Yxy60-Biw33DNZ^u?l>#oDHj#BHb!wD&XDt;BXsM-e13}!7j5)m94EbvXt-`d z4^ZohP2iF`IKZ4opu+Q%Sxsy9*p)V8n|JY_7)p z2S-F$6HQhAQQprDY2m`;fS-Ykn~UQ#eKx`?i)ZSssW+u_9(CYiZh^PWEqY{GK}uD!XL)UBpoA9FpwyQ&G47Ky;D!LNYNKy`bc8xu3h67qaovabLnLQ)& z&HmTMUhv&)^P-pXf&tk{Wg)0LF2C***NA5sCxZhqTDsv&eE5~!KIzQX#NdI-U9*h| zYxaFwgCUG(u7$K-r-rWlt@Q3C#6K=U%T$rIw$xT1@DLa%%Mv4Q zO4ADc{;$oyRKBuyVFADXeo6N_IP-S_=*i@bjpV(TmS*$=Uz1YTIAx&5*(I&6r?e^` z`8vSR>_@HGV=4Fpv}@sb*qJm%Fil*4y*rC9*M0USS$i?lPK~AaGvQQ!GW6-IG`(`9T;r)wR7-=X@H!{FBk+$ zE*gZzR|4xi!U<)D!`CjzI^-Z%DW9Y5!u-U((*>;0fMaA8i*);>7Wqq~ze!os)u$~$ zA@X*ZIy0X}CYl_sS{_~Jq2cS$aFObu_0=-jG@bqB;G`0zdwwH3uIo+iutu=2kH#&` zq>Bx%G5CZODA3D5IS$#G@--70^k|Yq6!=?)4;C9o0RwuQ@13^Sx+MdVWiM1wx1h|9 z5&1>ks*<6OoHaCgZymrC=M)qBb-!5PMVTmvaH(Wf)L>z2;iOBwM0%Y1V3ui7#Wal` ze)}RbyYpeR<%r?_1RWYjaMLJFB8C5`xaRF|{Wl*jTvv>@gx({rHf?#rKBl)Zq^LdetMHQKVlm%Km05cS|o z&d5o9WJ;^yEana6I)^$uPE)gZ7p4f5ss!jp{!DRL3!Lyi={l<~l#h>C19dy{_{)bp z1kGX6W11KqOGMr452Ka)^~#5Djg&f(XUq9VtE?};7t{v%2ige(gB?~$iFfZAe+Bx3 zhX&lFI-k6pLwLt8_$`v$`Pw-cKL>fpeiPE?{)x%tBA81Ka()eIa7*4FlW{u{VX%>w z=O;pgR0@NDKqp2j(xN;YrC9DJ1;n=ACu-=ed-p*y4vUXASxXW zTzm(XA`6Yb*5*)uX!sr78CkGd^_YXY`wQ~f#TySFoQG;1j6i`O* zhS<@sL9b-Pl-+DoMV$wXNQLRlMC3)RhNdQGjrqiGDQmIkAr9;(T6$oKhh-A5K+8sK zH?@gpcUH(E=Vdob`F2Pb>g-Z0h>o3^Sb6f6RFmSX9}QnjK=8~!v(YkSJzm<{(B@9v z+&x2cz2T?IV6l=%B8|k2^q+Pbt`ex%Xc|JmWS8VTkDh{b@b$~gbFl0YZ#to3&1g)D zA;%G(z}Md5$WltX<1P)XG{|*lvYV89IVr)f&N4o<;$6wGozi?>P5|Rj-@U9>j2*C@ zk&#sh0|Lv0=&e$7co_zu!hp${L8a|{&MNB6dI<7A_j-Q{{YH(&c0N8}XaaN8J2*Y4 zqj2$8Ec+Zr->+?As^7k?a(q4D1*!g&Y&9;6o{j?3825f?L1xYt*{mzBSB^)Uydd#$ zk`l&^4T4d)z$mpn!?;n6D}qnN6OCAMhJ>Go+}&4={M#n>_T$~Q$Aok29CQ^{jr`(?!c~s5 zg10j)Q*_%st~TMW(i`^QEZ6(-^CxP`_eMdB6RwRR{p%d2qY7!{8?P~TE>=R~)5=m+ ziodz;Lr6eK2PS&cwZuQJVSyhiJFcDMq>yG?Nsefnl5HY(c%l{x?Efg8XJx3||Az98 za_#M;rTj0a$;O)@psR)88Csx8n`=5W>z3fXsb>MHd#}EQRmn*rt+CYJ4_VkIQ4_6qmv2ttr?d6_r3GP8QRV|{ zQ-Tpp%Eog9@@eH+(4!aV*d*cZJ;UxE46=QtK5m$46}YdMZ| z0zX-qkh8MRyheVtv6!n7)_<++mMZciU+=siVCPKGxy`ilK~JZjIR|iG72bR+#7kcs z8k9W$^~$5WM=61mFKKUD9vtF; zymxdrO(O2$`Zp>6^PtO&81i{_X0r=YsLl6OR{5r!JZ+=Ce1pA9)oN8Z#Ip(D?+2!d z=`lXR9{C*f1MnKnUoJGuBXd?6b?-L2FJgzvpNE?2udCaQpm&+jwPK;mceQ>;|CCtT z-c_}~$0$AF6DW9VUHfij&lBhw8tzKh^lU2mbd83_WHo3M5HuU%C(d3PL7Rqv6LDv8JMga#zC07)nv_7d` zwNC<6?bTZVP%IlY-Bfk7hqGXIa0~A^t)y3+hY}E1XF8^k#|(T=+78_zj+dP;b>O!` z^cM2QD3q&hImUHt9C7o?3Rx#EX}~fziFmGCTzg*|jy|lGdn`}(`Kvz6T~B6AAF}u%;MRZ`-D1fRyg<%Eo7RH|}-2^m&(KgmqpS>rt{( zq#k4QAJ8U}MzYfbk#{fMfZ%4!jQu~%T;`?Ll&<`Hit_eG2~ZnX 
z%@TvK^}O>QzGORh?$fN{4Od0)@%3cKzepC`G?Q=_bV<_(V%}=@ekqQ$D9_&tt(^SX z(XVrEKvPwJtV{kM(@x$!B{yw_6h?c(Pm*mfZ9DMiDU3tD&rz+016r{QjBJsrpTzCZ zZEm?GM~GPX=#i4Hk|)(hTU_MfuAz^Y23d%iCXg;#DP!C>X?u}!H-7H!y<3mXRbwN= zGZ&5=;wGfZ{F0_pG`ys$Dzx&Mk8k@XpvFz_JKXksyCHu+!lc*9$e*#z)E@eE;3?zr_xES2<>~Q3BNsdHzb^H~92pL(`2_A9oGE9lxp1fjr9W zo#2_h{dfGb@M@5ke{f9}S)Fwsf5@o}DJtG;=GX|8o+!zoB{o)Z&+?Rw1#-9II$DD@((oB&}j zGe&MbcTT9(;w1{VvcSfgxORlogj zzlArnE_WxPNm=CZteP!LHm@4*fcm}0K~Idfh&K|Q_?XnNvk{Ow4oPEI9oY#O2q27UU^$aR;8K5eq#LnGq{S=k2;DS zHV?PLa|m+ETI;1%Gsj|?x&OWamL*NMc=b5gOS$MG-P`c?hAaA3`kkBjj`j7oTECfg z3+c^2&s9jYPOGbJ#QtFDB|-~6L|E}4@Ab2&EE=@B$rb}~D@jh9;?_Ey0iL(bk{y;H z`&V`zqxU1q>HH?}6?)x24vhtuXl`CYQ0&O>2cdMK#N^ATBHbRr#r?z1q#jj;3HibO zKD=;b`HT3kwyVZLBn|jH=)Uek8C~Gh4QNOFp$OUav|(71`6{)DdWDVD35U-b_rF%l zB#Ks7)d_wnOdrrM8zX`$*`1*zK)Fz+K*>m3AUGcKgqcQS`kx@lhHmHccM zIUjrIsu-nTMtb1VaufgtiF|DE-Mg#DOk2oIBBIaC0AjZUF=!(#e5+Ln?^sv5aG_SufYWcXPPx{w0 zlaC(Hu0Lg(Q)lFzSr&fjB&LW;KSm7KF#WlLd?=~`o$Aw`dnkwaL&a~`>+2omQKT&R z>aWk|V1F$)BV>jb{6mH8X%s-*tRBg!)-o5&xU`w%Dr6lo}Sk#`BMyXEy}$Q`cor+MId2rHP`K)K6djQ>_ccr#-+s zK3d^@>u;9Lx=9?^x56#n*J78BwrLe*i7RLnxfG9I#^(?=ZN^0F>PPjkMMu;m&h98duM3ZQk1Oj z{-ODcRH0ThO?_Ag&>$6@Zgosb#HNu7Na^@j0+)F`^EGLOTtvHxuM+$k#}YoaWBW}S z3%CTv6!J(<(f;`EZ~%5hOn57SB02~RbwQO3dq>ld~G2C+n`bsXQ`R3KN*%k?E2j*!Sw$Z`E>gs_+; ztoP+5dU3Lv&m=2@L~2DOJqBPD&E$ygTR#L6ZHA3Tm{#~aIe3!<5EqC>F|Dj})KYmC z8;FsF699-Q@d~TN+@|3~`{CGWapPwXoUNw}37^iC7(F?t!m(Mu5ck&eS9bS~Y%LlOAB3J3!GVQ>V5(;WfQzILA=` zeF4i!l@n6FiP2#?TorWYnAW{z9EOk*^bHgNHG_D%e&oB^f^LLOhFkG0LBc23D^M1Gajt9xr@?Mvp@v&4wgoOHRsc>) zgGGsc@HD0CXOVhgA7yW!^A0>(PSMP9iE}Utm$jYl$WwBv61n<)n1-nr#vQk4fIVsG ze-jU9XI*U3a^mEFRNYZEBK36>Q=;NfPVupaf9`=YQAsaBA(0|_2(=S=oHmzw02Y?2Vm@J~ zlFcpG7kThG;?O6_BuI(W08-;}^|UO>zmLBD;s)!(;E+*Tn-5xIbbWQ)kZ>lV;3E^F z$EGnu%MQT58NitFaj)55yzc-O%a+SS=tm=yA09W3l1bX*A6vCs9y+^2j`R@`g2#*( zV=?vQqG6|a8|mbj*)-TmcG3I&`L+Y1o+7;o^HRUwe%!?|{8o;jq;E=^i*?#u%e$)csj89P!DqX|4u81) zW$@OhR?Sg!5m8A4h0^l8h4GzBLn1RPF}+GI=gF}50E-J(~78pPeqwAjd z_D#v4{A(tcN*_R+WT#3SNELSWje|mYG+bE8dntG4_2`S%i4xXlE+2;&bT&>Zh=t!@ z7#=+j1L@6%4f3~GpP<#6<_)&e^6Y}V!Hdp=xNhUZ$vVh7fZ|bO$2RSx@;9zgiQ@8>CGVRxe)un_h7A6 zg}{2CC*k`%&kp~*Y-SEqskdopPEO#I_lp|Z$=d$l_jj}YhStm@7&)E_2cN;xiB7D2 z#hQ!C9LCPcYDQ0W8Yb}-d5@yc&@T79gd84#EsbH8Bj!b;Rd1(Lt#Uuz%=`1r%C>h&Ne=dQ^_7^xvuyp||QlCNRXj3mb2$`re zch5K9P2;`rjWGo8(KKbfZdU(>f1oGn@nG^Y=Y|l{tizTzZwcD7t+KU6R z%aPBz>L6@aa+9ZAlh}Ik!-lS{>-D`pWv>h*|HvKQ6CPdgT^m8ss{)DW%Pl`8zU_HqR*u* zWGwE8Ku&mMjLtS+lLrFRT33}PN=wY(#A39rBTg9jHl|qCy~neozq%#HGgmWtWkXEH zVU=g5<+N%a#sGx*m?ORi`rb3}`}TC{Ii~bn6y0>brru)om=nIi2^iS`jC8BLAO2i=GcJQgdkV0@ciHQG{MSe) zV5GI@#?#J2?;6n6KJ6puG)!ZISh~mI)A&zO9wN6^UwGNICnwmqtyKoZEDhXRbehvJ z_-kNHHPpOi3pFibMheaUfhPRhWk_J2O6tLID?5d4lc|nj$E<~}Z3IZGC~A6U9+@#d zt?@X>p><@+z6UHj<&%^(82<$SxujqvcG#I>S`B@_l(-S>In#Y?1!6i3Iu4OukTEvd z<*8GO?Oq8TFxtRT#}-w!>i0XRi*x3hsvZbjop>ej=7catR#%3&37?uzPqWu%{Ms#p0Kv$F;iw&h8fUb$GvUh1L0W+mIq_r$ z^|)nwe`zmqYEEN+sjJLA!K&3W>Nzfb!(Bc}sRObS(2LwB3l|nb9d<2_)j?E;?)uz3 zK!ok;!`n!{4;WM^Uj_TIUH18ypR3^_*(}}$qr(uF;4$y$#KI3ucDPTb?+^(%`=n5i zljS?<&B?+T1sgRg%kAf^eM*Y5U}%~24;n~#?* z6Dp)sSIUL=L5;7G>ri+~e6Pb4NP;MF`Ch<}aqdo6nb>fY&jJc%(0 z%1$3`m0_$QZZj5Sx@cCy&f*VCW~_zar7#wq=rOe%b#nnU13iFr1g0BPI+&S!}bZimGNs8|JZW4Z3RzcWWDC=KuzehYB zZq7xmXOT?|tdSeMD`0Pv@>FGG8T0yJw`*$elJK=CZ46ZV6uE~JV$`mT$A)DjaNZ`* zzYmX+(&3add6KbtIsiW^HRL$T75b(3gc7AC=IfS$4Z=WOW-#L-P?X(j;im^ft zW92H!5%QR)c(jateZ}l$onOO_p-D4mCdu`U!RwlMQI!SE{uX2*2)Qq=TJ(*B?Jp<2 zR#kTCZ84$bvR26qia;4uMx+s9z2NY_;$djbkY(nU{`C8sN^SeywXAnaI$t(yJhx}x zODO!!?309owh6BOcbZnB_+p1A`<(9lIF0QwH?DANIHSax5<}w2rRXR4AY0rI`uP2W 
ztA76F-VJ@J-a2*FB_#-q#9g)xg=_jt%$_{W7&9nyi*`KeWqf8VQE2um4p4-T&}!s5 zy+gG6(c-ps-+n=jZOjH;7$zDuWuxfU-w4N~|K|NXCF`x>^uXKy_YeBio*&gj9zcpa zi>#ah!PKmNoD>vhAA5mDuytmugIQSJl2IHP;z^FtdA>z-PWTNgmgs_vFOIcqi@1Z3T;`oj%s5T!u`g(jWup>ukFGrfvX$81k16f|d(%zw z+(#19`$2sG9V(MIQdsu5@e668?04UURd>thl|7e+qV@j#5+;@U-Q2hCQ>KQz%0n{Y z8zzyYHL$&Oiyr>k*dN?xSnpUj_hbs0XK4MhvpDyj^k@C3xLe8GuZ zzHpfCN{cG?|A##XsbLh>Qc0P^8v$EU{|v;yu0AN@mbRht$pFR>dBlVvF0WiIop;Gj z?-i_9k2xZZ1;)5GM++5)Lg5Fd(GY1K;e_<6| zqdIu*zMO3YM(J9(UeCUwa4SqH*eY4ImFwtE-0>Z#RE_pM)Y2liY9AJO6i^2__)vxT zw5EKdD~u_HHfT{@7f8mvBSUrq-$yWDb@>G>V+my_I-_}OW6Krh&(n13gs&YJP&0%Fu$*prGWF!zTYE{lGtN$x z58L-FAzl`&c30jzdt9w$Pdg|FwO~U~Ii~a7ROC-9K^;{c0M@0-FUdHRuky(cST7JK zZnNd^qiVIKmup6|%YSZg%<>3rXOrrs0OAP|W0T!i-og&=+y4UI*v{?VG+=RF8u9FD zRU{07&{yN@DeG-_ru_4=lnZY=8=PQ&Z!NTg$Jsh@VJPXIBl2f?Vi7y8hY3x2Soh#q zb7&{rl)ary= zb2*;B+gLFEiKFScI*nNj9;7_FWTQ4J0kNCq(Nqq$!Tmf2m~vskyxcXFngune{;LC{ zETAdf28pZMphIN)b^0jb+AnOIdL;Jh-(Xq_{<5wWdTcp6U>s<;UVmaeWCbEicI-v? zAr%BrHM4{3#(qJPYfj519Fp`n?raZzF!4<6q;tQ9aoXX2g5hskegRbev@-PZuIWu4 z>BodnW&1JBdH*U#s)ytkl>Bl7O<>L(GW0%O|N-u&!fvhC{svZFIL$Ti-Mg;yv`ecYtHf_31lqOIhceOIPt9XA8E7 zf@!tZkzeW|!d2^e@|b!^s@nZi$r6ckS7q%7-`6}502g$FZ$v$ZFWsEJEbkJcN=Pg* z^52yZdpl^ll`rYuJaesH!Fu6tu3uaZ8;i6L)8xYYC_VV~U!uP<_cu%+^ z<#AQYQbk&G;3Rd^f#VKlrWpnWA${6D%r>7S9#%h=k2{sIPiQ;JYV5$(!ZVGfpM>d- zPNk@jo2SODt`zR-D>aJb1H@)1wEZT<>x_$JkyanZZW9x$gr-GoKwVH9^FNK~q#B7r z4$5BrWWB%z`kzSeVd!*CqZCcISa_9gF#Fg+Tf~c|qxSxg1l}w!#0mD**i2_kt$rELzAY zTKijD=!hs)Xd;W?#AwR3dKwJ0o2{$kULWt5?I+l+ph2K%mLSsDaaMKTMAb9@AzaAv zi}5RsB>7wN?;}4>$19WU`U*~@C&n_~!~1q0tiXM>eDh;NMI7a zqcgN3|0E?VXb&#WpoOazcD2vzQmcg z8+#Wv+f!N4o(|>Aua5rnmS^F(YvgBA|!8o6`($V-WAOC=3Kp7+F?3o&uzv|j?r*bC18RN&U+ z;iixMZ^#*Jb{xx*Z}E*tt?*&y|2kcvXmX4y5mwh1Oa)?}q{VY0IOKPA z>Z}NLIXD=5a2jEOJMM(%@4Lg@(*eJIuy;zR;ec9kH_1|8`>}tIW->q!96+)HhkUo)!5-*k@EOPl<3b@s~=jd3E8#(B@Nez0@#~+*Mv&y_d2*K0gev8d<&sp zklwI>pMdN~PEHoZNAmADcJmn%UV+r?!#tS&Mk;=k7qUNnXHFWT(_7+mwZAVqiD3=z zW^Y|0wC}QjnJ5_|$nGbrp)DGsJgkb94Qvf_;Oi!PxBG=ZFT@+oG720oFAUY1F~pUK zdy7wBf0uV}b|4pgiKvBGM`7Ts!Pv-{ei?+U0PmZc= zzj*k@Lw}Z_(=Poc&|x^Kh7ONS6d61E|N5_IJ_;_SU&9*4{u_1yT-8za5L1`;m$z*& zoqBc~g9+Mxsdst4^$xqA#^pTvsjxpHMCcm6s;bJ^JrI_Z({BphAL*KRPmn%o@~mqh z7Qv_VrgH-VhXYWx8C`gBZJcQ-ixC{CuJPB8*;FB&M&gC2{6@&gI*rR(h~25%sdfw> z{=o{cnk6*l8nIoiEQ4-8sD5&v?yNX$>E}fotSg#xO_Aa(^n<^{nYPB~1-Vo7F!qQ1E{&DCppvbjL zysYbty}i1__tbj_6DwIQ98-eny+8rY1K74qqX!f2XoYuE-W3M3!e^~X-`{HG^sQMI zQ-%-y^TA9K&_xuMM;?z|z-8~zU|og+T>x>-)Y$(AKQGa=w^Q`Z3yIpH4TggL=pRw6 zXla4loPedkWjn4Ev%t8#t49}w62~@PdF8I|^m@m8L6KHJlhG-!nC?H)ElRQ_O)3KYZL_v?(3HQjBUN|6^9Yp-P_w8ds)!*Z>X=; z#uv%IsT&K)3sUKqdaLp)t~4DMmz6fSe7y!YaOOkq7#z%i@krkCNLWK@0kISe>b?}P zc$~IoK43@|%}_Y9WV0F8S3aGZ!hWnV$6vw>>MpkWx#c>4)w^2a>fi_+ocSG!5Mz>Kl6iy5OFm z;>=$`>cxGrOZP9l>2Pz2E$mxXox>RPBIVYl!cGVgs!b<{jbb(x%00h3E6hEN&3SDr ziG0bPHI`X=lttaO+4Ld=licp9Z(}|TN;vFn#ZCUNXAK>N{_BPhIu}ETUgH`--HRVr zzNGPv(+qO!QlRMci6)CWj@i2VzI>vJ-$Y6s9--n`gS4YT?Pg$edtl~zNNu0|;*AQ2 z%^_Dcl%Iik2}T>BM}Q%9`H%xsBg^N*>_t6QAirg_3pSfN}u!STdNGDC8R&3z1O+; zzeP>NFK+fu2jmQ*!Rh}Bsms>a8dqN|9M6$J+X+fG!DL0Hs9Z4)}C$OTj3th^FFL!qni`6*+r`HPpXJ_ zC!19p_V3l8bZ_7P>f)(Hoo}7{d|tyYvpYFRS+C5m-~UJgra>=yaX|}1TEEE7W41tF z58+rAO#l-JpaG=8ocIuaxkic|m+0hoft=7faB*evxb3Q>i(%C(d+w<1UTX_ZflI|T zFB;IQ_+fMoFz+bgu+!KDtb`Stm;Cijk9#9nZ#vMY0D{P@Ia9_6D1 zdB;~Rr#;$_xvesH#`{+N9$b5p<1sR5$=iMaIA&{}>#fEtdzrDT8F)Z6vxFT<-kc-T z5oi9;4*(a=i+Yr&CVf$Rsh%5nhm2}>i~w7mwPievScpSsf46uS_*~V=u-KufcuWNC zgx~ugXx@SW(m3P)x_*lFMsl&)C9>&QAexh>0t_IO(35iHX|42tw&T-lHK>aMRi1qh zChdCm;u9$B>!*Hr0y`p_k|RZ%YB5;Dpo8gHhk5zg1rBa|?p3XS-GIuTVMZ;}f@oHKOqe)5<3 zxd|L8)gH`0bi@}Y(XG3VHhi!=ei!Z*#a6*9UQ{eK9 
z4Z_qlAZpJlpE>q0NbaEG@b6p6_7TS%T&+=Yfi>QQ@@yK^&!Hw+r8{t9&z1O#AH;S? z8|?Y`w(~5<%N`~RN(px{dQQvt8hDwRR9IAL?0q69B4w8iO|h=2C&7-78)Cm?ofNg$ zA4t;(*Qdon;0o5Xv)I_mg!IGy^iNy44{f3pcP^9?RX|*)5{FN46h21>4Rqev$FxPL z8$JZjOWM0>pka9bWY2l~Dc}YEe1@-k7_4rvpOma~F?Q{?mK5j&zBS_4*e9bJv4y8M zV+|a)2a0K)n=`!eoD;q=p-AS?(+mbTR)t$PRwG&SPe+d^ddV3ytu1`hqv&V*o6cv1 zfLjj9nO?&`RJo5^BRMFzu@Ya|SOk?Y3#Abw9Qf)nr>@6kF(J1^znyj3KCl11vD&Zs zIrE5}$>}P>zR0+teuGZ1EKvK|{(2N?M{w-7$&Qj#`s<3#J6;xqEddw28W+B9G{%(6 zQNJ7eh12#BR7-u7f`U9b8@U}%$hzpskmuC&4#`5IpM0JmdG>f7=m+m*0w3kvqIqH@ zHhp`(4k%OEc~aR@P+{6=fL%@E8}HfKlRmdr|*m@qb^81 zrmc>r`APIvM}UitDt62Yp1sFM!ai%}fH{g*RB3-CzW349`|8%L6rJZ^03KG6W&TME zbA&t=$=jl9WG2^6pT-@NN2}(?4%06-Uk4B)TUzwB^}_ zKQ{)51NX?+Xx7PMbr9a6yVNu^WXCSizw(=kC37R+Hh4FT93SMRvEx8CYBUjl_+!KF zt~slB#HLn4Y=erNNee&)Pi2b~qR{11BmkjV7-#<2KYgg)S4zf#eQzju z!&bpr`1y;&f@-dwoxDwzQc9^F0)&Ug=zMkEvxVy1TFdzE_$L7N1H`Y;)#*RXzeSE9NOcs9KLYn)Lat;l;=+e{S~lhb9EZ!D|3u{TCDUY2XJi9J&U9*T5a) zDN!hRwa88^?274K-`S;z9wP;J7R~%8z`~f3f-NKe3o~bmHb?$ZuPoZlAQ<}y{lwap z#~*ozJIwK_iF>2Q$v@LMRbe6D&S_S+Ql~}c@vdFYxZ_nM8Ph|H>K~{OId*`m2KBU{ z9cTXa-G~F%ot$Uu%us(BGM%9LQlq_dHBh6ye{N7ILOEXca}z1&;1V{-@?!C0Rpqg$ zCl{;lJ)!LmHPsKy_ObH}1=mlQkB~i5_CN=^1IJY+=zQ`Y0c;<-={GRt)$c1_L=a5SPeU=pc;Z2kW<&UlYUM0oJ?wwo|p!8V0qGdF@$|zc-#KE)7gJgF{ z7cC_k#F%|PUmnFPS(>ib?0J|M-lxe8GE`*-W;>{ST85apn1i?(QQ`0+68kZ9b0jPJ z-RL|9#&7oR?DxgjjAf_P&2)9AeTkpNSd|@GJUI;gtEtatO#QR#^0bAYj?2BGQ~nc% zd8a?p?87E!(){>y8NR<8B2OHGjeXHC$4cB`lPVaDOiwjZHY*P^16%OZj})fn81dgO z;NJZSzoxIRTVBABou=nuDi_E5_~Yyw~n(8f5x+ChRwvldzQr>EL6=u27#>2rrFW$2xR(EgHU zxkLM+1lCV$`L!Depl3zR;;^q1Z7>oRfDX>5^E!_mo+VrXeMXFDrzgKZy=+qoukcpg z6DbV*0P?11cNCyARonKT|Z zRP+hjs`*SS=n=dJl)mR6?#jnXTE586Vd}wHGG@h5a)Z8mYba>r-B5dch?h3jFOP?c zP{=UML##6Y2PY6^vuRZh=&)ljv&=>zvoymLfza7LKcd?(} zXuS|XYPEyq_1VSA=PN4CKgp;^Q@#2I#VE$)bW*DI7Y+>zsSn$dcr%4goZnJ3mCDXS=tvZxq6u1MQ^5mkS*g?H? zlM9DW01Xrwsps&5vvknR7gyi>K%WiG-$4e_gz8K`{!O!n|8G zU9bez;~B@s&I)8|k-Qf2)gY(fP8CY1Adpjba);v7*yA<>NtsiY7mL6p%g`SWze}5! zENuX5L5{$DaKEy)N`_4W<)r6x8`m;g;>TXT(OnysbQ|ylxLE;i0sWKYj|?B;<(klq zjOP7@g^%`w>lEk57bP`#Pb`C%^5`einrA0oj3O{wtQ{f}&i zAGI`23i^WPT0&WIe?|HM<>c0%0mF?XHLl7SX7YSeLu8_?F%lV^$xDT)k5U9FV=o)! 
z()GaevRU@?vO{eg=DQO z-DiuAxSy!jOw|QU$R}2LzOCESkBzKd^z<1G1nYcyv=3vgY7~XjvTXi!5&ry6X>e!l zlhSI@Hly$j;z?yyn?pu5*NfY5IXSnvLC0YF0{_RVd$z1Q>rbBNG&x;GwwpUGGp z{J!kA=<704Qj%31=W0Fuyzc_H?cJ*`W7tVSRHv)%FO*d)Gi@!j7|rtVKYy(0_oDy% z^so2FxXBb9247jC-*+ndY6n3jRbM7JF=c|J19jtY!D$U2_Fy4d&Fy$5tNs~f!(5Zj z7-}E!ze0b8gS}W4&D6Y~ULwu6zPC{S5dA>3TiLafj@3`(!V}1i%Zsdn$YK;@VdH8& zV4dZS3!1WJr+|84rK}q{RxbAwC$$sB3;q;xI}Q$QSSDy+`@ozlYd*tnYrIk;Q?H%5 zV(EN#;QGg2p!TGAp1b87c6yohIH6c7-dM`QTK1Ivkj>Ob;u6t-FP~wd52uiz6JtC% zr`9QK{7e334Gruo`vbpD_RlF9xL4d)9kf-t&_C;D^^$UP9)v zv-~49Wl2L14e4^N<@aPXB80XtoYITgrG7=`x?yh2s(h+LKdfiiSB7|TF+*cpb>4W4 zb~Do}Io7%GyW~X173Bo^5M&I{xhBs&lxh zA>Zj6=gYg+RI=s`MvGxuvyiw&;_$W%AEBQ0Y+Fo2KBMCJ1|m1jaOWYM_T@0VV82>8 zcC6JiCBw&;%C&6tRH$ts*t1Wwl0x>QKHzk*jcZ_-dhZ^7&F_6gQt|y<-f^poRAZZH zQ$y{mxO+1nb>&vJiQyTBNfXI4Bu{x4D3Q~9{^3-pUGecOLz^CC>OwSLw@kZ%I?Z5tkpVC0hL7x#HR9 z;wHswWafW$DDb7sL{Ku&oaBvEI^ye)mKCx;Al++YyW0|OqbPbmx*g(Q>+dL`O2Pc| zVJtRx@o1>@e5PiYjDe#|0zo6(j;5+yW6C9L)pnKAmd?1`FOH2AH`-}P@7CCFiRkbi zqf)<~K+))bsB4U!fHL~C+DJH8hVt}` zx!=8hF7qcmFC^+x$5#hsAKz+`TTYWTGd0#j^etzCcg3j<0v;`}w!wmhRD5IhHL&Yv zooyC~4wYUQoo*S1^JhNI^RM>DO)(_Hx7}iCSS^>}`|&80-&Km$qOsq@Jzr8$n%TjASs*5Izsi{cb)5qnS?{)JA^uHD;8L@Ahn zx@9;UBg>T_Y^|iyfib*M1@BvUlKLoi9+J?{Gn32fj`g=3^=E3_S74$zDEJe5A6M#; z5^tsik7%qGHrioy?s5xc??aL^vz>Dc$Y;n9`%OHKDs-+}chF$DgORf^b6c8;Vc;R$ zXDp|TmV?DC`&_SabE0_q>LTpAOZ6CuSp_fRSTo8e3_D;8mbF+GT7F~x2?Y0>79x_mjy`Ik%u(4K?b{75PMSlXXLYTX)tEoiXoGQN7d~t9n6m zh+2bmLvxa)&z)eM=KkE2MUggoEMjJ~e2^%9-v{^mj?by^ZJXO=98jvhK2i?rM?Q!| zXvMuHyaRFD0pcbwq5#F+ZGt|)zw3`g#Lq*#kVth37B_PVb3)YQL#^^jkKACRfii0VAk8{l1jXpKqrbyqpYIf|ti|urK-tDgJWA#Dh^O%8R zz`dMs&%)-d%{!GiBKrePm+h%Wc| z4ETpSzc4yZW}wsCE#U-?c!sXYjATh$xuH!zvSYt{5>hDhI(`brW@HO|e z2{lbgkEJI(tr>j(Q9YP+3HC{3ApP&k?gQl^f>w2n=)c9uHhf#cy2v4Gz;F}{AXXhi z)%~-p+EnU1v+x2>@ssx)FrA^U+D92H+r>I-*HmU8v@R#>Hns7JrOa5?)!dSHJ50kC zAaXz@Qi1JC*s?j7cN-rLI0yiuLtU9cw-gtJ3v~j1&L@t0lFZlI1)H)nB_)zR+;lI@2?;Yo2f zr;g@Wxp6%!cq=Sa+V+%~ew$sj$|?IG)%S{meDS%Uf}EQs%PnVodu`RFsU35jQWe4_ib(~;5t zOWLyo_QqoMgJ!<2NA?S+fapWB2Jkse1t*{#K*R$Y`kTZTFoMO{KG^d`-LDR3Nb09* z*3qONfV~Q0;VBY3Xv73|#M)GwVe6Nbi$C^Wx}ot+0{GzatoH!wnn2a-+f_5IxksuW zfrnNugzApFH6JPa6slj}SWYydnWRXZ95Q-}Kk+x`Oj7{$-v)fX{VVbe)M^CEKii!c zC%Rsj;`kKV+mmA-W@<=y3tFF7PPuDUm5OiAdsh~ew985o*j%4+K)L#L=n`1>spcPQ zS-g)>)FyaGRoV&b)UVf>dXOE!xli%K(75TMX<}-eUa}sa%Zom5Oh$;QfX)3oT-9E= z5zAMkN}B#IX_~^RSbY>z4dh@OVcSroAO7N`*R{<|DgYmkzqW$o{2LFns_inc`3LT*!cv_-4 z{zA_#kZ9;_Q}7NmHlHc3EC~MDi2gUz(ljc*+sAF}YJSNMnf;RX{_rGOQ%P~!k95@; zND#Y@JA`lRTt90I4Szs;>i-I%#eZAKeG+_Fr3HdDc!hl zx%aNTzdvhZXfwb@Y!=~R!`*Z0%8OfxP}nZC{UE?r9l#b03e=+0)Mg!4U1q1)Tdwp| z{GCnm!amo>$1Wdllxu)5o0+u}3+qC%HOub}D-lN(DmSgoe-SQw=u;wOhxDM4FaG%^ zAIzjAKW5AKVlviL)8;U!AZRx)2^Ne$N$5uQuV!(+30zQ&Z0oYaVo-gMBy)o`LLzz zO|W>=g6`~A^?hkCU|VHR4{ud1^;fzS=b1DWONi>b;%sMIt;T;f8ho%vPYF?pE=Di@ z3o>gn!~dTmKQ|pqc=)$Yuy({_!=1;S)8~YjOck2TDY?fO>9&hi_iRlx7auvpZaUSX zXJOJ?I{(MI_4x`1C15X88;Wae)!k~7$Pn-UD$v|!1u|tb&KcsgL}?o7u5lO9oKGwe zWB|AuL8xc;)djQ$)R+8QdAc__ESA775U5U~03M2>Q~TX;j8$SJmccd1?!;P!wzE62 zTNxy>-#>`%-4|E!?RyG?Dnp^y?S;-uoI(yWB$xqMTqyvbrk&f5N~+5yM^Ec_nZc zx9BLVH}2_A376Jk;cS+~K+>f{GzHU(^Y6~zetH|ans_{(2C1k}>}?)TxoVelGFegC z6Knr@BNVX%?TdF`!*iX^WRZ|bS@V%ggmo6hk4l|eT?4-_@d0fw#XLuYZCf@HedAd} z(JeRPoAP=NbjbNdL?6*2BV0U z<4>X79Z@6-Sh~nWYAB^fB{=hbYOgC&{sBWE^M5`w~~N1 z-&qCqmumSoDk_x`LQ=nWz6c%vOl!eIy;XIv@v((Ys=v3#oPAy*zWuOIGQrv9x__|Fu6)6HgdQV*VpjY32O{Il-BGmzubs z$$bkIjSq@HQd5mP7K^MFW-*JqQ<4e2R=p=CZG;7_Y){#l*v>vUeDBg9UDfuRbV~WI z^~PCGla6Hi@%VBFIV4w;S7XJt`ehznMb^q<{4KWg&l*j0r3uLNw<^o4oxpv4iqhRp z|3ky#mY0?=OFy3q`C57kVUq5qJqVIkT{#4#k=SWOpAT4a!1 
zfT`x&q+GPq=8o+$u=?lQN5FKne1lzxtjHmZGbj37-;T>%1M_knsD@4pVhdDu38 zNV9xhO~2VQQ(8KdyATWk(YlW%))DST+!H zJg7PCWeJM)0BO-JzBb5r4Us)8bqA9fGJk9qU2Pde&8po9W$95-TczJLH?^rCA`O;L zz;6P$&0|)*^Vo9{_6z0tFCgHJvXZd*U2>~zH2xyav@~KxHr|7vwR;P}^c3tb50_AL z$S+NCHoNTx`RT>T;~h>kpy4IPyQykTdM_IIlam#-lArRS`5f}Lz#dvC_4 zna2l7)LG=tweK`z9f&;=KH;hAVVRJrF?n*N^PF$-n8Ct>XG%LO%ru)g91 z7!c+&r-KYi+(u4cS)5kFw<>n-`i$*Mw6q|9b=8zy7XlWmSH8H49@0#;WqEK&IKZVR zFkNf+i*Ds{jHs|X{{W#$xZX{OfmIXcKsN={YNjZUJ7(kPj(=(N?(7Z zwpxRGLk}9EgzDG;_Sui6&r6~y7%TwUgo33V){H44JeG&K4ShWcW9d$hcO1L>%tI?9 z!wJ;?G@*AdL+e+e%e;;IPr$2wr|5K)RHuVx{m@d$M6CLJRaINUH}`0U(OrpvDm;OC zQT3~m2IeyRDVXh0jR+!dJarA`AXmp*=xPsuTY8Gne=BfnP06=zxvY{xef>L-YpPfq zw>eSHb9Nho6Axz|LxAj7NwDHI^n`HE1=LPZe(~9%q0DNzrrun9?oVK{RArN=u=;r= zjveTp_7e?&sY!n(I38`NTy^$?rO0a@qHogEiBytFNpNrSbCi~|0p|xe2)oB(!*pT= zE{f_nCIrIg2w_fM&NUyNZI}yp{}D*`=bJ?td6Hk6>FE4{5-fp+y$$1GAB;y8js>B- zu&W`D)x{q_(#}(ZTd@V`^fjPv_12OPo$YELA6Z+gLupXtN1I3DHaXa#MCXWQ^9G>1 z`4LZ7yCE|y)=pDrclAkC13zPpiiGa*V;Z zpuHz5>$e-$Yn5BQbk2-cCn3 z&(=ODKVq*$?EFTH?wP-RY~9LbJ%dUHVj?g681KfZrg^baz}I_5X_0iaKI1U`MNwHo zGL*VDOxb~W`d38CE4_Lpp?-odQNW+z99xxhqUoyGC@edyQPFya-EPd`HyWraJ&Jm$)!@!87!44vj$Nr7D&c##izl#q#j_XX|Un++AtbAva3(p4pOIBrlhjuWK7(wrW?8>6|XUllxDIO1&z+?up*Wl-k}I*04M!NA;yF*49(m-vDRH#@&E7tld{ zzR~SyyZ(iLTQM`Bvjvj6*k$+v3a^TuAs9GO0WBkl`k$?z1R^Kp!WYk<8Zmh8}R$`J~*d4SvK zuM*H$7hckk12sdq7&S!7tQKin%pqcxEL&4Ha|&yo*AUx*o}-_zP{MTvEd6rK`PBvQ z;^*t;KtG4bJ0l-X%eH{!i!fdYckVn5%l_i|WANS9_Y>gJWVnr`>AKfIeS1Pap^k-~(EMyY%DATgE{- zA}c)Rzx&)sbfl7ABN%jzl5DCio@oYYOF=ru9Tj(L3=C6lHGJ9UJ`+ro%L2a?j=(Nr zGAzK&i5ZquTU|mYJ*&p}qy3wqBN8r&GjY57=FaZaKFZjHj_#IO4`2YRKYGud*>P(^(Zt*OQz%;3&9k6HT6Bqc{rZ@nR6^SF8zQflZfzu zkS2(>a;tH@2Un!U>sOMQ^qj4Q+Q~>v(k~uf``?9+a)l(hI9xpg*fHBs@c3hvdEyYRw^H!J0l*}3b zz#d=G!=I^Umj~0}fVH&3bUE%9t4tJNovsAyIvM%^htI#vkO_qc!N5#KoX-v6)D=(6 zXkBy}PV9uBL=#JTf0=QDcqQd%X6`B6ZdjmIZvB#J@g?Pu%xkA+a!WdQb9xq`3UW2EQEn1#ol`C< z<%Wn>0Dv4`WpkNB!;gmaFDZvbV;+?n5T2XO&0dvDSqqwg<%@as;olY(s)iD$FUt44 zcxEC_LG8s`l~~hjl^YcBrXs&yXeknxfW?rz=(1pmFbk#kB>>`VB#KMp3rP==x2Oj7 z^<*%GA;`#I$jHCP3+HYY{|+t5h4Y+VhiF#pLGFBZ4H2;M1y90rdK%~Jzym}4V)*9< z++RLZq~&kobsm@F0J^W7p`aWGuOb)<%Au#E97duW5BX^m{IusI75qXAb3l8H#5bn0 zd8W>}I>QR$Y^)^C3;q;I&U%t)q^Wo&7^G&vJp%3ds?Uet#T~i^lIz27I5s_bV`fA_ z-7Jizm4wV4b5ezUb943`?&N36Q6VyWNy}@Xn&ed(Q`}@9I&D;4B{FM}w_gYg-o*pMK{Z~zk)dGmrXa1F zD*7)3JPI(^JHB#Gj;4o8xM!LM{`>Z?xo5P+oYfTW!W=f~FQ{Jv=w&26@@Cb5llz6( zMEJ~?w}1Hi_U;a|)C%@HzLbZW0W6Tik6^zlF-L#hJ$1M9uH4NA#FB6CsONdo#4|B7 zKQ`fLO8}sP0+HD+Lk5b7XeyS2qYfkSMA?j)`MrWQo?em3fHya@d=ov0F;_iFs-wBe sqq|s)Z9o8H#N3!+`~`2uS1hUWWl@#nT8;64DE^MyI@&z^>D+Js2N42cP5=M^ From 33da69fcb0abb39ff987c148b6b74a02183c60d6 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 29 Nov 2020 15:52:05 -0600 Subject: [PATCH 0109/1270] BUG: Fix incorrect brackets leading to error leak in mapping.c --- numpy/core/src/multiarray/mapping.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index cb5c3823dccf..d64962f87150 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -3194,9 +3194,10 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, } PyObject *shape2 = convert_shape_to_string(mit->nd, mit->dimensions, ""); - if (shape2 == NULL) + if (shape2 == NULL) { Py_DECREF(shape1); goto finish; + } PyErr_Format(PyExc_ValueError, "shape mismatch: value array of shape %S could not be broadcast " From 4234327caccd76a0fe62a89162cbf0b122e2f83a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 29 Nov 2020 15:54:18 
From 4234327caccd76a0fe62a89162cbf0b122e2f83a Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Sun, 29 Nov 2020 15:54:18 -0600
Subject: [PATCH 0110/1270] TST: Do not leak reference/object in error-test path

The other path must leak, since dtype registration is permanent (at least
I think so).

---
 numpy/core/src/multiarray/_multiarray_tests.c.src | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 3811e87a8e34..3c8caefcef1f 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -677,11 +677,12 @@ create_custom_field_dtype(PyObject *NPY_UNUSED(mod), PyObject *args)
                 "invalid error argument to test function.");
     }
     if (PyArray_RegisterDataType(dtype) < 0) {
-        /* Fix original type in the error_path == 2 case. */
+        /* Fix original type in the error_path == 2 case and delete it */
         Py_SET_TYPE(dtype, original_type);
+        Py_DECREF(dtype);
         return NULL;
     }
-    Py_INCREF(dtype);
+    Py_INCREF(dtype); /* hold on to the original (leaks a reference) */
     return (PyObject *)dtype;
 }

From a8464940ca4f591f4aca2416e2e6173d68251131 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Sun, 29 Nov 2020 15:55:07 -0600
Subject: [PATCH 0111/1270] BUG: Fix readonly related scalar buffer exports

The readonly flag was not being set and was untested. Also, a request for
a writeable buffer incorrectly did not set an error.

---
 numpy/core/src/multiarray/scalartypes.c.src | 14 +++++++-
 numpy/core/tests/test_multiarray.py         |  4 +--
 numpy/core/tests/test_scalarbuffer.py       | 39 ++++++++++++++++++---
 3 files changed, 49 insertions(+), 8 deletions(-)

diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index d018fccbbd22..e480628e710c 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -2395,6 +2395,10 @@ gentype_arrtype_getbuffer(PyObject *self, Py_buffer *view, int flags)
                 self);
         return -1;
     }
+    if ((flags & PyBUF_WRITEABLE) == PyBUF_WRITEABLE) {
+        PyErr_SetString(PyExc_BufferError, "scalar buffer is readonly");
+        return -1;
+    }
     PyArray_Descr *descr = PyArray_DescrFromScalar(self);
     if (descr == NULL) {
         return -1;
@@ -2413,6 +2417,7 @@ gentype_arrtype_getbuffer(PyObject *self, Py_buffer *view, int flags)
     view->shape = NULL;
     view->strides = NULL;
     view->suboffsets = NULL;
+    view->readonly = 1; /* assume general (user) scalars are readonly.
*/ Py_INCREF(self); view->obj = self; view->buf = scalar_value(self, descr); @@ -2444,6 +2449,7 @@ static int @name@_getbuffer(PyObject *self, Py_buffer *view, int flags) { if ((flags & PyBUF_WRITEABLE) == PyBUF_WRITEABLE) { + PyErr_SetString(PyExc_BufferError, "scalar buffer is readonly"); return -1; } Py@Name@ScalarObject *scalar = (Py@Name@ScalarObject *)self; @@ -2456,6 +2462,7 @@ static int view->shape = NULL; view->strides = NULL; view->suboffsets = NULL; + view->readonly = 1; Py_INCREF(self); view->obj = self; view->buf = &(scalar->obval); @@ -2482,6 +2489,7 @@ static int unicode_getbuffer(PyObject *self, Py_buffer *view, int flags) { if ((flags & PyBUF_WRITEABLE) == PyBUF_WRITEABLE) { + PyErr_SetString(PyExc_BufferError, "scalar buffer is readonly"); return -1; } PyUnicodeScalarObject *scalar = (PyUnicodeScalarObject *)self; @@ -2493,6 +2501,7 @@ unicode_getbuffer(PyObject *self, Py_buffer *view, int flags) view->shape = NULL; view->strides = NULL; view->suboffsets = NULL; + view->readonly = 1; Py_INCREF(self); view->obj = self; @@ -2522,7 +2531,7 @@ unicode_getbuffer(PyObject *self, Py_buffer *view, int flags) view->format = scalar->buffer_fmt; } else { - scalar->buffer_fmt = PyObject_Malloc(22); + scalar->buffer_fmt = PyMem_Malloc(22); if (scalar->buffer_fmt == NULL) { Py_SETREF(view->obj, NULL); return -1; @@ -2549,6 +2558,7 @@ static int @name@_getbuffer(PyObject *self, Py_buffer *view, int flags) { if ((flags & PyBUF_WRITEABLE) == PyBUF_WRITEABLE) { + PyErr_SetString(PyExc_BufferError, "scalar buffer is readonly"); return -1; } Py@Name@ScalarObject *scalar = (Py@Name@ScalarObject *)self; @@ -2560,6 +2570,7 @@ static int view->shape = &length; view->strides = NULL; view->suboffsets = NULL; + view->readonly = 1; Py_INCREF(self); view->obj = self; @@ -2651,6 +2662,7 @@ unicode_arrtype_dealloc(PyObject *v) { /* note: may be null if it was never requested */ PyMem_Free(PyArrayScalar_VAL(v, Unicode)); + PyMem_Free(((PyUnicodeScalarObject *)v)->buffer_fmt); /* delegate to the base class */ PyUnicode_Type.tp_dealloc(v); } diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 12306cbb883d..bd8c51ab78fd 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -7350,9 +7350,9 @@ def test_export_flags(self): def test_export_and_pickle_user_dtype(self, obj, error): # User dtypes should export successfully when FORMAT was not requested. 
with pytest.raises(error): - _multiarray_tests.get_buffer_info(obj, ("STRIDED", "FORMAT")) + _multiarray_tests.get_buffer_info(obj, ("STRIDED_RO", "FORMAT")) - _multiarray_tests.get_buffer_info(obj, ("STRIDED",)) + _multiarray_tests.get_buffer_info(obj, ("STRIDED_RO",)) # This is currently also necessary to implement pickling: pickle_obj = pickle.dumps(obj) diff --git a/numpy/core/tests/test_scalarbuffer.py b/numpy/core/tests/test_scalarbuffer.py index 574c56864bb7..851cd3081aee 100644 --- a/numpy/core/tests/test_scalarbuffer.py +++ b/numpy/core/tests/test_scalarbuffer.py @@ -3,6 +3,7 @@ """ import numpy as np from numpy.core._rational_tests import rational +from numpy.core._multiarray_tests import get_buffer_info import pytest from numpy.testing import assert_, assert_equal, assert_raises @@ -52,10 +53,20 @@ def test_scalar_dim(self, scalar): assert_equal(mv_x.suboffsets, ()) @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) - def test_scalar_known_code(self, scalar, code): + def test_scalar_code_and_properties(self, scalar, code): x = scalar() + expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0, + shape=(), format=code, readonly=True) + mv_x = memoryview(x) - assert_equal(mv_x.format, code) + print(mv_x.readonly, self._as_dict(mv_x)) + assert self._as_dict(mv_x) == expected + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_buffers_readonly(self, scalar): + x = scalar() + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) def test_void_scalar_structured_data(self): dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))]) @@ -77,9 +88,14 @@ def test_void_scalar_structured_data(self): assert_equal(mv_x.itemsize, mv_a.itemsize) assert_equal(mv_x.format, mv_a.format) + # Check that we do not allow writeable buffer export (technically + # we could allow it sometimes here...) 
+ with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) + def _as_dict(self, m): return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize, - ndim=m.ndim, format=m.format) + ndim=m.ndim, format=m.format, readonly=m.readonly) def test_datetime_memoryview(self): # gh-11656 @@ -88,7 +104,7 @@ def test_datetime_memoryview(self): dt1 = np.datetime64('2016-01-01') dt2 = np.datetime64('2017-01-01') expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,), - format='B') + format='B', readonly=True) v = memoryview(dt1) assert self._as_dict(v) == expected @@ -100,6 +116,10 @@ def test_datetime_memoryview(self): # Fails to create a PEP 3118 valid buffer assert_raises((ValueError, BufferError), memoryview, a[0]) + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(dt1, ["WRITABLE"]) + @pytest.mark.parametrize('s', [ pytest.param("\x32\x32", id="ascii"), pytest.param("\uFE0F\uFE0F", id="basic multilingual"), @@ -109,7 +129,8 @@ def test_str_ucs4(self, s): s = np.str_(s) # only our subclass implements the buffer protocol # all the same, characters always encode as ucs4 - expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w') + expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w', + readonly=True) v = memoryview(s) assert self._as_dict(v) == expected @@ -119,7 +140,15 @@ def test_str_ucs4(self, s): assert_equal(code_points, [ord(c) for c in s]) + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(s, ["WRITABLE"]) + def test_user_scalar_fails_buffer(self): r = rational(1) with assert_raises(TypeError): memoryview(r) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(r, ["WRITABLE"]) \ No newline at end of file From 2fd82cb2644d35195e11b3b62be56c6d09a77166 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 28 Nov 2020 13:25:34 -0700 Subject: [PATCH 0112/1270] DOC: Add NumPy 1.20.0-changelog.rst --- doc/changelog/1.20.0-changelog.rst | 845 +++++++++++++++++++++++++++++ 1 file changed, 845 insertions(+) create mode 100644 doc/changelog/1.20.0-changelog.rst diff --git a/doc/changelog/1.20.0-changelog.rst b/doc/changelog/1.20.0-changelog.rst new file mode 100644 index 000000000000..5db423c41fed --- /dev/null +++ b/doc/changelog/1.20.0-changelog.rst @@ -0,0 +1,845 @@ + +Contributors +============ + +A total of 182 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Meurer + +* Abhilash Barigidad + +* Abhinav Reddy + +* Abhishek Singh + +* Al-Baraa El-Hag + +* Albert Villanova del Moral + +* Alex Leontiev + +* Alex Rockhill + +* Alex Rogozhnikov +* Alexander Belopolsky +* Alexander Kuhn-Regnier + +* Allen Downey + +* Andras Deak +* Andrea Olivo andryandrew@gmail.com andryandrew + +* Andrew Eckart + +* Anirudh Subramanian +* Anthony Byuraev + +* Antonio Larrosa + +* Ashutosh Singh + +* Bangcheng Yang + +* Bas van Beek + +* Ben Derrett + +* Ben Elliston + +* Ben Nathanson + +* Bharat Medasani + +* Bharat Raghunathan +* Bijesh Mohan + +* Bradley Dice + +* Brandon David + +* Brandt Bucher +* Brian Soto + +* Brigitta Sipocz +* Cameron Blocker + +* Carl Leake + +* Charles Harris +* Chris Brown + +* Chris Vavaliaris + +* Chunlin Fang +* CloseChoice + +* Daniel G. A. 
Smith + +* Daniel Hrisca +* Daniel Vanzo + +* David Pitchford + +* Davide Dal Bosco + +* Dima Kogan + +* Dmitry Kutlenkov + +* Douglas Fenstermacher + +* Dustin Spicuzza + +* E. Madison Bray + +* Elia Franzella + +* Enrique Matías Sánchez (Quique) + +* Erfan Nariman | Veneficus + +* Eric Larson +* Eric Moore +* Eric Wieser +* Erik M. Bray +* EthanCJ-git + +* Etienne Guesnet + +* Felix Divo +* Frankie Robertson + +* Ganesh Kathiresan +* Gengxin Xie +* Gerry Manoim + +* Guilherme Leobas +* Hassan Kibirige +* Hugo Mendes + +* Hugo van Kemenade +* Ian Thomas + +* InessaPawson + +* Isabela Presedo-Floyd + +* Isuru Fernando +* Jakob Jacobson + +* Jakob Jakobson + +* Jakub Wilk +* James Myatt + +* Jesse Li + +* John Hagen + +* John Zwinck +* Joseph Fox-Rabinovitz +* Josh Wilson +* Jovial Joe Jayarson + +* Julia Signell + +* Jun Kudo + +* Karan Dhir + +* Kaspar Thommen + +* Kerem Hallaç +* Kevin Moore + +* Kevin Sheppard +* Klaus Zimmermann + +* LSchroefl + +* Laurie + +* Laurie Stephey + +* Levi Stovall + +* Lisa Schwetlick + +* Lukas Geiger + +* Madhulika Jain Chambers + +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Michael Hirsch +* Nick R. Papior +* Nikola Forró +* Noman Arshad + +* Paul YS Lee + +* Pauli Virtanen +* Paweł Redzyński + +* Peter Andreas Entschev +* Peter Bell +* Philippe Ombredanne + +* Phoenix Meadowlark + +* Piotr Gaiński +* Raghav Khanna + +* Raghuveer Devulapalli +* Rajas Rade + +* Rakesh Vasudevan +* Ralf Gommers +* Raphael Kruse + +* Rashmi K A + +* Robert Kern +* Rohit Sanjay + +* Roman Yurchak +* Ross Barnowski +* Royston E Tauro + +* Ryan C Cooper + +* Ryan Soklaski +* Safouane Chergui + +* Sahil Siddiq + +* Sarthak Vineet Kumar + +* Sayed Adel +* Sebastian Berg +* Sergei Vorfolomeev + +* Seth Troisi +* Sidhant Bansal + +* Simon Gasse +* Simon Graham + +* Stefan Appelhoff + +* Stefan Behnel + +* Stefan van der Walt +* Steve Dower +* Steve Joachim + +* Steven Pitman + +* Stuart Archibald +* Sturla Molden +* Susan Chang + +* Takanori H + +* Tapajyoti Bose + +* Thomas A Caswell +* Tina Oberoi +* Tirth Patel +* Tobias Pitters + +* Tyler Reddy +* Veniamin Petrenko + +* Wansoo Kim + +* Warren Weckesser +* Wei Yang + +* Wojciech Rzadkowski +* Yang Hau + +* Yogesh Raisinghani + +* Yu Feng +* Yuya Unno + +* Zac Hatfield-Dodds +* Zuhair Ali-Khan + +* @abhilash42 + +* @bernie gray + +* @danbeibei + +* @dojafrat +* @dpitch40 + +* @forfun + +* @iamsoto + +* @jbrockmendel + +* @leeyspaul + +* @mitch + +* @prateek arora + +* @qiyu8 + +* @serge-sans-paille + +* @skywalker + +* @stphnlyd + +* @xoviat +* @谭九鼎 + +* @JMFT + +* @Jack + +* @Neal C + + +Pull requests merged +==================== + +A total of 650 pull requests were merged for this release. + +* `#13516 `__: ENH: enable multi-platform SIMD compiler optimizations +* `#14779 `__: NEP 36 (fair play) +* `#14882 `__: DEP: Deprecate aliases of builtin types in python 3.7+ +* `#15037 `__: BUG: ``np.resize`` negative shape and subclasses edge case fixes +* `#15121 `__: ENH: random: Add the method ``permuted`` to Generator. +* `#15162 `__: BUG,MAINT: Fix issues with non-reduce broadcasting axes +* `#15471 `__: BUG: Ensure PyArray_FromScalar always returns the requested dtype +* `#15507 `__: NEP 42: Technical decisions for new DTypes +* `#15508 `__: API: Create Preliminary DTypeMeta class and np.dtype subclasses +* `#15604 `__: MAINT: Avoid exception in NpzFile destructor if constructor raises... 
+* `#15666 `__: ENH: Improved ``__str__`` for polynomials +* `#15759 `__: BUILD: Remove Accelerate support +* `#15791 `__: [DOC] Added tutorial about the numpy.ma module. +* `#15852 `__: ENH: Add where argument to np.mean +* `#15886 `__: DEP: Deprecate passing shape=None to mean shape=() +* `#15900 `__: DEP: Ensure indexing errors will be raised even on empty results +* `#15997 `__: ENH: improve printing of arrays with multi-line reprs +* `#16130 `__: DOC: Correct documentation of ``__array__`` when used as output... +* `#16134 `__: ENH: Implement concatenate dtype and casting keyword arguments +* `#16156 `__: DEP: Deprecate ``numpy.dual``. +* `#16161 `__: BUG: Potential fix for divmod(1.0, 0.0) to raise divbyzero and... +* `#16167 `__: DOC: Increase guidance and detail of np.polynomial docstring +* `#16174 `__: DOC: Add transition note to all lib/poly functions +* `#16200 `__: ENH: Rewrite of array-coercion to support new dtypes +* `#16205 `__: ENH: Add ``full_output`` argument to ``f2py.compile``. +* `#16232 `__: DEP: Deprecate ufunc.outer with matrix inputs +* `#16238 `__: MAINT: Unify cached (C-level static) imports +* `#16239 `__: BUG,DOC: Allow attach docs twice but error if wrong +* `#16242 `__: BUG: Fix default fallback in genfromtxt +* `#16247 `__: ENH:Umath Replace raw SIMD of unary float point(32-64) with NPYV... +* `#16248 `__: MRG, ENH: added edge keyword argument to digitize +* `#16257 `__: DOC: Update the f2py section of the "Using Python as Glue" page. +* `#16260 `__: DOC: Improve ``rec.array`` function documentation (#15853) +* `#16266 `__: ENH: include dt64/td64 isinstance checks in ``__init__.pxd`` +* `#16267 `__: DOC: Clarifications for np.std +* `#16273 `__: BUG: Order percentile monotonically +* `#16274 `__: MAINT: cleanups to quantile +* `#16275 `__: REL: Update master after 1.19.x branch. +* `#16276 `__: BUG: Ensure out argument is returned by identity for 0d arrays +* `#16278 `__: DOC: Clarifications for ``np.var``. +* `#16283 `__: DOC: Add a note about performance of isclose compared to math.isclose +* `#16284 `__: MAINT: Clean up the implementation of quantile +* `#16285 `__: MAINT: Bump hypothesis from 5.12.0 to 5.14.0 +* `#16291 `__: DOC: Improve "tobytes" docstring. +* `#16292 `__: BUG: Fix tools/download-wheels.py. +* `#16295 `__: BUG: Require Python >= 3.6 in setup.py +* `#16296 `__: DOC: Fix malformed docstrings in ma. +* `#16297 `__: ENH: Optimize Cpu feature detect in X86, fix for GCC on macOS +* `#16298 `__: BUG: np.info does not show keyword-only arguments +* `#16300 `__: DOC: Fix bad reference in ``numpy.ma`` +* `#16304 `__: TST, MAINT: Fix detecting and testing armhf features +* `#16305 `__: DOC: Fix packbits documentation rendering, +* `#16306 `__: DOC: Fix troubleshooting code snippet when env vars are empty +* `#16308 `__: BUG: relpath fails for different drives on windows +* `#16311 `__: DOC: Fix ``np.ma.core.doc_note`` +* `#16316 `__: MAINT: Bump numpydoc version +* `#16318 `__: MAINT: Stop Using PyEval_Call* and simplify some uses +* `#16321 `__: ENH: Improve the ARM cpu feature detection by parsing /proc/cpuinfo +* `#16323 `__: DOC: Reconstruct Testing Guideline. +* `#16329 `__: MAINT: Cleanup 'tools/download-wheels.py' +* `#16332 `__: DOC: link np.interp to SciPy's interpolation functions (closes... +* `#16333 `__: DOC: Fix spelling typo - homogenous to homogeneous. 
(#16324) +* `#16334 `__: ENH: Use AVX-512 for np.isnan, np.infinite, np.isinf and np.signbit +* `#16336 `__: BUG: Fix refcounting in add_newdoc +* `#16337 `__: CI: Create a link for the circleCI artifact +* `#16348 `__: BUG: Fix dtype leak in ``PyArray_FromAny`` error path +* `#16349 `__: BUG: Indentation for docstrings +* `#16351 `__: BUG: Fix small leaks in error path and ``empty_like`` with shape +* `#16362 `__: MAINT: Streamline download-wheels. +* `#16365 `__: DOC: Fix an obvious mistake in a message printed in doc/Makefile. +* `#16367 `__: MAINT: Bump cython from 0.29.17 to 0.29.19 +* `#16368 `__: MAINT: Bump hypothesis from 5.14.0 to 5.15.1 +* `#16369 `__: MAINT: Bump pytest-cov from 2.8.1 to 2.9.0 +* `#16371 `__: ENH: Use AVX-512 for np.frexp and np.ldexp +* `#16373 `__: MAINT, DOC: add index for user docs. +* `#16375 `__: ENH: ARM Neon implementation with intrinsic for np.argmax. +* `#16385 `__: DOC: Tighten howto-docs guide #16259 +* `#16387 `__: MAINT: Make ctypes optional on Windows +* `#16389 `__: ENH: Hardcode buffer handling for simple scalars +* `#16392 `__: MAINT: Stop uploading wheels to Rackspace. +* `#16393 `__: MAINT: Use a raw string for the fromstring docstring. +* `#16395 `__: ENH: Validate and disable CPU features in runtime +* `#16397 `__: ENH: Implement the NumPy C SIMD vectorization interface +* `#16404 `__: DOC,BLD: Update make dist html target. +* `#16408 `__: DOC,BLD: Update sphinx conf to use xelatex. +* `#16409 `__: TST, CI: turn on codecov patch diffs +* `#16411 `__: BUG: endpoints of array returned by geomspace() should match... +* `#16417 `__: MAINT: support python 3.10 +* `#16418 `__: MAINT: Chain some exceptions. +* `#16420 `__: DOC: Improve intersect1d docstring +* `#16422 `__: DOC: Update assert_warns parameter list +* `#16423 `__: TST: Simplify assert_warns in test_io.py +* `#16427 `__: DOC: make NEP 18 status Final +* `#16428 `__: DOC: Add style guide to howto_document +* `#16430 `__: DOC: NEP for C style guide +* `#16433 `__: DOC: Fix description of dtype default in linspace +* `#16435 `__: BUG: Add extern to PyArrayDTypeMeta_Type declaration +* `#16436 `__: DOC: Add a reference into NEP 29, +* `#16438 `__: MAINT: Catch remaining cases of Py_SIZE and Py_TYPE as lvalues +* `#16442 `__: ENH: Fix deprecated warn for Intel/Apple/Clang Compiler +* `#16444 `__: DOC: make clearer that sinc is normalized by a factor pi +* `#16445 `__: DOC: update roadmap +* `#16446 `__: BUG: fixes einsum output order with optimization (#14615) +* `#16447 `__: DOC: add a "make show" command to doc/Makefile +* `#16450 `__: DOC: Add a NEP link to all neps. +* `#16452 `__: DOC,ENH: extend error message when Accelerate is detected +* `#16463 `__: DOC: Improve assert_warns docstring with example +* `#16464 `__: MAINT: Bump hypothesis from 5.15.1 to 5.16.0 +* `#16465 `__: DOC: Fix development_workflow links +* `#16468 `__: BUG: fix GCC 10 major version comparison +* `#16471 `__: BLD: install mingw32 v7.3.0 for win32 +* `#16472 `__: DOC: Fixes for 18 broken links +* `#16474 `__: MAINT: use zip instead of range in piecewise +* `#16476 `__: ENH: add ``norm=forward,backward`` to numpy.fft functions +* `#16482 `__: SIMD: Optimize the performace of np.packbits in ARM-based machine. +* `#16485 `__: BUG: Fix result when a gufunc output broadcasts the inputs. +* `#16500 `__: DOC: Point Contributing page to new NEP 45 +* `#16501 `__: MAINT: make Py_SET_SIZE and Py_SET_TYPE macros a bit safer +* `#16503 `__: BUG:random: Error when ``size`` is smaller than broadcast input... 
+* `#16504 `__: DOC: Correct MV Normal sig +* `#16505 `__: BUG: raise IEEE exception on AIX +* `#16506 `__: DOC: only single-polynomial fitting in np.polynomial.Polynomial.fit() +* `#16510 `__: DOC: Minor rounding correction in Generator.binomial +* `#16514 `__: STY: trivial doc style fix in NEP 45. +* `#16515 `__: ENH: add type stubs from numpy-stubs +* `#16519 `__: BUG: f2py: make callbacks threadsafe +* `#16520 `__: STY: f2py: replace \t by whitespace for readability +* `#16522 `__: MAINT:ARMHF Fix detecting feature groups NEON_HALF and NEON_VFPV4 +* `#16523 `__: MAINT: Improve buffer speed +* `#16524 `__: MAINT: f2py: move thread-local declaration definition to common... +* `#16529 `__: BUG: Fix cython warning in random/_common.pyx. +* `#16530 `__: MAINT: Bump pytest from 5.4.2 to 5.4.3 +* `#16532 `__: BUG: Remove non-threadsafe sigint handling from fft calculation +* `#16540 `__: SIMD: SSE2 intrinsic implementation for float64 input of np.enisum +* `#16551 `__: BUG: Ensure SeedSequence 0-padding does not collide with spawn... +* `#16554 `__: DEP: Remove deprecated numeric types and deprecate remaining +* `#16555 `__: CI: drop win32 3.7, 3.6 builds +* `#16556 `__: MAINT: simplifying annotations for np.core.from_numeric +* `#16558 `__: ENH: make typing module available at runtime +* `#16570 `__: ENH: Throw TypeError on operator concat on Numpy Arrays +* `#16571 `__: TST: Add new tests for array coercion +* `#16572 `__: BUG: fix sin/cos bug when input is strided array +* `#16574 `__: MAINT: fix name of first parameter to dtype constructor in type... +* `#16581 `__: DOC: Added an example for np.transpose(4d_array) +* `#16583 `__: MAINT: changed ``np.generic`` arguments to positional-only +* `#16590 `__: DOC: Clarify dtype default for logspace and geomspace +* `#16591 `__: DOC: Disallow complex args in arange +* `#16592 `__: BUG: Raise TypeError for float->timedelta promotion +* `#16594 `__: ENH: Add ``__f2py_numpy_version__`` attribute to Fortran modules. +* `#16596 `__: BUG: Fix reference count leak in mapping.c +* `#16601 `__: MAINT: Move and improve ``test_ignore_nan_ulperror``. +* `#16603 `__: DOC: make addition of types a "new feature" in release notes +* `#16605 `__: MAINT: Avx512 intrinsics implementation for float64 input np.log +* `#16606 `__: MAINT: Bump pytest-cov from 2.9.0 to 2.10.0 +* `#16607 `__: MAINT: Bump hypothesis from 5.16.0 to 5.16.1 +* `#16613 `__: MAINT: bump mypy version to 0.780 +* `#16617 `__: BLD: Openblas 0.3.10 +* `#16618 `__: ENH: add annotation for abs +* `#16619 `__: BLD: check if std=c99 is really required +* `#16620 `__: MAINT, CI: disable Shippable cache +* `#16621 `__: BENCH: Expand array-creation benchmarks +* `#16622 `__: MAINT: Implemented two dtype-related TODO's +* `#16623 `__: BUG: Initialize stop-reading in array_from_text +* `#16627 `__: DOC: Updated documentation for numpy.squeeze +* `#16629 `__: ENH: add tool to find functions missing types +* `#16630 `__: ENH,BUG:distutils Remove the origins from the implied features +* `#16633 `__: MAINT: lib: Some code clean up in loadtxt +* `#16635 `__: BENCH: remove obsolete goal_time param +* `#16639 `__: BUG: Fix uint->timedelta promotion to raise TypeError +* `#16642 `__: MAINT: Replace ``PyUString_GET_SIZE`` with ``PyUnicode_GetLength``. +* `#16643 `__: REL: Fix outdated docs link +* `#16646 `__: TST: add a static typing test for memoryviews as ArrayLikes +* `#16647 `__: ENH: Added annotations to 8 functions from np.core.fromnumeric +* `#16648 `__: REL: Update master after 1.19.0 release. 
+* `#16650 `__: ENH: Allow genfromtxt to unpack structured arrays +* `#16651 `__: MAINT: Prefer generator expressions over list comprehensions... +* `#16653 `__: DOC: cross-reference numpy.dot and numpy.linalg.multi_dot +* `#16658 `__: MAINT: Bump hypothesis from 5.16.1 to 5.16.3 +* `#16659 `__: MAINT: Bump mypy from 0.780 to 0.781 +* `#16664 `__: DOC: Add lib.format.open_memmap to autosummary. +* `#16666 `__: BUG: Fix bug in AVX complex absolute while processing array of... +* `#16669 `__: MAINT: remove blacklist/whitelist terms +* `#16674 `__: TST: Add extra debugging information to CPU features detection +* `#16675 `__: ENH: Add support for file like objects to np.core.records.fromfile +* `#16683 `__: DOC: updated gcc minimum recommend version to build from source +* `#16684 `__: MAINT: Allow ``None`` to be passed to certain ``generic`` subclasses +* `#16690 `__: DOC: fixed docstring for descr_to_dtype +* `#16691 `__: DOC: Remove "matrix" from ``triu`` docstring. +* `#16696 `__: MAINT: add py.typed sentinel to package manifest +* `#16699 `__: MAINT: Fixup quantile tests to not use ``np.float`` +* `#16702 `__: BLD: Add CPU entry for Emscripten / WebAssembly +* `#16704 `__: TST: Disable Python 3.9-dev testing. +* `#16706 `__: DOC: Add instruction about stable symlink +* `#16708 `__: MAINT: Disable use_hugepages in case of ValueError +* `#16709 `__: DOC: Add dep directive to alen docstring. +* `#16710 `__: ENH, BLD: Add RPATH support for AIX +* `#16718 `__: DOC: fix typo +* `#16720 `__: BUG: Fix PyArray_SearchSorted signature. +* `#16729 `__: ENH: Add annotations to the last 8 functions in numpy.core.fromnumeric +* `#16730 `__: ENH: Use f90 compiler specified in f2py command line args for... +* `#16731 `__: DOC: reword random c-api introduction, cython is documented in... +* `#16735 `__: DOC: Tweak a sentence about broadcasting. +* `#16736 `__: DOC: Prepend ``ma.`` to references in ``numpy.ma`` +* `#16738 `__: DOC: Remove redundant word +* `#16742 `__: DOC: add unique() to See Also of repeat() +* `#16743 `__: DOC: add example to unique() and make connection to repeat() +* `#16747 `__: MAINT: Chaining exceptions in numpy/core/_internal.py +* `#16752 `__: BLD: add manylinux1 OpenBlAS 0.3.10 hashes and test for them +* `#16757 `__: DOC: Add Matti Picus to steering council page +* `#16759 `__: ENH: make dtype generic over scalar type +* `#16760 `__: DOC: Added a section in the 'Iterating over arrays' doc page... +* `#16761 `__: MAINT: Tidy exception chaining in _datasource.py +* `#16762 `__: MAINT: Fixes for deprecated functions in scalartypes.c.src +* `#16764 `__: MAINT: Bump mypy from 0.781 to 0.782 +* `#16765 `__: MAINT: Bump hypothesis from 5.16.3 to 5.19.0 +* `#16767 `__: ENH: Update NumPy logos +* `#16770 `__: MAINT: Remove unneeded call to PyUnicode_READY +* `#16771 `__: MAINT: Fix deprecated functions in scalarapi.c +* `#16775 `__: DOC: switch to logo with text +* `#16782 `__: ENH, TST: Bring the NumPy C SIMD vectorization interface "NPYV"... +* `#16786 `__: BENCH: Add basic benchmarks for scalar indexing and assignment +* `#16789 `__: BUG: fix decode error when building and get rid of warn +* `#16792 `__: DOC: Minor RST formatting. +* `#16793 `__: BLD, MAINT: update cython to 0.29.21 +* `#16794 `__: TST: Upgrade to Python 3.8 for DEBUG testing. +* `#16798 `__: DOC: Fix RST/numpydoc standard. 
+* `#16800 `__: MAINT: Move typing tests +* `#16802 `__: MAINT: Explicitly disallow object user dtypes +* `#16805 `__: DOC: add example to corrcoef function +* `#16806 `__: DOC: adding docs on passing dimensions as tuple to ndindex +* `#16807 `__: BUG, MAINT: Remove overzealous automatic RST link +* `#16811 `__: DOC: Add explanation of 'K' and 'A' layout options to 'asarray*'... +* `#16814 `__: DOC: Add a reST label to /user/building.rst +* `#16815 `__: BUG: fix mgrid output for lower precision float inputs +* `#16816 `__: BLD: temporarily disable OpenBLAS hash checks +* `#16817 `__: BUG: Do not inherit flags from the structured part of a union... +* `#16819 `__: DOC: replace dec.slow with pytest.mark.slow +* `#16820 `__: MAINT: Make void scalar to array creation copy when dtype is... +* `#16821 `__: DOC: fix inconsistent parameter name in np.ndindex docstring +* `#16822 `__: MAINT: setuptools 49.2.0 emits a warning, avoid it +* `#16824 `__: DOC: add examples to random number generator pages +* `#16826 `__: DOC: describe ufunc copy behavior when input and output overlap +* `#16827 `__: MAINT: Fix ``runtest.py`` warning. +* `#16829 `__: DOC,BLD: Add pandas to doc_requirements.txt +* `#16831 `__: MAINT: fix sphinx deprecation +* `#16834 `__: Avoid using uninitialized bytes in getlimits.py. +* `#16835 `__: DOC: Explaining why datetime64 doesn't work for allclose + isclose +* `#16836 `__: DOC: improve SIMD features tables +* `#16837 `__: BLD: update openblas hashes, re-enable check +* `#16838 `__: MAINT: Remove code that will never run +* `#16840 `__: MAINT: Bump hypothesis from 5.19.0 to 5.19.1 +* `#16841 `__: BUG: linspace should round towards -infinity +* `#16845 `__: TST: Disable shippable until we can fix it. +* `#16847 `__: MAINT: Remove Duplicated Code (function extract rmap) +* `#16848 `__: MAINT: Remove Duplicated Code +* `#16849 `__: MAINT: Change for loop (range -> for each) +* `#16850 `__: DEP: Deprecate NumPy object scalars +* `#16854 `__: DOC: clarify whats required for new features see #13924 +* `#16857 `__: MAINT: fix new compiler warnings on clang +* `#16858 `__: BUG: fix the search dir of dispatch-able sources +* `#16860 `__: MAINT: Remove deprecated python function 'file()' +* `#16868 `__: BUG: Validate output size in bin- and multinomial +* `#16870 `__: BLD, MAINT: Pin setuptools +* `#16871 `__: BUG: Update compiler check for AVX-512F +* `#16874 `__: TST, MAINT: fix the test for ``np.ones`` +* `#16878 `__: DOC: edit to the documentation of lib/polynomial.py/polyfit +* `#16879 `__: MAINT: Configure hypothesis in ``np.test()`` for determinism,... 
+* `#16882 `__: BLD: Remove unused pip install +* `#16883 `__: BUG,DOC: Fix bad MPL kwarg in docs +* `#16886 `__: DOC: Fix types including curly braces +* `#16887 `__: DOC: Remove the links for ``True`` and ``False`` +* `#16888 `__: ENH: Integrate the new CPU dispatcher with umath generator +* `#16894 `__: DOC: Fix wrong markups in ``arrays.dtypes`` +* `#16896 `__: DOC: Remove links for C codes +* `#16897 `__: DOC: Fix the declarations of C fuctions +* `#16899 `__: MNT: also use Py_SET_REFCNT instead of Py_REFCNT +* `#16900 `__: MAINT: Chaining exceptions in numpy/__init__.py +* `#16907 `__: DOC: update val to be scalar or array like optional closes #16901 +* `#16910 `__: MAINT: Bump hypothesis from 5.19.1 to 5.20.2 +* `#16911 `__: ENH: Speed up trim_zeros +* `#16914 `__: BUG: Fix string/bytes to complex assignment +* `#16917 `__: DOC: Add correctness vs strictness consideration for np.dtype +* `#16919 `__: DOC: Add ufunc docstring to generated docs. +* `#16925 `__: REL: Update master after 1.19.1 release. +* `#16931 `__: Revert "Merge pull request #16248 from alexrockhill/edge" +* `#16936 `__: BUG: Fix memory leak of buffer-info cache due to relaxed strides +* `#16938 `__: ENH,API: Store exported buffer info on the array +* `#16940 `__: BLD: update OpenBLAS build +* `#16941 `__: BUG: Allow array-like types to be coerced as object array elements +* `#16943 `__: DEP: Deprecate size-one ragged array coercion +* `#16944 `__: Change the name of the folder "icons" to "logo". +* `#16949 `__: ENH: enable colors for ``runtests.py --ipython`` +* `#16950 `__: DOC: Clarify input to irfft/irfft2/irfftn +* `#16952 `__: MAINT: Bump hypothesis from 5.20.2 to 5.23.2 +* `#16953 `__: update numpy/lib/arraypad.py with appropriate chain exception +* `#16957 `__: MAINT: Use arm64 instead of aarch64 on travisCI. +* `#16962 `__: MAINT: Chain exception in ``distutils/fcompiler/environment.py``. +* `#16966 `__: MAINT: Added the ``order`` parameter to ``np.array()`` +* `#16969 `__: ENH: Add Neon SIMD implementations for add, sub, mul, and div +* `#16973 `__: DOC: Fixed typo in lib/recfunctions.py +* `#16974 `__: TST: Add pypy win32 CI testing. +* `#16982 `__: ENH: Increase the use of ``Literal`` types +* `#16986 `__: ENH: Add NumPy declarations to be used by Cython 3.0+ +* `#16988 `__: DOC: Add the new NumPy logo to Sphinx pages +* `#16991 `__: MAINT: Bump hypothesis from 5.23.2 to 5.23.9 +* `#16992 `__: MAINT: Bump pytest from 5.4.3 to 6.0.1 +* `#16993 `__: BLD: pin setuptools < 49.2.0 +* `#16996 `__: DOC: Revise glossary page +* `#17002 `__: DOC: clip() allows arguments. +* `#17009 `__: NEP: Updated NEP-35 with keyword-only instruction +* `#17013 `__: MAINT: Simplify scalar power +* `#17014 `__: MAINT: Improve error handling in umathmodule setup +* `#17028 `__: DOC: Disclaimer for FFT library +* `#17029 `__: MAINT: Add error return to all casting functionality and NpyIter +* `#17033 `__: BUG: fix a compile and a test warning +* `#17036 `__: DOC: Clarify that ``np.char`` comparison functions always return... +* `#17039 `__: DOC: Use a less ambiguous example for array_split +* `#17041 `__: MAINT: Bump hypothesis from 5.23.9 to 5.23.12 +* `#17048 `__: STY: core._internal style fixups +* `#17050 `__: MAINT: Remove _EXTRAFLAGS variable +* `#17053 `__: BUG: fix typo in polydiv that prevented promotion to poly1d +* `#17058 `__: MAINT: Revert boolean casting back to elementwise comparisons... 
+* `#17062 `__: API, BUG: Raise error on complex input to i0 +* `#17063 `__: MAINT: Remove obsolete conversion to set +* `#17067 `__: DEP: lib: Remove the deprecated financial functions. +* `#17068 `__: MAINT, BUG: Remove uses of PyString_FromString. +* `#17074 `__: DOC: use the pydata_sphinx_theme +* `#17078 `__: DOC: Fixes duplication of toctree content (Closes #17077) +* `#17091 `__: MAINT: Bump pytest-cov from 2.10.0 to 2.10.1 +* `#17092 `__: MAINT: Bump hypothesis from 5.23.12 to 5.26.0 +* `#17093 `__: NEP: Adjust NEP-35 to make it more user-accessible +* `#17104 `__: ENH: Add placeholder stubs for all sub-modules +* `#17109 `__: MAINT: Split einsum into multiple files +* `#17112 `__: BUG: Handle errors from the PyCapsule API +* `#17115 `__: DOC: Fix spacing in vectorize doc +* `#17116 `__: API: Remove ``np.ctypeslib.ctypes_load_library`` +* `#17119 `__: DOC: make spacing consistent in NEP 41 bullet points +* `#17121 `__: BUG: core: fix ilp64 blas dot/vdot/... for strides > int32 max +* `#17123 `__: ENH: allow running mypy through runtests.py +* `#17127 `__: MAINT: Remove duplicated symbols from link step +* `#17129 `__: BLD: Check for reduce intrinsics and AVX512BW mask operations +* `#17132 `__: MAINT: Chain some exceptions in arraysetops. +* `#17133 `__: MAINT: Chain ValueError in ma.timer_comparison +* `#17137 `__: API,MAINT: Rewrite promotion using common DType and common instance +* `#17141 `__: MAINT: Make arrayprint str and repr the ndarray defaults. +* `#17142 `__: DOC: NEP-42: Fix a few typos. +* `#17143 `__: MAINT: Change handling of the expired financial functions. +* `#17144 `__: ENH: Add annotations to 3 functions in ``np.core.function_base`` +* `#17145 `__: MAINT, BUG: Replace uses of PyString_AsString. +* `#17146 `__: MAINT: ``Replace PyUString_*`` by ``PyUnicode_*`` equivalents. +* `#17149 `__: MAINT: Replace PyInt macros with their PyLong replacement +* `#17150 `__: ENH: Add support for the abstract scalars to cython code +* `#17151 `__: BUG: Fix incorrect cython definition of npy_cfloat +* `#17152 `__: MAINT: Clean up some ``Npy_`` vs ``Py_`` macro usage +* `#17154 `__: DOC: Remove references to PyCObject +* `#17159 `__: DOC: Update numpy4matlab +* `#17160 `__: Clean up some more bytes vs unicode handling +* `#17161 `__: BUG: Remove Void special case for "safe casting" +* `#17163 `__: MAINT: Remove redundant headers +* `#17164 `__: MAINT: Remove ``NPY_COPY_PYOBJECT_PTR`` +* `#17167 `__: BLD: Merge the npysort library into multiarray +* `#17168 `__: TST: Add tests mapping out the rules for metadata in promotion +* `#17171 `__: BUG: revert trim_zeros changes from gh-16911 +* `#17172 `__: ENH: Make ``np.complexfloating`` generic w.r.t. ``np.floating`` +* `#17176 `__: MAINT/ENH: datetime: remove calls to PyUnicode_AsASCIIString,... +* `#17180 `__: ENH: Added missing methods to ``np.flatiter`` +* `#17181 `__: DOC: Correct error in description of ndarray.base +* `#17182 `__: DOC: Document ``dtype.metadata`` +* `#17186 `__: MAINT: Use utf8 strings in more of datetime +* `#17188 `__: MAINT: Add placeholder stubs for ``ndarray`` and ``generic`` +* `#17191 `__: MAINT: Bump hypothesis from 5.26.0 to 5.30.0 +* `#17193 `__: MAINT: Remove some callers of functions in numpy.compat +* `#17195 `__: ENH: Make the window functions exactly symmetric +* `#17197 `__: MAINT: Improve error handling in npy_cpu_init +* `#17199 `__: DOC: Fix the documented signatures of four ``ufunc`` methods +* `#17201 `__: MAINT: Make the ``NPY_CPU_DISPATCH_CALL`` macros expressions not... 
+* `#17204 `__: DOC: Fixed headings for tutorials so they appear at new theme... +* `#17210 `__: DOC: Canonical_urls +* `#17214 `__: MAINT: Fix various issues with the ``np.generic`` annotations +* `#17219 `__: BLD: enabled negation of library choices in ``NPY_*_ORDER`` +* `#17220 `__: BUG, DOC: comment out metadata added via javascript +* `#17222 `__: MAINT, DOC: move informational files from numpy.doc.*.py to their... +* `#17223 `__: MAINT: use sysconfig not distutils.sysconfig where possible +* `#17225 `__: BUG: Fix dimension discovery of within array ragged cases +* `#17227 `__: DOC: Added templates for different types of issues. +* `#17233 `__: DEP: Deprecated ndindex.ndincr +* `#17235 `__: MAINT: Remove old PY_VERSION_HEX and sys.version_info code +* `#17237 `__: BUG: Avoid using ``np.random`` in typing tests. +* `#17239 `__: DOC: Fix link quick-start in old random API functions +* `#17241 `__: MAINT: ``__array_interface__`` data address cannot be bytes +* `#17242 `__: MAINT: Run slow CI jobs earlier so builds finishes sooner +* `#17247 `__: ENH: Add tool to help speed up Travis CI +* `#17250 `__: DOC: Fix docstring cross-referencing +* `#17252 `__: DOC: Added a PR "Reviewer guidelines" document. +* `#17257 `__: DOC: work around a bug in the new theme +* `#17258 `__: SIMD: add fused multiply subtract/add intrinics for all supported... +* `#17259 `__: MAINT: Bump hypothesis from 5.30.0 to 5.33.0 +* `#17260 `__: MAINT: Bump pydata-sphinx-theme from 0.3.2 to 0.4.0 +* `#17263 `__: DOC: add new glossary terms +* `#17264 `__: DOC: remove some glosssary terms +* `#17267 `__: TST: Fix the path to ``mypy.ini`` in ``runtests.py`` +* `#17268 `__: BUG: sysconfig attributes/distutils issue +* `#17273 `__: ENH: Annotate the arithmetic operations of ``ndarray`` and ``generic`` +* `#17278 `__: MAINT: Merge together index page content into a single file +* `#17279 `__: DOC: Fix a typo in shape_base. +* `#17284 `__: ENH: Pass optimizations arguments to asv build +* `#17285 `__: DEP: Change the financial name access warning to DeprecationWarning +* `#17288 `__: REL: Update master after 1.19.2 release. +* `#17289 `__: MAINT: Simplify ufunc pickling +* `#17290 `__: MAINT: Cleanup some pystring macros +* `#17292 `__: MAINT: Replace remaining PyString macros. +* `#17293 `__: MAINT: Replace PyUString_Check by PyUnicode_Check. +* `#17295 `__: BUG,ENH: fix pickling user-scalars by allowing non-format buffer... +* `#17296 `__: MAINT: Replace some ``pyint_*`` macros defined in ``npy_3kcompat``. +* `#17297 `__: BLD: set upper versions for build dependencies +* `#17299 `__: MAINT: (dtype-transfer) make copyswapn and legacy cast wrapper... +* `#17300 `__: MAINT: Replace PyBaseString_Check by PyUnicode_Check +* `#17302 `__: MAINT: Replace a couple of missed npy_3kcompat macros +* `#17304 `__: BUILD: pin pygments to 2.6.1, 2.7.0 breaks custom NumPyC lexer +* `#17307 `__: MAINT: Bump hypothesis from 5.33.0 to 5.35.1 +* `#17308 `__: MAINT: Bump pytest from 6.0.1 to 6.0.2 +* `#17309 `__: MAINT: Move the ``fromnumeric`` annotations to their own stub file +* `#17312 `__: MAINT: Syntax-highlight .src files on github +* `#17313 `__: MAINT: Mark vendored/generated files in .gitattributes +* `#17315 `__: MAINT: Cleanup f2py/cfuncs.py +* `#17319 `__: BUG: Set deprecated fields to null in PyArray_InitArrFuncs +* `#17320 `__: BUG: allow registration of hard-coded structured dtypes +* `#17326 `__: ENH: Add annotations for five array construction functions +* `#17329 `__: DOC: Fix incorrect ``.. 
deprecated::`` syntax that led to this... +* `#17330 `__: DOC: improve ``issubdtype`` and scalar type docs +* `#17331 `__: DOC: Remove the tables of scalar types, and use ``..autoclass``... +* `#17332 `__: DOC, BLD: update lexer highlighting and make numpydocs a regular... +* `#17334 `__: MAINT: Chaining exceptions in npyio.py +* `#17337 `__: NEP: Regenerate table in NEP 29 (add numpy 1.18 and 1.19 to list) +* `#17338 `__: DOC: Fix syntax errors in docstrings for versionchanged, versionadded +* `#17340 `__: SIMD: Add partial/non-contig load and store intrinsics for 32/64-bit +* `#17344 `__: ENH, BLD: Support for the NVIDIA HPC SDK nvfortran compiler +* `#17346 `__: BLD,BUG: Fix a macOS build failure when ``NPY_BLAS_ORDER=""`` +* `#17350 `__: DEV: Add PR prefix labeler and numpy prefix mapping +* `#17352 `__: DOC: Guide to writing how-tos +* `#17353 `__: DOC: How-to guide for I/O +* `#17354 `__: DOC: clarify residuals return param +* `#17356 `__: ENH: Add Npy__PyLong_AsInt function. +* `#17357 `__: MAINT: Bump hypothesis from 5.35.1 to 5.35.3 +* `#17364 `__: MAINT: Finish replacing PyInt_Check +* `#17369 `__: DOC: distutils: Remove an obsolete paragraph. +* `#17370 `__: NEP: Edit nep-0042 for more clarity +* `#17372 `__: ENH: Add annotations for remaining ``ndarray`` / ``generic`` non-magic... +* `#17373 `__: BUG: Fixes module data docstrings. +* `#17375 `__: DOC: Fix default_rng docstring +* `#17377 `__: BUG: ensure _UFuncNoLoopError can be pickled +* `#17380 `__: Minor grammatical correction in quickstart doc. +* `#17382 `__: DOC: NumPy restyling for pydata theme +* `#17383 `__: MAINT: Fix docstring for np.matmul +* `#17386 `__: MAINT: Bump hypothesis from 5.35.3 to 5.36.1 +* `#17388 `__: MAINT: Remove old debug print statement. +* `#17391 `__: DOC: Replace "About NumPy" with "Document conventions" +* `#17392 `__: DOC: Update info on doc style rules +* `#17393 `__: BUG: Fix default void, datetime, and timedelta in array coercion +* `#17396 `__: MAINT: Replace append_metastr_to_string function. +* `#17399 `__: BLD: Fixed ARGOUTVIEWM memory deallocation. Closes #17398. +* `#17400 `__: DOC: rm incorrect alias from recarray user article. +* `#17401 `__: MAINT: Rewrite can-cast logic in terms of NEP 42 +* `#17402 `__: DOC: Add arraysetops to an autosummary +* `#17404 `__: MAINT: Replace PyUString_ConcatAndDel in nditer_constr.c. +* `#17405 `__: MAINT: Replace PyUString_ConcatAndDel in mapping.c. +* `#17406 `__: ENH: Replace the module-level ``__getattr__`` with explicit type... +* `#17407 `__: DOC: in PR template, set expectations for PR review timeline +* `#17409 `__: MAINT: Cleanup remaining PyUString_ConcatAndDel use. 
+* `#17410 `__: API: Special case how numpy scalars are coerced to signed integer +* `#17411 `__: TST: Mark the typing tests as slow +* `#17412 `__: DOC: Fix a parameter type in the ``putmask`` docs +* `#17418 `__: DOC: adding operational form documentation for array ops +* `#17419 `__: DEP: Deprecate coercion to subarray dtypes +* `#17421 `__: BUG: Fix memory leak in array-coercion error paths +* `#17422 `__: MAINT: chains nested try-except in numpy/ma/core.py +* `#17423 `__: DOC: Remove bogus reference to _a_ +* `#17424 `__: DOC: Fix formatting issues in description of .c.src files +* `#17427 `__: NEP: nep-0029 typo correction +* `#17429 `__: MAINT: Move aliases for common scalar unions to ``numpy.typing`` +* `#17430 `__: BUG: Fix memoryleaks related to NEP 37 function overrides +* `#17431 `__: DOC: Fix the links for ``Ellipsis`` +* `#17432 `__: DOC: add references to einops and opt_einsum +* `#17433 `__: MAINT : Disable 32 bit PyPy CI testing on Windows. +* `#17435 `__: DOC: Security warning for issues template +* `#17436 `__: DOC: Fix "Feature request" spelling in issue templates +* `#17438 `__: MAINT: Chaining exception in numpy\numpy\ma\mrecords.py +* `#17440 `__: DOC: Cleaner template for PRs +* `#17442 `__: MAINT: fix exception chaining in format.py +* `#17443 `__: ENH: Warn on unsupported Python 3.10+ +* `#17444 `__: ENH: Add ``Typing :: Typed`` to the PyPi classifier +* `#17445 `__: DOC: Fix the references for macros +* `#17447 `__: NEP: update NEP 42 with discussion of type hinting applications +* `#17448 `__: DOC: Remove CoC pages from Sphinx +* `#17453 `__: MAINT: Chain exceptions in "_polybase.py" +* `#17455 `__: MAINT: Bump hypothesis from 5.36.1 to 5.37.0 +* `#17456 `__: ENH: add dtype option to numpy.lib.function_base.cov and corrcoef +* `#17457 `__: BUG: Fixes incorrect error message in numpy.ediff1d +* `#17459 `__: DOC: update code of conduct URL +* `#17464 `__: DOC: Add some entries for C types and macros +* `#17465 `__: ENH: Add annotations for bitwise operations +* `#17468 `__: DOC: add some missing scalar aliases +* `#17472 `__: TST: Fix doctest for full_like +* `#17473 `__: MAINT: py3k: remove os.fspath and os.PathLike backports +* `#17474 `__: MAINT: Move the ``np.core.numeric`` annotations to their own stub... 
+* `#17479 `__: ENH: type ``np.unicode_`` as ``np.str_`` +* `#17481 `__: DOC: Fix the entries for members of structures +* `#17483 `__: DOC: Fix the references for ``random.*`` +* `#17485 `__: BLD: circleCI- merge before build, add -n to sphinx +* `#17487 `__: MAINT: Remove duplicate placeholder annotations +* `#17497 `__: DOC: Use consistent lowercase on docs landing page +* `#17498 `__: MAINT: fix incompatible type comparison in numpy.lib.utils.info +* `#17501 `__: BUG: Fix failures in master related to userdtype registeration +* `#17502 `__: BUG: remove ``sys`` from the type stubs +* `#17503 `__: DOC: Fix empty 'C style guide' page +* `#17504 `__: DOC: Rename 'Quickstart tutorial' +* `#17508 `__: ENH: Added the Final feature for all constants +* `#17510 `__: DOC: Fewer blank lines in PR template +* `#17520 `__: DOC: Display real license on license page +* `#17521 `__: DOC: Add docstrings for some scalar types +* `#17523 `__: DOC: Update top links in landing page +* `#17525 `__: CI: Make merge ref grabbing conditional on the PR being active +* `#17527 `__: DOC: Fix Bool types in C functions +* `#17528 `__: Doc: Fix some links and typos +* `#17529 `__: MAINT: Cleanup compatibility code for pathlib +* `#17534 `__: DOC: Fix a typo +* `#17535 `__: ENH: add function to get broadcast shape from a given set of... +* `#17536 `__: BUG: Fixed crash on self-referential dtypes +* `#17537 `__: MAINT: Bump hypothesis from 5.37.0 to 5.37.1 +* `#17538 `__: MAINT: Bump pydata-sphinx-theme from 0.4.0 to 0.4.1 +* `#17539 `__: MAINT: Bump mypy from 0.782 to 0.790 +* `#17540 `__: ENH: Make ``np.number`` generic with respect to its precision +* `#17541 `__: CI: fix conditional for PR merge command +* `#17546 `__: MAINT: explicit disabling ``CCompilerOpt`` in F2PY +* `#17548 `__: BUG: Cygwin Workaround for #14787 on affected platforms +* `#17549 `__: DOC: Fix the entries of C functions +* `#17555 `__: DOC: Fix wrong blockquotes +* `#17558 `__: DOC: MAINT: Add NEP 43 links to NEP 42 +* `#17559 `__: DOC: Remove directives for some constants +* `#17564 `__: MAINT: Update the annotations in ``np.core.numeric`` +* `#17570 `__: DOC: Add the entry for ``NPY_FEATURE_VERSION`` +* `#17571 `__: DOC: Fix typos +* `#17572 `__: ENH: Add annotations for three new constants +* `#17576 `__: DOC: Fix Boolean array indexing typo +* `#17577 `__: BUG: Respect dtype of all-zero argument to poly1d +* `#17578 `__: NEP36: include additional feedback +* `#17580 `__: MAINT: Cleanup swig for Python 3. +* `#17581 `__: MAINT: Move the ``np.core.numerictypes`` annotations to their own... +* `#17583 `__: MAINT: Bump hypothesis from 5.37.1 to 5.37.3 +* `#17584 `__: ENH: Add annotations for ``np.core._type_aliases`` +* `#17594 `__: DOC: Typo in lexsort docstring +* `#17596 `__: DEP,BUG: Coercion/cast of array to a subarray dtype will be fixed +* `#17597 `__: TST: Clean up the errors of the typing tests +* `#17598 `__: BUG: Fixed file handle leak in array_tofile. +* `#17601 `__: TST: Fix a broken ``np.core.numeric`` test +* `#17603 `__: MAINT: Mark dead code as intentional for clang. 
+* `#17607 `__: DOC: removed old references to submodule licenses +* `#17608 `__: DOC: Fix typos (general documentation) +* `#17610 `__: Fully qualify license trove classifier +* `#17611 `__: BUG: mac dylib treated as part of extra objects by f2py +* `#17613 `__: ENH: Add annotations for 9 ``ndarray``/``generic`` magic methods +* `#17614 `__: DOC: Fix the document for arrays interface +* `#17618 `__: MAINT: Conversion of some strings to f-strings +* `#17619 `__: DOC: Fix some references +* `#17621 `__: TST: Valid docstring for config_py function show() +* `#17622 `__: MAINT: Conversion of some strings to fstrings, part II +* `#17623 `__: MAINT: Conversion of some strings to fstrings, part III +* `#17624 `__: DOC: Tidy up references to ``str_`` / ``bytes_`` +* `#17625 `__: MAINT: Conversion of some strings to fstrings, part iv +* `#17627 `__: DOC: Fix the references for ``__array_*__`` +* `#17628 `__: DOC: Add entries for macros +* `#17629 `__: DOC: Add ``identity_value`` to ``PyUFuncObject`` +* `#17630 `__: DOC: Replace ``PyCObject`` with ``PyCapsule`` +* `#17633 `__: DOC: Don't use Python highlighting for non-python code +* `#17638 `__: DOC: Fix some references +* `#17639 `__: MAINT: Bump hypothesis from 5.37.3 to 5.38.0 +* `#17641 `__: MAINT, BLD: update to OpenBLAS v0.3.12 +* `#17642 `__: DOC: Fix reference to atleast_1d +* `#17643 `__: ENH: Add annotations for ``np.core._ufunc_config`` +* `#17644 `__: ENH: Add annotations for ``np.core.shape_base`` +* `#17645 `__: BUG: fix np.timedelta64('nat').__format__ throwing an exception +* `#17654 `__: BUG: f2py incorrectly translates dimension declarations. +* `#17655 `__: BLD: Fix installing Numpy on z/OS +* `#17657 `__: NEP: Ensure inner loop signature is complete everywhere +* `#17658 `__: TST: simplify source path names in compilation test +* `#17662 `__: TST: f2py: Add a doctest for ``getlincoef`` +* `#17666 `__: REL: Update master after 1.19.3 release. +* `#17668 `__: TST: Make test suite work in FIPS (140-2) Mode +* `#17670 `__: DOC: f2py: Add a docstring for getarrlen +* `#17672 `__: DOC: Update README badge for travis-ci.com +* `#17673 `__: MAINT: Refine a number of ``np.generic`` annotations +* `#17675 `__: MAINT: Update release documentation and software +* `#17681 `__: SIMD: Add sum intrinsics for float/double. +* `#17682 `__: BUG: (nditer_impl.h) Use ``intp`` instead of ``char *`` for offset... +* `#17689 `__: BUG: Fix small bug in ``make_lite.py``. +* `#17691 `__: DOC: Modify Templates +* `#17692 `__: MAINT: Bump hypothesis from 5.38.0 to 5.41.0 +* `#17693 `__: MAINT: Bump pytz from 2020.1 to 2020.4 +* `#17695 `__: TST: use a more standard workflow for PyPy +* `#17696 `__: REL: Update master after 1.19.4 release. +* `#17699 `__: MAINT: Rename ``DtypeLike`` to ``DTypeLike`` +* `#17700 `__: Fix small typos. +* `#17701 `__: BUG: Fixed an issue where ``.pyi`` files were ignored by numpy... +* `#17703 `__: Fix Doc Typos & Added Example +* `#17708 `__: Improve the einsum bench by adding new bench cases and variable... +* `#17715 `__: REV: Revert gh-17654 - f2py incorrectly translates dimension... +* `#17717 `__: MAINT: Add more files to ``.gitgnore`` +* `#17720 `__: API: Do not import sliding_window_view to main namespace +* `#17723 `__: MAINT: Do not override ``sliding_window_view`` module to ``numpy`` +* `#17725 `__: NEP: Add NEP-35 instructions on reading like= downstream +* `#17729 `__: BLD: Use importlib to find numpy root directory in distutils +* `#17733 `__: MAINT: ma: Remove unused ``**options`` from MaskedArray ``__new__``... 
+* `#17735 `__: TST: Remove Python 3.6 CI testing. +* `#17738 `__: BLD, TST: move linux jobs to github actions +* `#17740 `__: MAINT: Bump hypothesis from 5.41.0 to 5.41.2 +* `#17743 `__: BLD, BUG: Fix cblas detection on windows +* `#17745 `__: TST: add pypy3.7 +* `#17748 `__: BLD: compare platform.architecture() correctly +* `#17749 `__: DOC: Add "performance" category to the release notes +* `#17751 `__: BUG: Fix segfault due to out of bound pointer in floatstatus... +* `#17753 `__: BUG: Fix buffer export dtype references +* `#17755 `__: BUG: Fix memory leaks found using valgrind +* `#17758 `__: BLD: Lazy load f2py test utilities +* `#17759 `__: BLD: use BUFFERSIZE=20 in OpenBLAS +* `#17763 `__: SIMD, BUG: fix reuses the previous values during the fallback... +* `#17768 `__: MAINT: update link to website in FUNDING.yml +* `#17773 `__: MAINT: Add BLD and STY to labeler prefixes. +* `#17776 `__: MAINT: Simplify Hypothesis configuration +* `#17787 `__: NEP: Make like= argument added in NEP-35 strict +* `#17788 `__: DOC: Fix up links, code blocks of release note fragments +* `#17796 `__: MAINT: Minor touchups in npyio +* `#17802 `__: MAINT: Update mailmap. +* `#17805 `__: MAINT: Set the ufunc and ndarray ops return type to ``Any`` +* `#17812 `__: Update linalg.py +* `#17815 `__: DOC: Fix empty_like docstring +* `#17823 `__: DOC: Add missing release fragments to ``upcoming_changes``. +* `#17828 `__: BUG: Fix incorrectly passed size in masked processing +* `#17829 `__: MAINT: Bump hypothesis from 5.41.2 to 5.41.3 +* `#17830 `__: TST: Add back durations flag for DEBUG builds. +* `#17832 `__: BUG: Fix subarray dtype used with too large count in fromfile +* `#17833 `__: BUG: Fix pickling of scalars with ``NPY_LISTPICKLE`` +* `#17838 `__: DOC: Update the ``numpy.typing`` documentation +* `#17841 `__: DOC: Fixing boilerplate code example +* `#17844 `__: MAINT: Add ``__all__`` to ``numpy.typing`` +* `#17848 `__: DOC: Add release note for gh-16161. +* `#17855 `__: BUG: Fix incorrect C function prototypes/declarations. +* `#17857 `__: MAINT: Prepare for the NumPy 1.20.x branch. +* `#17869 `__: BUG, TST: use python-version not PYTHON_VERSION +* `#17875 `__: DOC: Prepare for 1.20.0 release +* `#17879 `__: BUG: Fix buffer readflag errors and small leaks From 743290814b5a3f86d33652f5ffd4c7c88e89bd62 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 30 Nov 2020 07:02:17 +0000 Subject: [PATCH 0113/1270] MAINT: Bump hypothesis from 5.41.3 to 5.41.4 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 5.41.3 to 5.41.4. 
- [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-5.41.3...hypothesis-python-5.41.4) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 2ca10be5d16d..97d06a0cfaec 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel setuptools<49.2.0 -hypothesis==5.41.3 +hypothesis==5.41.4 pytest==6.0.2 pytz==2020.4 pytest-cov==2.10.1 From 2061a7b06acb9e15c889ffcd1b550b379a60fe46 Mon Sep 17 00:00:00 2001 From: jakobjakobson13 <43045863+jakobjakobson13@users.noreply.github.com> Date: Mon, 30 Nov 2020 12:11:20 +0100 Subject: [PATCH 0114/1270] removed dict entry `openblas64_-v0.3.10-win_amd64-gcc_7_1_0.zip` is already given in line 57 --- tools/openblas_support.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/openblas_support.py b/tools/openblas_support.py index c0e9dddab467..50837177b680 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -84,8 +84,6 @@ "f68fea21fbc73d06b7566057cad2ed8c7c0eb71fabf9ed8a609f86e5bc60ce5e", "openblas64_-v0.3.10-manylinux2014_aarch64.tar.gz": "15e6eed8cb0df8b88e52baa136ffe1769c517e9de7bcdfd81ec56420ae1069e9", - "openblas64_-v0.3.10-win_amd64-gcc_7_1_0.zip": - "aea3f9c8bdfe0b837f0d2739a6c755b12b6838f6c983e4ede71b4e1b576e6e77", } From 242a1b52fba8e654f3d2a309a6390fac51909df5 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 30 Nov 2020 14:33:20 -0700 Subject: [PATCH 0115/1270] ENH: Timestamp development versions. This adds a timestamp to development versions of NumPy. We used to do this for NumPy nightly builds so that pip would pickup the latest version, but pip 20.3 breaks with that as there is disagreement between the wheel name and the internal version. Closes #17885. 
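As a quick illustration of the version string this change produces, here is a minimal sketch mirroring the scheme in the patch below; the base version and git revision used here are made-up placeholders, not values taken from the repository::

    import time

    # Placeholder inputs for illustration only; in setup.py these come from
    # numpy's version machinery and the current git revision.
    VERSION = "1.21.0"
    GIT_REVISION = "0123456789abcdef0123456789abcdef01234567"

    # Same scheme as the patch: append a local-time timestamp plus the short
    # git hash to development versions, so each nightly build compares newer.
    time_stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    full_version = f"{VERSION}.dev0+{time_stamp}_{GIT_REVISION[:7]}"
    print(full_version)  # e.g. 1.21.0.dev0+20201130143320_0123456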
--- setup.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4d0ded9253ba..326d34e7a857 100755 --- a/setup.py +++ b/setup.py @@ -130,7 +130,10 @@ def get_version_info(): GIT_REVISION = "Unknown" if not ISRELEASED: - FULLVERSION += '.dev0+' + GIT_REVISION[:7] + import time + + time_stamp = time.strftime("%Y%m%d%H%M%S", time.localtime()) + FULLVERSION += f'.dev0+{time_stamp}_{GIT_REVISION[:7]}' return FULLVERSION, GIT_REVISION From 28aa88312164987462b1e7744e0efb5bee65c724 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 1 Dec 2020 08:52:50 +0530 Subject: [PATCH 0116/1270] MAINT: Linting fixes --- LICENSES_bundled.txt | 2 +- numpy/core/src/umath/loops.c.src | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt index 80557d3e6ee7..26c7a7829361 100644 --- a/LICENSES_bundled.txt +++ b/LICENSES_bundled.txt @@ -18,5 +18,5 @@ License: MIT Name: libdivide Files: numpy/core/include/numpy/libdivide/* -License: zlib +License: Zlib For license text, see numpy/core/include/numpy/libdivide/LICENSE.txt diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 6637c0e4e4f9..6823a13b213d 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -862,14 +862,14 @@ NPY_NO_EXPORT void /* When the divisor is a constant, use libdivide for faster division */ if (steps[1] == 0) { - /* In case of empty array, just return*/ + /* In case of empty array, just return */ if (n == 0) { return; } const @type@ in2 = *(@type@ *)ip2; - /* If divisor is 0, we need not compute anything*/ + /* If divisor is 0, we need not compute anything */ if (in2 == 0) { npy_set_floatstatus_divbyzero(); BINARY_LOOP_SLIDING { @@ -1415,12 +1415,12 @@ TIMEDELTA_dm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const NPY_NO_EXPORT void TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { - /* NOTE: This code is similar to array floor divide*/ + /* NOTE: This code is similar to array floor divide */ BINARY_DEFS /* When the divisor is a constant, use libdivide for faster division */ if (steps[1] == 0) { - /* In case of empty array, just return*/ + /* In case of empty array, just return */ if (n == 0) { return; } @@ -1528,12 +1528,12 @@ TIMEDELTA_mm_m_remainder(char **args, npy_intp const *dimensions, npy_intp const NPY_NO_EXPORT void TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { - /* NOTE: This code is similar to array floor divide*/ + /* NOTE: This code is similar to array floor divide */ BINARY_DEFS /* When the divisor is a constant, use libdivide for faster division */ if (steps[1] == 0) { - /* In case of empty array, just return*/ + /* In case of empty array, just return */ if (n == 0) { return; } From e4060c39675718ab3b19d02e23ef1644aeb9c6da Mon Sep 17 00:00:00 2001 From: Agbonze Osazuwa Date: Tue, 1 Dec 2020 04:46:39 +0100 Subject: [PATCH 0117/1270] Correct sentence/statement composition Incorrect statement spotted *In the simplest example of broadcasting, the scalar ``b`` is stretched to become an array ~of with the same~ shape as ``a`` so the shapes are compatible for element-by-element multiplication.* --- doc/source/user/theory.broadcasting.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/theory.broadcasting.rst b/doc/source/user/theory.broadcasting.rst index 
b37edeacc121..a82d78e6c9a8 100644 --- a/doc/source/user/theory.broadcasting.rst +++ b/doc/source/user/theory.broadcasting.rst @@ -69,7 +69,7 @@ numpy on Windows 2000 with one million element arrays. *Figure 1* *In the simplest example of broadcasting, the scalar ``b`` is - stretched to become an array of with the same shape as ``a`` so the shapes + stretched to become an array of same shape as ``a`` so the shapes are compatible for element-by-element multiplication.* From 88be44bacffeb336317e51c5199e1ef6e41ee183 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 2 Dec 2020 13:35:57 -0700 Subject: [PATCH 0118/1270] MAINT: Remove remaining uses of Python 3.6. We missed a few first time around. --- INSTALL.rst.txt | 2 +- setup.py | 7 +++---- test_requirements.txt | 4 +--- tox.ini | 2 +- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/INSTALL.rst.txt b/INSTALL.rst.txt index 5ee97d7901a0..1bc97c4b5f86 100644 --- a/INSTALL.rst.txt +++ b/INSTALL.rst.txt @@ -14,7 +14,7 @@ Prerequisites Building NumPy requires the following installed software: -1) Python__ 3.6.x or newer. +1) Python__ 3.7.x or newer. Please note that the Python development headers also need to be installed, e.g., on Debian/Ubuntu one needs to install both `python3` and diff --git a/setup.py b/setup.py index 326d34e7a857..528b4f1e404a 100755 --- a/setup.py +++ b/setup.py @@ -26,8 +26,8 @@ import warnings -if sys.version_info[:2] < (3, 6): - raise RuntimeError("Python version >= 3.6 required.") +if sys.version_info[:2] < (3, 7): + raise RuntimeError("Python version >= 3.7 required.") import builtins @@ -40,7 +40,6 @@ Programming Language :: C Programming Language :: Python Programming Language :: Python :: 3 -Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 @@ -473,7 +472,7 @@ def setup_package(): platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], test_suite='pytest', cmdclass=cmdclass, - python_requires='>=3.6', + python_requires='>=3.7', zip_safe=False, entry_points={ 'console_scripts': f2py_cmds diff --git a/test_requirements.txt b/test_requirements.txt index 97d06a0cfaec..9c94abf91a05 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -6,12 +6,10 @@ pytest==6.0.2 pytz==2020.4 pytest-cov==2.10.1 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' -pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy' # for numpy.random.test.test_extending cffi # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # - Mypy doesn't currently work on Python 3.9 -# - Python 3.6 doesn't work because it doesn't understand py.typed -mypy==0.790; platform_python_implementation != "PyPy" and python_version > "3.6" +mypy==0.790; platform_python_implementation != "PyPy" typing_extensions diff --git a/tox.ini b/tox.ini index e58dd6efed57..9bc2bbac36dd 100644 --- a/tox.ini +++ b/tox.ini @@ -26,7 +26,7 @@ [tox] envlist = - py36,py37,py38, + py37,py38,py39, py37-not-relaxed-strides [testenv] From 755ea9a83e410c226c24b95cb892da3f64248d42 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 2 Dec 2020 12:39:26 -0600 Subject: [PATCH 0119/1270] ENH: Micro-optimize where=True path for mean, var, any, and all This removes a 20%-30% overhead, and thus the largest chunk of slowdown incurred by adding the `where` argument. Most other places have fast-paths for `where=True`, this one also should have it. 
The additional argument does slow down the function versions a bit more than this, but that is to be expected probably (it has to build a new argument dict, at some point we might want to move this to C, but that seems worth much more with FASTCALL logic). --- numpy/core/_methods.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index 75fd32ec8d66..c730e2035f36 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -51,9 +51,15 @@ def _prod(a, axis=None, dtype=None, out=None, keepdims=False, return umr_prod(a, axis, dtype, out, keepdims, initial, where) def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_any(a, axis, dtype, out, keepdims) return umr_any(a, axis, dtype, out, keepdims, where=where) def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_all(a, axis, dtype, out, keepdims) return umr_all(a, axis, dtype, out, keepdims, where=where) def _count_reduce_items(arr, axis, keepdims=False, where=True): @@ -158,7 +164,7 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): is_float16_result = False rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) - if umr_any(rcount == 0, axis=None): + if rcount == 0 if where is True else umr_any(rcount == 0): warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) # Cast bool, unsigned int, and int to float64 by default @@ -191,7 +197,7 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) # Make this warning show up on top. - if umr_any(ddof >= rcount, axis=None): + if ddof >= rcount if where is True else umr_any(ddof >= rcount): warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2) From 8ee149dbbdfdbc0ddd19a40460d9e8061792ed84 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 2 Dec 2020 15:04:25 -0600 Subject: [PATCH 0120/1270] DOC: Add where to all, any, mean, var, and std method docstrings --- numpy/core/_add_newdocs.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index c9968f122942..e2bf6c439a09 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -2570,7 +2570,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('all', """ - a.all(axis=None, out=None, keepdims=False) + a.all(axis=None, out=None, keepdims=False, *, where=True) Returns True if all elements evaluate to True. @@ -2585,7 +2585,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('any', """ - a.any(axis=None, out=None, keepdims=False) + a.any(axis=None, out=None, keepdims=False, *, where=True) Returns True if any of the elements of `a` evaluate to True. @@ -3242,7 +3242,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', """ - a.mean(axis=None, dtype=None, out=None, keepdims=False) + a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True) Returns the average of the array elements along given axis. 
@@ -3813,7 +3813,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('std', """ - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False) + a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) Returns the standard deviation of the array elements along given axis. @@ -4100,7 +4100,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('var', """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False) + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) Returns the variance of the array elements, along given axis. From d54e2dcc17e0f0a8702d4b0eb4ac30a5c927a056 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 2 Dec 2020 20:22:00 -0600 Subject: [PATCH 0121/1270] DEP: Finalize unravel_index `dims` alias for `shape` keyword The argument was renamed to `shape` and deprecated since NumPy 1.16, so the deprecation can now be finalized. --- .../upcoming_changes/17900.expired.rst | 2 ++ numpy/core/multiarray.py | 5 +-- numpy/core/src/multiarray/compiled_base.c | 35 ------------------- numpy/lib/tests/test_index_tricks.py | 15 +++----- 4 files changed, 7 insertions(+), 50 deletions(-) create mode 100644 doc/release/upcoming_changes/17900.expired.rst diff --git a/doc/release/upcoming_changes/17900.expired.rst b/doc/release/upcoming_changes/17900.expired.rst new file mode 100644 index 000000000000..810d672414e9 --- /dev/null +++ b/doc/release/upcoming_changes/17900.expired.rst @@ -0,0 +1,2 @@ +* The ``shape`` argument `numpy.unravel_index` cannot be passed + as ``dims`` keyword argument anymore. (Was deprecated in NumPy 1.16.) diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index f311fad8d390..f736973def1f 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -999,7 +999,7 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) -def unravel_index(indices, shape=None, order=None, dims=None): +def unravel_index(indices, shape=None, order=None): """ unravel_index(indices, shape, order='C') @@ -1045,9 +1045,6 @@ def unravel_index(indices, shape=None, order=None, dims=None): (3, 1, 4, 1) """ - if dims is not None: - warnings.warn("'shape' argument should be used instead of 'dims'", - DeprecationWarning, stacklevel=3) return (indices,) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 8ab59201523d..da857071b83c 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1229,41 +1229,6 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds) char *kwlist[] = {"indices", "shape", "order", NULL}; - /* - * TODO: remove this in favor of warning raised in the dispatcher when - * __array_function__ is enabled by default. - */ - - /* - * Continue to support the older "dims" argument in place - * of the "shape" argument. Issue an appropriate warning - * if "dims" is detected in keywords, then replace it with - * the new "shape" argument and continue processing as usual. 
- */ - if (kwds) { - PyObject *dims_item, *shape_item; - dims_item = _PyDict_GetItemStringWithError(kwds, "dims"); - if (dims_item == NULL && PyErr_Occurred()){ - return NULL; - } - shape_item = _PyDict_GetItemStringWithError(kwds, "shape"); - if (shape_item == NULL && PyErr_Occurred()){ - return NULL; - } - if (dims_item != NULL && shape_item == NULL) { - if (DEPRECATE("'shape' argument should be" - " used instead of 'dims'") < 0) { - return NULL; - } - if (PyDict_SetItemString(kwds, "shape", dims_item) < 0) { - return NULL; - } - if (PyDict_DelItemString(kwds, "dims") < 0) { - return NULL; - } - } - } - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&:unravel_index", kwlist, &indices0, diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index 843e27cefc91..c21aefd1a66b 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -16,23 +16,13 @@ class TestRavelUnravelIndex: def test_basic(self): assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) - # test backwards compatibility with older dims - # keyword argument; see Issue #10586 - with assert_warns(DeprecationWarning): - # we should achieve the correct result - # AND raise the appropriate warning - # when using older "dims" kw argument - assert_equal(np.unravel_index(indices=2, - dims=(2, 2)), - (1, 0)) - # test that new shape argument works properly assert_equal(np.unravel_index(indices=2, shape=(2, 2)), (1, 0)) # test that an invalid second keyword argument - # is properly handled + # is properly handled, including the old name `dims`. with assert_raises(TypeError): np.unravel_index(indices=2, hape=(2, 2)) @@ -42,6 +32,9 @@ def test_basic(self): with assert_raises(TypeError): np.unravel_index(254, ims=(17, 94)) + with assert_raises(TypeError): + np.unravel_index(254, dims=(17, 94)) + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) From 11ee3340218aba1e6873139274f12f212dec5e0d Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 3 Dec 2020 00:18:41 +0200 Subject: [PATCH 0122/1270] BUG: merging PR to use -Werror=undef broke another PR --- numpy/core/include/numpy/npy_common.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h index 9ce5caa66056..c8495db8e58f 100644 --- a/numpy/core/include/numpy/npy_common.h +++ b/numpy/core/include/numpy/npy_common.h @@ -17,7 +17,9 @@ #ifndef NPY_RELAXED_STRIDES_DEBUG #define NPY_RELAXED_STRIDES_DEBUG 0 #endif - +#ifndef NPY_USE_NEW_CASTINGIMPL + #define NPY_USE_NEW_CASTINGIMPL 0 +#endif /* * using static inline modifiers when defining npy_math functions * allows the compiler to make optimizations when possible From 4950fd10e678faea859fc8d8bd5d7ea9df2007b1 Mon Sep 17 00:00:00 2001 From: "Tomoki, Karatsu" <49965247+t-karatsu@users.noreply.github.com> Date: Thu, 3 Dec 2020 22:01:43 +0900 Subject: [PATCH 0123/1270] ENH: add support for fujitsu compiler to numpy. 
(#17792) --- numpy/distutils/fcompiler/__init__.py | 2 +- numpy/distutils/fcompiler/fujitsu.py | 46 +++++++++++++++++++++++++++ numpy/tests/test_public_api.py | 1 + 3 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 numpy/distutils/fcompiler/fujitsu.py diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py index 1f340a412913..76f00ee91b97 100644 --- a/numpy/distutils/fcompiler/__init__.py +++ b/numpy/distutils/fcompiler/__init__.py @@ -744,7 +744,7 @@ def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): 'intelvem', 'intelem', 'flang')), ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', 'vast', 'compaq', - 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor')), + 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor', 'fujitsu')), ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), ('irix.*', ('mips', 'gnu', 'gnu95',)), diff --git a/numpy/distutils/fcompiler/fujitsu.py b/numpy/distutils/fcompiler/fujitsu.py new file mode 100644 index 000000000000..ddce67456d18 --- /dev/null +++ b/numpy/distutils/fcompiler/fujitsu.py @@ -0,0 +1,46 @@ +""" +fujitsu + +Supports Fujitsu compiler function. +This compiler is developed by Fujitsu and is used in A64FX on Fugaku. +""" +from numpy.distutils.fcompiler import FCompiler + +compilers = ['FujitsuFCompiler'] + +class FujitsuFCompiler(FCompiler): + compiler_type = 'fujitsu' + description = 'Fujitsu Fortran Compiler' + + possible_executables = ['frt'] + version_pattern = r'frt \(FRT\) (?P[a-z\d.]+)' + # $ frt --version + # frt (FRT) x.x.x yyyymmdd + + executables = { + 'version_cmd' : ["", "--version"], + 'compiler_f77' : ["frt", "-Fixed"], + 'compiler_fix' : ["frt", "-Fixed"], + 'compiler_f90' : ["frt"], + 'linker_so' : ["frt", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-KPIC'] + module_dir_switch = '-M' + module_include_switch = '-I' + + def get_flags_opt(self): + return ['-O3'] + def get_flags_debug(self): + return ['-g'] + def runtime_library_dir_option(self, dir): + return f'-Wl,-rpath={dir}' + def get_libraries(self): + return ['fj90f', 'fj90i', 'fjsrcinfo'] + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler('fujitsu').get_version()) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index a7bd0f115d15..1382e1c4b52a 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -243,6 +243,7 @@ def test_NPY_NO_EXPORT(): "distutils.fcompiler.nv", "distutils.fcompiler.sun", "distutils.fcompiler.vast", + "distutils.fcompiler.fujitsu", "distutils.from_template", "distutils.intelccompiler", "distutils.lib2def", From 1ccb4c6dbfa6194d1627885e39f81823bce44fc7 Mon Sep 17 00:00:00 2001 From: FX Coudert Date: Thu, 3 Dec 2020 16:44:16 +0100 Subject: [PATCH 0124/1270] Update gnu.py --- numpy/distutils/fcompiler/gnu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index 0d9d769c2a8c..68d1501eee6a 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -126,7 +126,7 @@ def get_flags_linker_so(self): target = '10.9' s = f'Env. 
variable MACOSX_DEPLOYMENT_TARGET set to {target}' warnings.warn(s, stacklevel=2) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = target + os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) else: opt.append("-shared") From bd7609c2aa11de9f46d084e607ae42d1bbe17797 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Thu, 3 Dec 2020 12:35:40 -0800 Subject: [PATCH 0125/1270] Fix AttributeError: 'bool' object has no attribute 'ndim' --- numpy/testing/_private/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index fb33bdcbd7f7..e974bbd0911a 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -745,7 +745,7 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): # flag as it everywhere, so we should return the scalar flag. if isinstance(x_id, bool) or x_id.ndim == 0: return bool_(x_id) - elif isinstance(x_id, bool) or y_id.ndim == 0: + elif isinstance(y_id, bool) or y_id.ndim == 0: return bool_(y_id) else: return y_id From a430fab55823c4420b729f0b990b4c2cc87dddf2 Mon Sep 17 00:00:00 2001 From: Derek Homeier Date: Fri, 4 Dec 2020 12:26:41 +0100 Subject: [PATCH 0126/1270] TST: remove stray '+' from f-string upgrade --- numpy/random/_examples/numba/extending.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_examples/numba/extending.py b/numpy/random/_examples/numba/extending.py index da4d9394461a..f387db69502a 100644 --- a/numpy/random/_examples/numba/extending.py +++ b/numpy/random/_examples/numba/extending.py @@ -44,7 +44,7 @@ def numpycall(): assert r1.shape == r2.shape t1 = timeit(numbacall, number=1000) -+print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms') +print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms') t2 = timeit(numpycall, number=1000) print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms') From b86408552e823a8d2cdc5db9c3a89560b3c462ab Mon Sep 17 00:00:00 2001 From: Maia Kaplan Date: Sun, 29 Nov 2020 15:29:37 -0800 Subject: [PATCH 0127/1270] BUG: np.arange: As only kwarg, allow `stop` and disallow `start`. (gh-17764) * This matches args behaviour. 
* Avoids `arange(start=4)` from returning a range from 0 to 4 * Allows `arange(stop=4)` to run without error, to return range from 0 to 4 --- numpy/core/src/multiarray/multiarraymodule.c | 14 +++++++++++++- numpy/core/tests/test_multiarray.py | 16 ++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 1aad70dc65bb..196dd0e3b8f3 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -2930,7 +2930,7 @@ array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { static char *kwd[] = {"start", "stop", "step", "dtype", "like", NULL}; PyArray_Descr *typecode = NULL; - if (!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&$O:arange", kwd, + if (!PyArg_ParseTupleAndKeywords(args, kws, "|OOOO&$O:arange", kwd, &o_start, &o_stop, &o_step, @@ -2940,6 +2940,18 @@ array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { return NULL; } + if (o_stop == NULL) { + if (args == NULL || PyTuple_GET_SIZE(args) == 0){ + PyErr_SetString(PyExc_TypeError, + "arange() requires stop to be specified."); + return NULL; + } + } + else if (o_start == NULL) { + o_start = o_stop; + o_stop = NULL; + } + array_function_result = array_implement_c_array_function_creation( "arange", args, kws); if (array_function_result != Py_NotImplemented) { diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 515287f16310..f4d7ef999e7d 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -8295,6 +8295,22 @@ def test_zero_step(self): assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) + def test_require_range(self): + assert_raises(TypeError, np.arange) + assert_raises(TypeError, np.arange, step=3) + assert_raises(TypeError, np.arange, dtype='int64') + assert_raises(TypeError, np.arange, start=4) + + def test_start_stop_kwarg(self): + keyword_stop = np.arange(stop=3) + keyword_zerotostop = np.arange(start=0, stop=3) + keyword_start_stop = np.arange(start=3, stop=9) + + assert len(keyword_stop) == 3 + assert len(keyword_zerotostop) == 3 + assert len(keyword_start_stop) == 6 + assert_array_equal(keyword_stop, keyword_zerotostop) + class TestArrayFinalize: """ Tests __array_finalize__ """ From d169276c467903be95f1ba94de1cee9bb32b280a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 4 Dec 2020 10:51:24 -0600 Subject: [PATCH 0128/1270] DOC: Update release notes to mention `type(dtype) is not np.dtype` This also requires mentioning the C-API macro `#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)` which was updated to the above in 1.16.6 meaning that using the macro and compiling against an older NumPy version will cause issues. The macro has to be avoided in that case. --- doc/source/release/1.20.0-notes.rst | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index 73b470f3fc29..9f46a3e806a2 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -253,6 +253,18 @@ library. Compatibility notes =================== +``isinstance(dtype, np.dtype)`` and not ``type(dtype) is not np.dtype`` +----------------------------------------------------------------------- +NumPy dtypes are not direct instances of ``np.dtype`` anymore. 
Code that +may have used ``type(dtype) is np.dtype`` will always return ``False`` and +must be updated to use the correct version ``isinstance(dtype, np.dtype)``. + +This change also affects the C-side macro ``PyArray_DescrCheck`` if compiled +against a NumPy older than 1.16.6. If code uses this macro and wishes to +compile against an older version of NumPy, it must replace the macro +(see also `C API changes`_ section). + + Same kind casting in concatenate with ``axis=None`` --------------------------------------------------- When `~numpy.concatenate` is called with ``axis=None``, @@ -524,6 +536,23 @@ cannot represent ``b"1"`` faithfully. C API changes ============= +The ``PyArray_DescrCheck`` macro is modified +-------------------------------------------- +The ``PyArray_DescrCheck`` macro has been updated since NumPy 1.16.6 to be:: + + #define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) + +Starting with NumPy 1.20 code that is compiled against an earlier version +will be API incompatible with NumPy 1.20. +The fix is to either compile against 1.16.6 (if the NumPy 1.16 release is +the oldest release you wish to support), or manually inline the macro by +replacing it with the new definition:: + + PyObject_TypeCheck(op, &PyArrayDescr_Type) + +which is compatible with all NumPy versions. + + Size of ``np.ndarray`` and ``np.void_`` changed ----------------------------------------------- The size of the ``PyArrayObject`` and ``PyVoidScalarObject`` From b2778a8a6d99dfbfee8f09daf6c069d1acf1aa8d Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 5 Dec 2020 13:22:39 +0100 Subject: [PATCH 0129/1270] MAINT: Replace `contextlib_nullcontext` with `contextlib.nullcontext` --- numpy/core/_methods.py | 5 +++-- numpy/core/memmap.py | 12 ++++++------ numpy/core/records.py | 7 +++---- numpy/lib/npyio.py | 6 +++--- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index c730e2035f36..1867ba68c4e6 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -4,6 +4,7 @@ """ import warnings +from contextlib import nullcontext from numpy.core import multiarray as mu from numpy.core import umath as um @@ -11,7 +12,7 @@ from numpy.core import numerictypes as nt from numpy.core import _exceptions from numpy._globals import _NoValue -from numpy.compat import pickle, os_fspath, contextlib_nullcontext +from numpy.compat import pickle, os_fspath # save those O(100) nanoseconds! umr_maximum = um.maximum.reduce @@ -279,7 +280,7 @@ def _ptp(a, axis=None, out=None, keepdims=False): def _dump(self, file, protocol=2): if hasattr(file, 'write'): - ctx = contextlib_nullcontext(file) + ctx = nullcontext(file) else: ctx = open(os_fspath(file), "wb") with ctx as f: diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py index 66653c0c155b..892ad2540a2a 100644 --- a/numpy/core/memmap.py +++ b/numpy/core/memmap.py @@ -1,8 +1,8 @@ +from contextlib import nullcontext + import numpy as np from .numeric import uint8, ndarray, dtype -from numpy.compat import ( - os_fspath, contextlib_nullcontext, is_pathlib_path -) +from numpy.compat import os_fspath, is_pathlib_path from numpy.core.overrides import set_module __all__ = ['memmap'] @@ -38,7 +38,7 @@ class memmap(ndarray): which returns a view into an mmap buffer. Flush the memmap instance to write the changes to the file. Currently there - is no API to close the underlying ``mmap``. It is tricky to ensure the + is no API to close the underlying ``mmap``. 
It is tricky to ensure the resource is actually closed, since it may be shared between different memmap instances. @@ -112,7 +112,7 @@ class memmap(ndarray): The memmap object can be used anywhere an ndarray is accepted. Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns ``True``. - + Memory-mapped files cannot be larger than 2GB on 32-bit systems. When a memmap causes a file to be created or extended beyond its @@ -223,7 +223,7 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, raise ValueError("shape must be given") if hasattr(filename, 'read'): - f_ctx = contextlib_nullcontext(filename) + f_ctx = nullcontext(filename) else: f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b') diff --git a/numpy/core/records.py b/numpy/core/records.py index c2f6c6965d48..00d456658bc4 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -36,12 +36,11 @@ import os import warnings from collections import Counter, OrderedDict +from contextlib import nullcontext from . import numeric as sb from . import numerictypes as nt -from numpy.compat import ( - os_fspath, contextlib_nullcontext -) +from numpy.compat import os_fspath from numpy.core.overrides import set_module from .arrayprint import get_printoptions @@ -914,7 +913,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase interface. # Example of fd: gzip, BytesIO, BufferedReader # file already opened - ctx = contextlib_nullcontext(fd) + ctx = nullcontext(fd) else: # open file ctx = open(os_fspath(fd), 'rb') diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index 3b2de3e61e1f..af8e28e42051 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -24,7 +24,7 @@ from numpy.compat import ( asbytes, asstr, asunicode, os_fspath, os_PathLike, - pickle, contextlib_nullcontext + pickle ) @@ -517,7 +517,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True): # [1 2] [1 3] """ if hasattr(file, 'write'): - file_ctx = contextlib_nullcontext(file) + file_ctx = contextlib.nullcontext(file) else: file = os_fspath(file) if not file.endswith('.npy'): @@ -1792,7 +1792,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, fid_ctx = contextlib.closing(fid) else: fid = fname - fid_ctx = contextlib_nullcontext(fid) + fid_ctx = contextlib.nullcontext(fid) fhd = iter(fid) except TypeError as e: raise TypeError( From a99e1a8d3a2d2341bd1c8cf9a30db751fc4481ab Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 5 Dec 2020 13:28:08 +0100 Subject: [PATCH 0130/1270] DOC: Add a note that one should preferably use `contextlib.nullcontext` --- numpy/compat/py3k.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py index f36aaca170ff..e1e236d92306 100644 --- a/numpy/compat/py3k.py +++ b/numpy/compat/py3k.py @@ -94,6 +94,9 @@ class contextlib_nullcontext: cm = optional_cm if condition else nullcontext() with cm: # Perform operation, using optional_cm if condition is True + + .. note:: + Prefer using `contextlib.nullcontext` instead of this context manager. 
""" def __init__(self, enter_result=None): From 312897920dedc683d7693d7eb1a63373262f259c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 5 Dec 2020 21:06:54 +0100 Subject: [PATCH 0131/1270] TST: Fixed an issue where the typing tests would fail for comparison operations --- numpy/typing/tests/data/reveal/comparisons.py | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/numpy/typing/tests/data/reveal/comparisons.py b/numpy/typing/tests/data/reveal/comparisons.py index 82d1fa6dee68..507f713c7d5d 100644 --- a/numpy/typing/tests/data/reveal/comparisons.py +++ b/numpy/typing/tests/data/reveal/comparisons.py @@ -33,8 +33,8 @@ reveal_type(td > i) # E: numpy.bool_ reveal_type(td > i4) # E: numpy.bool_ reveal_type(td > i8) # E: numpy.bool_ -reveal_type(td > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(td > SEQ) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(td > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(td > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] # boolean @@ -51,8 +51,8 @@ reveal_type(b_ > c) # E: numpy.bool_ reveal_type(b_ > c16) # E: numpy.bool_ reveal_type(b_ > c8) # E: numpy.bool_ -reveal_type(b_ > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(b_ > SEQ) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(b_ > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(b_ > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] # Complex @@ -67,8 +67,8 @@ reveal_type(c16 > c) # E: numpy.bool_ reveal_type(c16 > f) # E: numpy.bool_ reveal_type(c16 > i) # E: numpy.bool_ -reveal_type(c16 > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(c16 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(c16 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(c16 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(c16 > c16) # E: numpy.bool_ reveal_type(f8 > c16) # E: numpy.bool_ @@ -81,8 +81,8 @@ reveal_type(c > c16) # E: numpy.bool_ reveal_type(f > c16) # E: numpy.bool_ reveal_type(i > c16) # E: numpy.bool_ -reveal_type(AR > c16) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(SEQ > c16) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(AR > c16) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(SEQ > c16) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(c8 > c16) # E: numpy.bool_ reveal_type(c8 > f8) # E: numpy.bool_ @@ -95,8 +95,8 @@ reveal_type(c8 > c) # E: numpy.bool_ reveal_type(c8 > f) # E: numpy.bool_ reveal_type(c8 > i) # E: numpy.bool_ -reveal_type(c8 > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(c8 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(c8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(c8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(c16 > c8) # E: numpy.bool_ reveal_type(f8 > c8) # E: numpy.bool_ @@ -109,8 +109,8 @@ reveal_type(c > c8) # E: numpy.bool_ reveal_type(f > c8) # E: numpy.bool_ reveal_type(i > c8) # E: numpy.bool_ -reveal_type(AR > c8) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(SEQ > c8) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(AR > c8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(SEQ > c8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] # Float @@ -123,8 +123,8 @@ reveal_type(f8 > c) # E: numpy.bool_ reveal_type(f8 > f) # E: numpy.bool_ reveal_type(f8 > i) # E: numpy.bool_ -reveal_type(f8 > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(f8 > SEQ) # E: Union[numpy.ndarray, 
numpy.bool_] +reveal_type(f8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(f8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(f8 > f8) # E: numpy.bool_ reveal_type(i8 > f8) # E: numpy.bool_ @@ -135,8 +135,8 @@ reveal_type(c > f8) # E: numpy.bool_ reveal_type(f > f8) # E: numpy.bool_ reveal_type(i > f8) # E: numpy.bool_ -reveal_type(AR > f8) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(SEQ > f8) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(AR > f8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(SEQ > f8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(f4 > f8) # E: numpy.bool_ reveal_type(f4 > i8) # E: numpy.bool_ @@ -147,8 +147,8 @@ reveal_type(f4 > c) # E: numpy.bool_ reveal_type(f4 > f) # E: numpy.bool_ reveal_type(f4 > i) # E: numpy.bool_ -reveal_type(f4 > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(f4 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(f4 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(f4 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(f8 > f4) # E: numpy.bool_ reveal_type(i8 > f4) # E: numpy.bool_ @@ -159,8 +159,8 @@ reveal_type(c > f4) # E: numpy.bool_ reveal_type(f > f4) # E: numpy.bool_ reveal_type(i > f4) # E: numpy.bool_ -reveal_type(AR > f4) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(SEQ > f4) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(AR > f4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(SEQ > f4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] # Int @@ -173,8 +173,8 @@ reveal_type(i8 > c) # E: numpy.bool_ reveal_type(i8 > f) # E: numpy.bool_ reveal_type(i8 > i) # E: numpy.bool_ -reveal_type(i8 > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(i8 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(i8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(i8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(u8 > u8) # E: numpy.bool_ reveal_type(u8 > i4) # E: numpy.bool_ @@ -184,8 +184,8 @@ reveal_type(u8 > c) # E: numpy.bool_ reveal_type(u8 > f) # E: numpy.bool_ reveal_type(u8 > i) # E: numpy.bool_ -reveal_type(u8 > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(u8 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(u8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(u8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(i8 > i8) # E: numpy.bool_ reveal_type(u8 > i8) # E: numpy.bool_ @@ -196,8 +196,8 @@ reveal_type(c > i8) # E: numpy.bool_ reveal_type(f > i8) # E: numpy.bool_ reveal_type(i > i8) # E: numpy.bool_ -reveal_type(AR > i8) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(SEQ > i8) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(AR > i8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(SEQ > i8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(u8 > u8) # E: numpy.bool_ reveal_type(i4 > u8) # E: numpy.bool_ @@ -207,16 +207,16 @@ reveal_type(c > u8) # E: numpy.bool_ reveal_type(f > u8) # E: numpy.bool_ reveal_type(i > u8) # E: numpy.bool_ -reveal_type(AR > u8) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(SEQ > u8) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(AR > u8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(SEQ > u8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(i4 > i8) # E: numpy.bool_ reveal_type(i4 > i4) # E: numpy.bool_ reveal_type(i4 > i) # E: numpy.bool_ reveal_type(i4 > b_) # E: numpy.bool_ 
reveal_type(i4 > b) # E: numpy.bool_ -reveal_type(i4 > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(i4 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(i4 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(i4 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(u4 > i8) # E: numpy.bool_ reveal_type(u4 > i4) # E: numpy.bool_ @@ -225,16 +225,16 @@ reveal_type(u4 > i) # E: numpy.bool_ reveal_type(u4 > b_) # E: numpy.bool_ reveal_type(u4 > b) # E: numpy.bool_ -reveal_type(u4 > AR) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(u4 > SEQ) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(u4 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(u4 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(i8 > i4) # E: numpy.bool_ reveal_type(i4 > i4) # E: numpy.bool_ reveal_type(i > i4) # E: numpy.bool_ reveal_type(b_ > i4) # E: numpy.bool_ reveal_type(b > i4) # E: numpy.bool_ -reveal_type(AR > i4) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(SEQ > i4) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(AR > i4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(SEQ > i4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] reveal_type(i8 > u4) # E: numpy.bool_ reveal_type(i4 > u4) # E: numpy.bool_ @@ -243,5 +243,5 @@ reveal_type(b_ > u4) # E: numpy.bool_ reveal_type(b > u4) # E: numpy.bool_ reveal_type(i > u4) # E: numpy.bool_ -reveal_type(AR > u4) # E: Union[numpy.ndarray, numpy.bool_] -reveal_type(SEQ > u4) # E: Union[numpy.ndarray, numpy.bool_] +reveal_type(AR > u4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(SEQ > u4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] From bfceca93325ad801a6dcd4b9fdd5432825618a8f Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 5 Dec 2020 12:13:40 -0600 Subject: [PATCH 0132/1270] DOC: Add information about leak checking and valgrind This is not meant to be a fool-proof guide. It isn't and I do not want to expect people to use it... But I realize that I use these things semi-regulary (even if I never bother to remove all wrinkles) and it is good to have the information somewhere so that everyone knows where to look. --- .../dev/development_advanced_debugging.rst | 186 ++++++++++++++++++ doc/source/dev/development_environment.rst | 2 + doc/source/dev/index.rst | 2 + 3 files changed, 190 insertions(+) create mode 100644 doc/source/dev/development_advanced_debugging.rst diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst new file mode 100644 index 000000000000..cf4805a8b6d2 --- /dev/null +++ b/doc/source/dev/development_advanced_debugging.rst @@ -0,0 +1,186 @@ +======================== +Advanced debugging tools +======================== + +If you reached here, you want to dive into, or use, more advanced tooling. +This is usually not necessary for first time contributers and most +day-to-day developement. +These are used more rarely, for example close to a new NumPy release, +or when a large or particular complex change was made. + +Since not all of these tools are used on a regular bases and only available +on some systems, please expect differences, issues, or quirks; +we will be happy to help if you get stuck and appreciate any improvements +or suggestions to these workflows. + + +Finding C errors with additional tooling +######################################## + +Most development will not require more than a typical debugging toolchain +as shown in :ref:`Debugging `. 
+But for example memory leaks can be particularly subtle or difficult to +narrow down. + +We do not expect any of these tools to be run by most contributors. +However, you can ensure that we can track down such issues more easily easier: + +* Tests should cover all code paths, incluing error paths. +* Try to write short and simple tests. If you have a very complicated test + consider creating an additional simpler test as well. + This can be helpful, because often it is only easy to find which test + triggers an issue and not which line of the test. +* Never use ``np.empty`` if data is read/used. ``valgrind`` will notice this + and report an error. When you do not care about values, you can generate + random values instead. + +This will help us catch any oversights before your change is released +and means you do not have to worry about making reference counting errors, +which can be intimidating. + + +Python debug build for finding memory leaks +=========================================== + +Debug builds of Python are easily available for example on ``debian`` systems, +and can be used on all platforms. +Running a test or terminal is usually as easy as:: + + python3.8d runtests.py --ipython + +and were already mentioned in :ref:`Debugging `. + +A Python debug build will help: + +- Find bugs which may otherwise cause random behaviour. + One example is when an object is still used after it has been deleted. + +- Python debug builds allows to check correct reference counting. + This works using the additional commands:: + + sys.gettotalrefcount() + sys.getallocatedblocks() + + +Use together with ``pytest`` +---------------------------- + +Running the test suit only with a debug python build will not find many +errors on its own. An additional advantage of a debug build of Python is that +it allows detecting memory leaks. + +A tool to make this easier is `pytest-leaks `_, +which can be installed using ``pip``. +Unfortunately, ``pytest`` itself may leak memory, but good results can usually +(currently) be achieved by removing:: + + @pytest.fixture(autouse=True) + def add_np(doctest_namespace): + doctest_namespace['np'] = numpy + + @pytest.fixture(autouse=True) + def env_setup(monkeypatch): + monkeypatch.setenv('PYTHONHASHSEED', '0') + +from ``numpy/conftest.py`` (This may change with new ``pytest-leaks`` versions +or ``pytest`` updates). + +This allows to run the test suit, or part of it, conveniently:: + + python3.8d runtests.py -t numpy/core/tests/test_multiarray.py -- -R2:3 -s + +where ``-R2:3`` is the ``pytest-leaks`` command (see its documentation), the +``-s`` causes output to print and may be necessary (in some versions captured +output was detected as a leak). + +Note that some tests are known (or even designed) to leak references, we try +to mark them, but expect some false positives. + + +``valgrind`` +============ + +Valgrind is a powerful tool to find certain memory access problems and should +be run on complicated C code. +Basic use of ``valgrind`` usually requires no more than:: + + PYTHONMALLOC=malloc python runtests.py + +where ``PYTHONMALLOC=malloc`` is necessary to avoid false positives from python +itself. +Depending on the system and valgrind version, you may see more false positives. +``valgrind`` supports "suppressions" to ignore some of these, and Python does +have a supression file (and even a compile time option) which may help if you +find it necessary. + +Valgrind helps: + +- Find use of uninitialized variables/memory. 
+ +- Detect memory access violations (reading or writing outside of allocated + memory). + +- Find *many* memory leaks. Note that for *most* leaks the python + debug build approach (and ``pytest-leaks``) is much more sensitive. + The reason is that ``valgrind`` can only detect if memory is definitely + lost. If:: + + dtype = np.dtype(np.int64) + arr.astype(dtype=dtype) + + Has incorrect reference counting for ``dtype``, this is a bug, but valgrind + cannot see it because ``np.dtype(np.int64)`` returns always the same object. + However, not all dtypes are singletons, so this might leak memory for + different input. + In rare cases NumPy uses ``malloc`` and not the Python memory allocators + which are invisible to the Python debug build. + ``malloc`` should normally be avoided, but there are some exceptions + (e.g. the ``PyArray_Dims`` structure is public API and cannot use the + Python allocators.) + +Even though using valgrind for memory leak detection is slow and less sensitive +it can be a convenient: you can run most programs with valgrind without +modification. + +Things to be aware of: + +- Valgrind does not support the numpy ``longdouble``, this means that tests + will fail or be flagged errors that are completely fine. + +- Expect some errors before and after running your NumPy code. + +- Caches can mean that errors (specifically memory leaks) may not be detected + or are only detect at a later, unrelated time. + +A big advantage of valgrind is that it has no requirements aside valgrind +itself (although you probably want to use debug builds for better tracebacks). + + +Use together with ``pytest`` +---------------------------- +You can run the test suit with valgrind which will work may be sufficient +when you are only interested in a few tests:: + + PYTHOMMALLOC=malloc valgrind python runtests.py -t numpy/core/tests/test_multiarray.py -- --continue-on-collection-errors + +Note the ``--continue-on-collection-errors``, which is currently necessary due to +missing ``longdouble`` support causing failures (this will usually not be +necessary if you do not run the full test suit). + +If you wish to detect memory leaks you will also require ``--show-leak-kinds=definite`` +and possibly more valgrind options. Just as for ``pytest-leaks`` certain +tests are known to leak cause errors in valgrind and may or may not be marked +as such. + +We have developed `pytest-valgrind `_ +which: + +- Reports errors for each test individually + +- Narrows down memory leaks to individual tests (by default valgrind + only checks for memory leaks after a program stops, which is very + cumbersome). + +Please refer to its ``README`` for more information (it includes an example +command for NumPy). + diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index cb027c6620f8..013414568a2c 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -207,6 +207,8 @@ repo, use one of:: $ git reset --hard +.. 
_debugging: + Debugging --------- diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 4641a7e2fff6..bcd144d71cc7 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -12,6 +12,7 @@ Contributing to NumPy Git Basics development_environment development_workflow + development_advanced_debugging ../benchmarking NumPy C style guide releasing @@ -302,6 +303,7 @@ The rest of the story Git Basics development_environment development_workflow + development_advanced_debugging reviewer_guidelines ../benchmarking NumPy C style guide From 7216177bd9a99b70d98f7a48b2f58d38abf26647 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 5 Dec 2020 22:13:40 -0600 Subject: [PATCH 0133/1270] Apply suggestions from code review Co-authored-by: Matti Picus --- .../dev/development_advanced_debugging.rst | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index cf4805a8b6d2..4bf9facdfc88 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -46,6 +46,8 @@ Debug builds of Python are easily available for example on ``debian`` systems, and can be used on all platforms. Running a test or terminal is usually as easy as:: + python3.8d runtests.py + # or python3.8d runtests.py --ipython and were already mentioned in :ref:`Debugging `. @@ -65,7 +67,7 @@ A Python debug build will help: Use together with ``pytest`` ---------------------------- -Running the test suit only with a debug python build will not find many +Running the test suite only with a debug python build will not find many errors on its own. An additional advantage of a debug build of Python is that it allows detecting memory leaks. @@ -85,7 +87,7 @@ Unfortunately, ``pytest`` itself may leak memory, but good results can usually from ``numpy/conftest.py`` (This may change with new ``pytest-leaks`` versions or ``pytest`` updates). -This allows to run the test suit, or part of it, conveniently:: +This allows to run the test suite, or part of it, conveniently:: python3.8d runtests.py -t numpy/core/tests/test_multiarray.py -- -R2:3 -s @@ -129,7 +131,7 @@ Valgrind helps: arr.astype(dtype=dtype) Has incorrect reference counting for ``dtype``, this is a bug, but valgrind - cannot see it because ``np.dtype(np.int64)`` returns always the same object. + cannot see it because ``np.dtype(np.int64)`` always returns the same object. However, not all dtypes are singletons, so this might leak memory for different input. In rare cases NumPy uses ``malloc`` and not the Python memory allocators @@ -152,20 +154,21 @@ Things to be aware of: - Caches can mean that errors (specifically memory leaks) may not be detected or are only detect at a later, unrelated time. -A big advantage of valgrind is that it has no requirements aside valgrind +A big advantage of valgrind is that it has no requirements aside from valgrind itself (although you probably want to use debug builds for better tracebacks). 
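As background for the reference-count checks discussed in this file, here is a
rough sketch of the before/after comparison a debug build enables;
``sys.gettotalrefcount`` exists only on CPython debug builds, so this is
illustrative rather than something to ship in a test::

    import sys
    import numpy as np

    def total_refs():
        # Only present on a debug interpreter (e.g. python3.8d).
        return sys.gettotalrefcount()

    before = total_refs()
    for _ in range(1000):
        np.dtype("S8")       # any operation under suspicion
    after = total_refs()
    print(after - before)    # a count that grows with the loop hints at a leak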
Use together with ``pytest`` ---------------------------- -You can run the test suit with valgrind which will work may be sufficient +You can run the test suite with valgrind which may be sufficient when you are only interested in a few tests:: - PYTHOMMALLOC=malloc valgrind python runtests.py -t numpy/core/tests/test_multiarray.py -- --continue-on-collection-errors + PYTHOMMALLOC=malloc valgrind python runtests.py \ + -t numpy/core/tests/test_multiarray.py -- --continue-on-collection-errors Note the ``--continue-on-collection-errors``, which is currently necessary due to missing ``longdouble`` support causing failures (this will usually not be -necessary if you do not run the full test suit). +necessary if you do not run the full test suite). If you wish to detect memory leaks you will also require ``--show-leak-kinds=definite`` and possibly more valgrind options. Just as for ``pytest-leaks`` certain @@ -183,4 +186,3 @@ which: Please refer to its ``README`` for more information (it includes an example command for NumPy). - From 76bbd8846377d4eeed8ec8deabea028b1e3e1e23 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 5 Dec 2020 22:26:29 -0600 Subject: [PATCH 0134/1270] DOC: Move links to the end of the sections in new debugging text --- doc/source/dev/development_advanced_debugging.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 4bf9facdfc88..fa4014fdbc45 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -71,8 +71,7 @@ Running the test suite only with a debug python build will not find many errors on its own. An additional advantage of a debug build of Python is that it allows detecting memory leaks. -A tool to make this easier is `pytest-leaks `_, -which can be installed using ``pip``. +A tool to make this easier is `pytest-leaks`_, which can be installed using ``pip``. Unfortunately, ``pytest`` itself may leak memory, but good results can usually (currently) be achieved by removing:: @@ -98,6 +97,7 @@ output was detected as a leak). Note that some tests are known (or even designed) to leak references, we try to mark them, but expect some false positives. +.. _pytest-leaks: https://github.com/abalkin/pytest-leaks ``valgrind`` ============ @@ -175,8 +175,7 @@ and possibly more valgrind options. Just as for ``pytest-leaks`` certain tests are known to leak cause errors in valgrind and may or may not be marked as such. -We have developed `pytest-valgrind `_ -which: +We have developed `pytest-valgrind`_ which: - Reports errors for each test individually @@ -186,3 +185,6 @@ which: Please refer to its ``README`` for more information (it includes an example command for NumPy). + +.. 
_pytest-valgrind: https://github.com/seberg/pytest-valgrind + From 709ee27ddea73784f7710c102b72e89e531465b8 Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Sun, 6 Dec 2020 10:53:52 -0800 Subject: [PATCH 0135/1270] Clarify savez documentation around naming of arrays in output file --- numpy/lib/npyio.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index af8e28e42051..efebb5fb7098 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -539,10 +539,11 @@ def _savez_dispatcher(file, *args, **kwds): def savez(file, *args, **kwds): """Save several arrays into a single file in uncompressed ``.npz`` format. - If arguments are passed in with no keywords, the corresponding variable - names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword - arguments are given, the corresponding variable names, in the ``.npz`` - file will match the keyword names. + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. Parameters ---------- @@ -552,13 +553,12 @@ def savez(file, *args, **kwds): ``.npz`` extension will be appended to the filename if it is not already there. args : Arguments, optional - Arrays to save to the file. Since it is not possible for Python to - know the names of the arrays outside `savez`, the arrays will be saved - with names "arr_0", "arr_1", and so on. These arguments can be any - expression. + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. kwds : Keyword arguments, optional - Arrays to save to the file. Arrays will be saved in the file with the - keyword names. + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. Returns ------- @@ -613,6 +613,7 @@ def savez(file, *args, **kwds): ['x', 'y'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + """ _savez(file, args, kwds, False) @@ -627,9 +628,11 @@ def savez_compressed(file, *args, **kwds): """ Save several arrays into a single file in compressed ``.npz`` format. - If keyword arguments are given, then filenames are taken from the keywords. - If arguments are passed in with no keywords, then stored filenames are - arr_0, arr_1, etc. + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. Parameters ---------- @@ -639,13 +642,12 @@ def savez_compressed(file, *args, **kwds): ``.npz`` extension will be appended to the filename if it is not already there. args : Arguments, optional - Arrays to save to the file. Since it is not possible for Python to - know the names of the arrays outside `savez`, the arrays will be saved - with names "arr_0", "arr_1", and so on. These arguments can be any - expression. + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. kwds : Keyword arguments, optional - Arrays to save to the file. Arrays will be saved in the file with the - keyword names. + Arrays to save to the file. 
Each array will be saved to the + output file with its corresponding keyword name. Returns ------- From 5d53d6f88c07670dd8b41888948671d352259475 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Sun, 6 Dec 2020 18:31:27 -0800 Subject: [PATCH 0136/1270] [DOC]: Wrong length for underline in docstring. This can trip-up doc parsers like Numpydoc --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index bff023d7ffb6..7ffa367751aa 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -4023,7 +4023,7 @@ cdef class Generator: The drawn samples, of shape ``(size, k)``. Raises - ------- + ------ ValueError If any value in ``alpha`` is less than or equal to zero From 45becc44946d42bf2c32028f1691a42d0b03941e Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 7 Dec 2020 07:03:06 +0000 Subject: [PATCH 0137/1270] MAINT: Bump hypothesis from 5.41.4 to 5.41.5 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 5.41.4 to 5.41.5. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-5.41.4...hypothesis-python-5.41.5) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 9c94abf91a05..1b7b696d378e 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel setuptools<49.2.0 -hypothesis==5.41.4 +hypothesis==5.41.5 pytest==6.0.2 pytz==2020.4 pytest-cov==2.10.1 From 1c92910a407c097b71bc594ac68d992885304469 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 7 Dec 2020 15:35:44 +0800 Subject: [PATCH 0138/1270] Optimize the performance of countnonzero by using universal intrinsics --- numpy/core/src/common/simd/sse/arithmetic.h | 8 +++ numpy/core/src/common/simd/sse/conversion.h | 13 +++++ numpy/core/src/multiarray/item_selection.c | 55 ++++++++++++++++++++- 3 files changed, 74 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index 8440cc52e5c8..4e5d2583c9fe 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -149,6 +149,14 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b) #endif // !NPY_HAVE_FMA3 // Horizontal add: Calculates the sum of all vector elements. 
+ +NPY_FINLINE int npyv_sum_u32(__m128i a) +{ + __m128i t = _mm_add_epi32(a, _mm_srli_si128(a, 8)); + t = _mm_add_epi32(t, _mm_srli_si128(t, 4)); + return (unsigned)_mm_cvtsi128_si32(t); +} + NPY_FINLINE float npyv_sum_f32(__m128 a) { #ifdef NPY_HAVE_SSE3 diff --git a/numpy/core/src/common/simd/sse/conversion.h b/numpy/core/src/common/simd/sse/conversion.h index ea9660d13a30..23d1e1bc6b4d 100644 --- a/numpy/core/src/common/simd/sse/conversion.h +++ b/numpy/core/src/common/simd/sse/conversion.h @@ -29,4 +29,17 @@ #define npyv_cvt_b32_f32(A) _mm_castps_si128(A) #define npyv_cvt_b64_f64(A) _mm_castpd_si128(A) +// expand +NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { + const __m128i z = _mm_setzero_si128(); + *low = _mm_unpacklo_epi8(data, z); + *high = _mm_unpackhi_epi8(data, z); +} + +NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { + const __m128i z = _mm_setzero_si128(); + *low = _mm_unpacklo_epi16(data, z); + *high = _mm_unpackhi_epi16(data, z); +} + #endif // _NPY_SIMD_SSE_CVT_H diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 8052e24e424b..55153977d3a8 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -4,6 +4,11 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + +#ifndef MIN +#define MIN(a, b) ((a < b) ? (a) : (b)) +#endif + #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -27,7 +32,7 @@ #include "alloc.h" #include "arraytypes.h" #include "array_coercion.h" - +#include "simd/simd.h" static NPY_GCC_OPT_3 NPY_INLINE int npy_fasttake_impl( @@ -2124,6 +2129,46 @@ count_nonzero_bytes_384(const npy_uint64 * w) return r; } +#if NPY_SIMD +static NPY_INLINE npy_intp +count_nonzero_bytes(const char *d, npy_uintp unrollx) +{ + int count = 0; + int i = 0; + + const int vstep = npyv_nlanes_u8; + const npyv_u8 vone = npyv_setall_u8(1); + const npyv_u8 vzero = npyv_setall_u8(0); + npyv_u8 vt; + npyv_u8 vsum32 = npyv_zero_u32(); + while (i < unrollx) + { + npyv_u16 vsum16 = npyv_zero_u16(); + int j = i; + while (j < MIN(unrollx, i + 65535 * npyv_nlanes_u16)) + { + int k = j; + npyv_u8 vsum8 = npyv_zero_u8(); + for (; k < MIN(unrollx, j + 255 * vstep); k += vstep) + { + vt = npyv_cmpeq_u8(npyv_load_u8(d + k), vzero); + vt = npyv_and_u8(vt, vone); + vsum8 = npyv_add_u8(vsum8, vt); + } + npyv_u16 part1, part2; + npyv_expand_u8_u16(vsum8, &part1, &part2); + vsum16 = npyv_add_u16(vsum16, npyv_add_u16(part1, part2)); + j = k; + } + npyv_u32 part1, part2; + npyv_expand_u16_u32(vsum16, &part1, &part2); + vsum32 = npyv_add_u32(vsum32, npyv_add_u32(part1, part2)); + i = j; + } + count = i - npyv_sum_u32(vsum32); + return count; +} +#endif /* * Counts the number of True values in a raw boolean array. This * is a low-overhead function which does no heap allocations. 
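For orientation, the routine being vectorised here ultimately backs
``np.count_nonzero`` for boolean input (per the commit message), so its
behaviour is easy to cross-check from Python; a small sketch with arbitrary
data::

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.random(1_000_000) > 0.5   # large contiguous boolean array

    # Both expressions count the True entries; the SIMD loop only changes speed.
    assert np.count_nonzero(a) == int(a.sum())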
@@ -2133,6 +2178,7 @@ count_nonzero_bytes_384(const npy_uint64 * w) NPY_NO_EXPORT npy_intp count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const *astrides) { + int idim; npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS]; npy_intp i, coord[NPY_MAXDIMS]; @@ -2154,13 +2200,17 @@ count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const } NPY_BEGIN_THREADS_THRESHOLDED(shape[0]); - /* Special case for contiguous inner loop */ if (strides[0] == 1) { NPY_RAW_ITER_START(idim, ndim, coord, shape) { /* Process the innermost dimension */ const char *d = data; const char *e = data + shape[0]; +#if NPY_SIMD + npy_uintp stride = shape[0] & -npyv_nlanes_u8; + count += count_nonzero_bytes(d, stride); + d += stride; +#else if (NPY_CPU_HAVE_UNALIGNED_ACCESS || npy_is_aligned(d, sizeof(npy_uint64))) { npy_uintp stride = 6 * sizeof(npy_uint64); @@ -2168,6 +2218,7 @@ count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const count += count_nonzero_bytes_384((const npy_uint64 *)d); } } +#endif for (; d < e; ++d) { count += (*d != 0); } From 4337b25467f41b5ae4c51cf8c7f48356b626a46b Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 7 Dec 2020 16:31:10 +0800 Subject: [PATCH 0139/1270] test avx256 result --- numpy/core/src/common/simd/avx2/arithmetic.h | 9 +++++++++ numpy/core/src/common/simd/avx2/conversion.h | 13 +++++++++++++ numpy/core/src/common/simd/simd.h | 6 ++++++ 3 files changed, 28 insertions(+) diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index 3a6dc953534e..a0a03b3c9e85 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -118,6 +118,15 @@ #endif // !NPY_HAVE_FMA3 // Horizontal add: Calculates the sum of all vector elements. 
+NPY_FINLINE int npyv_sum_u32(__m256i a) +{ + __m256i s0 = _mm256_hadd_epi32(a, a); + s0 = _mm256_hadd_epi32(s0, s0); + __m128i s1 = _mm256_extracti128_si256(s0, 1);; + s1 = _mm_add_epi32(_mm256_castsi256_si128(s0), s1); + return _mm_cvtsi128_si32(s1); +} + NPY_FINLINE float npyv_sum_f32(__m256 a) { __m256 sum_halves = _mm256_hadd_ps(a, a); diff --git a/numpy/core/src/common/simd/avx2/conversion.h b/numpy/core/src/common/simd/avx2/conversion.h index 9fd86016d3d9..dd5594d481e3 100644 --- a/numpy/core/src/common/simd/avx2/conversion.h +++ b/numpy/core/src/common/simd/avx2/conversion.h @@ -29,4 +29,17 @@ #define npyv_cvt_b32_f32(BL) _mm256_castps_si256(BL) #define npyv_cvt_b64_f64(BL) _mm256_castpd_si256(BL) +// expand +NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { + const __m256i z = _mm256_setzero_si256(); + *low = _mm256_unpacklo_epi8(data, z); + *high = _mm256_unpackhi_epi8(data, z); +} + +NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { + const __m256i z = _mm256_setzero_si256(); + *low = _mm256_unpacklo_epi16(data, z); + *high = _mm256_unpackhi_epi16(data, z); +} + #endif // _NPY_SIMD_AVX2_CVT_H diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 8804223c9fef..1bab31837aeb 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -15,6 +15,12 @@ extern "C" { #endif +#ifndef NPY_HAVE_AVX2 + #include + #define NPY_HAVE_AVX + #define NPY_HAVE_AVX2 +#endif + // lane type by intrin suffix typedef npy_uint8 npyv_lanetype_u8; typedef npy_int8 npyv_lanetype_s8; From d2f477c33f7473c59ebcff4bf57dd4a5efc2742b Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 7 Dec 2020 20:04:41 +0800 Subject: [PATCH 0140/1270] add AVX512/Neon/VSX intrinsics --- numpy/core/src/common/simd/avx512/arithmetic.h | 11 ++++++++++- numpy/core/src/common/simd/avx512/conversion.h | 13 +++++++++++++ numpy/core/src/common/simd/neon/arithmetic.h | 6 ++++++ numpy/core/src/common/simd/neon/conversion.h | 10 ++++++++++ numpy/core/src/common/simd/simd.h | 6 ------ numpy/core/src/common/simd/vsx/arithmetic.h | 7 +++++++ numpy/core/src/common/simd/vsx/conversion.h | 10 ++++++++++ 7 files changed, 56 insertions(+), 7 deletions(-) diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index 7372ca29e40d..e3366a2385ca 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -130,7 +130,7 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) #define npyv_nmulsub_f64 _mm512_fnmsub_pd /*************************** - * Reduce Sum + * Reduce Sum: Calculates the sum of all vector elements. * there are three ways to implement reduce sum for AVX512: * 1- split(256) /add /split(128) /add /hadd /hadd /extract * 2- shuff(cross) /add /shuff(cross) /add /shuff /add /shuff /add /extract @@ -144,6 +144,15 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) * The third one is almost the same as the second one but only works for * intel compiler/GCC 7.1/Clang 4, we still need to support older GCC. 
***************************/ + +NPY_FINLINE int npyv_sum_u32(npyv_u32 a) +{ + __m256i half = _mm256_add_epi32(_mm512_castsi512_si256(a), _mm512_extracti32x8_epi32(a, 1)); + __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); + quarter = _mm_hadd_epi32(quarter, quarter); + return _mm_cvtsi128_si32(_mm_hadd_epi32(quarter, quarter)); +} + #ifdef NPY_HAVE_AVX512F_REDUCE #define npyv_sum_f32 _mm512_reduce_add_ps #define npyv_sum_f64 _mm512_reduce_add_pd diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index 0f7e27de3a0a..80ff970b0c4a 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -51,4 +51,17 @@ #define npyv_cvt_b32_f32(A) npyv_cvt_b32_u32(_mm512_castps_si512(A)) #define npyv_cvt_b64_f64(A) npyv_cvt_b64_u64(_mm512_castpd_si512(A)) +// expand +NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { + const __m512i z = _mm512_setzero_si512(); + *low = _mm512_unpacklo_epi8(data, z); + *high = _mm512_unpackhi_epi8(data, z); +} + +NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { + const __m512i z = _mm512_setzero_si512(); + *low = _mm512_unpacklo_epi16(data, z); + *high = _mm512_unpackhi_epi16(data, z); +} + #endif // _NPY_SIMD_AVX512_CVT_H diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h index 87e00d5d14e4..88709be9965b 100644 --- a/numpy/core/src/common/simd/neon/arithmetic.h +++ b/numpy/core/src/common/simd/neon/arithmetic.h @@ -132,6 +132,12 @@ #endif // NPY_SIMD_F64 // Horizontal add: Calculates the sum of all vector elements. +NPY_FINLINE int npyv_sum_u32(npyv_u32 a) +{ + uint32x2_t a0 = vpadd_u32(vget_low_u32(a), vget_high_u32(a)); + return (unsigned)vget_lane_u32(vpadd_u32(a0, vget_high_u32(a)),0); +} + #if NPY_SIMD_F64 #define npyv_sum_f32 vaddvq_f32 #define npyv_sum_f64 vaddvq_f64 diff --git a/numpy/core/src/common/simd/neon/conversion.h b/numpy/core/src/common/simd/neon/conversion.h index b286931d1404..3d9e47f603b8 100644 --- a/numpy/core/src/common/simd/neon/conversion.h +++ b/numpy/core/src/common/simd/neon/conversion.h @@ -29,4 +29,14 @@ #define npyv_cvt_b32_f32(BL) vreinterpretq_u32_f32(BL) #define npyv_cvt_b64_f64(BL) vreinterpretq_u64_f64(BL) +NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { + *low = vmovl_u8(vget_low_u8(data)); + *high = vmovl_u8(vget_high_u8(data)); +} + +NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { + *low = vmovl_u16(vget_low_u16(data)); + *high = vmovl_u16(vget_high_u16(data)); +} + #endif // _NPY_SIMD_NEON_CVT_H diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 1bab31837aeb..8804223c9fef 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -15,12 +15,6 @@ extern "C" { #endif -#ifndef NPY_HAVE_AVX2 - #include - #define NPY_HAVE_AVX - #define NPY_HAVE_AVX2 -#endif - // lane type by intrin suffix typedef npy_uint8 npyv_lanetype_u8; typedef npy_int8 npyv_lanetype_s8; diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h index 2f6762e636b4..ca2f8f1a7a95 100644 --- a/numpy/core/src/common/simd/vsx/arithmetic.h +++ b/numpy/core/src/common/simd/vsx/arithmetic.h @@ -117,6 +117,13 @@ #define npyv_nmulsub_f64 vec_nmadd // Horizontal add: Calculates the sum of all vector elements. 
+ +NPY_FINLINE int npyv_sum_u32(npyv_u32 a) +{ + const npyv_u32 rs = vec_add(a, vec_sld(a, a, 8)); + return vec_extract(vec_add(rs, vec_sld(rs, rs, 4)), 0); +} + NPY_FINLINE float npyv_sum_f32(npyv_f32 a) { npyv_f32 sum = vec_add(a, npyv_combineh_f32(a, a)); diff --git a/numpy/core/src/common/simd/vsx/conversion.h b/numpy/core/src/common/simd/vsx/conversion.h index 6ed135990ccc..9e35ece924fa 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -29,4 +29,14 @@ #define npyv_cvt_b32_f32(A) ((npyv_b32) A) #define npyv_cvt_b64_f64(A) ((npyv_b64) A) +NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { + *low = vec_unpackhu(data); + *high = vec_unpacklu(data); +} + +NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { + *low = vec_unpackhu(data); + *high = vec_unpacklu(data); +} + #endif // _NPY_SIMD_VSX_CVT_H From 18f0de9fb5dccc677bce390a9158db15503e8465 Mon Sep 17 00:00:00 2001 From: Christos Efstathiou Date: Mon, 7 Dec 2020 14:30:41 +0000 Subject: [PATCH 0141/1270] Removed empty string from nag compiler's flags --- numpy/distutils/fcompiler/nag.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py index 908e724e6536..7df8ffe2ce30 100644 --- a/numpy/distutils/fcompiler/nag.py +++ b/numpy/distutils/fcompiler/nag.py @@ -19,7 +19,7 @@ def get_flags_linker_so(self): def get_flags_opt(self): return ['-O4'] def get_flags_arch(self): - return [''] + return [] class NAGFCompiler(BaseNAGFCompiler): From 134bfbb08c5e0d75fa1ac6c66cf1c8402c52caf5 Mon Sep 17 00:00:00 2001 From: Christos Efstathiou Date: Mon, 7 Dec 2020 14:38:13 +0000 Subject: [PATCH 0142/1270] BUG: Removed empty String from Nag Compiler's Flags --- numpy/distutils/fcompiler/nag.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py index 908e724e6536..7df8ffe2ce30 100644 --- a/numpy/distutils/fcompiler/nag.py +++ b/numpy/distutils/fcompiler/nag.py @@ -19,7 +19,7 @@ def get_flags_linker_so(self): def get_flags_opt(self): return ['-O4'] def get_flags_arch(self): - return [''] + return [] class NAGFCompiler(BaseNAGFCompiler): From 905075211bb0bae80cb7f837419180f25a253dd5 Mon Sep 17 00:00:00 2001 From: a-elhag <48454648+a-elhag@users.noreply.github.com> Date: Mon, 7 Dec 2020 12:15:17 -0500 Subject: [PATCH 0143/1270] BUG: numpy.putmask not respecting writeable flag (#17884) * BUG: numpy.putmask not respecting writeable flag * Made the code PEP8 compatible. Fixes gh-17871. 
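The user-visible contract of the fix, sketched with a small arbitrary array
(the error text comes from ``PyArray_FailUnlessWriteable`` and may differ
slightly between versions)::

    import numpy as np

    a = np.arange(5)
    a.flags.writeable = False

    try:
        np.putmask(a, a >= 2, 3)
    except ValueError as exc:
        print(exc)           # e.g. "putmask: output array is read-only"
    # Before the fix, the call silently wrote into the read-only array.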
--- numpy/core/src/multiarray/item_selection.c | 4 ++++ numpy/core/tests/test_multiarray.py | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 8052e24e424b..b279ffc2f1ba 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -576,6 +576,10 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) return NULL; } + if (PyArray_FailUnlessWriteable(self, "putmask: output array") < 0) { + return NULL; + } + mask = (PyArrayObject *)PyArray_FROM_OTF(mask0, NPY_BOOL, NPY_ARRAY_CARRAY | NPY_ARRAY_FORCECAST); if (mask == NULL) { diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 2df92ee652b6..048b1688f2a7 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -4643,6 +4643,13 @@ def test_overlaps(self): np.putmask(x[1:4], x[:3], [True, False, True]) assert_equal(x, np.array([True, True, True, True])) + def test_writeable(self): + a = np.arange(5) + a.flags.writeable = False + + with pytest.raises(ValueError): + np.putmask(a, a >= 2, 3) + class TestTake: def tst_basic(self, x): From 9c3ac2e3bbe47624ea240fcd63fc8b921393bf99 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 4 Dec 2020 16:24:00 -0600 Subject: [PATCH 0144/1270] NEP: Update NEP 42 and 43 according to the current implementation We modified the `resolve_descriptors` function to not use `context` which is a bit heavy weight, and that also allows removing of DTypes from the context. This means it now passes `self, DTypes` (where self is the unbound ArrayMethod). Also add a note on the -1 return value of `resolve_descriptors`. Apply suggestions from code review Co-authored-by: Ross Barnowski Fixup note on CastingImpl being mainly a NEP 42 name Fixup sphinx warnings --- doc/neps/nep-0042-new-dtypes.rst | 40 ++++++++++++++--------- doc/neps/nep-0043-extensible-ufuncs.rst | 43 +++++++++++++++++-------- 2 files changed, 53 insertions(+), 30 deletions(-) diff --git a/doc/neps/nep-0042-new-dtypes.rst b/doc/neps/nep-0042-new-dtypes.rst index d1ddb7101706..1a77b5718045 100644 --- a/doc/neps/nep-0042-new-dtypes.rst +++ b/doc/neps/nep-0042-new-dtypes.rst @@ -203,7 +203,7 @@ Other elements of the casting implementation is the ``CastingImpl``: # Object describing and performing the cast casting : casting - def resolve_descriptors(self, Tuple[DType] : input) -> (casting, Tuple[DType]): + def resolve_descriptors(self, Tuple[DTypeMeta], Tuple[DType|None] : input) -> (casting, Tuple[DType]): raise NotImplementedError # initially private: @@ -213,6 +213,8 @@ Other elements of the casting implementation is the ``CastingImpl``: which describes the casting from one DType to another. In :ref:`NEP 43 ` this ``CastingImpl`` object is used unchanged to support universal functions. +Note that the name ``CastingImpl`` here will be generically called +``ArrayMethod`` to accomodate both casting and universal functions. 
****************************************************************************** @@ -525,7 +527,8 @@ This means the implementation will work like this:: # Find what dtype1 is cast to when cast to the common DType # by using the CastingImpl as described below: castingimpl = get_castingimpl(type(dtype1), common) - safety, (_, dtype1) = castingimpl.resolve_descriptors((dtype1, None)) + safety, (_, dtype1) = castingimpl.resolve_descriptors( + (common, common), (dtype1, None)) assert safety == "safe" # promotion should normally be a safe cast if type(dtype2) is not common: @@ -652,7 +655,7 @@ and implements the following methods and attributes: * To report safeness, - ``resolve_descriptors(self, Tuple[DType] : input) -> casting, Tuple[DType]``. + ``resolve_descriptors(self, Tuple[DTypeMeta], Tuple[DType|None] : input) -> casting, Tuple[DType]``. The ``casting`` output reports safeness (safe, unsafe, or same-kind), and the tuple is used for more multistep casting, as in the example below. @@ -691,7 +694,7 @@ The full process is: 1. Call - ``CastingImpl[Int24, String].resolve_descriptors((int24, "S20"))``. + ``CastingImpl[Int24, String].resolve_descriptors((Int24, String), (int24, "S20"))``. This provides the information that ``CastingImpl[Int24, String]`` only implements the cast of ``int24`` to ``"S8"``. @@ -716,7 +719,7 @@ The full process is: to call - ``CastingImpl[Int24, String].resolve_descriptors((int24, None))``. + ``CastingImpl[Int24, String].resolve_descriptors((Int24, String), (int24, None))``. In this case the result of ``(int24, "S8")`` defines the correct cast: @@ -763,8 +766,8 @@ even if the user provides only an ``int16 -> int24`` cast. This proposal does not provide that, but future work might find such casts dynamically, or at least allow ``resolve_descriptors`` to return arbitrary ``dtypes``. -If ``CastingImpl[Int8, Int24].resolve_descriptors((int8, int24))`` returns -``(int16, int24)``, the actual casting process could be extended to include +If ``CastingImpl[Int8, Int24].resolve_descriptors((Int8, Int24), (int8, int24))`` +returns ``(int16, int24)``, the actual casting process could be extended to include the ``int8 -> int16`` cast. This adds a step. @@ -774,7 +777,7 @@ The implementation for casting integers to datetime would generally say that this cast is unsafe (because it is always an unsafe cast). Its ``resolve_descriptors`` function may look like:: - def resolve_descriptors(self, given_dtypes): + def resolve_descriptors(self, DTypes, given_dtypes): from_dtype, to_dtype = given_dtypes from_dtype = from_dtype.ensure_canonical() # ensure not byte-swapped if to_dtype is None: @@ -835,9 +838,10 @@ Its ``resolve_descriptors`` function may look like:: **Notes:** -The proposed ``CastingImpl`` is designed to be identical to the -``PyArrayMethod`` proposed in NEP43 as part of restructuring ufuncs to handle -new DTypes. +``CastingImpl`` is used as a name in this NEP to clarify that it implements +all functionality related to a cast. It is meant to be identical to the +``ArrayMethod`` proposed in NEP 43 as part of restructuring ufuncs to handle +new DTypes. All type definitions are expected to be named ``ArrayMethod``. The way dispatching works for ``CastingImpl`` is planned to be limited initially and fully opaque. In the future, it may or may not be moved into a @@ -1297,8 +1301,9 @@ The external API for ``CastingImpl`` will be limited initially to defining: instance if the second string is shorter. If neither type is parametric the ``resolve_descriptors`` must use it. 
-* ``resolve_descriptors(dtypes_in[2], dtypes_out[2], casting_out) -> int {0, - -1}`` The out +* ``resolve_descriptors(PyArrayMethodObject *self, PyArray_DTypeMeta *DTypes[2], + PyArray_Descr *dtypes_in[2], PyArray_Descr *dtypes_out[2], NPY_CASTING *casting_out) + -> int {0, -1}`` The out dtypes must be set correctly to dtypes which the strided loop (transfer function) can handle. Initially the result must have instances of the same DType class as the ``CastingImpl`` is defined for. The @@ -1307,9 +1312,12 @@ The external API for ``CastingImpl`` will be limited initially to defining: A new, additional flag, ``_NPY_CAST_IS_VIEW``, can be set to indicate that no cast is necessary and a view is sufficient to perform the cast. The cast should return - ``-1`` when a custom error is set and ``NPY_NO_CASTING`` to indicate - that a generic casting error should be set (this is in most cases - preferable). + ``-1`` when an error occurred. If a cast is not possible (but no error + occurred), a ``-1`` result should be returned *without* an error set. + *This point is under consideration, we may use ``-1`` to indicate + a general error, and use a different return value for an impossible cast.* + This means that it is *not* possible to inform the user about why a cast is + impossible. * ``strided_loop(char **args, npy_intp *dimensions, npy_intp *strides, ...) -> int {0, -1}`` (signature will be fully defined in :ref:`NEP 43 `) diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst index 96d4794f3774..7dbad289b476 100644 --- a/doc/neps/nep-0043-extensible-ufuncs.rst +++ b/doc/neps/nep-0043-extensible-ufuncs.rst @@ -235,20 +235,30 @@ to define string equality, will be added to a ufunc. class StringEquality(BoundArrayMethod): nin = 1 nout = 1 + # DTypes are stored on the BoundArrayMethod and not on the internal + # ArrayMethod, to reference cyles. DTypes = (String, String, Bool) - def resolve_descriptors(context, given_descrs): + def resolve_descriptors(self: ArrayMethod, DTypes, given_descrs): """The strided loop supports all input string dtype instances and always returns a boolean. (String is always native byte order.) Defining this function is not necessary, since NumPy can provide it by default. + + The `self` argument here refers to the unbound array method, so + that DTypes are passed in explicitly. """ - assert isinstance(given_descrs[0], context.DTypes[0]) - assert isinstance(given_descrs[1], context.DTypes[1]) + assert isinstance(given_descrs[0], DTypes[0]) + assert isinstance(given_descrs[1], DTypes[1]) + assert given_descrs[2] is None or isinstance(given_descrs[2], DTypes[2]) + out_descr = given_descrs[2] # preserve input (e.g. 
metadata) + if given_descrs[2] is None: + out_descr = DTypes[2]() + # The operation is always "safe" casting (most ufuncs are) - return (given_descrs[0], given_descrs[1], context.DTypes[2]()), "safe" + return (given_descrs[0], given_descrs[1], out_descr), "safe" def strided_loop(context, dimensions, data, strides, innerloop_data): """The 1-D strided loop, similar to those used in current ufuncs""" @@ -422,9 +432,8 @@ a new ``ArrayMethod`` object: # More general flags: flags: int - @staticmethod - def resolve_descriptors( - Context: context, Tuple[DType]: given_descrs)-> Casting, Tuple[DType]: + def resolve_descriptors(self, + Tuple[DTypeMeta], Tuple[DType|None]: given_descrs) -> Casting, Tuple[DType]: """Returns the safety of the operation (casting safety) and the """ # A default implementation can be provided for non-parametric @@ -468,8 +477,6 @@ With ``Context`` providing mostly static information about the function call: int : nin = 1 # The number of output arguments: int : nout = 1 - # The DTypes this Method operates on/is defined for: - Tuple[DTypeMeta] : dtypes # The actual dtypes instances the inner-loop operates on: Tuple[DType] : descriptors @@ -616,7 +623,8 @@ definitions (see also :ref:`NEP 42 ` ``CastingImpl``): NPY_CASTING resolve_descriptors( - PyArrayMethod_Context *context, + PyArrayMethodObject *self, + PyArray_DTypeMeta *dtypes, PyArray_Descr *given_dtypes[nin+nout], PyArray_Descr *loop_dtypes[nin+nout]); @@ -652,20 +660,20 @@ definitions (see also :ref:`NEP 42 ` ``CastingImpl``): * The optional ``get_loop`` function will not be public initially, to avoid finalizing the API which requires design choices also with casting: - .. code-block:: + .. code-block:: C innerloop * get_loop( PyArrayMethod_Context *context, - /* (move_references is currently used internally for casting) */ int aligned, int move_references, npy_intp *strides, PyArray_StridedUnaryOp **out_loop, NpyAuxData **innerloop_data, NPY_ARRAYMETHOD_FLAGS *flags); - The ``NPY_ARRAYMETHOD_FLAGS`` can indicate whether the Python API is required - and floating point errors must be checked. + ``NPY_ARRAYMETHOD_FLAGS`` can indicate whether the Python API is required + and floating point errors must be checked. ``move_references`` is used + internally for NumPy casting at this time. * The inner-loop function:: @@ -739,6 +747,13 @@ casting can be prepared. While the returned casting-safety (``NPY_CASTING``) will almost always be "safe" for universal functions, including it has two big advantages: +* ``-1`` indicates that an error occurred. If a Python error is set, it will + be raised. If no Python error is set this will be considered an "impossible" + cast and a custom error will be set. (This distinction is important for the + ``np.can_cast()`` function, which should raise the first one and return + ``False`` in the second case, it is not noteworthy for typical ufuncs). + *This point is under consideration, we may use ``-1`` to indicate + a general error, and use a different return value for an impossible cast.* * Returning the casting safety is central to NEP 42 for casting and allows the unmodified use of ``ArrayMethod`` there. * There may be a future desire to implement fast but unsafe implementations. 
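The casting-safety categories that ``resolve_descriptors`` reports in these NEP
sketches are the same ones already exposed to users through ``np.can_cast`` and
``np.result_type``; for orientation (existing public API, nothing added by this
patch)::

    import numpy as np

    np.can_cast(np.int64, np.float64, casting="safe")       # True
    np.can_cast(np.float64, np.int64, casting="same_kind")  # False
    np.can_cast(np.float64, np.int64, casting="unsafe")     # True

    # Common-DType promotion, as in the NEP 42 promotion example:
    np.result_type(np.int16, np.uint16)                     # dtype('int32')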
From 7a075cf7f75c6595ae1aa7d4d3720efa09f6296e Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 7 Dec 2020 13:25:11 -0600 Subject: [PATCH 0145/1270] NEP: Accept NEP 42 -- New and extensible DTypes --- doc/neps/nep-0042-new-dtypes.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/neps/nep-0042-new-dtypes.rst b/doc/neps/nep-0042-new-dtypes.rst index 1a77b5718045..1738bd1abf3e 100644 --- a/doc/neps/nep-0042-new-dtypes.rst +++ b/doc/neps/nep-0042-new-dtypes.rst @@ -8,10 +8,10 @@ NEP 42 — New and extensible DTypes :Author: Sebastian Berg :Author: Ben Nathanson :Author: Marten van Kerkwijk -:Status: Draft +:Status: Accepted :Type: Standard :Created: 2019-07-17 - +:Resolution: https://mail.python.org/pipermail/numpy-discussion/2020-October/081038.html .. note:: From 03181afa5f57c85a8607d59afaaa5370b127fb1a Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Mon, 7 Dec 2020 14:18:31 -0800 Subject: [PATCH 0146/1270] [DOC] Replace {var} in docstrings type annotation with `scalar or None`. Closes #17944 --- numpy/ma/core.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 4fb7d8c28224..d6af22337cc3 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5439,7 +5439,7 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, When the array contains unmasked values at the same extremes of the datatype, the ordering of these values and the masked values is undefined. - fill_value : {var}, optional + fill_value : scalar or None, optional Value used internally for the masked values. If ``fill_value`` is not None, it supersedes ``endwith``. @@ -5497,7 +5497,7 @@ def argmin(self, axis=None, fill_value=None, out=None): axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis - fill_value : {var}, optional + fill_value : scalar or None, optional Value used to fill in the masked values. If None, the output of minimum_fill_value(self._data) is used instead. out : {None, array}, optional @@ -5543,7 +5543,7 @@ def argmax(self, axis=None, fill_value=None, out=None): axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis - fill_value : {var}, optional + fill_value : scalar or None, optional Value used to fill in the masked values. If None, the output of maximum_fill_value(self._data) is used instead. out : {None, array}, optional @@ -5594,7 +5594,7 @@ def sort(self, axis=-1, kind=None, order=None, When the array contains unmasked values sorting at the same extremes of the datatype, the ordering of these values and the masked values is undefined. - fill_value : {var}, optional + fill_value : scalar or None, optional Value used internally for the masked values. If ``fill_value`` is not None, it supersedes ``endwith``. @@ -5665,7 +5665,7 @@ def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. - fill_value : {var}, optional + fill_value : scalar or None, optional Value used to fill in the masked values. If None, use the output of `minimum_fill_value`. keepdims : bool, optional @@ -5799,7 +5799,7 @@ def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. 
- fill_value : {var}, optional + fill_value : scalar or None, optional Value used to fill in the masked values. If None, use the output of maximum_fill_value(). keepdims : bool, optional @@ -5876,7 +5876,7 @@ def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. - fill_value : {var}, optional + fill_value : scalar or None, optional Value used to fill in the masked values. keepdims : bool, optional If this is set to True, the axes which are reduced are left From 9235016e44f306b61fcc7805956654f65183c785 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Mon, 7 Dec 2020 15:39:54 -0800 Subject: [PATCH 0147/1270] TST: Fix crosstalk issues with polynomial str tests. Polynomial printing tests implicitly depended on calling order, causing the test suite to fail when the test ordering was randomized with the pytest-random plugin (gh-17954). Two factors contributed to this: * Improper setting of class-level test config and * Poorly designed test that overrode an inherited class variable. --- numpy/polynomial/tests/test_printing.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index 27a8ab9f20b2..4e9902a69588 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -85,7 +85,7 @@ def test_laguerre_str(self, inp, tgt): class TestStrAscii: @pytest.fixture(scope='class', autouse=True) - def use_unicode(self): + def use_ascii(self): poly.set_default_printstyle('ascii') @pytest.mark.parametrize(('inp', 'tgt'), ( @@ -161,7 +161,10 @@ def test_laguerre_str(self, inp, tgt): class TestLinebreaking: - poly.set_default_printstyle('ascii') + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') def test_single_line_one_less(self): # With 'ascii' style, len(str(p)) is default linewidth - 1 (i.e. 
74) @@ -283,19 +286,19 @@ def test_nonnumeric_object_coefficients(coefs, tgt): class TestFormat: def test_format_unicode(self): - poly.Polynomial._use_unicode = False + poly.set_default_printstyle('ascii') p = poly.Polynomial([1, 2, 0, -1]) assert_equal(format(p, 'unicode'), "1.0 + 2.0·x¹ + 0.0·x² - 1.0·x³") def test_format_ascii(self): - poly.Polynomial._use_unicode = True + poly.set_default_printstyle('unicode') p = poly.Polynomial([1, 2, 0, -1]) assert_equal( format(p, 'ascii'), "1.0 + 2.0 x**1 + 0.0 x**2 - 1.0 x**3" ) def test_empty_formatstr(self): - poly.Polynomial._use_unicode = False + poly.set_default_printstyle('ascii') p = poly.Polynomial([1, 2, 3]) assert_equal(format(p), "1.0 + 2.0 x**1 + 3.0 x**2") assert_equal(f"{p}", "1.0 + 2.0 x**1 + 3.0 x**2") From 1ec750059d5953dcfaca22c9270d024261965078 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 8 Dec 2020 09:18:28 +0800 Subject: [PATCH 0148/1270] fix pointer differ in signedness warning --- numpy/core/src/multiarray/item_selection.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 55153977d3a8..6b89797f7f14 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2131,7 +2131,7 @@ count_nonzero_bytes_384(const npy_uint64 * w) #if NPY_SIMD static NPY_INLINE npy_intp -count_nonzero_bytes(const char *d, npy_uintp unrollx) +count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) { int count = 0; int i = 0; @@ -2208,7 +2208,7 @@ count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const const char *e = data + shape[0]; #if NPY_SIMD npy_uintp stride = shape[0] & -npyv_nlanes_u8; - count += count_nonzero_bytes(d, stride); + count += count_nonzero_bytes((const npy_uint8 *)d, stride); d += stride; #else if (NPY_CPU_HAVE_UNALIGNED_ACCESS || From b62059f6cbbf32db0ee513fbdb0182356ac68845 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 8 Dec 2020 12:16:03 +0800 Subject: [PATCH 0149/1270] fix CI failures --- numpy/core/src/common/simd/vsx/conversion.h | 10 ++++++---- numpy/core/src/multiarray/item_selection.c | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/core/src/common/simd/vsx/conversion.h b/numpy/core/src/common/simd/vsx/conversion.h index 9e35ece924fa..7e9413c9678b 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -30,13 +30,15 @@ #define npyv_cvt_b64_f64(A) ((npyv_b64) A) NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { - *low = vec_unpackhu(data); - *high = vec_unpacklu(data); + npyv_u8 zero = npyv_zero_u8(); + *low = (npyv_u16)vec_mergel(data, zero); + *high = (npyv_u16)vec_mergeh(data, zero); } NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { - *low = vec_unpackhu(data); - *high = vec_unpacklu(data); + npyv_u16 zero = npyv_zero_u16(); + *low = (npyv_u32)vec_mergel(data, zero); + *high = (npyv_u32)vec_mergeh(data, zero); } #endif // _NPY_SIMD_VSX_CVT_H diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 6b89797f7f14..235f4347abe5 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2140,7 +2140,7 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) const npyv_u8 vone = npyv_setall_u8(1); const npyv_u8 vzero = npyv_setall_u8(0); npyv_u8 vt; - npyv_u8 vsum32 = npyv_zero_u32(); 
+ npyv_u32 vsum32 = npyv_zero_u32(); while (i < unrollx) { npyv_u16 vsum16 = npyv_zero_u16(); From 941a82b273e3779af130ffb77f428de3a5bd75ae Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 8 Dec 2020 15:47:51 +0800 Subject: [PATCH 0150/1270] The mid loop should subtract 255 to prevent overriding. --- numpy/core/src/multiarray/item_selection.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 235f4347abe5..0adcaf4628ad 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2135,7 +2135,7 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) { int count = 0; int i = 0; - + const int vstep = npyv_nlanes_u8; const npyv_u8 vone = npyv_setall_u8(1); const npyv_u8 vzero = npyv_setall_u8(0); @@ -2145,7 +2145,7 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) { npyv_u16 vsum16 = npyv_zero_u16(); int j = i; - while (j < MIN(unrollx, i + 65535 * npyv_nlanes_u16)) + while (j < MIN(unrollx, i + 65280 * npyv_nlanes_u16)) { int k = j; npyv_u8 vsum8 = npyv_zero_u8(); From 7829237f823ea1d21952bc5bccb435a0782234da Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 8 Dec 2020 20:33:49 +0800 Subject: [PATCH 0151/1270] change to npyv_b8 and make loop more clear --- numpy/core/src/multiarray/item_selection.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 0adcaf4628ad..9f1b6ddee21e 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -5,10 +5,6 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -#ifndef MIN -#define MIN(a, b) ((a < b) ? (a) : (b)) -#endif - #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -2139,17 +2135,17 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) const int vstep = npyv_nlanes_u8; const npyv_u8 vone = npyv_setall_u8(1); const npyv_u8 vzero = npyv_setall_u8(0); - npyv_u8 vt; + npyv_b8 vt; npyv_u32 vsum32 = npyv_zero_u32(); while (i < unrollx) { npyv_u16 vsum16 = npyv_zero_u16(); int j = i; - while (j < MIN(unrollx, i + 65280 * npyv_nlanes_u16)) + while (j < PyArray_MIN(unrollx, i + 0xFF00 * npyv_nlanes_u16)) { int k = j; npyv_u8 vsum8 = npyv_zero_u8(); - for (; k < MIN(unrollx, j + 255 * vstep); k += vstep) + for (; k < PyArray_MIN(unrollx, j + 0xFF * vstep); k += vstep) { vt = npyv_cmpeq_u8(npyv_load_u8(d + k), vzero); vt = npyv_and_u8(vt, vone); From 40fd17e3a2418d54284b53dbcf2ba72dbfbb58ad Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 2 Dec 2020 13:06:51 -0700 Subject: [PATCH 0152/1270] ENH: Use versioneer to manage numpy versions. The new tags look like '1.21.0.dev0+98.gaa0453721f', where '98' is the number of commits since the 1.21.0 branch was started and 'aa0453721f'. The chosen form may be specified in the 'setup.cfg' file. This PR adds two new files 'numpy/_version.py' and 'numpy/version.py'. The latter is kept because it is part of the public API and is actually used by some downstream projects, but it is no longer dynamically created. See https://github.com/python-versioneer/python-versioneer/ for more information. 
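Since the generated strings are valid PEP 440 versions, downstream code can
inspect them with standard tooling; a brief sketch using the example tag quoted
above (``packaging`` is an unrelated third-party helper, while the runtime
attributes shown are the ones this patch adds)::

    from packaging.version import Version

    v = Version("1.21.0.dev0+98.gaa0453721f")
    v.release        # (1, 21, 0)
    v.is_devrelease  # True
    v.local          # '98.gaa0453721f': commits since the branch + short SHA

    # At runtime, versioneer exposes the same information:
    from numpy._version import get_versions
    info = get_versions()
    info["version"], info["full-revisionid"]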
--- .gitattributes | 1 + .gitignore | 2 - MANIFEST.in | 2 + doc/Makefile | 9 +- numpy/__init__.py | 15 +- numpy/_version.py | 525 ++++++++ numpy/core/setup_common.py | 2 +- numpy/distutils/misc_util.py | 7 + numpy/tests/test_numpy_version.py | 2 +- numpy/version.py | 11 + setup.cfg | 11 + setup.py | 172 +-- versioneer.py | 1855 +++++++++++++++++++++++++++++ 13 files changed, 2479 insertions(+), 135 deletions(-) create mode 100644 numpy/_version.py create mode 100644 numpy/version.py create mode 100644 setup.cfg create mode 100644 versioneer.py diff --git a/.gitattributes b/.gitattributes index f4b6c0dcfbd1..8cdd176f0680 100644 --- a/.gitattributes +++ b/.gitattributes @@ -20,3 +20,4 @@ numpy/core/include/numpy/libdivide/* linguist-vendored numpy/linalg/lapack_lite/f2c_*.c linguist-generated numpy/linalg/lapack_lite/lapack_lite_names.h linguist-generated +numpy/_version.py export-subst diff --git a/.gitignore b/.gitignore index f85c577647c4..b28f0d3c8224 100644 --- a/.gitignore +++ b/.gitignore @@ -120,9 +120,7 @@ numpy/__config__.py numpy/core/include/numpy/__multiarray_api.h numpy/core/include/numpy/__ufunc_api.h numpy/core/include/numpy/_numpyconfig.h -numpy/version.py site.cfg -setup.cfg .tox numpy/core/include/numpy/__multiarray_api.c numpy/core/include/numpy/__ufunc_api.c diff --git a/MANIFEST.in b/MANIFEST.in index 35eb05a61619..57496ec71653 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -48,3 +48,5 @@ global-exclude *.pyc *.pyo *.pyd *.swp *.bak *~ # Exclude license file that we append to the main license when running # `python setup.py sdist` exclude LICENSES_bundled.txt +include versioneer.py +include numpy/_version.py diff --git a/doc/Makefile b/doc/Makefile index dd63702de1bd..68d496389e84 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -49,7 +49,7 @@ help: @echo " show to show the html output in a browser" clean: - -rm -rf build/* + -rm -rf build/* find . -name generated -type d -prune -exec rm -rf "{}" ";" gitwash-update: @@ -66,7 +66,7 @@ gitwash-update: # Build the current numpy version, and extract docs from it. # We have to be careful of some issues: -# +# # - Everything must be done using the same Python version # - We must use eggs (otherwise they might override PYTHONPATH on import). # - Different versions of easy_install install to different directories (!) 
@@ -80,8 +80,7 @@ UPLOAD_DIR=/srv/docs_scipy_org/doc/numpy-$(RELEASE) DIST_VARS=SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH) python$(PYVER) `which sphinx-build`" PYTHON="PYTHONPATH=$(INSTALL_PPH) python$(PYVER)" NUMPYVER:=$(shell $(PYTHON) -c "import numpy; print(numpy.version.git_revision[:10])" 2>/dev/null) -GITVER ?= $(shell cd ..; $(PYTHON) -c "from setup import git_version; \ - print(git_version()[:10])") +GITVER ?= $(shell cd ..; $(PYTHON) -c "import versioneer as v; print(v.get_versions()['full-revisionid'][:10])") version-check: ifeq "$(GITVER)" "Unknown" @@ -163,7 +162,7 @@ endif @echo " " @echo in build/merge/index.html, @echo then \"git commit\", \"git push\" - + #------------------------------------------------------------------------------ # Basic Sphinx generation rules for different formats diff --git a/numpy/__init__.py b/numpy/__init__.py index 3e5277318a47..879e8f013fed 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -130,12 +130,16 @@ your python interpreter from there.""" raise ImportError(msg) from e - from .version import git_revision as __git_revision__ - from .version import version as __version__ - __all__ = ['ModuleDeprecationWarning', 'VisibleDeprecationWarning'] + # get the version using versioneer + from ._version import get_versions + vinfo = get_versions() + __version__ = vinfo.get("closest-tag", vinfo["version"]) + __git_version__ = vinfo.get("full-revisionid") + del get_versions, vinfo + # mapping of {name: (value, deprecation_msg)} __deprecated_attrs__ = {} @@ -384,3 +388,8 @@ def _mac_os_check(): # Note that this will currently only make a difference on Linux core.multiarray._set_madvise_hugepage(use_hugepage) + + +from ._version import get_versions +__version__ = get_versions()['version'] +del get_versions diff --git a/numpy/_version.py b/numpy/_version.py new file mode 100644 index 000000000000..73f9f5c7f4be --- /dev/null +++ b/numpy/_version.py @@ -0,0 +1,525 @@ + +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. Generated by +# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer) + +"""Git implementation of _version.py.""" + +import errno +import os +import re +import subprocess +import sys + + +def get_keywords(): + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). 
+ git_refnames = "$Format:%d$" + git_full = "$Format:%H$" + git_date = "$Format:%ci$" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_config(): + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "pep440" + cfg.tag_prefix = "v" + cfg.parentdir_prefix = "numpy-" + cfg.versionfile_source = "numpy/_version.py" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, + env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + p = None + for c in commands: + try: + dispcmd = str([c] + args) + # remember shell=False, so use git.cmd on windows, not just git + p = subprocess.Popen([c] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None)) + break + except EnvironmentError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %s" % dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %s" % (commands,)) + return None, None + stdout = p.communicate()[0].strip().decode() + if p.returncode != 0: + if verbose: + print("unable to run %s (error)" % dispcmd) + print("stdout was %s" % stdout) + return None, p.returncode + return stdout, p.returncode + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for i in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + else: + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %s but none started with prefix %s" % + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. 
+ keywords = {} + try: + f = open(versionfile_abs, "r") + for line in f.readlines(): + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + f.close() + except EnvironmentError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + if not keywords: + raise NotThisMethod("no keywords at all, weird") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = set([r.strip() for r in refnames.strip("()").split(",")]) + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = set([r for r in refs if re.search(r'\d', r)]) + if verbose: + print("discarding '%s', no digits" % ",".join(refs - tags)) + if verbose: + print("likely tags: %s" % ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + if verbose: + print("picking %s" % r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. 
+ """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=True) + if rc != 0: + if verbose: + print("Directory %s not under git control" % root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", + "--always", "--long", + "--match", "%s*" % tag_prefix], + cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], + cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], + cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces): + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + """TAG[.post0.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post0.dev%d" % pieces["distance"] + else: + # exception #1 + rendered = "0.post0.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. 
+ for i in cfg.versionfile_source.split('/'): + root = os.path.dirname(root) + except NameError: + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None} + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", "date": None} diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index ba3e215b3998..2d85e071841d 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -51,7 +51,7 @@ def is_released(config): """Return True if a released version of numpy is detected.""" from distutils.version import LooseVersion - v = config.get_version('../version.py') + v = config.get_version('../_version.py') if v is None: raise ValueError("Could not get version") pv = LooseVersion(vstring=v).version diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index a8e19d52c56b..d3073ab2d742 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -1968,6 +1968,13 @@ def get_version(self, version_file=None, version_variable=None): version = getattr(version_module, a, None) if version is not None: break + + # Try if versioneer module + try: + version = version_module.get_versions()['version'] + except AttributeError: + version = None + if version is not None: break diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index 916ab93830df..7fd56681550a 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -8,7 +8,7 @@ def test_valid_numpy_version(): # Verify that the numpy version is a valid one (no .post suffix or other # nonsense). See gh-6431 for an issue caused by an invalid version. version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])" - dev_suffix = r"(\.dev0\+([0-9a-f]{7}|Unknown))" + dev_suffix = r"\.dev0\+[0-9]*\.g[0-9a-f]+" if np.version.release: res = re.match(version_pattern, np.__version__) else: diff --git a/numpy/version.py b/numpy/version.py new file mode 100644 index 000000000000..8a1d05aa481a --- /dev/null +++ b/numpy/version.py @@ -0,0 +1,11 @@ +from ._version import get_versions + +__ALL__ = ['version', 'full_version', 'git_revision', 'release'] + +vinfo = get_versions() +version: str = vinfo["version"] +full_version: str = vinfo['version'] +git_revision: str = vinfo['full-revisionid'] +release = 'dev0' not in version + +del get_versions, vinfo diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000000..5bca14ba00ee --- /dev/null +++ b/setup.cfg @@ -0,0 +1,11 @@ +# See the docstring in versioneer.py for instructions. Note that you must +# re-run 'versioneer.py setup' after changing this section, and commit the +# resulting files. 
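The updated test in numpy/tests/test_numpy_version.py above, the new numpy/version.py shim, and the [versioneer] configuration that follows all assume the same shape of development version string: the old hash-only `.dev0+<hash>` suffix becomes `.dev0+<distance>.g<short-hash>`. A small self-contained sketch of the check, with a hypothetical version string:

    import re

    # Patterns from the updated numpy/tests/test_numpy_version.py
    version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
    dev_suffix = r"\.dev0\+[0-9]*\.g[0-9a-f]+"

    # Hypothetical development build: ".dev0+<commits since tag>.g<short hash>"
    assert re.match(version_pattern + dev_suffix, "1.21.0.dev0+98.g1234abcde")

    # A released version carries no dev suffix and matches the bare pattern.
    assert re.match(version_pattern + "$", "1.21.0")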
+ +[versioneer] +VCS = git +style = pep440 +versionfile_source = numpy/_version.py +versionfile_build = numpy/_version.py +tag_prefix = v +parentdir_prefix = numpy- diff --git a/setup.py b/setup.py index 528b4f1e404a..4335e417a64f 100755 --- a/setup.py +++ b/setup.py @@ -24,13 +24,49 @@ import subprocess import textwrap import warnings +import versioneer +import builtins +# This is a bit hackish: we are setting a global variable so that the main +# numpy __init__ can detect if it is being loaded by the setup routine, to +# avoid attempting to load components that aren't built yet. While ugly, it's +# a lot more robust than what was previously being used. +builtins.__NUMPY_SETUP__ = True + +# Needed for backwards code compatibility below and in some CI scripts. +# The version components are changed from ints to strings, but only VERSION +# seems to matter outside of this module and it was already a str. +FULLVERSION = versioneer.get_version() +ISRELEASED = 'dev' not in FULLVERSION +MAJOR, MINOR, MICRO = FULLVERSION.split('.')[:3] +VERSION = '{}.{}.{}'.format(MAJOR, MINOR, MICRO) +# Python supported version checks if sys.version_info[:2] < (3, 7): raise RuntimeError("Python version >= 3.7 required.") -import builtins +# The first version not in the `Programming Language :: Python :: ...` classifiers above +if sys.version_info >= (3, 10): + warnings.warn( + f"NumPy {VERSION} may not yet support Python " + f"{sys.version_info.major}.{sys.version_info.minor}.", + RuntimeWarning, + ) +# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be +# properly updated when the contents of directories change (true for distutils, +# not sure about setuptools). +if os.path.exists('MANIFEST'): + os.remove('MANIFEST') + +# We need to import setuptools here in order for it to persist in sys.modules. +# Its presence/absence is used in subclassing setup in numpy/distutils/core.py, +# which may not be the most robust design. +import setuptools + +# Initialize cmdclass from versioneer +from numpy.distutils.core import numpy_cmdclass +cmdclass = versioneer.get_cmdclass(numpy_cmdclass) CLASSIFIERS = """\ Development Status :: 5 - Production/Stable @@ -54,114 +90,6 @@ Operating System :: MacOS """ -MAJOR = 1 -MINOR = 21 -MICRO = 0 -ISRELEASED = False -VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) - -# The first version not in the `Programming Language :: Python :: ...` classifiers above -if sys.version_info >= (3, 10): - warnings.warn( - f"NumPy {VERSION} may not yet support Python " - f"{sys.version_info.major}.{sys.version_info.minor}.", - RuntimeWarning, - ) - - -# Return the git revision as a string -def git_version(): - def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH', 'HOME']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) - return out - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - GIT_REVISION = out.strip().decode('ascii') - except (subprocess.SubprocessError, OSError): - GIT_REVISION = "Unknown" - - if not GIT_REVISION: - # this shouldn't happen but apparently can (see gh-8512) - GIT_REVISION = "Unknown" - - return GIT_REVISION - - -# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be -# properly updated when the contents of directories change (true for distutils, -# not sure about setuptools). 
-if os.path.exists('MANIFEST'): - os.remove('MANIFEST') - -# This is a bit hackish: we are setting a global variable so that the main -# numpy __init__ can detect if it is being loaded by the setup routine, to -# avoid attempting to load components that aren't built yet. While ugly, it's -# a lot more robust than what was previously being used. -builtins.__NUMPY_SETUP__ = True - - -def get_version_info(): - # Adding the git rev number needs to be done inside write_version_py(), - # otherwise the import of numpy.version messes up the build under Python 3. - FULLVERSION = VERSION - if os.path.exists('.git'): - GIT_REVISION = git_version() - elif os.path.exists('numpy/version.py'): - # must be a source distribution, use existing version file - try: - from numpy.version import git_revision as GIT_REVISION - except ImportError: - raise ImportError("Unable to import git_revision. Try removing " - "numpy/version.py and the build directory " - "before building.") - else: - GIT_REVISION = "Unknown" - - if not ISRELEASED: - import time - - time_stamp = time.strftime("%Y%m%d%H%M%S", time.localtime()) - FULLVERSION += f'.dev0+{time_stamp}_{GIT_REVISION[:7]}' - - return FULLVERSION, GIT_REVISION - - -def write_version_py(filename='numpy/version.py'): - cnt = """ -# THIS FILE IS GENERATED FROM NUMPY SETUP.PY -# -# To compare versions robustly, use `numpy.lib.NumpyVersion` -short_version: str = '%(version)s' -version: str = '%(version)s' -full_version: str = '%(full_version)s' -git_revision: str = '%(git_revision)s' -release: bool = %(isrelease)s - -if not release: - version = full_version -""" - FULLVERSION, GIT_REVISION = get_version_info() - - a = open(filename, 'w') - try: - a.write(cnt % {'version': VERSION, - 'full_version': FULLVERSION, - 'git_revision': GIT_REVISION, - 'isrelease': str(ISRELEASED)}) - finally: - a.close() - def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration @@ -230,13 +158,13 @@ def __exit__(self, exception_type, exception_value, traceback): f.write(self.bsd_text) -from distutils.command.sdist import sdist -class sdist_checked(sdist): +sdist_class = cmdclass['sdist'] +class sdist_checked(sdist_class): """ check submodules on sdist to prevent incomplete tarballs """ def run(self): check_submodules() with concat_license_files(): - sdist.run(self) + sdist_class.run(self) def get_build_overrides(): @@ -320,7 +248,8 @@ def parse_setuppy_commands(): # below and not standalone. Hence they're not added to good_commands. good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py', 'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm', - 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src') + 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src', + 'version') for command in good_commands: if command in args: @@ -422,7 +351,7 @@ def parse_setuppy_commands(): def get_docs_url(): - if not ISRELEASED: + if 'dev' in VERSION: return "https://numpy.org/devdocs" else: # For releases, this URL ends up on pypi. 
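For reference, here is how the block added at the top of setup.py maps a versioneer string onto the legacy names that replace the hard-coded MAJOR/MINOR/MICRO constants and the git_version()/write_version_py() helpers removed above. This is a sketch only; in the patch the string comes from versioneer.get_version(), and the value used here is hypothetical:

    FULLVERSION = "1.21.0.dev0+98.g1234abcde"         # hypothetical versioneer output
    ISRELEASED = 'dev' not in FULLVERSION             # False for this string
    MAJOR, MINOR, MICRO = FULLVERSION.split('.')[:3]  # '1', '21', '0' (strings, not ints)
    VERSION = '{}.{}.{}'.format(MAJOR, MINOR, MICRO)  # '1.21.0'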
@@ -437,9 +366,6 @@ def setup_package(): os.chdir(src_path) sys.path.insert(0, src_path) - # Rewrite the version file every time - write_version_py() - # The f2py scripts that will be installed if sys.platform == 'win32': f2py_cmds = [ @@ -452,7 +378,7 @@ def setup_package(): 'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2], ] - cmdclass = {"sdist": sdist_checked, } + cmdclass["sdist"] = sdist_checked metadata = dict( name='numpy', maintainer="NumPy Developers", @@ -471,6 +397,7 @@ def setup_package(): classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], test_suite='pytest', + version=versioneer.get_version(), cmdclass=cmdclass, python_requires='>=3.7', zip_safe=False, @@ -486,10 +413,11 @@ def setup_package(): # Raise errors for unsupported commands, improve help output, etc. run_build = parse_setuppy_commands() - if run_build: + if run_build and 'version' not in sys.argv: # patches distutils, even though we don't use it - import setuptools # noqa: F401 + #from setuptools import setup from numpy.distutils.core import setup + if 'sdist' not in sys.argv: # Generate Cython sources, unless we're generating an sdist generate_cython() @@ -498,10 +426,8 @@ def setup_package(): # Customize extension building cmdclass['build_clib'], cmdclass['build_ext'] = get_build_overrides() else: + #from numpy.distutils.core import setup from setuptools import setup - # Version number is added to metadata inside configuration() if build - # is run. - metadata['version'] = get_version_info()[0] try: setup(**metadata) diff --git a/versioneer.py b/versioneer.py new file mode 100644 index 000000000000..1040c218924c --- /dev/null +++ b/versioneer.py @@ -0,0 +1,1855 @@ + +# Version: 0.19 + +"""The Versioneer - like a rocketeer, but for versions. + +The Versioneer +============== + +* like a rocketeer, but for versions! +* https://github.com/python-versioneer/python-versioneer +* Brian Warner +* License: Public Domain +* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3 +* [![Latest Version][pypi-image]][pypi-url] +* [![Build Status][travis-image]][travis-url] + +This is a tool for managing a recorded version number in distutils-based +python projects. The goal is to remove the tedious and error-prone "update +the embedded version string" step from your release process. Making a new +release should be as easy as recording a new tag in your version-control +system, and maybe making new tarballs. + + +## Quick Install + +* `pip install versioneer` to somewhere in your $PATH +* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md)) +* run `versioneer install` in your source tree, commit the results +* Verify version information with `python setup.py version` + +## Version Identifiers + +Source trees come from a variety of places: + +* a version-control system checkout (mostly used by developers) +* a nightly tarball, produced by build automation +* a snapshot tarball, produced by a web-based VCS browser, like github's + "tarball from tag" feature +* a release tarball, produced by "setup.py sdist", distributed through PyPI + +Within each source tree, the version identifier (either a string or a number, +this tool is format-agnostic) can come from a variety of places: + +* ask the VCS tool itself, e.g. 
"git describe" (for checkouts), which knows + about recent "tags" and an absolute revision-id +* the name of the directory into which the tarball was unpacked +* an expanded VCS keyword ($Id$, etc) +* a `_version.py` created by some earlier build step + +For released software, the version identifier is closely related to a VCS +tag. Some projects use tag names that include more than just the version +string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool +needs to strip the tag prefix to extract the version identifier. For +unreleased software (between tags), the version identifier should provide +enough information to help developers recreate the same tree, while also +giving them an idea of roughly how old the tree is (after version 1.2, before +version 1.3). Many VCS systems can report a description that captures this, +for example `git describe --tags --dirty --always` reports things like +"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the +0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has +uncommitted changes). + +The version identifier is used for multiple purposes: + +* to allow the module to self-identify its version: `myproject.__version__` +* to choose a name and prefix for a 'setup.py sdist' tarball + +## Theory of Operation + +Versioneer works by adding a special `_version.py` file into your source +tree, where your `__init__.py` can import it. This `_version.py` knows how to +dynamically ask the VCS tool for version information at import time. + +`_version.py` also contains `$Revision$` markers, and the installation +process marks `_version.py` to have this marker rewritten with a tag name +during the `git archive` command. As a result, generated tarballs will +contain enough information to get the proper version. + +To allow `setup.py` to compute a version too, a `versioneer.py` is added to +the top level of your source tree, next to `setup.py` and the `setup.cfg` +that configures it. This overrides several distutils/setuptools commands to +compute the version when invoked, and changes `setup.py build` and `setup.py +sdist` to replace `_version.py` with a small static file that contains just +the generated version data. + +## Installation + +See [INSTALL.md](./INSTALL.md) for detailed installation instructions. + +## Version-String Flavors + +Code which uses Versioneer can learn about its version string at runtime by +importing `_version` from your main `__init__.py` file and running the +`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can +import the top-level `versioneer.py` and run `get_versions()`. + +Both functions return a dictionary with different flavors of version +information: + +* `['version']`: A condensed version string, rendered using the selected + style. This is the most commonly used value for the project's version + string. The default "pep440" style yields strings like `0.11`, + `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section + below for alternative styles. + +* `['full-revisionid']`: detailed revision identifier. For Git, this is the + full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". + +* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the + commit date in ISO 8601 format. This will be None if the date is not + available. + +* `['dirty']`: a boolean, True if the tree has uncommitted changes. 
Note that + this is only accurate if run in a VCS checkout, otherwise it is likely to + be False or None + +* `['error']`: if the version string could not be computed, this will be set + to a string describing the problem, otherwise it will be None. It may be + useful to throw an exception in setup.py if this is set, to avoid e.g. + creating tarballs with a version string of "unknown". + +Some variants are more useful than others. Including `full-revisionid` in a +bug report should allow developers to reconstruct the exact code being tested +(or indicate the presence of local changes that should be shared with the +developers). `version` is suitable for display in an "about" box or a CLI +`--version` output: it can be easily compared against release notes and lists +of bugs fixed in various releases. + +The installer adds the following text to your `__init__.py` to place a basic +version in `YOURPROJECT.__version__`: + + from ._version import get_versions + __version__ = get_versions()['version'] + del get_versions + +## Styles + +The setup.cfg `style=` configuration controls how the VCS information is +rendered into a version string. + +The default style, "pep440", produces a PEP440-compliant string, equal to the +un-prefixed tag name for actual releases, and containing an additional "local +version" section with more detail for in-between builds. For Git, this is +TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags +--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the +tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and +that this commit is two revisions ("+2") beyond the "0.11" tag. For released +software (exactly equal to a known tag), the identifier will only contain the +stripped tag, e.g. "0.11". + +Other styles are available. See [details.md](details.md) in the Versioneer +source tree for descriptions. + +## Debugging + +Versioneer tries to avoid fatal errors: if something goes wrong, it will tend +to return a version of "0+unknown". To investigate the problem, run `setup.py +version`, which will run the version-lookup code in a verbose mode, and will +display the full contents of `get_versions()` (including the `error` string, +which may help identify what went wrong). + +## Known Limitations + +Some situations are known to cause problems for Versioneer. This details the +most significant ones. More can be found on Github +[issues page](https://github.com/python-versioneer/python-versioneer/issues). + +### Subprojects + +Versioneer has limited support for source trees in which `setup.py` is not in +the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are +two common reasons why `setup.py` might not be in the root: + +* Source trees which contain multiple subprojects, such as + [Buildbot](https://github.com/buildbot/buildbot), which contains both + "master" and "slave" subprojects, each with their own `setup.py`, + `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI + distributions (and upload multiple independently-installable tarballs). +* Source trees whose main purpose is to contain a C library, but which also + provide bindings to Python (and perhaps other languages) in subdirectories. + +Versioneer will look for `.git` in parent directories, and most operations +should get the right version string. 
However `pip` and `setuptools` have bugs +and implementation details which frequently cause `pip install .` from a +subproject directory to fail to find a correct version string (so it usually +defaults to `0+unknown`). + +`pip install --editable .` should work correctly. `setup.py install` might +work too. + +Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in +some later version. + +[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking +this issue. The discussion in +[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the +issue from the Versioneer side in more detail. +[pip PR#3176](https://github.com/pypa/pip/pull/3176) and +[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve +pip to let Versioneer work correctly. + +Versioneer-0.16 and earlier only looked for a `.git` directory next to the +`setup.cfg`, so subprojects were completely unsupported with those releases. + +### Editable installs with setuptools <= 18.5 + +`setup.py develop` and `pip install --editable .` allow you to install a +project into a virtualenv once, then continue editing the source code (and +test) without re-installing after every change. + +"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a +convenient way to specify executable scripts that should be installed along +with the python package. + +These both work as expected when using modern setuptools. When using +setuptools-18.5 or earlier, however, certain operations will cause +`pkg_resources.DistributionNotFound` errors when running the entrypoint +script, which must be resolved by re-installing the package. This happens +when the install happens with one version, then the egg_info data is +regenerated while a different version is checked out. Many setup.py commands +cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into +a different virtualenv), so this can be surprising. + +[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes +this one, but upgrading to a newer version of setuptools should probably +resolve it. + + +## Updating Versioneer + +To upgrade your project to a new release of Versioneer, do the following: + +* install the new Versioneer (`pip install -U versioneer` or equivalent) +* edit `setup.cfg`, if necessary, to include any new configuration settings + indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. +* re-run `versioneer install` in your source tree, to replace + `SRC/_version.py` +* commit any changed files + +## Future Directions + +This tool is designed to make it easily extended to other version-control +systems: all VCS-specific components are in separate directories like +src/git/ . The top-level `versioneer.py` script is assembled from these +components by running make-versioneer.py . In the future, make-versioneer.py +will take a VCS name as an argument, and will construct a version of +`versioneer.py` that is specific to the given VCS. It might also take the +configuration arguments that are currently provided manually during +installation by editing setup.py . Alternatively, it might go the other +direction and include code from all supported VCS systems, reducing the +number of intermediate scripts. 
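Putting the flavors and styles described above together: for a NumPy checkout two commits past a v1.21.0 tag with uncommitted changes, the default "pep440" style would yield a dictionary along the following lines (the values are illustrative only, reusing the example revision id from the docstring above):

    versions = {
        "version": "1.21.0+2.g1076c97.dirty",
        "full-revisionid": "1076c978a8d3cfc70f408fe5974aa6c092c949ac",
        "dirty": True,
        "error": None,
        "date": "2020-11-01T12:00:00+0100",   # hypothetical commit date
    }

    # numpy/__init__.py and numpy/version.py expose these as, for example:
    __version__ = versions["version"]
    git_revision = versions["full-revisionid"]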
+ +## Similar projects + +* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time + dependency +* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of + versioneer + +## License + +To make Versioneer easier to embed, all its code is dedicated to the public +domain. The `_version.py` that it creates is also in the public domain. +Specifically, both are released under the Creative Commons "Public Domain +Dedication" license (CC0-1.0), as described in +https://creativecommons.org/publicdomain/zero/1.0/ . + +[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg +[pypi-url]: https://pypi.python.org/pypi/versioneer/ +[travis-image]: +https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg +[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer + +""" + +import configparser +import errno +import json +import os +import re +import subprocess +import sys + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_root(): + """Get the project root directory. + + We require that all commands are run from the project root, i.e. the + directory that contains setup.py, setup.cfg, and versioneer.py . + """ + root = os.path.realpath(os.path.abspath(os.getcwd())) + setup_py = os.path.join(root, "setup.py") + versioneer_py = os.path.join(root, "versioneer.py") + if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + # allow 'python path/to/setup.py COMMAND' + root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) + setup_py = os.path.join(root, "setup.py") + versioneer_py = os.path.join(root, "versioneer.py") + if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + err = ("Versioneer was unable to run the project root directory. " + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND').") + raise VersioneerBadRootError(err) + try: + # Certain runtime workflows (setup.py install/develop in a setuptools + # tree) execute all dependencies in a single python process, so + # "versioneer" may be imported multiple times, and python's shared + # module-import table will cache the first one. So we can't use + # os.path.dirname(__file__), as that will find whichever + # versioneer.py was first imported, even in later projects. + me = os.path.realpath(os.path.abspath(__file__)) + me_dir = os.path.normcase(os.path.splitext(me)[0]) + vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) + if me_dir != vsr_dir: + print("Warning: build in %s is using versioneer.py from %s" + % (os.path.dirname(me), versioneer_py)) + except NameError: + pass + return root + + +def get_config_from_root(root): + """Read the project setup.cfg file to determine Versioneer config.""" + # This might raise EnvironmentError (if setup.cfg is missing), or + # configparser.NoSectionError (if it lacks a [versioneer] section), or + # configparser.NoOptionError (if it lacks "VCS="). See the docstring at + # the top of versioneer.py for instructions on writing your setup.cfg . 
+ setup_cfg = os.path.join(root, "setup.cfg") + parser = configparser.ConfigParser() + with open(setup_cfg, "r") as f: + parser.read_file(f) + VCS = parser.get("versioneer", "VCS") # mandatory + + def get(parser, name): + if parser.has_option("versioneer", name): + return parser.get("versioneer", name) + return None + cfg = VersioneerConfig() + cfg.VCS = VCS + cfg.style = get(parser, "style") or "" + cfg.versionfile_source = get(parser, "versionfile_source") + cfg.versionfile_build = get(parser, "versionfile_build") + cfg.tag_prefix = get(parser, "tag_prefix") + if cfg.tag_prefix in ("''", '""'): + cfg.tag_prefix = "" + cfg.parentdir_prefix = get(parser, "parentdir_prefix") + cfg.verbose = get(parser, "verbose") + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +# these dictionaries contain VCS-specific tools +LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, + env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + p = None + for c in commands: + try: + dispcmd = str([c] + args) + # remember shell=False, so use git.cmd on windows, not just git + p = subprocess.Popen([c] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None)) + break + except EnvironmentError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %s" % dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %s" % (commands,)) + return None, None + stdout = p.communicate()[0].strip().decode() + if p.returncode != 0: + if verbose: + print("unable to run %s (error)" % dispcmd) + print("stdout was %s" % stdout) + return None, p.returncode + return stdout, p.returncode + + +LONG_VERSION_PY['git'] = r''' +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. Generated by +# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer) + +"""Git implementation of _version.py.""" + +import errno +import os +import re +import subprocess +import sys + + +def get_keywords(): + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). 
+ git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" + git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" + git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_config(): + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "%(STYLE)s" + cfg.tag_prefix = "%(TAG_PREFIX)s" + cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" + cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, + env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + p = None + for c in commands: + try: + dispcmd = str([c] + args) + # remember shell=False, so use git.cmd on windows, not just git + p = subprocess.Popen([c] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None)) + break + except EnvironmentError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %%s" %% dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %%s" %% (commands,)) + return None, None + stdout = p.communicate()[0].strip().decode() + if p.returncode != 0: + if verbose: + print("unable to run %%s (error)" %% dispcmd) + print("stdout was %%s" %% stdout) + return None, p.returncode + return stdout, p.returncode + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for i in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + else: + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %%s but none started with prefix %%s" %% + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. 
+ keywords = {} + try: + f = open(versionfile_abs, "r") + for line in f.readlines(): + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + f.close() + except EnvironmentError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + if not keywords: + raise NotThisMethod("no keywords at all, weird") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = set([r.strip() for r in refnames.strip("()").split(",")]) + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %%d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = set([r for r in refs if re.search(r'\d', r)]) + if verbose: + print("discarding '%%s', no digits" %% ",".join(refs - tags)) + if verbose: + print("likely tags: %%s" %% ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + if verbose: + print("picking %%s" %% r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. 
+ """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=True) + if rc != 0: + if verbose: + print("Directory %%s not under git control" %% root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", + "--always", "--long", + "--match", "%%s*" %% tag_prefix], + cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%%s'" + %% describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%%s' doesn't start with prefix '%%s'" + print(fmt %% (full_tag, tag_prefix)) + pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" + %% (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], + cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], + cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces): + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + """TAG[.post0.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post0.dev%%d" %% pieces["distance"] + else: + # exception #1 + rendered = "0.post0.dev%%d" %% pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%%s" %% pieces["short"] + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%%s" %% pieces["short"] + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%%s'" %% style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. + for i in cfg.versionfile_source.split('/'): + root = os.path.dirname(root) + except NameError: + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None} + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", "date": None} +''' + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. 
+ keywords = {} + try: + f = open(versionfile_abs, "r") + for line in f.readlines(): + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + f.close() + except EnvironmentError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + if not keywords: + raise NotThisMethod("no keywords at all, weird") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = set([r.strip() for r in refnames.strip("()").split(",")]) + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = set([r for r in refs if re.search(r'\d', r)]) + if verbose: + print("discarding '%s', no digits" % ",".join(refs - tags)) + if verbose: + print("likely tags: %s" % ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + if verbose: + print("picking %s" % r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. 
+ """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=True) + if rc != 0: + if verbose: + print("Directory %s not under git control" % root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", + "--always", "--long", + "--match", "%s*" % tag_prefix], + cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], + cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], + cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def do_vcs_install(manifest_in, versionfile_source, ipy): + """Git-specific installation logic for Versioneer. + + For Git, this means creating/changing .gitattributes to mark _version.py + for export-subst keyword substitution. 
+ """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + files = [manifest_in, versionfile_source] + if ipy: + files.append(ipy) + try: + me = __file__ + if me.endswith(".pyc") or me.endswith(".pyo"): + me = os.path.splitext(me)[0] + ".py" + versioneer_file = os.path.relpath(me) + except NameError: + versioneer_file = "versioneer.py" + files.append(versioneer_file) + present = False + try: + f = open(".gitattributes", "r") + for line in f.readlines(): + if line.strip().startswith(versionfile_source): + if "export-subst" in line.strip().split()[1:]: + present = True + f.close() + except EnvironmentError: + pass + if not present: + f = open(".gitattributes", "a+") + f.write("%s export-subst\n" % versionfile_source) + f.close() + files.append(".gitattributes") + run_command(GITS, ["add", "--"] + files) + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for i in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + else: + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %s but none started with prefix %s" % + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +SHORT_VERSION_PY = """ +# This file was generated by 'versioneer.py' (0.19) from +# revision-control system data, or from the parent directory name of an +# unpacked source archive. Distribution tarballs contain a pre-generated copy +# of this file. + +import json + +version_json = ''' +%s +''' # END VERSION_JSON + + +def get_versions(): + return json.loads(version_json) +""" + + +def versions_from_file(filename): + """Try to determine the version from _version.py if present.""" + try: + with open(filename) as f: + contents = f.read() + except EnvironmentError: + raise NotThisMethod("unable to read _version.py") + mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", + contents, re.M | re.S) + if not mo: + mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", + contents, re.M | re.S) + if not mo: + raise NotThisMethod("no version_json in _version.py") + return json.loads(mo.group(1)) + + +def write_to_version_file(filename, versions): + """Write the given version number to the given _version.py file.""" + os.unlink(filename) + contents = json.dumps(versions, sort_keys=True, + indent=1, separators=(",", ": ")) + with open(filename, "w") as f: + f.write(SHORT_VERSION_PY % contents) + + print("set %s to '%s'" % (filename, versions["version"])) + + +def plus_or_dot(pieces): + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + """TAG[.post0.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post0.dev%d" % pieces["distance"] + else: + # exception #1 + rendered = "0.post0.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +class VersioneerBadRootError(Exception): + """The project root directory is unknown or missing key files.""" + + +def get_versions(verbose=False): + """Get the project version from whatever source is available. + + Returns dict with two keys: 'version' and 'full'. + """ + if "versioneer" in sys.modules: + # see the discussion in cmdclass.py:get_cmdclass() + del sys.modules["versioneer"] + + root = get_root() + cfg = get_config_from_root(root) + + assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" + handlers = HANDLERS.get(cfg.VCS) + assert handlers, "unrecognized VCS '%s'" % cfg.VCS + verbose = verbose or cfg.verbose + assert cfg.versionfile_source is not None, \ + "please set versioneer.versionfile_source" + assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" + + versionfile_abs = os.path.join(root, cfg.versionfile_source) + + # extract version from first of: _version.py, VCS command (e.g. 'git + # describe'), parentdir. This is meant to work for developers using a + # source checkout, for users of a tarball created by 'setup.py sdist', + # and for users of a tarball/zipball created by 'git archive' or github's + # download-from-tag feature or the equivalent in other VCSes. 
+ + get_keywords_f = handlers.get("get_keywords") + from_keywords_f = handlers.get("keywords") + if get_keywords_f and from_keywords_f: + try: + keywords = get_keywords_f(versionfile_abs) + ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) + if verbose: + print("got version from expanded keyword %s" % ver) + return ver + except NotThisMethod: + pass + + try: + ver = versions_from_file(versionfile_abs) + if verbose: + print("got version from file %s %s" % (versionfile_abs, ver)) + return ver + except NotThisMethod: + pass + + from_vcs_f = handlers.get("pieces_from_vcs") + if from_vcs_f: + try: + pieces = from_vcs_f(cfg.tag_prefix, root, verbose) + ver = render(pieces, cfg.style) + if verbose: + print("got version from VCS %s" % ver) + return ver + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + if verbose: + print("got version from parentdir %s" % ver) + return ver + except NotThisMethod: + pass + + if verbose: + print("unable to compute version") + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, "error": "unable to compute version", + "date": None} + + +def get_version(): + """Get the short version string for this project.""" + return get_versions()["version"] + + +def get_cmdclass(cmdclass=None): + """Get the custom setuptools/distutils subclasses used by Versioneer. + + If the package uses a different cmdclass (e.g. one from numpy), it + should be provide as an argument. + """ + if "versioneer" in sys.modules: + del sys.modules["versioneer"] + # this fixes the "python setup.py develop" case (also 'install' and + # 'easy_install .'), in which subdependencies of the main project are + # built (using setup.py bdist_egg) in the same python process. Assume + # a main project A and a dependency B, which use different versions + # of Versioneer. A's setup.py imports A's Versioneer, leaving it in + # sys.modules by the time B's setup.py is executed, causing B to run + # with the wrong versioneer. Setuptools wraps the sub-dep builds in a + # sandbox that restores sys.modules to it's pre-build state, so the + # parent is protected against the child's "import versioneer". By + # removing ourselves from sys.modules here, before the child build + # happens, we protect the child from the parent's versioneer too. + # Also see https://github.com/python-versioneer/python-versioneer/issues/52 + + cmds = {} if cmdclass is None else cmdclass.copy() + + # we add "version" to both distutils and setuptools + from distutils.core import Command + + class cmd_version(Command): + description = "report generated version string" + user_options = [] + boolean_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + vers = get_versions(verbose=True) + print("Version: %s" % vers["version"]) + print(" full-revisionid: %s" % vers.get("full-revisionid")) + print(" dirty: %s" % vers.get("dirty")) + print(" date: %s" % vers.get("date")) + if vers["error"]: + print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version + + # we override "build_py" in both distutils and setuptools + # + # most invocation pathways end up running build_py: + # distutils/build -> build_py + # distutils/install -> distutils/build ->.. + # setuptools/bdist_wheel -> distutils/install ->.. + # setuptools/bdist_egg -> distutils/install_lib -> build_py + # setuptools/install -> bdist_egg ->.. + # setuptools/develop -> ? 
+ # pip install: + # copies source tree to a tempdir before running egg_info/etc + # if .git isn't copied too, 'git describe' will fail + # then does setup.py bdist_wheel, or sometimes setup.py install + # setup.py egg_info -> ? + + # we override different "build_py" commands for both environments + if 'build_py' in cmds: + _build_py = cmds['build_py'] + elif "setuptools" in sys.modules: + from setuptools.command.build_py import build_py as _build_py + else: + from distutils.command.build_py import build_py as _build_py + + class cmd_build_py(_build_py): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + _build_py.run(self) + # now locate _version.py in the new build/ directory and replace + # it with an updated value + if cfg.versionfile_build: + target_versionfile = os.path.join(self.build_lib, + cfg.versionfile_build) + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py + + if "setuptools" in sys.modules: + from setuptools.command.build_ext import build_ext as _build_ext + else: + from distutils.command.build_ext import build_ext as _build_ext + + class cmd_build_ext(_build_ext): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + _build_ext.run(self) + if self.inplace: + # build_ext --inplace will only build extensions in + # build/lib<..> dir with no _version.py to write to. + # As in place builds will already have a _version.py + # in the module dir, we do not need to write one. + return + # now locate _version.py in the new build/ directory and replace + # it with an updated value + target_versionfile = os.path.join(self.build_lib, + cfg.versionfile_source) + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + cmds["build_ext"] = cmd_build_ext + + if "cx_Freeze" in sys.modules: # cx_freeze enabled? + from cx_Freeze.dist import build_exe as _build_exe + # nczeczulin reports that py2exe won't like the pep440-style string + # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. + # setup(console=[{ + # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION + # "product_version": versioneer.get_version(), + # ... + + class cmd_build_exe(_build_exe): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + target_versionfile = cfg.versionfile_source + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + + _build_exe.run(self) + os.unlink(target_versionfile) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % + {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + cmds["build_exe"] = cmd_build_exe + del cmds["build_py"] + + if 'py2exe' in sys.modules: # py2exe enabled? 
+ from py2exe.distutils_buildexe import py2exe as _py2exe + + class cmd_py2exe(_py2exe): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + target_versionfile = cfg.versionfile_source + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + + _py2exe.run(self) + os.unlink(target_versionfile) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % + {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + cmds["py2exe"] = cmd_py2exe + + # we override different "sdist" commands for both environments + if 'sdist' in cmds: + _sdist = cmds['sdist'] + elif "setuptools" in sys.modules: + from setuptools.command.sdist import sdist as _sdist + else: + from distutils.command.sdist import sdist as _sdist + + class cmd_sdist(_sdist): + def run(self): + versions = get_versions() + self._versioneer_generated_versions = versions + # unless we update this, the command will keep using the old + # version + self.distribution.metadata.version = versions["version"] + return _sdist.run(self) + + def make_release_tree(self, base_dir, files): + root = get_root() + cfg = get_config_from_root(root) + _sdist.make_release_tree(self, base_dir, files) + # now locate _version.py in the new base_dir directory + # (remembering that it may be a hardlink) and replace it with an + # updated value + target_versionfile = os.path.join(base_dir, cfg.versionfile_source) + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, + self._versioneer_generated_versions) + cmds["sdist"] = cmd_sdist + + return cmds + + +CONFIG_ERROR = """ +setup.cfg is missing the necessary Versioneer configuration. You need +a section like: + + [versioneer] + VCS = git + style = pep440 + versionfile_source = src/myproject/_version.py + versionfile_build = myproject/_version.py + tag_prefix = + parentdir_prefix = myproject- + +You will also need to edit your setup.py to use the results: + + import versioneer + setup(version=versioneer.get_version(), + cmdclass=versioneer.get_cmdclass(), ...) + +Please read the docstring in ./versioneer.py for configuration instructions, +edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. +""" + +SAMPLE_CONFIG = """ +# See the docstring in versioneer.py for instructions. Note that you must +# re-run 'versioneer.py setup' after changing this section, and commit the +# resulting files. 
+ +[versioneer] +#VCS = git +#style = pep440 +#versionfile_source = +#versionfile_build = +#tag_prefix = +#parentdir_prefix = + +""" + +INIT_PY_SNIPPET = """ +from ._version import get_versions +__version__ = get_versions()['version'] +del get_versions +""" + + +def do_setup(): + """Do main VCS-independent setup function for installing Versioneer.""" + root = get_root() + try: + cfg = get_config_from_root(root) + except (EnvironmentError, configparser.NoSectionError, + configparser.NoOptionError) as e: + if isinstance(e, (EnvironmentError, configparser.NoSectionError)): + print("Adding sample versioneer config to setup.cfg", + file=sys.stderr) + with open(os.path.join(root, "setup.cfg"), "a") as f: + f.write(SAMPLE_CONFIG) + print(CONFIG_ERROR, file=sys.stderr) + return 1 + + print(" creating %s" % cfg.versionfile_source) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), + "__init__.py") + if os.path.exists(ipy): + try: + with open(ipy, "r") as f: + old = f.read() + except EnvironmentError: + old = "" + if INIT_PY_SNIPPET not in old: + print(" appending to %s" % ipy) + with open(ipy, "a") as f: + f.write(INIT_PY_SNIPPET) + else: + print(" %s unmodified" % ipy) + else: + print(" %s doesn't exist, ok" % ipy) + ipy = None + + # Make sure both the top-level "versioneer.py" and versionfile_source + # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so + # they'll be copied into source distributions. Pip won't be able to + # install the package without this. + manifest_in = os.path.join(root, "MANIFEST.in") + simple_includes = set() + try: + with open(manifest_in, "r") as f: + for line in f: + if line.startswith("include "): + for include in line.split()[1:]: + simple_includes.add(include) + except EnvironmentError: + pass + # That doesn't cover everything MANIFEST.in can do + # (http://docs.python.org/2/distutils/sourcedist.html#commands), so + # it might give some false negatives. Appending redundant 'include' + # lines is safe, though. + if "versioneer.py" not in simple_includes: + print(" appending 'versioneer.py' to MANIFEST.in") + with open(manifest_in, "a") as f: + f.write("include versioneer.py\n") + else: + print(" 'versioneer.py' already in MANIFEST.in") + if cfg.versionfile_source not in simple_includes: + print(" appending versionfile_source ('%s') to MANIFEST.in" % + cfg.versionfile_source) + with open(manifest_in, "a") as f: + f.write("include %s\n" % cfg.versionfile_source) + else: + print(" versionfile_source already in MANIFEST.in") + + # Make VCS-specific changes. For git, this means creating/changing + # .gitattributes to mark _version.py for export-subst keyword + # substitution. 
+
+    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
+    return 0
+
+
+def scan_setup_py():
+    """Validate the contents of setup.py against Versioneer's expectations."""
+    found = set()
+    setters = False
+    errors = 0
+    with open("setup.py", "r") as f:
+        for line in f.readlines():
+            if "import versioneer" in line:
+                found.add("import")
+            if "versioneer.get_cmdclass()" in line:
+                found.add("cmdclass")
+            if "versioneer.get_version()" in line:
+                found.add("get_version")
+            if "versioneer.VCS" in line:
+                setters = True
+            if "versioneer.versionfile_source" in line:
+                setters = True
+    if len(found) != 3:
+        print("")
+        print("Your setup.py appears to be missing some important items")
+        print("(but I might be wrong). Please make sure it has something")
+        print("roughly like the following:")
+        print("")
+        print(" import versioneer")
+        print(" setup( version=versioneer.get_version(),")
+        print(" cmdclass=versioneer.get_cmdclass(), ...)")
+        print("")
+        errors += 1
+    if setters:
+        print("You should remove lines like 'versioneer.VCS = ' and")
+        print("'versioneer.versionfile_source = ' . This configuration")
+        print("now lives in setup.cfg, and should be removed from setup.py")
+        print("")
+        errors += 1
+    return errors
+
+
+if __name__ == "__main__":
+    cmd = sys.argv[1]
+    if cmd == "setup":
+        errors = do_setup()
+        errors += scan_setup_py()
+        if errors:
+            sys.exit(1)

From cb31f35eea66529f51dbc62dde74b3753e676879 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 8 Dec 2020 13:01:25 -0600
Subject: [PATCH 0153/1270] TST: Ensure `like=` tests are not sensitive to
 execution order

This moves class creation into a `setup` function and uses `self`
instead of the test class throughout. There are probably nicer ways to
fix (and improve) it, but this seemed like a nice, minimal fix.
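As a rough illustration of the pattern (a sketch only, not the actual test
code -- the real classes and methods are in the diff below), the refactor
amounts to:

    class TestArrayLike:
        def setup(self):
            # Rebuild the helper class before every test so that methods
            # added by one test cannot leak into another and change its
            # outcome, regardless of the order in which tests run.
            class MyArray:
                def __init__(self, function=None):
                    self.function = function

            self.MyArray = MyArray

        def test_fresh_helper(self):  # illustrative test only
            ref = self.MyArray()
            assert ref.function is None

Each test then reaches the helper through `self` instead of the shared class
attribute, which is what removes the sensitivity to execution order.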
--- numpy/core/tests/test_overrides.py | 72 +++++++++++++++--------------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py index df0ea330c524..6862fca03826 100644 --- a/numpy/core/tests/test_overrides.py +++ b/numpy/core/tests/test_overrides.py @@ -430,25 +430,27 @@ def __array_function__(self, func, types, args, kwargs): class TestArrayLike: + def setup(self): + class MyArray(): + def __init__(self, function=None): + self.function = function - class MyArray(): - - def __init__(self, function=None): - self.function = function + def __array_function__(self, func, types, args, kwargs): + try: + my_func = getattr(self, func.__name__) + except AttributeError: + return NotImplemented + return my_func(*args, **kwargs) - def __array_function__(self, func, types, args, kwargs): - try: - my_func = getattr(TestArrayLike.MyArray, func.__name__) - except AttributeError: - return NotImplemented - return my_func(*args, **kwargs) + self.MyArray = MyArray - class MyNoArrayFunctionArray(): + class MyNoArrayFunctionArray(): + def __init__(self, function=None): + self.function = function - def __init__(self, function=None): - self.function = function + self.MyNoArrayFunctionArray = MyNoArrayFunctionArray - def add_method(name, arr_class, enable_value_error=False): + def add_method(self, name, arr_class, enable_value_error=False): def _definition(*args, **kwargs): # Check that `like=` isn't propagated downstream assert 'like' not in kwargs @@ -464,9 +466,9 @@ def func_args(*args, **kwargs): @requires_array_function def test_array_like_not_implemented(self): - TestArrayLike.add_method('array', TestArrayLike.MyArray) + self.add_method('array', self.MyArray) - ref = TestArrayLike.MyArray.array() + ref = self.MyArray.array() with assert_raises_regex(TypeError, 'no implementation found'): array_like = np.asarray(1, like=ref) @@ -497,15 +499,15 @@ def test_array_like_not_implemented(self): @pytest.mark.parametrize('numpy_ref', [True, False]) @requires_array_function def test_array_like(self, function, args, kwargs, numpy_ref): - TestArrayLike.add_method('array', TestArrayLike.MyArray) - TestArrayLike.add_method(function, TestArrayLike.MyArray) + self.add_method('array', self.MyArray) + self.add_method(function, self.MyArray) np_func = getattr(np, function) - my_func = getattr(TestArrayLike.MyArray, function) + my_func = getattr(self.MyArray, function) if numpy_ref is True: ref = np.array(1) else: - ref = TestArrayLike.MyArray.array() + ref = self.MyArray.array() like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) @@ -523,20 +525,20 @@ def test_array_like(self, function, args, kwargs, numpy_ref): assert_equal(array_like, np_arr) else: - assert type(array_like) is TestArrayLike.MyArray + assert type(array_like) is self.MyArray assert array_like.function is my_func @pytest.mark.parametrize('function, args, kwargs', _array_tests) - @pytest.mark.parametrize('ref', [1, [1], MyNoArrayFunctionArray]) + @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) @requires_array_function def test_no_array_function_like(self, function, args, kwargs, ref): - TestArrayLike.add_method('array', TestArrayLike.MyNoArrayFunctionArray) - TestArrayLike.add_method(function, TestArrayLike.MyNoArrayFunctionArray) + self.add_method('array', self.MyNoArrayFunctionArray) + self.add_method(function, self.MyNoArrayFunctionArray) np_func = getattr(np, function) # Instantiate ref if it's the 
MyNoArrayFunctionArray class - if ref is TestArrayLike.MyNoArrayFunctionArray: - ref = ref.array() + if ref == "MyNoArrayFunctionArray": + ref = self.MyNoArrayFunctionArray.array() like_args = tuple(a() if callable(a) else a for a in args) @@ -546,13 +548,13 @@ def test_no_array_function_like(self, function, args, kwargs, ref): @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like_fromfile(self, numpy_ref): - TestArrayLike.add_method('array', TestArrayLike.MyArray) - TestArrayLike.add_method("fromfile", TestArrayLike.MyArray) + self.add_method('array', self.MyArray) + self.add_method("fromfile", self.MyArray) if numpy_ref is True: ref = np.array(1) else: - ref = TestArrayLike.MyArray.array() + ref = self.MyArray.array() data = np.random.random(5) @@ -566,18 +568,14 @@ def test_array_like_fromfile(self, numpy_ref): assert_equal(np_res, data) assert_equal(array_like, np_res) else: - assert type(array_like) is TestArrayLike.MyArray - assert array_like.function is TestArrayLike.MyArray.fromfile + assert type(array_like) is self.MyArray + assert array_like.function is self.MyArray.fromfile @requires_array_function def test_exception_handling(self): - TestArrayLike.add_method( - 'array', - TestArrayLike.MyArray, - enable_value_error=True, - ) + self.add_method('array', self.MyArray, enable_value_error=True) - ref = TestArrayLike.MyArray.array() + ref = self.MyArray.array() with assert_raises(ValueError): np.array(1, value_error=True, like=ref) From 6f59235f14e8f3f6f5c03d556d2bc7c03ef6e793 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 8 Dec 2020 13:32:59 -0600 Subject: [PATCH 0154/1270] TST: Do not disable errors globally in ma/timer_comparison.py This script is meant to be run stand-alone (and should probably just not be at this place), but moving these down is cleaner in any case. The issue is that our "public API" tests import the module and then modify the global state. --- numpy/ma/timer_comparison.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py index aff72f79b8d2..9eb1a23cd693 100644 --- a/numpy/ma/timer_comparison.py +++ b/numpy/ma/timer_comparison.py @@ -7,12 +7,9 @@ from numpy.testing import build_err_msg -# Fixme: this does not look right. -np.seterr(all='ignore') pi = np.pi - class ModuleTester: def __init__(self, module): self.module = module @@ -111,6 +108,7 @@ def assert_array_equal(self, x, y, err_msg=''): self.assert_array_compare(self.equal, x, y, err_msg=err_msg, header='Arrays are not equal') + @np.errstate(all='ignore') def test_0(self): """ Tests creation @@ -121,6 +119,7 @@ def test_0(self): xm = self.masked_array(x, mask=m) xm[0] + @np.errstate(all='ignore') def test_1(self): """ Tests creation @@ -148,6 +147,7 @@ def test_1(self): xf.shape = s assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + @np.errstate(all='ignore') def test_2(self): """ Tests conversions and indexing. @@ -190,6 +190,7 @@ def test_2(self): m3 = self.make_mask(m, copy=1) assert(m is not m3) + @np.errstate(all='ignore') def test_3(self): """ Tests resize/repeat @@ -209,6 +210,7 @@ def test_3(self): y8 = x4.repeat(2, 0) assert self.allequal(y5, y8) + @np.errstate(all='ignore') def test_4(self): """ Test of take, transpose, inner, outer products. @@ -232,6 +234,7 @@ def test_4(self): assert t[1] == 2 assert t[2] == 3 + @np.errstate(all='ignore') def test_5(self): """ Tests inplace w/ scalar @@ -284,6 +287,7 @@ def test_5(self): x += 1. assert self.allequal(x, y + 1.) 
+ @np.errstate(all='ignore') def test_6(self): """ Tests inplace w/ array @@ -335,6 +339,7 @@ def test_6(self): x /= a xm /= a + @np.errstate(all='ignore') def test_7(self): "Tests ufunc" d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), @@ -369,6 +374,7 @@ def test_7(self): self.assert_array_equal(ur.filled(0), mr.filled(0), f) self.assert_array_equal(ur._mask, mr._mask) + @np.errstate(all='ignore') def test_99(self): # test average ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) @@ -414,6 +420,7 @@ def test_99(self): self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) + @np.errstate(all='ignore') def test_A(self): x = self.arange(24) x[5:6] = self.masked From 9ba3d421c62eac5cd73fa135e6d93ce892b41be1 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 8 Dec 2020 17:49:28 +0200 Subject: [PATCH 0155/1270] TST, BUILD: Add a native x86 baseline build running on ubuntu-20.04 to build and test NumPy against the maximum supported CPU features by the compiler and the running machine --- azure-pipelines.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f8773dc36abc..5f7afdaaff36 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -258,3 +258,28 @@ stages: failTaskOnFailedTests: true testRunTitle: 'Publish test results for gcc 4.8' + # Native build is based on gcc flag `-march=native` + - job: Linux_baseline_native + pool: + vmImage: 'ubuntu-20.04' + steps: + - script: | + if ! `gcc 2>/dev/null`; then + sudo apt install gcc + fi + sudo apt install python3 + sudo apt install python3-dev + # python3 has no setuptools, so install one to get us going + python3 -m pip install --user --upgrade pip 'setuptools<49.2.0' + python3 -m pip install --user -r test_requirements.txt + displayName: 'install python/requirements' + - script: | + python3 runtests.py --show-build-log --cpu-baseline=native --cpu-dispatch=none \ + --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml + displayName: 'Run native baseline Build / Tests' + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testResultsFiles: '**/test-*.xml' + failTaskOnFailedTests: true + testRunTitle: 'Publish test results for baseline/native' From 3df3da1420418a41884be39a71c31a873d765800 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 8 Dec 2020 21:05:41 +0200 Subject: [PATCH 0156/1270] TST: Fix passing build options when `--debug-info` is enabled --- runtests.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/runtests.py b/runtests.py index 87e26768b351..bad93f53a26c 100755 --- a/runtests.py +++ b/runtests.py @@ -432,8 +432,6 @@ def build_project(args): cmd += ["build"] if args.parallel > 1: cmd += ["-j", str(args.parallel)] - if args.debug_info: - cmd += ["build_src", "--verbose-cfg"] if args.warn_error: cmd += ["--warn-error"] if args.cpu_baseline: @@ -444,6 +442,8 @@ def build_project(args): cmd += ["--disable-optimization"] if args.simd_test is not None: cmd += ["--simd-test", args.simd_test] + if args.debug_info: + cmd += ["build_src", "--verbose-cfg"] # Install; avoid producing eggs so numpy can be imported from dst_dir. 
cmd += ['install', '--prefix=' + dst_dir, '--single-version-externally-managed', @@ -538,7 +538,7 @@ def asv_clear_cache(bench_path, h_commits, env_dir="env"): for asv_build_cache in glob.glob(asv_build_pattern, recursive=True): for c in h_commits: try: shutil.rmtree(os.path.join(asv_build_cache, c)) - except OSError: pass + except OSError: pass def asv_substitute_config(in_config, out_config, **custom_vars): """ From 4fd3d905ea374896b5d91f77a7a4044a200455eb Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 9 Dec 2020 16:04:09 +0800 Subject: [PATCH 0157/1270] use splited loops and add unit test for new intrinsics --- numpy/core/src/_simd/_simd.dispatch.c.src | 16 +++- numpy/core/src/common/simd/avx2/conversion.h | 18 ++-- .../core/src/common/simd/avx512/conversion.h | 18 ++-- numpy/core/src/common/simd/neon/arithmetic.h | 13 +-- numpy/core/src/common/simd/neon/conversion.h | 16 ++-- numpy/core/src/common/simd/sse/conversion.h | 16 ++-- numpy/core/src/common/simd/vsx/conversion.h | 15 ++-- numpy/core/src/multiarray/item_selection.c | 86 ++++++++++++------- numpy/core/tests/test_simd.py | 19 +++- 9 files changed, 140 insertions(+), 77 deletions(-) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index 18c38387165c..0617c76d7fc6 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -15,13 +15,15 @@ /**begin repeat * #sfx = u8, s8, u16, s16, u32, s32, u64, s64, f32, f64# * #bsfx = b8, b8, b16, b16, b32, b32, b64, b64, b32, b64# + * #esfx = u16, s8, u32, s16, u32, s32, u64, s64, f32, f64# + * #expand_sup =1, 0, 1, 0, 0, 0, 0, 0, 0, 0# * #simd_sup = 1, 1, 1, 1, 1, 1, 1, 1, 1, NPY_SIMD_F64# * #fp_only = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #sat_sup = 1, 1, 1, 1, 0, 0, 0, 0, 0, 0# * #mul_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 1# * #div_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #fused_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# - * #sum_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# + * #sum_sup = 0, 0, 0, 0, 1, 0, 0, 0, 1, 1# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# * #shr_imm = 0, 0, 16, 16, 32, 32, 64, 64, 0, 0# @@ -319,7 +321,9 @@ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@bsfx@, v@sfx@, v@sfx@) ***************************/ SIMD_IMPL_INTRIN_1(cvt_@sfx@_@bsfx@, v@sfx@, v@bsfx@) SIMD_IMPL_INTRIN_1(cvt_@bsfx@_@sfx@, v@bsfx@, v@sfx@) - +#if @expand_sup@ +SIMD_IMPL_INTRIN_1(expand_@sfx@_@esfx@, v@esfx@x2, v@sfx@) +#endif // expand_sup /*************************** * Arithmetic ***************************/ @@ -382,13 +386,15 @@ static PyMethodDef simd__intrinsics_methods[] = { /**begin repeat * #sfx = u8, s8, u16, s16, u32, s32, u64, s64, f32, f64# * #bsfx = b8, b8, b16, b16, b32, b32, b64, b64, b32, b64# + * #esfx = u16, s8, u32, s16, u32, s32, u64, s64, f32, f64# + * #expand_sup =1, 0, 1, 0, 0, 0, 0, 0, 0, 0# * #simd_sup = 1, 1, 1, 1, 1, 1, 1, 1, 1, NPY_SIMD_F64# * #fp_only = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #sat_sup = 1, 1, 1, 1, 0, 0, 0, 0, 0, 0# * #mul_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 1# * #div_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #fused_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# - * #sum_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# + * #sum_sup = 0, 0, 0, 0, 1, 0, 0, 0, 1, 1# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# * #shr_imm = 0, 0, 16, 16, 32, 32, 64, 64, 0, 0# @@ -469,7 +475,9 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@) ***************************/ SIMD_INTRIN_DEF(cvt_@sfx@_@bsfx@) SIMD_INTRIN_DEF(cvt_@bsfx@_@sfx@) - +#if @expand_sup@ 
+SIMD_INTRIN_DEF(expand_@sfx@_@esfx@) +#endif // expand_sup /*************************** * Arithmetic ***************************/ diff --git a/numpy/core/src/common/simd/avx2/conversion.h b/numpy/core/src/common/simd/avx2/conversion.h index dd5594d481e3..55544039d4d4 100644 --- a/numpy/core/src/common/simd/avx2/conversion.h +++ b/numpy/core/src/common/simd/avx2/conversion.h @@ -30,16 +30,18 @@ #define npyv_cvt_b64_f64(BL) _mm256_castpd_si256(BL) // expand -NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { - const __m256i z = _mm256_setzero_si256(); - *low = _mm256_unpacklo_epi8(data, z); - *high = _mm256_unpackhi_epi8(data, z); +NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { + npyv_u16x2 r; + r.val[0] = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(data)); + r.val[1] = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(data, 1)); + return r; } -NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { - const __m256i z = _mm256_setzero_si256(); - *low = _mm256_unpacklo_epi16(data, z); - *high = _mm256_unpackhi_epi16(data, z); +NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { + npyv_u32x2 r; + r.val[0] = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(data)); + r.val[1] = _mm256_cvtepu16_epi32(_mm256_extracti128_si256(data, 1)); + return r; } #endif // _NPY_SIMD_AVX2_CVT_H diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index 80ff970b0c4a..f06c752ce8ca 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -52,16 +52,18 @@ #define npyv_cvt_b64_f64(A) npyv_cvt_b64_u64(_mm512_castpd_si512(A)) // expand -NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { - const __m512i z = _mm512_setzero_si512(); - *low = _mm512_unpacklo_epi8(data, z); - *high = _mm512_unpackhi_epi8(data, z); +NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { + npyv_u16x2 r; + r.val[0] = _mm512_cvtepu8_epi16(_mm512_castsi512_si256(data)); + r.val[1] = _mm512_cvtepu8_epi16(_mm512_extracti32x8_epi32(data, 1)); + return r; } -NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { - const __m512i z = _mm512_setzero_si512(); - *low = _mm512_unpacklo_epi16(data, z); - *high = _mm512_unpackhi_epi16(data, z); +NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { + npyv_u16x2 r; + r.val[0] = _mm512_cvtepu16_epi32(_mm512_castsi512_si256(data)); + r.val[1] = _mm512_cvtepu16_epi32(_mm512_extracti32x8_epi32(data, 1)); + return r; } #endif // _NPY_SIMD_AVX512_CVT_H diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h index 88709be9965b..e34d25d28994 100644 --- a/numpy/core/src/common/simd/neon/arithmetic.h +++ b/numpy/core/src/common/simd/neon/arithmetic.h @@ -132,16 +132,17 @@ #endif // NPY_SIMD_F64 // Horizontal add: Calculates the sum of all vector elements. 
-NPY_FINLINE int npyv_sum_u32(npyv_u32 a) -{ - uint32x2_t a0 = vpadd_u32(vget_low_u32(a), vget_high_u32(a)); - return (unsigned)vget_lane_u32(vpadd_u32(a0, vget_high_u32(a)),0); -} - #if NPY_SIMD_F64 + #define npyv_sum_u32 vaddvq_u32 #define npyv_sum_f32 vaddvq_f32 #define npyv_sum_f64 vaddvq_f64 #else + NPY_FINLINE int npyv_sum_u32(npyv_u32 a) + { + uint32x2_t a0 = vpadd_u32(vget_low_u32(a), vget_high_u32(a)); + return (unsigned)vget_lane_u32(vpadd_u32(a0, vget_high_u32(a)),0); + } + NPY_FINLINE float npyv_sum_f32(npyv_f32 a) { float32x2_t r = vadd_f32(vget_high_f32(a), vget_low_f32(a)); diff --git a/numpy/core/src/common/simd/neon/conversion.h b/numpy/core/src/common/simd/neon/conversion.h index 3d9e47f603b8..bddcb4e6eca5 100644 --- a/numpy/core/src/common/simd/neon/conversion.h +++ b/numpy/core/src/common/simd/neon/conversion.h @@ -29,14 +29,18 @@ #define npyv_cvt_b32_f32(BL) vreinterpretq_u32_f32(BL) #define npyv_cvt_b64_f64(BL) vreinterpretq_u64_f64(BL) -NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { - *low = vmovl_u8(vget_low_u8(data)); - *high = vmovl_u8(vget_high_u8(data)); +NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { + npyv_u16x2 r; + r.val[0] = vmovl_u8(vget_low_u8(data)); + r.val[1] = vmovl_u8(vget_high_u8(data)); + return r; } -NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { - *low = vmovl_u16(vget_low_u16(data)); - *high = vmovl_u16(vget_high_u16(data)); +NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { + npyv_u32x2 r; + r.val[0] = vmovl_u16(vget_low_u16(data)); + r.val[1] = vmovl_u16(vget_high_u16(data)); + return r; } #endif // _NPY_SIMD_NEON_CVT_H diff --git a/numpy/core/src/common/simd/sse/conversion.h b/numpy/core/src/common/simd/sse/conversion.h index 23d1e1bc6b4d..db1bd4bc4e25 100644 --- a/numpy/core/src/common/simd/sse/conversion.h +++ b/numpy/core/src/common/simd/sse/conversion.h @@ -30,16 +30,20 @@ #define npyv_cvt_b64_f64(A) _mm_castpd_si128(A) // expand -NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { +NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { + npyv_u16x2 r; const __m128i z = _mm_setzero_si128(); - *low = _mm_unpacklo_epi8(data, z); - *high = _mm_unpackhi_epi8(data, z); + r.val[0] = _mm_unpacklo_epi8(data, z); + r.val[1] = _mm_unpackhi_epi8(data, z); + return r; } -NPY_FINLINE void npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { +NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { + npyv_u32x2 r; const __m128i z = _mm_setzero_si128(); - *low = _mm_unpacklo_epi16(data, z); - *high = _mm_unpackhi_epi16(data, z); + r.val[0] = _mm_unpacklo_epi16(data, z); + r.val[1] = _mm_unpackhi_epi16(data, z); + return r; } #endif // _NPY_SIMD_SSE_CVT_H diff --git a/numpy/core/src/common/simd/vsx/conversion.h b/numpy/core/src/common/simd/vsx/conversion.h index 7e9413c9678b..b7c97ba09442 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -29,16 +29,19 @@ #define npyv_cvt_b32_f32(A) ((npyv_b32) A) #define npyv_cvt_b64_f64(A) ((npyv_b64) A) -NPY_FINLINE void npyv_expand_u8_u16(npyv_u8 data, npyv_u16 *low, npyv_u16 *high) { +NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { + npyv_u16x2 r; npyv_u8 zero = npyv_zero_u8(); - *low = (npyv_u16)vec_mergel(data, zero); - *high = (npyv_u16)vec_mergeh(data, zero); + r.val[0] = (npyv_u16)vec_mergel(data, zero); + r.val[1] = (npyv_u16)vec_mergeh(data, zero); + return r; } -NPY_FINLINE void 
npyv_expand_u16_u32(npyv_u16 data, npyv_u32 *low, npyv_u32 *high) { +NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { + npyv_u32x2 r; npyv_u16 zero = npyv_zero_u16(); - *low = (npyv_u32)vec_mergel(data, zero); - *high = (npyv_u32)vec_mergeh(data, zero); + r.val[0] = (npyv_u32)vec_mergel(data, zero); + r.val[1] = (npyv_u32)vec_mergeh(data, zero); } #endif // _NPY_SIMD_VSX_CVT_H diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 9f1b6ddee21e..1a69efe1f5e1 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2126,44 +2126,66 @@ count_nonzero_bytes_384(const npy_uint64 * w) } #if NPY_SIMD -static NPY_INLINE npy_intp -count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) + +/* Count the zero bytes between `*d` and `end`, updating `*d` to point to where to keep counting from. */ +static NPY_INLINE npyv_u8 +count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end) { - int count = 0; - int i = 0; - - const int vstep = npyv_nlanes_u8; const npyv_u8 vone = npyv_setall_u8(1); const npyv_u8 vzero = npyv_setall_u8(0); - npyv_b8 vt; + + npy_intp n = 0; + npyv_u8 vsum8 = npyv_zero_u8(); + while (*d < end && n <= 0xFE) { + npyv_u8 vt = npyv_cvt_u8_b8(npyv_cmpeq_u8(npyv_load_u8(*d), vzero)); + vt = npyv_and_u8(vt, vone); + vsum8 = npyv_add_u8(vsum8, vt); + *d += npyv_nlanes_u8; + n++; + } + return vsum8; +} + +static NPY_INLINE npyv_u16 +count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end) +{ + npyv_u16 vsum16 = npyv_zero_u16(); + npy_intp n = 0; + while (*d < end && n <= 0xFF00) { + npyv_u8 vsum8 = count_zero_bytes_u8(d, end); + npyv_u16x2 part = npyv_expand_u8_u16(vsum8); + vsum16 = npyv_add_u16(vsum16, npyv_add_u16(part.val[0], part.val[1])); + n += 0xFF; + } + return vsum16; +} + +static NPY_INLINE npyv_u32 +count_zero_bytes_u32(const npy_uint8 **d, const npy_uint8 *end) +{ npyv_u32 vsum32 = npyv_zero_u32(); - while (i < unrollx) - { - npyv_u16 vsum16 = npyv_zero_u16(); - int j = i; - while (j < PyArray_MIN(unrollx, i + 0xFF00 * npyv_nlanes_u16)) - { - int k = j; - npyv_u8 vsum8 = npyv_zero_u8(); - for (; k < PyArray_MIN(unrollx, j + 0xFF * vstep); k += vstep) - { - vt = npyv_cmpeq_u8(npyv_load_u8(d + k), vzero); - vt = npyv_and_u8(vt, vone); - vsum8 = npyv_add_u8(vsum8, vt); - } - npyv_u16 part1, part2; - npyv_expand_u8_u16(vsum8, &part1, &part2); - vsum16 = npyv_add_u16(vsum16, npyv_add_u16(part1, part2)); - j = k; - } - npyv_u32 part1, part2; - npyv_expand_u16_u32(vsum16, &part1, &part2); - vsum32 = npyv_add_u32(vsum32, npyv_add_u32(part1, part2)); - i = j; + npy_intp n = 0; + while (*d < end && n <= 0xFFFF0000) { + npyv_u16 vsum16 = count_zero_bytes_u16(d, end); + npyv_u32x2 part = npyv_expand_u16_u32(vsum16); + vsum32 = npyv_add_u32(vsum32, npyv_add_u32(part.val[0], part.val[1])); + n += 0xFFFF; + } + return vsum32; +} + +static NPY_INLINE npy_intp +count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) +{ + npy_intp zero_count = 0; + const npy_uint8 *end = d + unrollx; + while (d < end) { + npyv_u32 vsum32 = count_zero_bytes_u32(&d, end); + zero_count += npyv_sum_u32(vsum32); } - count = i - npyv_sum_u32(vsum32); - return count; + return unrollx - zero_count; } + #endif /* * Counts the number of True values in a raw boolean array. 
This diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 13e8d5ede460..354bc0646d96 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -540,6 +540,23 @@ def test_conversion_boolean(self): true_vsfx = from_boolean(true_vb) assert false_vsfx != true_vsfx + def test_conversion_expand(self): + if self.sfx == "u8": + totype = "u16" + elif self.sfx == "u16": + totype = "u32" + else: + return + data = self._data() + expand = getattr(self.npyv, "expand_%s_%s" % (self.sfx, totype)) + vdata = self.load(data) + edata = expand(vdata) + # lower half part + data_lo = data[:self.nlanes//2] + # higher half part + data_hi = data[self.nlanes//2:] + assert edata == (data_lo, data_hi) + def test_arithmetic_subadd(self): if self._is_fp(): data_a = self._data() @@ -584,7 +601,7 @@ def test_arithmetic_div(self): assert div == data_div def test_arithmetic_reduce_sum(self): - if not self._is_fp(): + if self.sfx not in ("u32", "f32", "f64"): return # reduce sum data = self._data() From dfbb3f76c4e9252be597107b6e9fc04adc9e8855 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 9 Dec 2020 16:06:01 +0800 Subject: [PATCH 0158/1270] force avx2 in order to test the performance. --- numpy/core/src/common/simd/simd.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 8804223c9fef..8b93dbfba2b8 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -11,6 +11,12 @@ #include "npy_cpu_dispatch.h" #include "simd_utils.h" +#ifndef NPY_HAVE_AVX2 + #include + #define NPY_HAVE_AVX + #define NPY_HAVE_AVX2 +#endif + #ifdef __cplusplus extern "C" { #endif From ea92ca96a2eefc25e3cefc3568d393972d2c323b Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 9 Dec 2020 16:19:54 +0800 Subject: [PATCH 0159/1270] remove force avx2 --- numpy/core/src/common/simd/simd.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 8b93dbfba2b8..8804223c9fef 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -11,12 +11,6 @@ #include "npy_cpu_dispatch.h" #include "simd_utils.h" -#ifndef NPY_HAVE_AVX2 - #include - #define NPY_HAVE_AVX - #define NPY_HAVE_AVX2 -#endif - #ifdef __cplusplus extern "C" { #endif From c0bc4dd3633e0fe6db8c4eb351da15065500020e Mon Sep 17 00:00:00 2001 From: Maia Kaplan Date: Wed, 9 Dec 2020 08:10:44 -0800 Subject: [PATCH 0160/1270] Add missing decref to arange, refs 17878 --- numpy/core/src/multiarray/multiarraymodule.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 196dd0e3b8f3..b41cd701a40b 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -2944,6 +2944,7 @@ array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { if (args == NULL || PyTuple_GET_SIZE(args) == 0){ PyErr_SetString(PyExc_TypeError, "arange() requires stop to be specified."); + Py_XDECREF(typecode); return NULL; } } From fbdda8f24924ff63b5abf2782e85206dc34405a7 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 9 Dec 2020 17:20:45 +0100 Subject: [PATCH 0161/1270] ENH: Replace `dtype` with the `_DType` typevar --- numpy/__init__.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9f3ba34006db..ff275fff28b0 100644 --- 
a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -869,7 +869,7 @@ class dtype(Generic[_DTypeScalar]): @property def alignment(self) -> int: ... @property - def base(self) -> dtype: ... + def base(self: _DType) -> _DType: ... @property def byteorder(self) -> str: ... @property @@ -905,14 +905,14 @@ class dtype(Generic[_DTypeScalar]): @property def ndim(self) -> int: ... @property - def subdtype(self) -> Optional[Tuple[dtype, _Shape]]: ... - def newbyteorder(self, __new_order: _ByteOrder = ...) -> dtype: ... + def subdtype(self: _DType) -> Optional[Tuple[_DType, _Shape]]: ... + def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ... # Leave str and type for end to avoid having to use `builtins.str` # everywhere. See https://github.com/python/mypy/issues/3775 @property def str(self) -> builtins.str: ... @property - def type(self) -> Type[generic]: ... + def type(self) -> Type[_DTypeScalar]: ... class _flagsobj: aligned: bool From c9dc6784e67c28e56317e40b3d192bc2d2345ce4 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 9 Dec 2020 17:21:40 +0100 Subject: [PATCH 0162/1270] MAINT: Use `dtype[Any]` instead of `dtype` --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ff275fff28b0..1247c79970a7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -879,7 +879,7 @@ class dtype(Generic[_DTypeScalar]): @property def fields( self, - ) -> Optional[Mapping[str, Union[Tuple[dtype, int], Tuple[dtype, int, Any]]]]: ... + ) -> Optional[Mapping[str, Union[Tuple[dtype[Any], int], Tuple[dtype[Any], int, Any]]]]: ... @property def flags(self) -> int: ... @property From ab4252d1ea4bca1d881a44376a693ca8c5a75c1b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 9 Dec 2020 17:30:55 +0100 Subject: [PATCH 0163/1270] TST: Add new tests for `dtype` methods --- numpy/typing/tests/data/pass/dtype.py | 8 ++++++++ numpy/typing/tests/data/reveal/dtype.py | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/numpy/typing/tests/data/pass/dtype.py b/numpy/typing/tests/data/pass/dtype.py index cbae8c078551..a97edc302582 100644 --- a/numpy/typing/tests/data/pass/dtype.py +++ b/numpy/typing/tests/data/pass/dtype.py @@ -1,5 +1,7 @@ import numpy as np +dtype_obj = np.dtype(np.str_) + np.dtype(dtype=np.int64) np.dtype(int) np.dtype("int") @@ -33,3 +35,9 @@ class Test: np.dtype(Test()) + +# Methods and attributes +dtype_obj.base +dtype_obj.subdtype +dtype_obj.newbyteorder() +dtype_obj.type diff --git a/numpy/typing/tests/data/reveal/dtype.py b/numpy/typing/tests/data/reveal/dtype.py index d414f2c4934f..626a15270a25 100644 --- a/numpy/typing/tests/data/reveal/dtype.py +++ b/numpy/typing/tests/data/reveal/dtype.py @@ -1,5 +1,7 @@ import numpy as np +dtype_obj: np.dtype[np.str_] + reveal_type(np.dtype(np.float64)) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(np.dtype(np.int64)) # E: numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] @@ -31,3 +33,9 @@ # Void reveal_type(np.dtype(("U", 10))) # E: numpy.dtype[numpy.void] + +# Methods and attributes +reveal_type(dtype_obj.base) # E: numpy.dtype[numpy.str_] +reveal_type(dtype_obj.subdtype) # E: Union[Tuple[numpy.dtype[numpy.str_], builtins.tuple[builtins.int]], None] +reveal_type(dtype_obj.newbyteorder()) # E: numpy.dtype[numpy.str_] +reveal_type(dtype_obj.type) # E: Type[numpy.str_] From 6edf451f7fe0e3a77e545733d119d2a03754ee5b Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 9 Dec 2020 21:08:06 +0200 Subject: [PATCH 0164/1270] BUG, SIMD: 
Fix directive check for AVX512BW of intrinsics npyv_tobits_* --- numpy/core/src/common/simd/avx512/conversion.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index bd92abccd44c..6ad299dd51da 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -56,7 +56,7 @@ NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) { #ifdef NPY_HAVE_AVX512BW_MASK return (npy_uint64)_cvtmask64_u64(a); -#elif NPY_HAVE_AVX512BW +#elif defined(NPY_HAVE_AVX512BW) return (npy_uint64)a; #else int mask_lo = _mm256_movemask_epi8(npyv512_lower_si256(a)); @@ -68,7 +68,7 @@ NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) { #ifdef NPY_HAVE_AVX512BW_MASK return (npy_uint32)_cvtmask32_u32(a); -#elif NPY_HAVE_AVX512BW +#elif defined(NPY_HAVE_AVX512BW) return (npy_uint32)a; #else __m256i pack = _mm256_packs_epi16( From 32465aa8fc2ce2c92b5c9265b8cb2737cb11b2a1 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 10 Dec 2020 09:46:09 +0800 Subject: [PATCH 0165/1270] add non-AVX512BW intrinsics --- numpy/core/src/_simd/_simd.dispatch.c.src | 4 ++-- numpy/core/src/common/simd/avx2/conversion.h | 4 ++-- numpy/core/src/common/simd/avx512/conversion.h | 18 +++++++++++++++--- numpy/core/src/common/simd/neon/conversion.h | 4 ++-- numpy/core/src/common/simd/sse/conversion.h | 4 ++-- numpy/core/src/common/simd/vsx/conversion.h | 4 ++-- numpy/core/src/multiarray/item_selection.c | 4 ++-- numpy/core/tests/test_simd.py | 2 +- 8 files changed, 28 insertions(+), 16 deletions(-) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index 0617c76d7fc6..f314feb7d8f4 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -322,7 +322,7 @@ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@bsfx@, v@sfx@, v@sfx@) SIMD_IMPL_INTRIN_1(cvt_@sfx@_@bsfx@, v@sfx@, v@bsfx@) SIMD_IMPL_INTRIN_1(cvt_@bsfx@_@sfx@, v@bsfx@, v@sfx@) #if @expand_sup@ -SIMD_IMPL_INTRIN_1(expand_@sfx@_@esfx@, v@esfx@x2, v@sfx@) +SIMD_IMPL_INTRIN_1(expand_@esfx@_@sfx@, v@esfx@x2, v@sfx@) #endif // expand_sup /*************************** * Arithmetic @@ -476,7 +476,7 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@) SIMD_INTRIN_DEF(cvt_@sfx@_@bsfx@) SIMD_INTRIN_DEF(cvt_@bsfx@_@sfx@) #if @expand_sup@ -SIMD_INTRIN_DEF(expand_@sfx@_@esfx@) +SIMD_INTRIN_DEF(expand_@esfx@_@sfx@) #endif // expand_sup /*************************** * Arithmetic diff --git a/numpy/core/src/common/simd/avx2/conversion.h b/numpy/core/src/common/simd/avx2/conversion.h index 55544039d4d4..0f458950bd2f 100644 --- a/numpy/core/src/common/simd/avx2/conversion.h +++ b/numpy/core/src/common/simd/avx2/conversion.h @@ -30,14 +30,14 @@ #define npyv_cvt_b64_f64(BL) _mm256_castpd_si256(BL) // expand -NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { +NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; r.val[0] = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(data)); r.val[1] = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(data, 1)); return r; } -NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { +NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { npyv_u32x2 r; r.val[0] = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(data)); r.val[1] = _mm256_cvtepu16_epi32(_mm256_extracti128_si256(data, 1)); diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index f06c752ce8ca..8e4dbfda8671 100644 ---
a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -52,17 +52,29 @@ #define npyv_cvt_b64_f64(A) npyv_cvt_b64_u64(_mm512_castpd_si512(A)) // expand -NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { +NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; +#ifdef NPY_HAVE_AVX512BW r.val[0] = _mm512_cvtepu8_epi16(_mm512_castsi512_si256(data)); r.val[1] = _mm512_cvtepu8_epi16(_mm512_extracti32x8_epi32(data, 1)); +#else + __m512i zero = npyv_zero_u8(); + r.val[0] = _mm512_unpacklo_epi8(data, zero); + r.val[1] = _mm512_unpackhi_epi8(data, zero); +#endif return r; } -NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { - npyv_u16x2 r; +NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { + npyv_u32x2 r; +#ifdef NPY_HAVE_AVX512BW r.val[0] = _mm512_cvtepu16_epi32(_mm512_castsi512_si256(data)); r.val[1] = _mm512_cvtepu16_epi32(_mm512_extracti32x8_epi32(data, 1)); +#else + __m512i zero = npyv_zero_u16(); + r.val[0] = _mm512_unpacklo_epi16(data, zero); + r.val[1] = _mm512_unpackhi_epi16(data, zero); +#endif return r; } diff --git a/numpy/core/src/common/simd/neon/conversion.h b/numpy/core/src/common/simd/neon/conversion.h index bddcb4e6eca5..6ddbfd21716a 100644 --- a/numpy/core/src/common/simd/neon/conversion.h +++ b/numpy/core/src/common/simd/neon/conversion.h @@ -29,14 +29,14 @@ #define npyv_cvt_b32_f32(BL) vreinterpretq_u32_f32(BL) #define npyv_cvt_b64_f64(BL) vreinterpretq_u64_f64(BL) -NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { +NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; r.val[0] = vmovl_u8(vget_low_u8(data)); r.val[1] = vmovl_u8(vget_high_u8(data)); return r; } -NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { +NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { npyv_u32x2 r; r.val[0] = vmovl_u16(vget_low_u16(data)); r.val[1] = vmovl_u16(vget_high_u16(data)); diff --git a/numpy/core/src/common/simd/sse/conversion.h b/numpy/core/src/common/simd/sse/conversion.h index db1bd4bc4e25..1113fab21db5 100644 --- a/numpy/core/src/common/simd/sse/conversion.h +++ b/numpy/core/src/common/simd/sse/conversion.h @@ -30,7 +30,7 @@ #define npyv_cvt_b64_f64(A) _mm_castpd_si128(A) // expand -NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { +NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; const __m128i z = _mm_setzero_si128(); r.val[0] = _mm_unpacklo_epi8(data, z); @@ -38,7 +38,7 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { return r; } -NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { +NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { npyv_u32x2 r; const __m128i z = _mm_setzero_si128(); r.val[0] = _mm_unpacklo_epi16(data, z); diff --git a/numpy/core/src/common/simd/vsx/conversion.h b/numpy/core/src/common/simd/vsx/conversion.h index b7c97ba09442..447c1911b24e 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -29,7 +29,7 @@ #define npyv_cvt_b32_f32(A) ((npyv_b32) A) #define npyv_cvt_b64_f64(A) ((npyv_b64) A) -NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { +NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; npyv_u8 zero = npyv_zero_u8(); r.val[0] = (npyv_u16)vec_mergel(data, zero); @@ -37,7 +37,7 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u8_u16(npyv_u8 data) { return r; } -NPY_FINLINE npyv_u32x2 npyv_expand_u16_u32(npyv_u16 data) { +NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { npyv_u32x2 r; 
npyv_u16 zero = npyv_zero_u16(); r.val[0] = (npyv_u32)vec_mergel(data, zero); diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 1a69efe1f5e1..40d4df2bdbd8 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2153,7 +2153,7 @@ count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end) npy_intp n = 0; while (*d < end && n <= 0xFF00) { npyv_u8 vsum8 = count_zero_bytes_u8(d, end); - npyv_u16x2 part = npyv_expand_u8_u16(vsum8); + npyv_u16x2 part = npyv_expand_u16_u8(vsum8); vsum16 = npyv_add_u16(vsum16, npyv_add_u16(part.val[0], part.val[1])); n += 0xFF; } @@ -2167,7 +2167,7 @@ count_zero_bytes_u32(const npy_uint8 **d, const npy_uint8 *end) npy_intp n = 0; while (*d < end && n <= 0xFFFF0000) { npyv_u16 vsum16 = count_zero_bytes_u16(d, end); - npyv_u32x2 part = npyv_expand_u16_u32(vsum16); + npyv_u32x2 part = npyv_expand_u32_u16(vsum16); vsum32 = npyv_add_u32(vsum32, npyv_add_u32(part.val[0], part.val[1])); n += 0xFFFF; } diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 354bc0646d96..5156917051f7 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -548,7 +548,7 @@ def test_conversion_expand(self): else: return data = self._data() - expand = getattr(self.npyv, "expand_%s_%s" % (self.sfx, totype)) + expand = getattr(self.npyv, "expand_%s_%s" % (totype, self.sfx)) vdata = self.load(data) edata = expand(vdata) # lower half part From ede6df18eb78b80ac29e24bf576ab5ed58bfe6b6 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 10 Dec 2020 12:06:25 +0800 Subject: [PATCH 0166/1270] Add non-AVX512BW expansion --- .../core/src/common/simd/avx512/conversion.h | 30 ++++++++++++------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index f3800a4c6a51..b3cc3775720d 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -55,13 +55,18 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; + __m256i lo = _mm512_castsi512_si256(data); + __m256i hi = _mm512_extracti32x8_epi32(data, 1); #ifdef NPY_HAVE_AVX512BW - r.val[0] = _mm512_cvtepu8_epi16(_mm512_castsi512_si256(data)); - r.val[1] = _mm512_cvtepu8_epi16(_mm512_extracti32x8_epi32(data, 1)); + r.val[0] = _mm512_cvtepu8_epi16(lo); + r.val[1] = _mm512_cvtepu8_epi16(hi); #else - __m512i zero = npyv_zero_u8(); - r.val[0] = _mm512_unpacklo_epi8(data, zero); - r.val[1] = _mm512_unpackhi_epi8(data, zero); + __m256i loelo = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(lo)); + __m256i loehi = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(lo, 1)); + __m256i hielo = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(hi)); + __m256i hiehi = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(hi, 1)); + r.val[0] = _mm512_inserti64x4(_mm512_castsi256_si512(loelo), loehi, 1); + r.val[1] = _mm512_inserti64x4(_mm512_castsi256_si512(hielo), hiehi, 1); #endif return r; } @@ -69,13 +74,18 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { npyv_u32x2 r; + __m256i lo = _mm512_castsi512_si256(data); + __m256i hi = _mm512_extracti32x8_epi32(data, 1); #ifdef NPY_HAVE_AVX512BW - r.val[0] = _mm512_cvtepu16_epi32(_mm512_castsi512_si256(data)); - r.val[1] = _mm512_cvtepu16_epi32(_mm512_extracti32x8_epi32(data, 1)); + r.val[0] = _mm512_cvtepu16_epi32(lo); + r.val[1] = 
_mm512_cvtepu16_epi32(hi); #else - __m512i zero = npyv_zero_u16(); - r.val[0] = _mm512_unpacklo_epi16(data, zero); - r.val[1] = _mm512_unpackhi_epi16(data, zero); + __m256i loelo = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(lo)); + __m256i loehi = _mm256_cvtepu16_epi32(_mm256_extracti128_si256(lo, 1)); + __m256i hielo = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(hi)); + __m256i hiehi = _mm256_cvtepu16_epi32(_mm256_extracti128_si256(hi, 1)); + r.val[0] = _mm512_inserti64x4(_mm512_castsi256_si512(loelo), loehi, 1); + r.val[1] = _mm512_inserti64x4(_mm512_castsi256_si512(hielo), hiehi, 1); #endif return r; } From 37e429e886efa8eb48f298922a311936a6bff043 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 10 Dec 2020 16:01:33 +0800 Subject: [PATCH 0167/1270] Add non-AVX512DQ intrinsics --- numpy/core/src/common/simd/avx512/conversion.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index b3cc3775720d..5525fd90b18d 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -56,7 +56,11 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; __m256i lo = _mm512_castsi512_si256(data); +#ifdef NPY_HAVE_AVX512DQ __m256i hi = _mm512_extracti32x8_epi32(data, 1); +#else + __m256i hi = _mm512_extracti64x4_epi64(data, 1); +#endif #ifdef NPY_HAVE_AVX512BW r.val[0] = _mm512_cvtepu8_epi16(lo); r.val[1] = _mm512_cvtepu8_epi16(hi); @@ -75,7 +79,11 @@ NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { npyv_u32x2 r; __m256i lo = _mm512_castsi512_si256(data); +#ifdef NPY_HAVE_AVX512DQ __m256i hi = _mm512_extracti32x8_epi32(data, 1); +#else + __m256i hi = _mm512_extracti64x4_epi64(data, 1); +#endif #ifdef NPY_HAVE_AVX512BW r.val[0] = _mm512_cvtepu16_epi32(lo); r.val[1] = _mm512_cvtepu16_epi32(hi); From 0b553c780c6fd70e315debbd7f0356a5f1e81448 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 10 Dec 2020 16:14:03 +0800 Subject: [PATCH 0168/1270] use current intrinsics instead of create new one. 
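"Current intrinsics" here means the 512/256-bit helper wrappers that the AVX-512 utility header already provides: extracting or re-combining a 256-bit half with the 32x8 forms requires AVX512DQ, so the helpers fall back to the AVX512F 64x4 forms. The snippet below is only an approximate sketch of that helper pattern (the exact definitions live in NumPy's AVX-512 utility header; the macro bodies shown are an assumption, not the verbatim source)::

    #define npyv512_lower_si256 _mm512_castsi512_si256
    #ifdef NPY_HAVE_AVX512DQ
        // AVX512DQ provides the 32x8 extract/insert forms
        #define npyv512_higher_si256(A)     _mm512_extracti32x8_epi32(A, 1)
        #define npyv512_combine_si256(A, B) \
            _mm512_inserti32x8(_mm512_castsi256_si512(A), B, 1)
    #else
        // baseline AVX512F only offers the 64x4 forms
        #define npyv512_higher_si256(A)     _mm512_extracti64x4_epi64(A, 1)
        #define npyv512_combine_si256(A, B) \
            _mm512_inserti64x4(_mm512_castsi256_si512(A), B, 1)
    #endif

Keeping the DQ fallback in one place lets npyv_sum_u32 and the expand helpers in the diff below call npyv512_lower_si256/npyv512_higher_si256 directly instead of repeating the #ifdef in every function.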
--- .../core/src/common/simd/avx512/arithmetic.h | 2 +- .../core/src/common/simd/avx512/conversion.h | 24 +++++++------------ 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index e3366a2385ca..4cb222a67def 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -147,7 +147,7 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) NPY_FINLINE int npyv_sum_u32(npyv_u32 a) { - __m256i half = _mm256_add_epi32(_mm512_castsi512_si256(a), _mm512_extracti32x8_epi32(a, 1)); + __m256i half = _mm256_add_epi32(npyv512_lower_si256(a), npyv512_higher_si256(a)); __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); quarter = _mm_hadd_epi32(quarter, quarter); return _mm_cvtsi128_si32(_mm_hadd_epi32(quarter, quarter)); diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index 5525fd90b18d..c5ec3dcd9714 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -55,12 +55,8 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; - __m256i lo = _mm512_castsi512_si256(data); -#ifdef NPY_HAVE_AVX512DQ - __m256i hi = _mm512_extracti32x8_epi32(data, 1); -#else - __m256i hi = _mm512_extracti64x4_epi64(data, 1); -#endif + __m256i lo = npyv512_lower_si256(data); + __m256i hi = npyv512_higher_si256(data, 1); #ifdef NPY_HAVE_AVX512BW r.val[0] = _mm512_cvtepu8_epi16(lo); r.val[1] = _mm512_cvtepu8_epi16(hi); @@ -69,8 +65,8 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) __m256i loehi = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(lo, 1)); __m256i hielo = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(hi)); __m256i hiehi = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(hi, 1)); - r.val[0] = _mm512_inserti64x4(_mm512_castsi256_si512(loelo), loehi, 1); - r.val[1] = _mm512_inserti64x4(_mm512_castsi256_si512(hielo), hiehi, 1); + r.val[0] = npyv512_combine_si256(loelo, loehi); + r.val[1] = npyv512_combine_si256(hielo, hiehi); #endif return r; } @@ -78,12 +74,8 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { npyv_u32x2 r; - __m256i lo = _mm512_castsi512_si256(data); -#ifdef NPY_HAVE_AVX512DQ - __m256i hi = _mm512_extracti32x8_epi32(data, 1); -#else - __m256i hi = _mm512_extracti64x4_epi64(data, 1); -#endif + __m256i lo = npyv512_lower_si256(data); + __m256i hi = npyv512_higher_si256(data, 1); #ifdef NPY_HAVE_AVX512BW r.val[0] = _mm512_cvtepu16_epi32(lo); r.val[1] = _mm512_cvtepu16_epi32(hi); @@ -92,8 +84,8 @@ NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) __m256i loehi = _mm256_cvtepu16_epi32(_mm256_extracti128_si256(lo, 1)); __m256i hielo = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(hi)); __m256i hiehi = _mm256_cvtepu16_epi32(_mm256_extracti128_si256(hi, 1)); - r.val[0] = _mm512_inserti64x4(_mm512_castsi256_si512(loelo), loehi, 1); - r.val[1] = _mm512_inserti64x4(_mm512_castsi256_si512(hielo), hiehi, 1); + r.val[0] = npyv512_combine_si256(loelo, loehi); + r.val[1] = npyv512_combine_si256(hielo, hiehi); #endif return r; } From d328bf3c4682dc9f478741c77ee4871bb847176b Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 10 Dec 2020 16:23:26 +0800 Subject: [PATCH 0169/1270] remove extra input --- numpy/core/src/common/simd/avx512/conversion.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
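For orientation, the reason this series keeps refining the expand intrinsics is the tiered accumulator behind count_nonzero_bytes (rewritten earlier in the series): zero bytes are counted in 8-bit lanes for at most 255 rounds, widened into 16-bit sums, widened again into 32-bit sums, and the zero count is finally subtracted from the number of bytes scanned. A scalar model of that scheme, purely illustrative and not the SIMD source, looks like this::

    #include <stddef.h>
    #include <stdint.h>

    /* Scalar model of the tiered widening; the limits mirror the SIMD code's
     * 0xFE / 0xFF00 / 0xFFFF0000 loop bounds, but the vector lanes are
     * collapsed to plain integers, so this is an illustration only. */
    static size_t
    count_nonzero_bytes_model(const unsigned char *d, size_t len)
    {
        size_t zeros = 0, i = 0;
        while (i < len) {
            uint32_t sum32 = 0;
            while (i < len && sum32 <= 0xFFFF0000u) {
                uint16_t sum16 = 0;
                for (size_t n16 = 0; n16 <= 0xFF00 && i < len; n16 += 0xFF) {
                    uint8_t sum8 = 0;   /* an 8-bit counter holds at most 255 */
                    for (size_t n8 = 0; n8 <= 0xFE && i < len; ++n8, ++i) {
                        sum8 += (d[i] == 0);
                    }
                    sum16 += sum8;      /* widen 8 -> 16 bits before overflow */
                }
                sum32 += sum16;         /* widen 16 -> 32 bits */
            }
            zeros += sum32;
        }
        return len - zeros;             /* non-zero bytes = scanned - zeros */
    }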
diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index c5ec3dcd9714..9bf3115d9534 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -56,7 +56,7 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; __m256i lo = npyv512_lower_si256(data); - __m256i hi = npyv512_higher_si256(data, 1); + __m256i hi = npyv512_higher_si256(data); #ifdef NPY_HAVE_AVX512BW r.val[0] = _mm512_cvtepu8_epi16(lo); r.val[1] = _mm512_cvtepu8_epi16(hi); @@ -75,7 +75,7 @@ NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { npyv_u32x2 r; __m256i lo = npyv512_lower_si256(data); - __m256i hi = npyv512_higher_si256(data, 1); + __m256i hi = npyv512_higher_si256(data); #ifdef NPY_HAVE_AVX512BW r.val[0] = _mm512_cvtepu16_epi32(lo); r.val[1] = _mm512_cvtepu16_epi32(hi); From d06d38c10080ebdbc928e69131116e6a9d45100d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 10 Dec 2020 08:52:35 -0700 Subject: [PATCH 0170/1270] MAINT: Don't include "dirty" in versioneer generated versions. We patch the LICENSE file for both sdist and wheel releases, making them all "dirty", i.e., containing files that have not been committed. Having "dirty" in the product name is bad marketing and the versioneer tool does not have an option or style that will omit that bit of information, so patch the versioneer files to make that tag an empty string. --- numpy/_version.py | 2 +- versioneer.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_version.py b/numpy/_version.py index 73f9f5c7f4be..6605bf4afa36 100644 --- a/numpy/_version.py +++ b/numpy/_version.py @@ -236,7 +236,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", + describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty=", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) diff --git a/versioneer.py b/versioneer.py index 1040c218924c..7a77c5ef7fa1 100644 --- a/versioneer.py +++ b/versioneer.py @@ -647,7 +647,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", + describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty=", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) @@ -1046,7 +1046,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", + describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty=", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) From c6a5c9aaf5a04405246c6a6158ee1e5a8e5d3973 Mon Sep 17 00:00:00 2001 From: Rin Arakaki Date: Fri, 11 Dec 2020 02:10:09 +0900 Subject: [PATCH 0171/1270] Add/remove spaces in snippets and re-format here and there --- doc/source/user/quickstart.rst | 384 ++++++++++++++++----------------- 1 file changed, 184 insertions(+), 200 deletions(-) diff --git a/doc/source/user/quickstart.rst 
b/doc/source/user/quickstart.rst index 8fdc6ec3649f..4d76340385f7 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -53,8 +53,8 @@ axis has a length of 2, the second axis has a length of 3. :: - [[ 1., 0., 0.], - [ 0., 1., 2.]] + [[1., 0., 0.], + [0., 1., 2.]] NumPy's array class is called ``ndarray``. It is also known by the alias ``array``. Note that ``numpy.array`` is not the same as the Standard @@ -128,7 +128,7 @@ from the type of the elements in the sequences. :: >>> import numpy as np - >>> a = np.array([2,3,4]) + >>> a = np.array([2, 3, 4]) >>> a array([2, 3, 4]) >>> a.dtype @@ -142,11 +142,11 @@ rather than providing a single sequence as an argument. :: - >>> a = np.array(1,2,3,4) # WRONG + >>> a = np.array(1, 2, 3, 4) # WRONG Traceback (most recent call last): ... TypeError: array() takes from 1 to 2 positional arguments but 4 were given - >>> a = np.array([1,2,3,4]) # RIGHT + >>> a = np.array([1, 2, 3, 4]) # RIGHT ``array`` transforms sequences of sequences into two-dimensional arrays, sequences of sequences of sequences into three-dimensional arrays, and @@ -154,7 +154,7 @@ so on. :: - >>> b = np.array([(1.5,2,3), (4,5,6)]) + >>> b = np.array([(1.5, 2, 3), (4, 5, 6)]) >>> b array([[1.5, 2. , 3. ], [4. , 5. , 6. ]]) @@ -163,7 +163,7 @@ The type of the array can also be explicitly specified at creation time: :: - >>> c = np.array( [ [1,2], [3,4] ], dtype=complex ) + >>> c = np.array([[1, 2], [3, 4]], dtype=complex) >>> c array([[1.+0.j, 2.+0.j], [3.+0.j, 4.+0.j]]) @@ -185,7 +185,7 @@ state of the memory. By default, the dtype of the created array is array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]) - >>> np.ones( (2,3,4), dtype=np.int16 ) # dtype can also be specified + >>> np.ones((2, 3, 4), dtype=np.int16) # dtype can also be specified array([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], @@ -193,9 +193,9 @@ state of the memory. By default, the dtype of the created array is [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]], dtype=int16) - >>> np.empty( (2,3) ) # uninitialized - array([[ 3.73603959e-262, 6.02658058e-154, 6.55490914e-260], # may vary - [ 5.30498948e-313, 3.14673309e-307, 1.00000000e+000]]) + >>> np.empty((2, 3)) # uninitialized, the result may vary + array([[3.73603959e-262, 6.02658058e-154, 6.55490914e-260], + [5.30498948e-313, 3.14673309e-307, 1.00000000e+000]]) To create sequences of numbers, NumPy provides the ``arange`` function which is analogous to the Python built-in ``range``, but returns an @@ -203,9 +203,9 @@ array. :: - >>> np.arange( 10, 30, 5 ) + >>> np.arange(10, 30, 5) array([10, 15, 20, 25]) - >>> np.arange( 0, 2, 0.3 ) # it accepts float arguments + >>> np.arange(0, 2, 0.3) # it accepts float arguments array([0. , 0.3, 0.6, 0.9, 1.2, 1.5, 1.8]) When ``arange`` is used with floating point arguments, it is generally @@ -215,9 +215,9 @@ to use the function ``linspace`` that receives as an argument the number of elements that we want, instead of the step:: >>> from numpy import pi - >>> np.linspace( 0, 2, 9 ) # 9 numbers from 0 to 2 + >>> np.linspace(0, 2, 9) # 9 numbers from 0 to 2 array([0. , 0.25, 0.5 , 0.75, 1. , 1.25, 1.5 , 1.75, 2. ]) - >>> x = np.linspace( 0, 2*pi, 100 ) # useful to evaluate function at lots of points + >>> x = np.linspace(0, 2 * pi, 100) # useful to evaluate function at lots of points >>> f = np.sin(x) .. seealso:: @@ -251,18 +251,18 @@ matrices and tridimensionals as lists of matrices. 
:: - >>> a = np.arange(6) # 1d array + >>> a = np.arange(6) # 1d array >>> print(a) [0 1 2 3 4 5] - >>> - >>> b = np.arange(12).reshape(4,3) # 2d array + >>> + >>> b = np.arange(12).reshape(4, 3) # 2d array >>> print(b) [[ 0 1 2] [ 3 4 5] [ 6 7 8] [ 9 10 11]] - >>> - >>> c = np.arange(24).reshape(2,3,4) # 3d array + >>> + >>> c = np.arange(24).reshape(2, 3, 4) # 3d array >>> print(c) [[[ 0 1 2 3] [ 4 5 6 7] @@ -280,8 +280,8 @@ central part of the array and only prints the corners:: >>> print(np.arange(10000)) [ 0 1 2 ... 9997 9998 9999] - >>> - >>> print(np.arange(10000).reshape(100,100)) + >>> + >>> print(np.arange(10000).reshape(100, 100)) [[ 0 1 2 ... 97 98 99] [ 100 101 102 ... 197 198 199] [ 200 201 202 ... 297 298 299] @@ -295,7 +295,7 @@ can change the printing options using ``set_printoptions``. :: - >>> np.set_printoptions(threshold=sys.maxsize) # sys module should be imported + >>> np.set_printoptions(threshold=sys.maxsize) # sys module should be imported .. _quickstart.basic-operations: @@ -308,35 +308,35 @@ created and filled with the result. :: - >>> a = np.array( [20,30,40,50] ) - >>> b = np.arange( 4 ) + >>> a = np.array([20, 30, 40, 50]) + >>> b = np.arange(4) >>> b array([0, 1, 2, 3]) - >>> c = a-b + >>> c = a - b >>> c array([20, 29, 38, 47]) - >>> b**2 + >>> b ** 2 array([0, 1, 4, 9]) - >>> 10*np.sin(a) + >>> 10 * np.sin(a) array([ 9.12945251, -9.88031624, 7.4511316 , -2.62374854]) - >>> a<35 + >>> a < 35 array([ True, True, False, False]) Unlike in many matrix languages, the product operator ``*`` operates elementwise in NumPy arrays. The matrix product can be performed using the ``@`` operator (in python >=3.5) or the ``dot`` function or method:: - >>> A = np.array( [[1,1], - ... [0,1]] ) - >>> B = np.array( [[2,0], - ... [3,4]] ) - >>> A * B # elementwise product + >>> A = np.array([[1, 1], + ... [0, 1]]) + >>> B = np.array([[2, 0], + ... [3, 4]]) + >>> A * B # elementwise product array([[2, 0], [0, 4]]) - >>> A @ B # matrix product + >>> A @ B # matrix product array([[5, 4], [3, 4]]) - >>> A.dot(B) # another matrix product + >>> A.dot(B) # another matrix product array([[5, 4], [3, 4]]) @@ -345,9 +345,9 @@ existing array rather than create a new one. :: - >>> rg = np.random.default_rng(1) # create instance of default random number generator - >>> a = np.ones((2,3), dtype=int) - >>> b = rg.random((2,3)) + >>> rg = np.random.default_rng(1) # create instance of default random number generator + >>> a = np.ones((2, 3), dtype=int) + >>> b = rg.random((2, 3)) >>> a *= 3 >>> a array([[3, 3, 3], @@ -356,7 +356,7 @@ existing array rather than create a new one. >>> b array([[3.51182162, 3.9504637 , 3.14415961], [3.94864945, 3.31183145, 3.42332645]]) - >>> a += b # b is not automatically converted to integer type + >>> a += b # b is not automatically converted to integer type Traceback (most recent call last): ... numpy.core._exceptions._UFuncOutputCastingError: Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind' @@ -368,15 +368,15 @@ as upcasting). :: >>> a = np.ones(3, dtype=np.int32) - >>> b = np.linspace(0,pi,3) + >>> b = np.linspace(0, pi, 3) >>> b.dtype.name 'float64' - >>> c = a+b + >>> c = a + b >>> c array([1. , 2.57079633, 4.14159265]) >>> c.dtype.name 'float64' - >>> d = np.exp(c*1j) + >>> d = np.exp(c * 1j) >>> d array([ 0.54030231+0.84147098j, -0.84147098+0.54030231j, -0.54030231-0.84147098j]) @@ -388,7 +388,7 @@ the array, are implemented as methods of the ``ndarray`` class. 
:: - >>> a = rg.random((2,3)) + >>> a = rg.random((2, 3)) >>> a array([[0.82770259, 0.40919914, 0.54959369], [0.02755911, 0.75351311, 0.53814331]]) @@ -404,19 +404,19 @@ of numbers, regardless of its shape. However, by specifying the ``axis`` parameter you can apply an operation along the specified axis of an array:: - >>> b = np.arange(12).reshape(3,4) + >>> b = np.arange(12).reshape(3, 4) >>> b array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> - >>> b.sum(axis=0) # sum of each column + >>> b.sum(axis=0) # sum of each column array([12, 15, 18, 21]) >>> - >>> b.min(axis=1) # min of each row + >>> b.min(axis=1) # min of each row array([0, 4, 8]) >>> - >>> b.cumsum(axis=1) # cumulative sum along each row + >>> b.cumsum(axis=1) # cumulative sum along each row array([[ 0, 1, 3, 6], [ 4, 9, 15, 22], [ 8, 17, 27, 38]]) @@ -427,7 +427,7 @@ Universal Functions NumPy provides familiar mathematical functions such as sin, cos, and exp. In NumPy, these are called "universal -functions"(\ ``ufunc``). Within NumPy, these functions +functions" (\ ``ufunc``). Within NumPy, these functions operate elementwise on an array, producing an array as output. :: @@ -500,22 +500,22 @@ and other Python sequences. :: - >>> a = np.arange(10)**3 + >>> a = np.arange(10) ** 3 >>> a array([ 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]) >>> a[2] 8 >>> a[2:5] array([ 8, 27, 64]) - # equivalent to a[0:6:2] = 1000; - # from start to position 6, exclusive, set every 2nd element to 1000 + >>> # equivalent to a[0:6:2] = 1000; + >>> # from start to position 6, exclusive, set every 2nd element to 1000 >>> a[:6:2] = 1000 >>> a array([1000, 1, 1000, 27, 1000, 125, 216, 343, 512, 729]) - >>> a[ : :-1] # reversed a + >>> a[::-1] # reversed a array([ 729, 512, 343, 216, 125, 1000, 27, 1000, 1, 1000]) >>> for i in a: - ... print(i**(1/3.)) + ... print(i ** (1 / 3.)) ... 9.999999999999998 1.0 @@ -532,23 +532,23 @@ and other Python sequences. **Multidimensional** arrays can have one index per axis. These indices are given in a tuple separated by commas:: - >>> def f(x,y): - ... return 10*x+y + >>> def f(x, y): + ... return 10 * x + y ... - >>> b = np.fromfunction(f,(5,4),dtype=int) + >>> b = np.fromfunction(f, (5, 4), dtype=int) >>> b array([[ 0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]]) - >>> b[2,3] + >>> b[2, 3] 23 - >>> b[0:5, 1] # each row in the second column of b + >>> b[0:5, 1] # each row in the second column of b array([ 1, 11, 21, 31, 41]) - >>> b[ : ,1] # equivalent to the previous example + >>> b[:, 1] # equivalent to the previous example array([ 1, 11, 21, 31, 41]) - >>> b[1:3, : ] # each column in the second and third row of b + >>> b[1:3, :] # each column in the second and third row of b array([[10, 11, 12, 13], [20, 21, 22, 23]]) @@ -557,34 +557,34 @@ indices are considered complete slices\ ``:`` :: - >>> b[-1] # the last row. Equivalent to b[-1,:] + >>> b[-1] # the last row. Equivalent to b[-1, :] array([40, 41, 42, 43]) The expression within brackets in ``b[i]`` is treated as an ``i`` followed by as many instances of ``:`` as needed to represent the remaining axes. NumPy also allows you to write this using dots as -``b[i,...]``. +``b[i, ...]``. The **dots** (``...``) represent as many colons as needed to produce a complete indexing tuple. For example, if ``x`` is an array with 5 axes, then -- ``x[1,2,...]`` is equivalent to ``x[1,2,:,:,:]``, -- ``x[...,3]`` to ``x[:,:,:,:,3]`` and -- ``x[4,...,5,:]`` to ``x[4,:,:,5,:]``. 
+- ``x[1, 2, ...]`` is equivalent to ``x[1, 2, :, :, :]``, +- ``x[..., 3]`` to ``x[:, :, :, :, 3]`` and +- ``x[4, ..., 5, :]`` to ``x[4, :, :, 5, :]``. :: - >>> c = np.array( [[[ 0, 1, 2], # a 3D array (two stacked 2D arrays) - ... [ 10, 12, 13]], - ... [[100,101,102], - ... [110,112,113]]]) + >>> c = np.array([[[ 0, 1, 2], # a 3D array (two stacked 2D arrays) + ... [ 10, 12, 13]], + ... [[100, 101, 102], + ... [110, 112, 113]]]) >>> c.shape (2, 2, 3) - >>> c[1,...] # same as c[1,:,:] or c[1] + >>> c[1, ...] # same as c[1, :, :] or c[1] array([[100, 101, 102], [110, 112, 113]]) - >>> c[...,2] # same as c[:,:,2] + >>> c[..., 2] # same as c[:, :, 2] array([[ 2, 13], [102, 113]]) @@ -647,7 +647,7 @@ Changing the shape of an array An array has a shape given by the number of elements along each axis:: - >>> a = np.floor(10*rg.random((3,4))) + >>> a = np.floor(10 * rg.random((3, 4))) >>> a array([[3., 7., 3., 4.], [1., 4., 2., 2.], @@ -661,7 +661,7 @@ the original array:: >>> a.ravel() # returns the array, flattened array([3., 7., 3., 4., 1., 4., 2., 2., 7., 2., 4., 9.]) - >>> a.reshape(6,2) # returns the array with a modified shape + >>> a.reshape(6, 2) # returns the array with a modified shape array([[3., 7.], [3., 4.], [1., 4.], @@ -678,14 +678,14 @@ the original array:: >>> a.shape (3, 4) -The order of the elements in the array resulting from ravel() is +The order of the elements in the array resulting from ``ravel`` is normally "C-style", that is, the rightmost index "changes the fastest", -so the element after a[0,0] is a[0,1]. If the array is reshaped to some +so the element after ``a[0, 0]`` is ``a[0, 1]``. If the array is reshaped to some other shape, again the array is treated as "C-style". NumPy normally -creates arrays stored in this order, so ravel() will usually not need to +creates arrays stored in this order, so ``ravel`` will usually not need to copy its argument, but if the array was made by taking slices of another array or created with unusual options, it may need to be copied. The -functions ravel() and reshape() can also be instructed, using an +functions ``ravel`` and ``reshape`` can also be instructed, using an optional argument, to use FORTRAN-style arrays, in which the leftmost index changes the fastest. @@ -698,15 +698,15 @@ itself:: array([[3., 7., 3., 4.], [1., 4., 2., 2.], [7., 2., 4., 9.]]) - >>> a.resize((2,6)) + >>> a.resize((2, 6)) >>> a array([[3., 7., 3., 4., 1., 4.], [2., 2., 7., 2., 4., 9.]]) -If a dimension is given as -1 in a reshaping operation, the other +If a dimension is given as ``-1`` in a reshaping operation, the other dimensions are automatically calculated:: - >>> a.reshape(3,-1) + >>> a.reshape(3, -1) array([[3., 7., 3., 4.], [1., 4., 2., 2.], [7., 2., 4., 9.]]) @@ -726,45 +726,44 @@ Stacking together different arrays Several arrays can be stacked together along different axes:: - >>> a = np.floor(10*rg.random((2,2))) + >>> a = np.floor(10 * rg.random((2, 2))) >>> a array([[9., 7.], [5., 2.]]) - >>> b = np.floor(10*rg.random((2,2))) + >>> b = np.floor(10 * rg.random((2, 2))) >>> b array([[1., 9.], [5., 1.]]) - >>> np.vstack((a,b)) + >>> np.vstack((a, b)) array([[9., 7.], [5., 2.], [1., 9.], [5., 1.]]) - >>> np.hstack((a,b)) + >>> np.hstack((a, b)) array([[9., 7., 1., 9.], [5., 2., 5., 1.]]) -The function `column_stack` -stacks 1D arrays as columns into a 2D array. It is equivalent to -`hstack` only for 2D arrays:: +The function `column_stack` stacks 1D arrays as columns into a 2D array. 
+It is equivalent to `hstack` only for 2D arrays:: >>> from numpy import newaxis - >>> np.column_stack((a,b)) # with 2D arrays + >>> np.column_stack((a, b)) # with 2D arrays array([[9., 7., 1., 9.], [5., 2., 5., 1.]]) - >>> a = np.array([4.,2.]) - >>> b = np.array([3.,8.]) - >>> np.column_stack((a,b)) # returns a 2D array + >>> a = np.array([4., 2.]) + >>> b = np.array([3., 8.]) + >>> np.column_stack((a, b)) # returns a 2D array array([[4., 3.], [2., 8.]]) - >>> np.hstack((a,b)) # the result is different + >>> np.hstack((a, b)) # the result is different array([4., 2., 3., 8.]) - >>> a[:,newaxis] # view `a` as a 2D column vector + >>> a[:, newaxis] # view `a` as a 2D column vector array([[4.], [2.]]) - >>> np.column_stack((a[:,newaxis],b[:,newaxis])) + >>> np.column_stack((a[:, newaxis], b[:, newaxis])) array([[4., 3.], [2., 8.]]) - >>> np.hstack((a[:,newaxis],b[:,newaxis])) # the result is the same + >>> np.hstack((a[:, newaxis], b[:, newaxis])) # the result is the same array([[4., 3.], [2., 8.]]) @@ -785,12 +784,10 @@ which the concatenation should happen. **Note** -In complex cases, `r_` and -`c_` are useful for creating arrays -by stacking numbers along one axis. They allow the use of range literals -(":") :: +In complex cases, `r_` and `c_` are useful for creating arrays by stacking +numbers along one axis. They allow the use of range literals ``:``. :: - >>> np.r_[1:4,0,4] + >>> np.r_[1:4, 0, 4] array([1, 2, 3, 0, 4]) When used with arrays as arguments, @@ -818,18 +815,18 @@ array along its horizontal axis, either by specifying the number of equally shaped arrays to return, or by specifying the columns after which the division should occur:: - >>> a = np.floor(10*rg.random((2,12))) + >>> a = np.floor(10 * rg.random((2, 12))) >>> a array([[6., 7., 6., 9., 0., 5., 4., 0., 6., 8., 5., 2.], [8., 5., 5., 7., 1., 8., 6., 7., 1., 8., 1., 0.]]) - # Split a into 3 - >>> np.hsplit(a,3) + >>> # Split `a` into 3 + >>> np.hsplit(a, 3) [array([[6., 7., 6., 9.], [8., 5., 5., 7.]]), array([[0., 5., 4., 0.], [1., 8., 6., 7.]]), array([[6., 8., 5., 2.], [1., 8., 1., 0.]])] - # Split a after the third and the fourth column - >>> np.hsplit(a,(3,4)) + >>> # Split `a` after the third and the fourth column + >>> np.hsplit(a, (3, 4)) [array([[6., 7., 6.], [8., 5., 5.]]), array([[9.], [7.]]), array([[0., 5., 4., 0., 6., 8., 5., 2.], @@ -871,7 +868,7 @@ copy. >>> def f(x): ... print(id(x)) ... - >>> id(a) # id is a unique identifier of an object + >>> id(a) # id is a unique identifier of an object 148293216 # may vary >>> f(a) 148293216 # may vary @@ -887,15 +884,15 @@ creates a new array object that looks at the same data. >>> c = a.view() >>> c is a False - >>> c.base is a # c is a view of the data owned by a + >>> c.base is a # c is a view of the data owned by a True >>> c.flags.owndata False >>> - >>> c = c.reshape((2, 6)) # a's shape doesn't change + >>> c = c.reshape((2, 6)) # a's shape doesn't change >>> a.shape (3, 4) - >>> c[0, 4] = 1234 # a's data changes + >>> c[0, 4] = 1234 # a's data changes >>> a array([[ 0, 1, 2, 3], [1234, 5, 6, 7], @@ -903,8 +900,8 @@ creates a new array object that looks at the same data. Slicing an array returns a view of it:: - >>> s = a[ : , 1:3] # spaces added for clarity; could also be written "s = a[:, 1:3]" - >>> s[:] = 10 # s[:] is a view of s. Note the difference between s = 10 and s[:] = 10 + >>> s = a[:, 1:3] + >>> s[:] = 10 # s[:] is a view of s. 
Note the difference between s = 10 and s[:] = 10 >>> a array([[ 0, 10, 10, 3], [1234, 10, 10, 7], @@ -917,12 +914,12 @@ The ``copy`` method makes a complete copy of the array and its data. :: - >>> d = a.copy() # a new array object with new data is created + >>> d = a.copy() # a new array object with new data is created >>> d is a False - >>> d.base is a # d doesn't share anything with a + >>> d.base is a # d doesn't share anything with a False - >>> d[0,0] = 9999 + >>> d[0, 0] = 9999 >>> a array([[ 0, 10, 10, 3], [1234, 10, 10, 7], @@ -1068,13 +1065,13 @@ Indexing with Arrays of Indices :: - >>> a = np.arange(12)**2 # the first 12 square numbers - >>> i = np.array([1, 1, 3, 8, 5]) # an array of indices - >>> a[i] # the elements of a at the positions i + >>> a = np.arange(12) ** 2 # the first 12 square numbers + >>> i = np.array([1, 1, 3, 8, 5]) # an array of indices + >>> a[i] # the elements of `a` at the positions `i` array([ 1, 1, 9, 64, 25]) - >>> - >>> j = np.array([[3, 4], [9, 7]]) # a bidimensional array of indices - >>> a[j] # the same shape as j + >>> + >>> j = np.array([[3, 4], [9, 7]]) # a bidimensional array of indices + >>> a[j] # the same shape as `j` array([[ 9, 16], [81, 49]]) @@ -1090,9 +1087,9 @@ using a palette. ... [0, 255, 0], # green ... [0, 0, 255], # blue ... [255, 255, 255]]) # white - >>> image = np.array([[0, 1, 2, 0], # each value corresponds to a color in the palette + >>> image = np.array([[0, 1, 2, 0], # each value corresponds to a color in the palette ... [0, 3, 4, 0]]) - >>> palette[image] # the (2, 4, 3) color image + >>> palette[image] # the (2, 4, 3) color image array([[[ 0, 0, 0], [255, 0, 0], [ 0, 255, 0], @@ -1108,25 +1105,25 @@ indices for each dimension must have the same shape. :: - >>> a = np.arange(12).reshape(3,4) + >>> a = np.arange(12).reshape(3, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) - >>> i = np.array([[0, 1], # indices for the first dim of a + >>> i = np.array([[0, 1], # indices for the first dim of `a` ... [1, 2]]) - >>> j = np.array([[2, 1], # indices for the second dim + >>> j = np.array([[2, 1], # indices for the second dim ... [3, 3]]) - >>> - >>> a[i, j] # i and j must have equal shape + >>> + >>> a[i, j] # i and j must have equal shape array([[ 2, 5], [ 7, 11]]) - >>> + >>> >>> a[i, 2] array([[ 2, 6], [ 6, 10]]) - >>> - >>> a[:, j] # i.e., a[ : , j] + >>> + >>> a[:, j] array([[[ 2, 1], [ 3, 3]], @@ -1142,26 +1139,24 @@ put ``i`` and ``j`` in a ``tuple`` and then do the indexing with that. :: >>> l = (i, j) - # equivalent to a[i, j] + >>> # equivalent to a[i, j] >>> a[l] array([[ 2, 5], [ 7, 11]]) However, we can not do this by putting ``i`` and ``j`` into an array, because this array will be interpreted as indexing the first dimension -of a. +of ``a``. :: >>> s = np.array([i, j]) - - # not what we want + >>> # not what we want >>> a[s] Traceback (most recent call last): File "", line 1, in IndexError: index 3 is out of bounds for axis 0 with size 3 - - # same as a[i, j] + >>> # same as `a[i, j]` >>> a[tuple(s)] array([[ 2, 5], [ 7, 11]]) @@ -1169,8 +1164,8 @@ of a. Another common use of indexing with arrays is the search of the maximum value of time-dependent series:: - >>> time = np.linspace(20, 145, 5) # time scale - >>> data = np.sin(np.arange(20)).reshape(5,4) # 4 time-dependent series + >>> time = np.linspace(20, 145, 5) # time scale + >>> data = np.sin(np.arange(20)).reshape(5, 4) # 4 time-dependent series >>> time array([ 20. , 51.25, 82.5 , 113.75, 145. 
]) >>> data @@ -1179,22 +1174,18 @@ value of time-dependent series:: [ 0.98935825, 0.41211849, -0.54402111, -0.99999021], [-0.53657292, 0.42016704, 0.99060736, 0.65028784], [-0.28790332, -0.96139749, -0.75098725, 0.14987721]]) - - # index of the maxima for each series + >>> # index of the maxima for each series >>> ind = data.argmax(axis=0) >>> ind array([2, 0, 3, 1]) - - # times corresponding to the maxima + >>> # times corresponding to the maxima >>> time_max = time[ind] - >>> - >>> data_max = data[ind, range(data.shape[1])] # => data[ind[0],0], data[ind[1],1]... - + >>> + >>> data_max = data[ind, range(data.shape[1])] # => data[ind[0], 0], data[ind[1], 1]... >>> time_max array([ 82.5 , 20. , 113.75, 51.25]) >>> data_max array([0.98935825, 0.84147098, 0.99060736, 0.6569866 ]) - >>> np.all(data_max == data.max(axis=0)) True @@ -1203,7 +1194,7 @@ You can also use indexing with arrays as a target to assign to:: >>> a = np.arange(5) >>> a array([0, 1, 2, 3, 4]) - >>> a[[1,3,4]] = 0 + >>> a[[1, 3, 4]] = 0 >>> a array([0, 0, 2, 0, 0]) @@ -1211,7 +1202,7 @@ However, when the list of indices contains repetitions, the assignment is done several times, leaving behind the last value:: >>> a = np.arange(5) - >>> a[[0,0,2]]=[1,2,3] + >>> a[[0, 0, 2]] = [1, 2, 3] >>> a array([2, 1, 3, 3, 4]) @@ -1219,13 +1210,13 @@ This is reasonable enough, but watch out if you want to use Python's ``+=`` construct, as it may not do what you expect:: >>> a = np.arange(5) - >>> a[[0,0,2]]+=1 + >>> a[[0, 0, 2]] += 1 >>> a array([1, 1, 3, 3, 4]) Even though 0 occurs twice in the list of indices, the 0th element is -only incremented once. This is because Python requires "a+=1" to be -equivalent to "a = a + 1". +only incremented once. This is because Python requires ``a += 1`` to be +equivalent to ``a = a + 1``. Indexing with Boolean Arrays ---------------------------- @@ -1238,18 +1229,18 @@ which ones we don't. The most natural way one can think of for boolean indexing is to use boolean arrays that have *the same shape* as the original array:: - >>> a = np.arange(12).reshape(3,4) + >>> a = np.arange(12).reshape(3, 4) >>> b = a > 4 - >>> b # b is a boolean with a's shape + >>> b # `b` is a boolean with `a`'s shape array([[False, False, False, False], [False, True, True, True], [ True, True, True, True]]) - >>> a[b] # 1d array with the selected elements + >>> a[b] # 1d array with the selected elements array([ 5, 6, 7, 8, 9, 10, 11]) This property can be very useful in assignments:: - >>> a[b] = 0 # All elements of 'a' higher than 4 become 0 + >>> a[b] = 0 # All elements of `a` higher than 4 become 0 >>> a array([[0, 1, 2, 3], [4, 0, 0, 0], @@ -1264,45 +1255,45 @@ set `__: >>> import numpy as np >>> import matplotlib.pyplot as plt - >>> def mandelbrot( h,w, maxit=20 ): + >>> def mandelbrot(h, w, maxit=20): ... """Returns an image of the Mandelbrot fractal of size (h,w).""" - ... y,x = np.ogrid[ -1.4:1.4:h*1j, -2:0.8:w*1j ] - ... c = x+y*1j + ... y, x = np.ogrid[-1.4:1.4:h*1j, -2:0.8:w*1j] + ... c = x + y * 1j ... z = c ... divtime = maxit + np.zeros(z.shape, dtype=int) ... ... for i in range(maxit): - ... z = z**2 + c - ... diverge = z*np.conj(z) > 2**2 # who is diverging - ... div_now = diverge & (divtime==maxit) # who is diverging now - ... divtime[div_now] = i # note when - ... z[diverge] = 2 # avoid diverging too much + ... z = z ** 2 + c + ... diverge = z * np.conj(z) > 2 ** 2 # who is diverging + ... div_now = diverge & (divtime == maxit) # who is diverging now + ... divtime[div_now] = i # note when + ... 
z[diverge] = 2 # avoid diverging too much ... ... return divtime - >>> plt.imshow(mandelbrot(400,400)) + >>> plt.imshow(mandelbrot(400, 400)) The second way of indexing with booleans is more similar to integer indexing; for each dimension of the array we give a 1D boolean array selecting the slices we want:: - >>> a = np.arange(12).reshape(3,4) - >>> b1 = np.array([False,True,True]) # first dim selection - >>> b2 = np.array([True,False,True,False]) # second dim selection - >>> - >>> a[b1,:] # selecting rows + >>> a = np.arange(12).reshape(3, 4) + >>> b1 = np.array([False, True, True]) # first dim selection + >>> b2 = np.array([True, False, True, False]) # second dim selection + >>> + >>> a[b1, :] # selecting rows array([[ 4, 5, 6, 7], [ 8, 9, 10, 11]]) - >>> - >>> a[b1] # same thing + >>> + >>> a[b1] # same thing array([[ 4, 5, 6, 7], [ 8, 9, 10, 11]]) - >>> - >>> a[:,b2] # selecting columns + >>> + >>> a[:, b2] # selecting columns array([[ 0, 2], [ 4, 6], [ 8, 10]]) - >>> - >>> a[b1,b2] # a weird thing to do + >>> + >>> a[b1, b2] # a weird thing to do array([ 4, 10]) Note that the length of the 1D boolean array must coincide with the @@ -1319,10 +1310,10 @@ obtain the result for each n-uplet. For example, if you want to compute all the a+b\*c for all the triplets taken from each of the vectors a, b and c:: - >>> a = np.array([2,3,4,5]) - >>> b = np.array([8,5,4]) - >>> c = np.array([5,4,6,8,3]) - >>> ax,bx,cx = np.ix_(a,b,c) + >>> a = np.array([2, 3, 4, 5]) + >>> b = np.array([8, 5, 4]) + >>> c = np.array([5, 4, 6, 8, 3]) + >>> ax, bx, cx = np.ix_(a, b, c) >>> ax array([[[2]], @@ -1339,7 +1330,7 @@ and c:: array([[[5, 4, 6, 8, 3]]]) >>> ax.shape, bx.shape, cx.shape ((4, 1, 1), (1, 3, 1), (1, 1, 5)) - >>> result = ax+bx*cx + >>> result = ax + bx * cx >>> result array([[[42, 34, 50, 66, 26], [27, 22, 32, 42, 17], @@ -1356,9 +1347,9 @@ and c:: [[45, 37, 53, 69, 29], [30, 25, 35, 45, 20], [25, 21, 29, 37, 17]]]) - >>> result[3,2,4] + >>> result[3, 2, 4] 17 - >>> a[3]+b[2]*c[4] + >>> a[3] + b[2] * c[4] 17 You could also implement the reduce as follows:: @@ -1367,12 +1358,12 @@ You could also implement the reduce as follows:: ... vs = np.ix_(*vectors) ... r = ufct.identity ... for v in vs: - ... r = ufct(r,v) + ... r = ufct(r, v) ... return r and then use it as:: - >>> ufunc_reduce(np.add,a,b,c) + >>> ufunc_reduce(np.add, a, b, c) array([[[15, 14, 16, 18, 13], [12, 11, 13, 15, 10], [11, 10, 12, 14, 9]], @@ -1417,33 +1408,26 @@ See linalg.py in numpy folder for more. >>> print(a) [[1. 2.] [3. 4.]] - >>> a.transpose() array([[1., 3.], [2., 4.]]) - >>> np.linalg.inv(a) array([[-2. , 1. ], [ 1.5, -0.5]]) - - >>> u = np.eye(2) # unit 2x2 matrix; "eye" represents "I" + >>> u = np.eye(2) # unit 2x2 matrix; "eye" represents "I" >>> u array([[1., 0.], [0., 1.]]) >>> j = np.array([[0.0, -1.0], [1.0, 0.0]]) - - >>> j @ j # matrix product + >>> j @ j # matrix product array([[-1., 0.], [ 0., -1.]]) - >>> np.trace(u) # trace 2.0 - >>> y = np.array([[5.], [7.]]) >>> np.linalg.solve(a, y) array([[-3.], [ 4.]]) - >>> np.linalg.eig(j) (array([0.+1.j, 0.-1.j]), array([[0.70710678+0.j , 0.70710678-0.j ], [0. -0.70710678j, 0. +0.70710678j]])) @@ -1455,7 +1439,7 @@ See linalg.py in numpy folder for more. Returns The eigenvalues, each repeated according to its multiplicity. The normalized (unit "length") eigenvectors, such that the - column ``v[:,i]`` is the eigenvector corresponding to the + column ``v[:, i]`` is the eigenvector corresponding to the eigenvalue ``w[i]`` . 
Tricks and Tips @@ -1496,13 +1480,13 @@ functions ``column_stack``, ``dstack``, ``hstack`` and ``vstack``, depending on the dimension in which the stacking is to be done. For example:: - >>> x = np.arange(0,10,2) + >>> x = np.arange(0, 10, 2) >>> y = np.arange(5) - >>> m = np.vstack([x,y]) + >>> m = np.vstack([x, y]) >>> m array([[0, 2, 4, 6, 8], [0, 1, 2, 3, 4]]) - >>> xy = np.hstack([x,y]) + >>> xy = np.hstack([x, y]) >>> xy array([0, 2, 4, 6, 8, 0, 1, 2, 3, 4]) @@ -1530,12 +1514,12 @@ that ``pylab.hist`` plots the histogram automatically, while >>> import matplotlib.pyplot as plt >>> # Build a vector of 10000 normal deviates with variance 0.5^2 and mean 2 >>> mu, sigma = 2, 0.5 - >>> v = rg.normal(mu,sigma,10000) + >>> v = rg.normal(mu, sigma, 10000) >>> # Plot a normalized histogram with 50 bins >>> plt.hist(v, bins=50, density=1) # matplotlib version (plot) >>> # Compute the histogram with numpy and then plot it >>> (n, bins) = np.histogram(v, bins=50, density=True) # NumPy version (no plot) - >>> plt.plot(.5*(bins[1:]+bins[:-1]), n) + >>> plt.plot(.5 * (bins[1:] + bins[:-1]), n) Further reading From fed2509471e77e0357c5252e69fc6159881f6cb2 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 10 Dec 2020 11:58:47 -0700 Subject: [PATCH 0172/1270] BUG: Make sdist use distutils. If sdist uses setuptools there can be problems, see gh-7127. --- setup.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index 4335e417a64f..5e8290a7a91a 100755 --- a/setup.py +++ b/setup.py @@ -60,8 +60,10 @@ os.remove('MANIFEST') # We need to import setuptools here in order for it to persist in sys.modules. -# Its presence/absence is used in subclassing setup in numpy/distutils/core.py, -# which may not be the most robust design. +# Its presence/absence is used in subclassing setup in numpy/distutils/core.py. +# However, we need to run the distutils version of sdist, so import that first +# so that it is in sys.modules +import numpy.distutils.command.sdist import setuptools # Initialize cmdclass from versioneer @@ -158,13 +160,14 @@ def __exit__(self, exception_type, exception_value, traceback): f.write(self.bsd_text) -sdist_class = cmdclass['sdist'] -class sdist_checked(sdist_class): +# Need to inherit from versioneer version of sdist to get the encoded +# version information. +class sdist_checked(cmdclass['sdist']): """ check submodules on sdist to prevent incomplete tarballs """ def run(self): check_submodules() with concat_license_files(): - sdist_class.run(self) + super().run() def get_build_overrides(): From 8caabdf36c63098bc5743306df55e2c45b5808e3 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 9 Dec 2020 16:14:06 -0600 Subject: [PATCH 0173/1270] DEP: Futurewarn on requiring __len__ on array-likes This fixes issue gh-17965. The slightly annoying thing is that there is no simple way to opt-in to the new behaviour and the old behaviour is a bit quirky to begin with (honoring the dtype, but not the shape). 
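To make the change concrete, consider a small object that exports the array interface but is not a sequence. The class below is a hypothetical illustration written for this note (it mirrors the helpers used in the test changes and is not part of the patch)::

    import numpy as np

    class ArrayLike:
        # exposes __array_interface__, but defines no __len__/__getitem__
        def __init__(self):
            self._base = np.arange(4)   # keep the underlying buffer alive
            self.__array_interface__ = self._base.__array_interface__

    np.array(ArrayLike()).shape   # (4,) -- unaffected by this change
    np.array([ArrayLike()])       # emits the new FutureWarning: previously the
                                  # object was coerced as a single Python object,
                                  # in the future this will behave like
                                  # np.array([np.array(ArrayLike())])

Objects that want the new behaviour immediately can expose the sequence protocol, and callers can pre-convert with np.array(obj) before nesting.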
--- doc/source/release/1.20.0-notes.rst | 49 +++++++++++- numpy/core/src/multiarray/array_coercion.c | 47 ++++++++++++ numpy/core/tests/test_array_coercion.py | 16 +++- numpy/core/tests/test_deprecations.py | 86 ++++++++++++++++++++++ 4 files changed, 192 insertions(+), 6 deletions(-) diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index 9f46a3e806a2..e26aa0d40579 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -184,6 +184,43 @@ Use ``next(it)`` instead of ``it.ndincr()``. (`gh-17233 `__) +ArrayLike objects which do not define ``__len__`` and ``__getitem__`` +--------------------------------------------------------------------- +Objects which define one of the protocols ``__array__``, +``__array_interface__``, or ``__array_struct__`` but are not sequences +(usually defined by having a ``__len__`` and ``__getitem__``) will behave +differently during array-coercion in the future. + +When nested inside sequences, such as ``np.array([array_like])``, these +were handled as a single Python object rather than an array. +In the future they will behave identically to:: + + np.array([np.array(array_like)]) + +This change should only have an effect if ``np.array(array_like)`` is not 0-D. +The solution to this warning may depend on the object: + +* Some array-likes may expect the new behaviour, and users can ignore the + warning. The object can choose to expose the sequence protocol to opt-in + to the new behaviour. +* For example, ``shapely`` will allow conversion to an array-like using + ``line.coords`` rather than ``np.asarray(line)``. Users may work around + the warning, or use the new convention when it becomes available. + +Unfortunately, using the new behaviour can only be achieved by +calling ``np.array(array_like)``. + +If you wish to ensure that the old behaviour remains unchanged, please create +an object array and then fill it explicitly, for example:: + + arr = np.empty(3, dtype=object) + arr[:] = [array_like1, array_like2, array_like3] + +This will ensure NumPy knows to not enter the array-like and use it as +a object instead. + +(`gh-17973 `__) + Future Changes ============== @@ -349,9 +386,15 @@ Things will now be more consistent with:: np.array([np.array(array_like1)]) -This could potentially subtly change output for badly defined array-likes. -We are not aware of any such case where the results were not clearly -incorrect previously. +This can subtly change output for some badly defined array-likes. +One example for this are array-like objects which are not also sequences +of matching shape. +In NumPy 1.20, a warning will be given when an array-like is not also a +sequence (but behaviour remains identical, see deprecations). +If an array like is also a sequence (defines ``__getitem__`` and ``__len__``) +NumPy will now only use the result given by ``__array__``, +``__array_interface__``, or ``__array_struct__``. This will result in +differences when the (nested) sequence describes a different shape. 
(`gh-16200 `__) diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index 53d891049e23..78b7c99c5747 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -922,6 +922,53 @@ PyArray_DiscoverDTypeAndShape_Recursive( Py_DECREF(arr); arr = NULL; } + else if (curr_dims > 0 && curr_dims != max_dims) { + /* + * Deprecated 2020-12-09, NumPy 1.20 + * + * See https://github.com/numpy/numpy/issues/17965 + * Shapely had objects which are not sequences but did export + * the array-interface (and so are arguably array-like). + * Previously numpy would not use array-like information during + * shape discovery, so that it ended up acting as if this was + * an (unknown) scalar but with the specified dtype. + * Thus we ignore "scalars" here, as the value stored in the + * array should be acceptable. + */ + if (PyArray_NDIM(arr) > 0 && NPY_UNLIKELY(!PySequence_Check(obj))) { + if (PyErr_WarnFormat(PyExc_FutureWarning, 1, + "The input object of type '%s' is an array-like " + "implementing one of the corresponding protocols " + "(`__array__`, `__array_interface__` or " + "`__array_struct__`); but not a sequence (or 0-D). " + "In the future, this object will be coerced as if it " + "was first converted using `np.array(obj)`. " + "To retain the old behaviour, you have to either " + "modify the type '%s', or assign to an empty array " + "created with `np.empty(correct_shape, dtype=object)`.", + Py_TYPE(obj)->tp_name, Py_TYPE(obj)->tp_name) < 0) { + Py_DECREF(arr); + return -1; + } + /* + * Strangely enough, even though we threw away the result here, + * we did use it during descriptor discovery, so promote it: + */ + if (update_shape(curr_dims, &max_dims, out_shape, + 0, NULL, NPY_FALSE, flags) < 0) { + *flags |= FOUND_RAGGED_ARRAY; + Py_DECREF(arr); + return max_dims; + } + if (!(*flags & DESCRIPTOR_WAS_SET) && handle_promotion( + out_descr, PyArray_DESCR(arr), fixed_DType, flags) < 0) { + Py_DECREF(arr); + return -1; + } + Py_DECREF(arr); + return max_dims; + } + } } if (arr != NULL) { /* diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py index 78def936022d..4c284d9b5b0f 100644 --- a/numpy/core/tests/test_array_coercion.py +++ b/numpy/core/tests/test_array_coercion.py @@ -38,8 +38,18 @@ def subclass(a): yield subclass + class _SequenceLike(): + # We are giving a warning that array-like's were also expected to be + # sequence-like in `np.array([array_like])`, this can be removed + # when the deprecation exired (started NumPy 1.20) + def __len__(self): + raise TypeError + + def __getitem__(self): + raise TypeError + # Array-interface - class ArrayDunder: + class ArrayDunder(_SequenceLike): def __init__(self, a): self.a = a @@ -52,7 +62,7 @@ def __array__(self, dtype=None): yield param(memoryview, id="memoryview") # Array-interface - class ArrayInterface: + class ArrayInterface(_SequenceLike): def __init__(self, a): self.a = a # need to hold on to keep interface valid self.__array_interface__ = a.__array_interface__ @@ -60,7 +70,7 @@ def __init__(self, a): yield param(ArrayInterface, id="__array_interface__") # Array-Struct - class ArrayStruct: + class ArrayStruct(_SequenceLike): def __init__(self, a): self.a = a # need to hold on to keep struct valid self.__array_struct__ = a.__array_struct__ diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index a67fe62c3d9b..ed238da9f121 100644 --- a/numpy/core/tests/test_deprecations.py +++ 
b/numpy/core/tests/test_deprecations.py @@ -773,6 +773,92 @@ def check(): self.assert_deprecated(check) +class TestFutureWarningArrayLikeNotIterable(_DeprecationTestCase): + # Deprecated 2020-12-09, NumPy 1.20 + warning_cls = FutureWarning + message = "The input object of type.*but not a sequence" + + @pytest.mark.parametrize("protocol", + ["__array__", "__array_interface__", "__array_struct__"]) + def test_deprecated(self, protocol): + """Test that these objects give a warning since they are not 0-D, + not coerced at the top level `np.array(obj)`, but nested, and do + *not* define the sequence protocol. + + NOTE: Tests for the versions including __len__ and __getitem__ exist + in `test_array_coercion.py` and they can be modified or ammended + when this deprecation expired. + """ + blueprint = np.arange(10) + MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)}) + self.assert_deprecated(lambda: np.array([MyArr()], dtype=object)) + + @pytest.mark.parametrize("protocol", + ["__array__", "__array_interface__", "__array_struct__"]) + def test_0d_not_deprecated(self, protocol): + # 0-D always worked (albeit it would use __float__ or similar for the + # conversion, which may not happen anymore) + blueprint = np.array(1.) + MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)}) + myarr = MyArr() + + self.assert_not_deprecated(lambda: np.array([myarr], dtype=object)) + res = np.array([myarr], dtype=object) + expected = np.empty(1, dtype=object) + expected[0] = myarr + assert_array_equal(res, expected) + + @pytest.mark.parametrize("protocol", + ["__array__", "__array_interface__", "__array_struct__"]) + def test_unnested_not_deprecated(self, protocol): + blueprint = np.arange(10) + MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)}) + myarr = MyArr() + + self.assert_not_deprecated(lambda: np.array(myarr)) + res = np.array(myarr) + assert_array_equal(res, blueprint) + + @pytest.mark.parametrize("protocol", + ["__array__", "__array_interface__", "__array_struct__"]) + def test_strange_dtype_handling(self, protocol): + """The old code would actually use the dtype from the array, but + then end up not using the array (for dimension discovery) + """ + blueprint = np.arange(10).astype("f4") + MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol), + "__float__": lambda _: 0.5}) + myarr = MyArr() + + # Make sure we warn (and capture the FutureWarning) + with pytest.warns(FutureWarning, match=self.message): + res = np.array([[myarr]]) + + assert res.shape == (1, 1) + assert res.dtype == "f4" + assert res[0, 0] == 0.5 + + @pytest.mark.parametrize("protocol", + ["__array__", "__array_interface__", "__array_struct__"]) + def test_assignment_not_deprecated(self, protocol): + # If the result is dtype=object we do not unpack a nested array or + # array-like, if it is nested at exactly the right depth. + # NOTE: We actually do still call __array__, etc. but ignore the result + # in the end. For `dtype=object` we could optimize that away. 
+ blueprint = np.arange(10).astype("f4") + MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol), + "__float__": lambda _: 0.5}) + myarr = MyArr() + + res = np.empty(3, dtype=object) + def set(): + res[:] = [myarr, myarr, myarr] + self.assert_not_deprecated(set) + assert res[0] is myarr + assert res[1] is myarr + assert res[2] is myarr + + class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase): # Deprecated 2020-11-24, NumPy 1.20 """ From 421b54c851826e1362e24f6528d0223656eeb3b7 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 10 Dec 2020 15:38:26 -0700 Subject: [PATCH 0174/1270] MAINT: Update test_requirements and release_requirements. Install the correct versions of wheel and towncrier. --- release_requirements.txt | 2 +- test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/release_requirements.txt b/release_requirements.txt index c24e39c7849d..805ce9a8afe1 100644 --- a/release_requirements.txt +++ b/release_requirements.txt @@ -14,4 +14,4 @@ twine # building and notes Paver -towncrier +git+https://github.com/hawkowl/towncrier.git@master diff --git a/test_requirements.txt b/test_requirements.txt index 1b7b696d378e..5166ada16d6d 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,5 +1,5 @@ cython==0.29.21 -wheel +wheel<=0.35.1 setuptools<49.2.0 hypothesis==5.41.5 pytest==6.0.2 From 7bd6de3d0947c5078c9524739dc08bb5eac49c1e Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 10 Dec 2020 23:53:57 +0000 Subject: [PATCH 0175/1270] MAINT, SIMD: reduce the number of preprocessor CPU runtime dispatcher calls --- numpy/core/code_generators/generate_umath.py | 38 ++++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index cb1147b93fba..fd0051ee9dc4 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -48,8 +48,9 @@ class TypeDescription: simd: list Available SIMD ufunc loops, dispatched at runtime in specified order Currently only supported for simples types (see make_arrays) - dispatch: list - Available SIMD ufunc loops, dispatched at runtime in specified order + dispatch: str or None, optional + Dispatch-able source name without its extension '.dispatch.c' that contains the definition of ufunc, + dispatched at runtime depending on the specified targets of the dispatch-able source. 
Currently only supported for simples types (see make_arrays) """ def __init__(self, type, f=None, in_=None, out=None, astype=None, simd=None, dispatch=None): @@ -119,11 +120,12 @@ def TD(types, f=None, astype=None, in_=None, out=None, simd=None, dispatch=None) simdt = [k for k, v in simd if t in v] else: simdt = [] + # [(dispatch file name without extension '.dispatch.c*', list of types)] if dispatch: - dispt = [k for k, v in dispatch if t in v] + dispt = ([k for k, v in dispatch if t in v]+[None])[0] else: - dispt = [] + dispt = None tds.append(TypeDescription( t, f=fd, in_=i, out=o, astype=astype, simd=simdt, dispatch=dispt )) @@ -1000,6 +1002,7 @@ def make_arrays(funcdict): # later code1list = [] code2list = [] + dispdict = {} names = sorted(funcdict.keys()) for name in names: uf = funcdict[name] @@ -1010,6 +1013,7 @@ def make_arrays(funcdict): sub = 0 for t in uf.type_descriptions: + if t.func_data is FullTypeDescr: tname = english_upper(chartoname[t.type]) datalist.append('(void *)NULL') @@ -1023,7 +1027,8 @@ def make_arrays(funcdict): elif t.func_data is None: datalist.append('(void *)NULL') tname = english_upper(chartoname[t.type]) - funclist.append('%s_%s' % (tname, name)) + cfunc_name = f"{tname}_{name}" + funclist.append(cfunc_name) if t.simd is not None: for vt in t.simd: code2list.append(textwrap.dedent("""\ @@ -1036,16 +1041,8 @@ def make_arrays(funcdict): ISA=vt.upper(), isa=vt, fname=name, type=tname, idx=k )) - if t.dispatch is not None: - for dname in t.dispatch: - code2list.append(textwrap.dedent("""\ - #ifndef NPY_DISABLE_OPTIMIZATION - #include "{dname}.dispatch.h" - #endif - NPY_CPU_DISPATCH_CALL_XB({name}_functions[{k}] = {tname}_{name}); - """).format( - dname=dname, name=name, tname=tname, k=k - )) + if t.dispatch: + dispdict.setdefault(t.dispatch, []).append((tname, k, cfunc_name)) else: funclist.append('NULL') try: @@ -1091,6 +1088,17 @@ def make_arrays(funcdict): % (name, datanames)) code1list.append("static char %s_signatures[] = {%s};" % (name, signames)) + + for dname, funcs in dispdict.items(): + code2list.append(textwrap.dedent(f""" + #ifndef NPY_DISABLE_OPTIMIZATION + #include "{dname}.dispatch.h" + #endif + """)) + for (ufunc_name, func_idx, cfunc_name) in funcs: + code2list.append(textwrap.dedent(f"""\ + NPY_CPU_DISPATCH_CALL_XB({ufunc_name}_functions[{func_idx}] = {cfunc_name}); + """)) return "\n".join(code1list), "\n".join(code2list) def make_ufuncs(funcdict): From f7fde77403acbebcab26118799d23237f137bccc Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 11 Dec 2020 09:10:03 +0800 Subject: [PATCH 0176/1270] fix slow test bug --- numpy/core/src/common/simd/vsx/conversion.h | 1 + numpy/core/src/multiarray/item_selection.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/common/simd/vsx/conversion.h b/numpy/core/src/common/simd/vsx/conversion.h index d3ae7a51439c..7df16a2a8faa 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -45,6 +45,7 @@ NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) npyv_u16 zero = npyv_zero_u16(); r.val[0] = (npyv_u32)vec_mergel(data, zero); r.val[1] = (npyv_u32)vec_mergeh(data, zero); + return r; } // convert boolean vector to integer bitfield diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 19ecaf4630fa..dfee03414b55 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2145,7 +2145,7 @@ count_zero_bytes_u8(const 
npy_uint8 **d, const npy_uint8 *end) vt = npyv_and_u8(vt, vone); vsum8 = npyv_add_u8(vsum8, vt); *d += npyv_nlanes_u8; - n++; + n+= npyv_nlanes_u8; } return vsum8; } From d38a749c232bfa91ac76ab7403a08719f63cc38a Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 11 Dec 2020 10:01:50 +0800 Subject: [PATCH 0177/1270] Thanks for the unit test, expose the VSX expand error. --- numpy/core/src/common/simd/vsx/conversion.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/common/simd/vsx/conversion.h b/numpy/core/src/common/simd/vsx/conversion.h index 7df16a2a8faa..320672700ae6 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -34,8 +34,8 @@ NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { npyv_u16x2 r; npyv_u8 zero = npyv_zero_u8(); - r.val[0] = (npyv_u16)vec_mergel(data, zero); - r.val[1] = (npyv_u16)vec_mergeh(data, zero); + r.val[0] = (npyv_u16)vec_mergeh(data, zero); + r.val[1] = (npyv_u16)vec_mergel(data, zero); return r; } @@ -43,8 +43,8 @@ NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { npyv_u32x2 r; npyv_u16 zero = npyv_zero_u16(); - r.val[0] = (npyv_u32)vec_mergel(data, zero); - r.val[1] = (npyv_u32)vec_mergeh(data, zero); + r.val[0] = (npyv_u32)vec_mergeh(data, zero); + r.val[1] = (npyv_u32)vec_mergel(data, zero); return r; } From 711482a83beb2c7b92c2249ca992c8d8cc97edd8 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 11 Dec 2020 14:36:24 +0800 Subject: [PATCH 0178/1270] Force AVX2 in order to test the performance by using new intrinsics. --- numpy/core/src/common/simd/simd.h | 6 ++- numpy/core/src/multiarray/compiled_base.c | 59 +++++++++++++++-------- 2 files changed, 44 insertions(+), 21 deletions(-) diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index f28326d943d1..7705a48ceee7 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -10,7 +10,11 @@ #include "numpy/npy_common.h" #include "npy_cpu_dispatch.h" #include "simd_utils.h" -#define NPY_HAVE_AVX2 +#ifndef NPY_HAVE_AVX2 + #include + #define NPY_HAVE_AVX + #define NPY_HAVE_AVX2 +#endif #ifdef __cplusplus extern "C" { #endif diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index ad1c8140b154..7979368c5fc7 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -14,6 +14,12 @@ #include "ctors.h" #include "common.h" #include "simd/simd.h" + +typedef enum { + PACK_ORDER_LITTLE = 0, + PACK_ORDER_BIG +} PACK_ORDER; + /* * Returns -1 if the array is monotonic decreasing, * +1 if the array is monotonic increasing, @@ -1469,7 +1475,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) * byte array. Truth values are determined as usual: 0 is false, everything * else is true. */ -static NPY_INLINE void +static NPY_GCC_OPT_3 NPY_INLINE void pack_inner(const char *inptr, npy_intp element_size, /* in bytes */ npy_intp n_in, @@ -1495,28 +1501,41 @@ pack_inner(const char *inptr, /* don't handle non-full 8-byte remainder */ npy_intp vn_out = n_out - (remain ? 
1 : 0); const int vstep = npyv_nlanes_u64; + const int vstepx4 = vstep * 4; vn_out -= (vn_out & (vstep - 1)); - for (index = 0; index < vn_out; index += vstep) { - // Maximum parallel ability: handle eight 64-bit integers at one time - npy_uint64 a[8]; - for (int i = 0; i < vstep; i++) { - a[i] = *(npy_uint64*)(inptr + 8 * i); + for (; index <= vn_out - vstepx4; index += vstepx4, inptr += npyv_nlanes_u8 * 4) { + npyv_u8 v0 = npyv_load_u8((const npy_uint8*)inptr); + npyv_u8 v1 = npyv_load_u8((const npy_uint8*)inptr + npyv_nlanes_u8 * 1); + npyv_u8 v2 = npyv_load_u8((const npy_uint8*)inptr + npyv_nlanes_u8 * 2); + npyv_u8 v3 = npyv_load_u8((const npy_uint8*)inptr + npyv_nlanes_u8 * 3); + if (order == PACK_ORDER_BIG) { + v0 = npyv_rev64_u8(v0); + v1 = npyv_rev64_u8(v1); + v2 = npyv_rev64_u8(v2); + v3 = npyv_rev64_u8(v3); } - if (order == 'b') { - for (int i = 0; i < vstep; i++) { - a[i] = npy_bswap8(a[i]); + npy_uint64 bb[4]; + bb[0] = npyv_tobits_b8(npyv_cmpneq_u8(v0, v_zero)); + bb[1] = npyv_tobits_b8(npyv_cmpneq_u8(v1, v_zero)); + bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); + bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); + for(int i = 0; i < 4; i++) { + for (int j = 0; j < vstep; j++) { + memcpy(outptr, (char*)&bb[i] + j, 1); + outptr += out_stride; } } - npyv_u8 v = npyv_reinterpret_u8_u64(npyv_set_u64(a[0], a[1], a[2], a[3], - a[4], a[5], a[6], a[7])); - npyv_b8 bmask = npyv_cmpneq_u8(v, v_zero); - npy_uint64 r = npyv_movemask_b8(bmask); - /* store result */ - for (int i = 0; i < vstep; i++) { - memcpy(outptr, (char*)&r + i, 1); + } + for (; index < vn_out; index += vstep, inptr += npyv_nlanes_u8) { + npyv_u8 va = npyv_load_u8((const npy_uint8*)inptr); + if (order == PACK_ORDER_BIG) { + va = npyv_rev64_u8(va); + } + npy_uint64 bb = npyv_movemask_b8(npyv_cmpneq_u8(va, v_zero)); + for (int i = 0; i < vstep; ++i) { + memcpy(outptr, (char*)&bb + i, 1); outptr += out_stride; } - inptr += 8 * vstep; } } #endif @@ -1528,7 +1547,7 @@ pack_inner(const char *inptr, for (; index < n_out; index++) { unsigned char build = 0; int maxi = (index == n_out - 1) ? remain : 8; - if (order == 'b') { + if (order == PACK_ORDER_BIG) { for (int i = 0; i < maxi; i++) { build <<= 1; for (npy_intp j = 0; j < element_size; j++) { @@ -1638,13 +1657,13 @@ pack_bits(PyObject *input, int axis, char order) Py_XDECREF(ot); goto fail; } - + const PACK_ORDER ordere = order == 'b' ? PACK_ORDER_BIG : PACK_ORDER_LITTLE; NPY_BEGIN_THREADS_THRESHOLDED(PyArray_DIM(out, axis)); while (PyArray_ITER_NOTDONE(it)) { pack_inner(PyArray_ITER_DATA(it), PyArray_ITEMSIZE(new), PyArray_DIM(new, axis), PyArray_STRIDE(new, axis), PyArray_ITER_DATA(ot), PyArray_DIM(out, axis), - PyArray_STRIDE(out, axis), order); + PyArray_STRIDE(out, axis), ordere); PyArray_ITER_NEXT(it); PyArray_ITER_NEXT(ot); } From 95cd7065c99ff5f44f47b1d2ac5c8db4ad54989f Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 11 Dec 2020 14:51:45 +0800 Subject: [PATCH 0179/1270] remove useless intrinsics and forced SIMD. 
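For orientation, ``pack_inner`` is the kernel behind ``np.packbits``, and the two
orders it distinguishes (``PACK_ORDER_BIG``/``PACK_ORDER_LITTLE``) correspond to
the ``bitorder`` argument; a quick reference::

    import numpy as np

    bits = np.array([1, 0, 1, 1, 0, 0, 0, 1], dtype=np.uint8)
    np.packbits(bits, bitorder='big')     # array([177], dtype=uint8)
    np.packbits(bits, bitorder='little')  # array([141], dtype=uint8)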
--- numpy/core/src/common/simd/avx2/operators.h | 3 --- numpy/core/src/common/simd/avx512/operators.h | 14 -------------- numpy/core/src/common/simd/neon/operators.h | 9 --------- numpy/core/src/common/simd/simd.h | 6 +----- numpy/core/src/common/simd/sse/operators.h | 3 --- numpy/core/src/common/simd/vsx/operators.h | 7 ------- numpy/core/src/multiarray/compiled_base.c | 2 +- 7 files changed, 2 insertions(+), 42 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/operators.h b/numpy/core/src/common/simd/avx2/operators.h index 74517c3f4e7b..c1d30413fe5e 100644 --- a/numpy/core/src/common/simd/avx2/operators.h +++ b/numpy/core/src/common/simd/avx2/operators.h @@ -197,7 +197,4 @@ NPY_FINLINE __m256i npyv_cmpge_u32(__m256i a, __m256i b) #define npyv_cmpge_f32(A, B) _mm256_castps_si256(_mm256_cmp_ps(A, B, _CMP_GE_OQ)) #define npyv_cmpge_f64(A, B) _mm256_castpd_si256(_mm256_cmp_pd(A, B, _CMP_GE_OQ)) -// Create mask from the most significant bit of each 8-bit element -#define npyv_movemask_b8(a) _mm256_movemask_epi8(a) - #endif // _NPY_SIMD_AVX2_OPERATORS_H diff --git a/numpy/core/src/common/simd/avx512/operators.h b/numpy/core/src/common/simd/avx512/operators.h index 05978a6cf3b5..f76ea5e2d6c5 100644 --- a/numpy/core/src/common/simd/avx512/operators.h +++ b/numpy/core/src/common/simd/avx512/operators.h @@ -256,18 +256,4 @@ #define npyv_cmpge_f32(A, B) _mm512_cmp_ps_mask(A, B, _CMP_GE_OQ) #define npyv_cmpge_f64(A, B) _mm512_cmp_pd_mask(A, B, _CMP_GE_OQ) -// Create mask from the most significant bit of each 8-bit element -// AVX512F & AVX512BW -NPY_FINLINE npy_uint64 npyv_movemask_b8(npyv_b8 mask) -{ -#ifdef NPY_HAVE_AVX512BW_MASK - return (npy_uint64)_cvtmask64_u64(mask); -#elif NPY_HAVE_AVX512BW - return (npy_uint64)mask; -#else - int mask_lo = _mm256_movemask_epi8(_mm512_castsi512_si256(mask)); - int mask_hi = _mm256_movemask_epi8(_mm512_extracti64x4_epi64(mask, 1)); - return (unsigned)mask_lo | ((npy_uint64)(unsigned)mask_hi << 32); -#endif -} #endif // _NPY_SIMD_AVX512_OPERATORS_H diff --git a/numpy/core/src/common/simd/neon/operators.h b/numpy/core/src/common/simd/neon/operators.h index 61d100225888..c1ad4ba12a6f 100644 --- a/numpy/core/src/common/simd/neon/operators.h +++ b/numpy/core/src/common/simd/neon/operators.h @@ -215,13 +215,4 @@ #define npyv_cmple_f32(A, B) npyv_cmpge_f32(B, A) #define npyv_cmple_f64(A, B) npyv_cmpge_f64(B, A) -// Create mask from the most significant bit of each 8-bit element -NPY_INLINE int32_t npyv_movemask_b8(uint8x16_t input) -{ - int8x8_t m0 = vcreate_s8(0x0706050403020100ULL); - uint8x16_t v0 = vshlq_u8(vshrq_n_u8(input, 7), vcombine_s8(m0, m0)); - uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(v0))); - return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 8); -} - #endif // _NPY_SIMD_NEON_OPERATORS_H diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 7705a48ceee7..8804223c9fef 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -10,11 +10,7 @@ #include "numpy/npy_common.h" #include "npy_cpu_dispatch.h" #include "simd_utils.h" -#ifndef NPY_HAVE_AVX2 - #include - #define NPY_HAVE_AVX - #define NPY_HAVE_AVX2 -#endif + #ifdef __cplusplus extern "C" { #endif diff --git a/numpy/core/src/common/simd/sse/operators.h b/numpy/core/src/common/simd/sse/operators.h index 96835a3dfbaa..6e32ca4fd9b3 100644 --- a/numpy/core/src/common/simd/sse/operators.h +++ b/numpy/core/src/common/simd/sse/operators.h @@ -255,7 +255,4 @@ NPY_FINLINE __m128i npyv_shr_s64(__m128i a, int c) 
#define npyv_cmpge_f32(a, b) _mm_castps_si128(_mm_cmpge_ps(a, b)) #define npyv_cmpge_f64(a, b) _mm_castpd_si128(_mm_cmpge_pd(a, b)) -// Create mask from the most significant bit of each 8-bit element -#define npyv_movemask_b8(a) _mm_movemask_epi8(a) - #endif // _NPY_SIMD_SSE_OPERATORS_H diff --git a/numpy/core/src/common/simd/vsx/operators.h b/numpy/core/src/common/simd/vsx/operators.h index b99eef08ebf9..ca020d9e0e8c 100644 --- a/numpy/core/src/common/simd/vsx/operators.h +++ b/numpy/core/src/common/simd/vsx/operators.h @@ -213,11 +213,4 @@ NPY_FINLINE npyv_f64 npyv_not_f64(npyv_f64 a) #define npyv_cmple_f32(A, B) npyv_cmpge_f32(B, A) #define npyv_cmple_f64(A, B) npyv_cmpge_f64(B, A) -NPY_FINLINE npy_uint64 npyv_movemask_b8(npyv_b8 mask) -{ - const npyv_u8 bperm = {120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0}; - npyv_s32 vmask = (npyv_s32)vec_vbpermq((npyv_u8)mask, bperm); - return (npy_uint64)vec_extract(vmask, 2); -} - #endif // _NPY_SIMD_VSX_OPERATORS_H diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 7979368c5fc7..1aa7eceeec71 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1531,7 +1531,7 @@ pack_inner(const char *inptr, if (order == PACK_ORDER_BIG) { va = npyv_rev64_u8(va); } - npy_uint64 bb = npyv_movemask_b8(npyv_cmpneq_u8(va, v_zero)); + npy_uint64 bb = npyv_tobits_b8(npyv_cmpneq_u8(va, v_zero)); for (int i = 0; i < vstep; ++i) { memcpy(outptr, (char*)&bb + i, 1); outptr += out_stride; From 0ece23b05376c1069be59b65a6f2ae919f4dd839 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 11 Dec 2020 15:02:20 +0800 Subject: [PATCH 0180/1270] remove extra headers. --- numpy/core/src/common/simd/avx2/avx2.h | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/core/src/common/simd/avx2/avx2.h b/numpy/core/src/common/simd/avx2/avx2.h index 9370ca6c95f4..c30c2233d4cf 100644 --- a/numpy/core/src/common/simd/avx2/avx2.h +++ b/numpy/core/src/common/simd/avx2/avx2.h @@ -1,7 +1,6 @@ #ifndef _NPY_SIMD_H_ #error "Not a standalone header" #endif -#include #define NPY_SIMD 256 #define NPY_SIMD_WIDTH 32 #define NPY_SIMD_F64 1 From 1237be83356e20f568f28e54bc0099f5acd3e2db Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 11 Dec 2020 13:22:53 +0100 Subject: [PATCH 0181/1270] ENH: Add dtype-support for the `__array__` protocol --- numpy/__init__.pyi | 16 +++++++++++++--- numpy/typing/_array_like.py | 21 +++++++++++++-------- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 83afd2e49808..ac21c990706e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -971,8 +971,11 @@ class flatiter(Generic[_ArraySelf]): @overload def __getitem__( self, key: Union[_ArrayLikeInt, slice, ellipsis], - ) -> _ArraySelf: ... - def __array__(self, __dtype: DTypeLike = ...) -> ndarray: ... + ) -> _NdArraySubClass: ... + @overload + def __array__(self: flatiter[ndarray[Any, _DType]], __dtype: None = ...) -> ndarray[Any, _DType]: ... + @overload + def __array__(self, __dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... _OrderKACF = Optional[Literal["K", "A", "C", "F"]] _OrderACF = Optional[Literal["A", "C", "F"]] @@ -1004,7 +1007,6 @@ class _ArrayOrScalarCommon: def itemsize(self) -> int: ... @property def nbytes(self) -> int: ... - def __array__(self, __dtype: DTypeLike = ...) -> ndarray: ... def __bool__(self) -> bool: ... def __bytes__(self) -> bytes: ... def __str__(self) -> str: ... 
@@ -1468,6 +1470,10 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): strides: _ShapeLike = ..., order: _OrderKACF = ..., ) -> _ArraySelf: ... + @overload + def __array__(self, __dtype: None = ...) -> ndarray[Any, _DType]: ... + @overload + def __array__(self, __dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... @property def ctypes(self) -> _ctypes: ... @property @@ -1646,6 +1652,10 @@ _NBit_co2 = TypeVar("_NBit_co2", covariant=True, bound=NBitBase) class generic(_ArrayOrScalarCommon): @abstractmethod def __init__(self, *args: Any, **kwargs: Any) -> None: ... + @overload + def __array__(self: _ScalarType, __dtype: None = ...) -> ndarray[Any, dtype[_ScalarType]]: ... + @overload + def __array__(self, __dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... @property def base(self) -> None: ... @property diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index a1a604239390..4ea6974b2941 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -1,7 +1,9 @@ +from __future__ import annotations + import sys -from typing import Any, overload, Sequence, TYPE_CHECKING, Union +from typing import Any, overload, Sequence, TYPE_CHECKING, Union, TypeVar -from numpy import ndarray +from numpy import ndarray, dtype from ._scalars import _ScalarLike from ._dtype_like import DTypeLike @@ -16,12 +18,15 @@ else: HAVE_PROTOCOL = True +_DType = TypeVar("_DType", bound="dtype[Any]") + if TYPE_CHECKING or HAVE_PROTOCOL: - class _SupportsArray(Protocol): - @overload - def __array__(self, __dtype: DTypeLike = ...) -> ndarray: ... - @overload - def __array__(self, dtype: DTypeLike = ...) -> ndarray: ... + # The `_SupportsArray` protocol only cares about the default dtype + # (i.e. `dtype=None`) ofthe to-be returned array. + # Concrete implementations of the protocol are responsible for adding + # any and all remaining overloads + class _SupportsArray(Protocol[_DType]): + def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... else: _SupportsArray = Any @@ -36,5 +41,5 @@ def __array__(self, dtype: DTypeLike = ...) -> ndarray: ... _ScalarLike, Sequence[_ScalarLike], Sequence[Sequence[Any]], # TODO: Wait for support for recursive types - _SupportsArray, + "_SupportsArray[Any]", ] From 9d36f09cae3b09e61105e9222e0b56c0b1546a0c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 11 Dec 2020 13:22:46 +0100 Subject: [PATCH 0182/1270] ENH: Add dtype-support for `np.flatiter` --- numpy/__init__.pyi | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ac21c990706e..79c53adc7af0 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -955,19 +955,22 @@ _ArrayLikeInt = Union[ _FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter) -class flatiter(Generic[_ArraySelf]): +class flatiter(Generic[_NdArraySubClass]): @property - def base(self) -> _ArraySelf: ... + def base(self) -> _NdArraySubClass: ... @property def coords(self) -> _Shape: ... @property def index(self) -> int: ... - def copy(self) -> _ArraySelf: ... + def copy(self) -> _NdArraySubClass: ... def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ... - def __next__(self) -> generic: ... + def __next__(self: flatiter[ndarray[Any, dtype[_ScalarType]]]) -> _ScalarType: ... def __len__(self) -> int: ... @overload - def __getitem__(self, key: Union[int, integer]) -> generic: ... + def __getitem__( + self: flatiter[ndarray[Any, dtype[_ScalarType]]], + key: Union[int, integer], + ) -> _ScalarType: ... 
@overload def __getitem__( self, key: Union[_ArrayLikeInt, slice, ellipsis], @@ -1487,7 +1490,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ... def fill(self, value: Any) -> None: ... @property - def flat(self: _ArraySelf) -> flatiter[_ArraySelf]: ... + def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ... @overload def item(self, *args: int) -> Any: ... @overload @@ -1668,7 +1671,7 @@ class generic(_ArrayOrScalarCommon): def strides(self) -> Tuple[()]: ... def byteswap(self: _ScalarType, inplace: Literal[False] = ...) -> _ScalarType: ... @property - def flat(self) -> flatiter[ndarray]: ... + def flat(self: _ScalarType) -> flatiter[ndarray[Any, dtype[_ScalarType]]]: ... def item( self: _ScalarType, __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., From 347c40a8d677fb8d5c4bf71382bebcb81e4e1cec Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 11 Dec 2020 13:35:42 +0100 Subject: [PATCH 0183/1270] TST: Updated the `np.flatiter` typing tests --- numpy/typing/tests/data/pass/flatiter.py | 2 ++ numpy/typing/tests/data/reveal/flatiter.py | 21 ++++++++++++--------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py index c0219eb2beda..4fdf2529962b 100644 --- a/numpy/typing/tests/data/pass/flatiter.py +++ b/numpy/typing/tests/data/pass/flatiter.py @@ -12,3 +12,5 @@ a[[0, 1, 2]] a[...] a[:] +a.__array__() +a.__array__(np.float64) diff --git a/numpy/typing/tests/data/reveal/flatiter.py b/numpy/typing/tests/data/reveal/flatiter.py index 56cdc7a0ea74..221101ebb9f8 100644 --- a/numpy/typing/tests/data/reveal/flatiter.py +++ b/numpy/typing/tests/data/reveal/flatiter.py @@ -1,14 +1,17 @@ +from typing import Any import numpy as np -a: "np.flatiter[np.ndarray]" +a: np.flatiter[np.ndarray[Any, np.dtype[np.str_]]] -reveal_type(a.base) # E: numpy.ndarray* -reveal_type(a.copy()) # E: numpy.ndarray* +reveal_type(a.base) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a.copy()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] reveal_type(a.coords) # E: tuple[builtins.int] reveal_type(a.index) # E: int -reveal_type(iter(a)) # E: Iterator[numpy.generic*] -reveal_type(next(a)) # E: numpy.generic -reveal_type(a[0]) # E: numpy.generic -reveal_type(a[[0, 1, 2]]) # E: numpy.ndarray* -reveal_type(a[...]) # E: numpy.ndarray* -reveal_type(a[:]) # E: numpy.ndarray* +reveal_type(iter(a)) # E: Iterator[numpy.str_] +reveal_type(next(a)) # E: numpy.str_ +reveal_type(a[0]) # E: numpy.str_ +reveal_type(a[[0, 1, 2]]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a[...]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a[:]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a.__array__()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a.__array__(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[Any]] From 597357517617c3a3c3c2d5385d3da2d4433c567f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 11 Dec 2020 13:36:32 +0100 Subject: [PATCH 0184/1270] TST: Updated the `__array__` typing tests --- numpy/typing/tests/data/fail/array_like.py | 4 ++-- numpy/typing/tests/data/pass/array_like.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/typing/tests/data/fail/array_like.py b/numpy/typing/tests/data/fail/array_like.py index a97e72dc73a9..3bbd2906150f 100644 --- a/numpy/typing/tests/data/fail/array_like.py +++ 
b/numpy/typing/tests/data/fail/array_like.py @@ -11,6 +11,6 @@ class A: x3: ArrayLike = {1: "foo", 2: "bar"} # E: Incompatible types in assignment scalar = np.int64(1) -scalar.__array__(dtype=np.float64) # E: Unexpected keyword argument +scalar.__array__(dtype=np.float64) # E: No overload variant array = np.array([1]) -array.__array__(dtype=np.float64) # E: Unexpected keyword argument +array.__array__(dtype=np.float64) # E: No overload variant diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index f85724267448..563fc08c7ffb 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -25,13 +25,13 @@ def __array__(self, dtype: DTypeLike = None) -> np.ndarray: x13: ArrayLike = A() scalar: _SupportsArray = np.int64(1) -scalar.__array__(np.float64) +scalar.__array__(None) array: _SupportsArray = np.array(1) -array.__array__(np.float64) +array.__array__(None) a: _SupportsArray = A() -a.__array__(np.int64) -a.__array__(dtype=np.int64) +a.__array__(None) +a.__array__(dtype=None) # Escape hatch for when you mean to make something like an object # array. From 52ecd5a97ce3ff4dad72935b8bf67ba2cfb6908e Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 4 Dec 2020 23:03:23 +0000 Subject: [PATCH 0185/1270] BUG: Enforce high >= low on uniform number generators Check that high is weakly larger than low and raise if now closes #17905 --- numpy/random/_generator.pyx | 15 ++++++++------- numpy/random/mtrand.pyx | 15 ++++++++------- numpy/random/tests/test_generator_mt19937.py | 6 ++++++ numpy/random/tests/test_random.py | 6 ++++++ 4 files changed, 28 insertions(+), 14 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 7ffa367751aa..cd951526bfe0 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -859,7 +859,8 @@ cdef class Generator: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than high. The default value is 1.0. + less than high. The default value is 1.0. high - low must be + non-negative. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. 
If size is ``None`` (default), @@ -914,7 +915,7 @@ cdef class Generator: """ cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange - cdef double _low, _high, range + cdef double _low, _high, rng cdef object temp alow = np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED) @@ -923,13 +924,13 @@ cdef class Generator: if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0: _low = PyFloat_AsDouble(low) _high = PyFloat_AsDouble(high) - range = _high - _low - if not np.isfinite(range): - raise OverflowError('Range exceeds valid bounds') + rng = _high - _low + if not np.isfinite(rng): + raise OverflowError('high - low range exceeds valid bounds') return cont(&random_uniform, &self._bitgen, size, self.lock, 2, _low, '', CONS_NONE, - range, '', CONS_NONE, + rng, 'high - low', CONS_NON_NEGATIVE, 0.0, '', CONS_NONE, None) @@ -943,7 +944,7 @@ cdef class Generator: raise OverflowError('Range exceeds valid bounds') return cont(&random_uniform, &self._bitgen, size, self.lock, 2, alow, '', CONS_NONE, - arange, '', CONS_NONE, + arange, 'high - low', CONS_NON_NEGATIVE, 0.0, '', CONS_NONE, None) diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index d43e7f5aa6d9..7f4fe1c3c89d 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -1026,7 +1026,8 @@ cdef class RandomState: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than or equal to high. The default value is 1.0. + less than or equal to high. The default value is 1.0. high - low must be + non-negative. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -1095,7 +1096,7 @@ cdef class RandomState: """ cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange - cdef double _low, _high, range + cdef double _low, _high, rng cdef object temp alow = np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED) @@ -1104,13 +1105,13 @@ cdef class RandomState: if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0: _low = PyFloat_AsDouble(low) _high = PyFloat_AsDouble(high) - range = _high - _low - if not np.isfinite(range): - raise OverflowError('Range exceeds valid bounds') + rng = _high - _low + if not np.isfinite(rng): + raise OverflowError('High - low range exceeds valid bounds') return cont(&random_uniform, &self._bitgen, size, self.lock, 2, _low, '', CONS_NONE, - range, '', CONS_NONE, + rng, 'high - low', CONS_NON_NEGATIVE, 0.0, '', CONS_NONE, None) @@ -1123,7 +1124,7 @@ cdef class RandomState: raise OverflowError('Range exceeds valid bounds') return cont(&random_uniform, &self._bitgen, size, self.lock, 2, alow, '', CONS_NONE, - arange, '', CONS_NONE, + arange, 'high - low', CONS_NON_NEGATIVE, 0.0, '', CONS_NONE, None) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index b69cd38d4a4a..4b534fcece75 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1666,6 +1666,12 @@ def test_uniform_range_bounds(self): # DBL_MAX by increasing fmin a bit random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + def test_uniform_neg_range(self): + func = random.uniform + assert_raises(ValueError, func, 2, 1) + assert_raises(ValueError, func, [1, 2], [1, 1]) + assert_raises(ValueError, func, [[0, 1],[2, 3]], 2) + def test_scalar_exception_propagation(self): # Tests that exceptions are 
correctly propagated in distributions # when called with objects that throw exceptions when converted to diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index c13fc39e3339..473ca08e405d 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -916,6 +916,12 @@ def test_uniform_range_bounds(self): # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > # DBL_MAX by increasing fmin a bit np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_uniform_neg_range(self): + func = np.random.uniform + assert_raises(ValueError, func, 2, 1) + assert_raises(ValueError, func, [1, 2], [1, 1]) + assert_raises(ValueError, func, [[0, 1],[2, 3]], 2) def test_scalar_exception_propagation(self): # Tests that exceptions are correctly propagated in distributions From a3bb19df580454a6b98c34e29a00c271c2e411af Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Sat, 5 Dec 2020 21:58:09 +0000 Subject: [PATCH 0186/1270] Revert changes to mtrand This doesn't qualify for fixing under the NEP. --- .../upcoming_changes/17921.compatibility.rst | 6 ++++++ numpy/random/_generator.pyx | 8 ++------ numpy/random/mtrand.pyx | 15 +++++++-------- numpy/random/tests/test_generator_mt19937.py | 9 +++++++++ numpy/random/tests/test_random.py | 6 ------ 5 files changed, 24 insertions(+), 20 deletions(-) create mode 100644 doc/release/upcoming_changes/17921.compatibility.rst diff --git a/doc/release/upcoming_changes/17921.compatibility.rst b/doc/release/upcoming_changes/17921.compatibility.rst new file mode 100644 index 000000000000..a1e2fb2d0408 --- /dev/null +++ b/doc/release/upcoming_changes/17921.compatibility.rst @@ -0,0 +1,6 @@ +Validate input values in ``Generator.uniform`` +---------------------------------------------- +Checked that ``high - low >= 0`` in ``np.random.Generator.uniform``. Raises +``ValueError`` if ``low > high``. Previously out-of-order inputs were accepted +and silently swapped, so that if ``low > high``, the value generated was +``high + (low - high) * random()``. diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index cd951526bfe0..e00bc4d989d5 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -859,8 +859,8 @@ cdef class Generator: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than high. The default value is 1.0. high - low must be - non-negative. + less than high. high - low must be non-negative. The default value + is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -886,10 +886,6 @@ cdef class Generator: anywhere within the interval ``[a, b)``, and zero elsewhere. When ``high`` == ``low``, values of ``low`` will be returned. - If ``high`` < ``low``, the results are officially undefined - and may eventually raise an error, i.e. do not rely on this - function to behave when passed arguments satisfying that - inequality condition. Examples -------- diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 7f4fe1c3c89d..d43e7f5aa6d9 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -1026,8 +1026,7 @@ cdef class RandomState: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. 
All values generated will be - less than or equal to high. The default value is 1.0. high - low must be - non-negative. + less than or equal to high. The default value is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -1096,7 +1095,7 @@ cdef class RandomState: """ cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange - cdef double _low, _high, rng + cdef double _low, _high, range cdef object temp alow = np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED) @@ -1105,13 +1104,13 @@ cdef class RandomState: if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0: _low = PyFloat_AsDouble(low) _high = PyFloat_AsDouble(high) - rng = _high - _low - if not np.isfinite(rng): - raise OverflowError('High - low range exceeds valid bounds') + range = _high - _low + if not np.isfinite(range): + raise OverflowError('Range exceeds valid bounds') return cont(&random_uniform, &self._bitgen, size, self.lock, 2, _low, '', CONS_NONE, - rng, 'high - low', CONS_NON_NEGATIVE, + range, '', CONS_NONE, 0.0, '', CONS_NONE, None) @@ -1124,7 +1123,7 @@ cdef class RandomState: raise OverflowError('Range exceeds valid bounds') return cont(&random_uniform, &self._bitgen, size, self.lock, 2, alow, '', CONS_NONE, - arange, 'high - low', CONS_NON_NEGATIVE, + arange, '', CONS_NONE, 0.0, '', CONS_NONE, None) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 4b534fcece75..c4fb5883c925 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1666,6 +1666,15 @@ def test_uniform_range_bounds(self): # DBL_MAX by increasing fmin a bit random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + def test_uniform_zero_range(self): + func = random.uniform + result = func(1.5, 1.5) + assert_allclose(result, 1.5) + result = func([0.0, np.pi], [0.0, np.pi]) + assert_allclose(result, [0.0, np.pi]) + result = func([[2145.12], [2145.12]], [2145.12, 2145.12]) + assert_allclose(result, 2145.12 + np.zeros((2, 2))) + def test_uniform_neg_range(self): func = random.uniform assert_raises(ValueError, func, 2, 1) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 473ca08e405d..c13fc39e3339 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -916,12 +916,6 @@ def test_uniform_range_bounds(self): # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > # DBL_MAX by increasing fmin a bit np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) - - def test_uniform_neg_range(self): - func = np.random.uniform - assert_raises(ValueError, func, 2, 1) - assert_raises(ValueError, func, [1, 2], [1, 1]) - assert_raises(ValueError, func, [[0, 1],[2, 3]], 2) def test_scalar_exception_propagation(self): # Tests that exceptions are correctly propagated in distributions From 6139ed42af271d37234320e83a41000d93bdeae1 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 11 Dec 2020 17:33:29 +0100 Subject: [PATCH 0187/1270] STY: Fixed a typo: `ofthe` -> `of the` Addresses https://github.com/numpy/numpy/pull/17981#discussion_r541018879 Co-Authored-By: Charles Harris --- numpy/typing/_array_like.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index 4ea6974b2941..63b67b33ab22 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ 
-22,7 +22,7 @@ if TYPE_CHECKING or HAVE_PROTOCOL: # The `_SupportsArray` protocol only cares about the default dtype - # (i.e. `dtype=None`) ofthe to-be returned array. + # (i.e. `dtype=None`) of the to-be returned array. # Concrete implementations of the protocol are responsible for adding # any and all remaining overloads class _SupportsArray(Protocol[_DType]): From d084917e8f884f43ee172117fe516244ebe728b5 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 11 Dec 2020 19:38:05 +0000 Subject: [PATCH 0188/1270] ENH: add new option 'cfunc_alias' to umath generator --- numpy/core/code_generators/generate_umath.py | 39 ++++++++++++-------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index fd0051ee9dc4..14b5488762a4 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -45,15 +45,19 @@ class TypeDescription: astype : dict or None, optional If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y instead of PyUFunc_x_x/PyUFunc_xx_x. + cfunc_alias : str or none, optional + replaces the suffix of C function name instead of using ufunc_name, + e.g. "FLOAT_{cfunc_alias}" instead of "FLOAT_{ufunc_name}" (see make_arrays) + NOTE: it doesn't support 'astype' simd: list Available SIMD ufunc loops, dispatched at runtime in specified order Currently only supported for simples types (see make_arrays) dispatch: str or None, optional Dispatch-able source name without its extension '.dispatch.c' that contains the definition of ufunc, dispatched at runtime depending on the specified targets of the dispatch-able source. - Currently only supported for simples types (see make_arrays) + NOTE: it doesn't support 'astype' """ - def __init__(self, type, f=None, in_=None, out=None, astype=None, simd=None, dispatch=None): + def __init__(self, type, f=None, in_=None, out=None, astype=None, cfunc_alias=None, simd=None, dispatch=None): self.type = type self.func_data = f if astype is None: @@ -65,6 +69,7 @@ def __init__(self, type, f=None, in_=None, out=None, astype=None, simd=None, dis if out is not None: out = out.replace('P', type) self.out = out + self.cfunc_alias = cfunc_alias self.simd = simd self.dispatch = dispatch @@ -91,7 +96,7 @@ def build_func_data(types, f): func_data = [_fdata_map.get(t, '%s') % (f,) for t in types] return func_data -def TD(types, f=None, astype=None, in_=None, out=None, simd=None, dispatch=None): +def TD(types, f=None, astype=None, in_=None, out=None, cfunc_alias=None, simd=None, dispatch=None): if f is not None: if isinstance(f, str): func_data = build_func_data(types, f) @@ -127,7 +132,7 @@ def TD(types, f=None, astype=None, in_=None, out=None, simd=None, dispatch=None) else: dispt = None tds.append(TypeDescription( - t, f=fd, in_=i, out=o, astype=astype, simd=simdt, dispatch=dispt + t, f=fd, in_=i, out=o, astype=astype, cfunc_alias=cfunc_alias, simd=simdt, dispatch=dispt )) return tds @@ -1013,38 +1018,33 @@ def make_arrays(funcdict): sub = 0 for t in uf.type_descriptions: - + cfunc_alias = t.cfunc_alias if t.cfunc_alias else name + cfunc_fname = None if t.func_data is FullTypeDescr: tname = english_upper(chartoname[t.type]) datalist.append('(void *)NULL') - funclist.append( - '%s_%s_%s_%s' % (tname, t.in_, t.out, name)) + cfunc_fname = f"{tname}_{t.in_}_{t.out}_{cfunc_alias}" elif isinstance(t.func_data, FuncNameSuffix): datalist.append('(void *)NULL') tname = english_upper(chartoname[t.type]) - 
funclist.append( - '%s_%s_%s' % (tname, name, t.func_data.suffix)) + cfunc_fname = f"{tname}_{cfunc_alias}_{t.func_data.suffix}" elif t.func_data is None: datalist.append('(void *)NULL') tname = english_upper(chartoname[t.type]) - cfunc_name = f"{tname}_{name}" - funclist.append(cfunc_name) + cfunc_fname = f"{tname}_{cfunc_alias}" if t.simd is not None: for vt in t.simd: code2list.append(textwrap.dedent("""\ #ifdef HAVE_ATTRIBUTE_TARGET_{ISA} if (NPY_CPU_HAVE({ISA})) {{ - {fname}_functions[{idx}] = {type}_{fname}_{isa}; + {fname}_functions[{idx}] = {cname}_{isa}; }} #endif """).format( ISA=vt.upper(), isa=vt, - fname=name, type=tname, idx=k + fname=name, cname=cfunc_fname, idx=k )) - if t.dispatch: - dispdict.setdefault(t.dispatch, []).append((tname, k, cfunc_name)) else: - funclist.append('NULL') try: thedict = arity_lookup[uf.nin, uf.nout] except KeyError as e: @@ -1074,6 +1074,13 @@ def make_arrays(funcdict): #datalist.append('(void *)%s' % t.func_data) sub += 1 + if cfunc_fname: + funclist.append(cfunc_fname) + if t.dispatch: + dispdict.setdefault(t.dispatch, []).append((name, k, cfunc_fname)) + else: + funclist.append('NULL') + for x in t.in_ + t.out: siglist.append('NPY_%s' % (english_upper(chartoname[x]),)) From ca11e4e2ebde67743c3ce09ccd265ffd855dbf4f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 12 Dec 2020 08:17:01 -0700 Subject: [PATCH 0189/1270] BUG: Replace f-string in root setup.py --- setup.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index 5e8290a7a91a..e54328e064af 100755 --- a/setup.py +++ b/setup.py @@ -47,11 +47,11 @@ # The first version not in the `Programming Language :: Python :: ...` classifiers above if sys.version_info >= (3, 10): + fmt = "NumPy {} may not yet support Python {}.{}." warnings.warn( - f"NumPy {VERSION} may not yet support Python " - f"{sys.version_info.major}.{sys.version_info.minor}.", - RuntimeWarning, - ) + fmt.format(VERSION, *sys.version_info[:2]), + RuntimeWarning) + del fmt # BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be # properly updated when the contents of directories change (true for distutils, From d2c04e72630b167987368c32d33460baf91e4cdc Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Sun, 13 Dec 2020 11:17:41 -0800 Subject: [PATCH 0190/1270] [DOC] Replace verbatim to reference to local parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The rest of the docstring and other function tend to have this convention and sphinx – as well as other tools – will be able to infer this actually refers to one of the function parameters. 
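An illustrative (trimmed) docstring fragment showing the convention being applied
here, not taken verbatim from the patch::

    def geomspace(start, stop, num=50, endpoint=True):
        """Return numbers spaced evenly on a log scale.

        Notes
        -----
        Single backticks cross-reference a parameter, e.g. `endpoint`,
        while double backticks render literal text, e.g. ``num=50``.
        """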
--- numpy/core/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index 8a1fee99b9d6..e940ac230537 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -371,7 +371,7 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j, 1.00000000e+00+0.00000000e+00j]) - Graphical illustration of ``endpoint`` parameter: + Graphical illustration of `endpoint` parameter: >>> import matplotlib.pyplot as plt >>> N = 10 From 8e956b4f613d1a7332f0a07f53afe0855ca59f83 Mon Sep 17 00:00:00 2001 From: Ryan C Cooper Date: Sun, 13 Dec 2020 14:38:52 -0500 Subject: [PATCH 0191/1270] DOC: Rename basics to fundamentals + added description (#17889) --- doc/source/user/basics.rst | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/doc/source/user/basics.rst b/doc/source/user/basics.rst index e0fc0ece36ce..66f3f9ee9988 100644 --- a/doc/source/user/basics.rst +++ b/doc/source/user/basics.rst @@ -1,14 +1,18 @@ -************ -NumPy basics -************ +****************** +NumPy fundamentals +****************** + +These documents clarify concepts, design decisions, and technical +constraints in NumPy. This is a great place to understand the +fundamental NumPy ideas and philosophy. .. toctree:: :maxdepth: 1 - basics.types basics.creation - basics.io basics.indexing + basics.io + basics.types basics.broadcasting basics.byteswapping basics.rec From fe582b6e6ef21706ee00fcdad5b97c4430a4616d Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Sun, 13 Dec 2020 16:51:40 -0800 Subject: [PATCH 0192/1270] [DOC] np.kron use double backticks for non-refs Some part of the docstring were between simple backticks which are therefore marked as cross-reference, while I belive the intended role is likely verbatim. --- numpy/lib/shape_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index cbc4641d83d3..f0596444ea97 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -1088,8 +1088,8 @@ def kron(a, b): ----- The function assumes that the number of dimensions of `a` and `b` are the same, if necessary prepending the smallest with ones. - If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`, - the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`. + If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, + the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``. 
The elements are products of elements from `a` and `b`, organized explicitly by:: From c32f60e38376e438e1d357d03c5699f4fe9c6649 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 14 Dec 2020 10:16:58 +0800 Subject: [PATCH 0193/1270] Optimize the performance of einsum's submodule dot --- numpy/core/src/common/simd/simd.h | 6 +- .../core/src/multiarray/einsum_sumprod.c.src | 184 +++++------------- 2 files changed, 49 insertions(+), 141 deletions(-) diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 8804223c9fef..7705a48ceee7 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -10,7 +10,11 @@ #include "numpy/npy_common.h" #include "npy_cpu_dispatch.h" #include "simd_utils.h" - +#ifndef NPY_HAVE_AVX2 + #include + #define NPY_HAVE_AVX + #define NPY_HAVE_AVX2 +#endif #ifdef __cplusplus extern "C" { #endif diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index caba0e00ad29..2ef0ab13b973 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -597,156 +597,60 @@ static void @type@ *data1 = (@type@ *)dataptr[1]; @temptype@ accum = 0; -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, accum_sse = _mm_setzero_ps(); -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, accum_sse = _mm_setzero_pd(); -#endif - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_contig_outstride0_two (%d)\n", (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - accum += @from@(data0[@i@]) * @from@(data1[@i@]); -/**end repeat2**/ - case 0: - *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum); - return; - } - -#if EINSUM_USE_SSE1 && @float32@ +#if @NPYV_CHK@ // NPYV check for @type@ /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - - _mm_prefetch(data0 + 512, _MM_HINT_T0); - _mm_prefetch(data1 + 512, _MM_HINT_T0); + const int is_aligned = EINSUM_IS_ALIGNED(data0) && EINSUM_IS_ALIGNED(data1); + const int vstep = npyv_nlanes_@sfx@; + npyv_@sfx@ vaccum = npyv_zero_@sfx@(); -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
+ /**begin repeat2 + * #cond = if(is_aligned), else# + * #ld = loada, load# + * #st = storea, store# + */ + @cond@ { + const npy_intp vstepx4 = vstep * 4; + for (; count >= vstepx4; count -= vstepx4, data0 += vstepx4, data1 += vstepx4) { + /**begin repeat3 + * #i = 0, 1, 2, 3# */ - a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@)); - accum_sse = _mm_add_ps(accum_sse, a); -/**end repeat2**/ - data0 += 8; - data1 += 8; + npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(data0 + vstep * @i@); + npyv_@sfx@ b@i@ = npyv_@ld@_@sfx@(data1 + vstep * @i@); + /**end repeat3**/ + npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, vaccum); + npyv_@sfx@ ab2 = npyv_muladd_@sfx@(a2, b2, ab3); + npyv_@sfx@ ab1 = npyv_muladd_@sfx@(a1, b1, ab2); + vaccum = npyv_muladd_@sfx@(a0, b0, ab1); } - - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; } -#elif EINSUM_USE_SSE2 && @float64@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - - _mm_prefetch(data0 + 512, _MM_HINT_T0); - _mm_prefetch(data1 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@)); - accum_sse = _mm_add_pd(accum_sse, a); -/**end repeat2**/ - data0 += 8; - data1 += 8; - } - - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; + /**end repeat2**/ + for (; count > 0; count -= vstep, data0 += vstep, data1 += vstep) { + npyv_@sfx@ a = npyv_load_tillz_@sfx@(data0, count); + npyv_@sfx@ b = npyv_load_tillz_@sfx@(data1, count); + vaccum = npyv_muladd_@sfx@(a, b, vaccum); } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ - _mm_prefetch(data0 + 512, _MM_HINT_T0); - _mm_prefetch(data1 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@)); - accum_sse = _mm_add_ps(accum_sse, a); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ - _mm_prefetch(data0 + 512, _MM_HINT_T0); - _mm_prefetch(data1 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
- */ a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@)); - accum_sse = _mm_add_pd(accum_sse, a); -/**end repeat2**/ + accum = npyv_sum_@sfx@(vaccum); + npyv_cleanup(); #else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - accum += @from@(data0[@i@]) * @from@(data1[@i@]); -/**end repeat2**/ -#endif - data0 += 8; - data1 += 8; +#ifndef NPY_DISABLE_OPTIMIZATION + for (; count >= 4; count -= 4, data0 += 4, data1 += 4) { + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + const @type@ ab@i@ = @from@(data0[@i@]) * @from@(data1[@i@]); + /**end repeat2**/ + accum += ab0 + ab1 + ab2 + ab3; } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#elif EINSUM_USE_SSE2 && @float64@ - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; +#endif // !NPY_DISABLE_OPTIMIZATION + for (; count > 0; --count, ++data0, ++data1) { + const @type@ a = @from@(*data0); + const @type@ b = @from@(*data1); + accum += a * b; + } +#endif // NPYV check for @type@ + *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum); } static void From 0985a73ffa4090b862829b92bf9df09bb2783efc Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 11 Dec 2020 19:45:12 +0000 Subject: [PATCH 0194/1270] ENH, SIMD: Ditching the old CPU dispatcher (Arithmetic) This is the first patch in a series of pull requests aimed at facilitating the migration to our new SIMD interface (NPYV). The series gets rid of the main umath SIMD source `simd.inc`, which contains almost all SIMD kernels, by splitting it into several dispatch-able sources without changing the base code; keeping each change small eases review and speeds up reaching the end goal. In this patch, we move the arithmetic operations on real and complex single/double precision to the new CPU dispatcher. NOTE: previously, the AVX2 and AVX512F SIMD code for single/double precision was not dispatched at runtime.
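For readers new to the dispatcher, the core idea this series builds on is: the same kernel is compiled once per instruction-set target, and a function pointer is resolved once, at runtime, from whatever features the running CPU actually reports. What follows is a minimal, self-contained C sketch of that pattern only; the names (have_avx2, add_f32_avx2, resolve_add_f32) are made up for illustration and are not NumPy's real dispatch machinery, which is generated from the .dispatch.c.src sources and routed through the NPY_CPU_DISPATCH_DECLARE / NPY_CPU_DISPATCH_CURFX macros visible in the diff below.

    /* Illustrative sketch of runtime kernel dispatch (hypothetical names). */
    #include <stddef.h>
    #include <stdio.h>

    /* Baseline kernel: always available, plain scalar loop. */
    static void add_f32_scalar(const float *a, const float *b, float *out, size_t n)
    {
        for (size_t i = 0; i < n; ++i) {
            out[i] = a[i] + b[i];
        }
    }

    /* Stand-in for a specialised kernel; in a real build this translation
     * unit would be compiled with AVX2 enabled and use wider loads/stores. */
    static void add_f32_avx2(const float *a, const float *b, float *out, size_t n)
    {
        add_f32_scalar(a, b, out, n);
    }

    typedef void (*add_f32_fn)(const float *, const float *, float *, size_t);

    /* Feature probe. For brevity this uses a compile-time macro; an actual
     * dispatcher queries the CPU (e.g. via CPUID) when the module loads. */
    static int have_avx2(void)
    {
    #if defined(__AVX2__)
        return 1;
    #else
        return 0;
    #endif
    }

    /* Resolve the best available kernel once; every later call goes through
     * the function pointer, so the per-call cost is one indirect call. */
    static add_f32_fn resolve_add_f32(void)
    {
        return have_avx2() ? add_f32_avx2 : add_f32_scalar;
    }

    int main(void)
    {
        float a[4] = {1, 2, 3, 4}, b[4] = {10, 20, 30, 40}, out[4];
        add_f32_fn add_f32 = resolve_add_f32();
        add_f32(a, b, out, 4);
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
        return 0;
    }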
--- .gitignore | 2 + numpy/core/code_generators/generate_umath.py | 31 +- numpy/core/setup.py | 2 + numpy/core/src/umath/fast_loop_macros.h | 112 +++ numpy/core/src/umath/loops.c.src | 258 +----- numpy/core/src/umath/loops.h.src | 53 +- .../src/umath/loops_arithm_fp.dispatch.c.src | 777 ++++++++++++++++++ numpy/core/src/umath/loops_utils.h | 42 - numpy/core/src/umath/loops_utils.h.src | 213 +++++ numpy/core/src/umath/simd.inc.src | 669 +-------------- 10 files changed, 1192 insertions(+), 967 deletions(-) create mode 100644 numpy/core/src/umath/loops_arithm_fp.dispatch.c.src delete mode 100644 numpy/core/src/umath/loops_utils.h create mode 100644 numpy/core/src/umath/loops_utils.h.src diff --git a/.gitignore b/.gitignore index f85c577647c4..18317b315027 100644 --- a/.gitignore +++ b/.gitignore @@ -172,6 +172,7 @@ numpy/core/src/umath/simd.inc numpy/core/src/umath/struct_ufunc_test.c numpy/core/src/umath/test_rational.c numpy/core/src/umath/umath_tests.c +numpy/core/src/umath/loops_utils.h numpy/distutils/__config__.py numpy/linalg/umath_linalg.c doc/source/**/generated/ @@ -218,3 +219,4 @@ numpy/core/src/_simd/_simd_data.inc numpy/core/src/_simd/_simd_inc.h # umath module numpy/core/src/umath/loops_unary_fp.dispatch.c +numpy/core/src/umath/loops_arithm_fp.dispatch.c diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 14b5488762a4..cdd2a84941fa 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -46,18 +46,19 @@ class TypeDescription: If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y instead of PyUFunc_x_x/PyUFunc_xx_x. cfunc_alias : str or none, optional - replaces the suffix of C function name instead of using ufunc_name, - e.g. "FLOAT_{cfunc_alias}" instead of "FLOAT_{ufunc_name}" (see make_arrays) + appended to inner loop C function name, e.g. FLOAT_{cfunc_alias} (see make_arrays) NOTE: it doesn't support 'astype' simd: list Available SIMD ufunc loops, dispatched at runtime in specified order Currently only supported for simples types (see make_arrays) dispatch: str or None, optional - Dispatch-able source name without its extension '.dispatch.c' that contains the definition of ufunc, - dispatched at runtime depending on the specified targets of the dispatch-able source. + Dispatch-able source name without its extension '.dispatch.c' that + contains the definition of ufunc, dispatched at runtime depending on the + specified targets of the dispatch-able source. 
NOTE: it doesn't support 'astype' """ - def __init__(self, type, f=None, in_=None, out=None, astype=None, cfunc_alias=None, simd=None, dispatch=None): + def __init__(self, type, f=None, in_=None, out=None, astype=None, cfunc_alias=None, + simd=None, dispatch=None): self.type = type self.func_data = f if astype is None: @@ -96,7 +97,8 @@ def build_func_data(types, f): func_data = [_fdata_map.get(t, '%s') % (f,) for t in types] return func_data -def TD(types, f=None, astype=None, in_=None, out=None, cfunc_alias=None, simd=None, dispatch=None): +def TD(types, f=None, astype=None, in_=None, out=None, cfunc_alias=None, + simd=None, dispatch=None): if f is not None: if isinstance(f, str): func_data = build_func_data(types, f) @@ -132,7 +134,8 @@ def TD(types, f=None, astype=None, in_=None, out=None, cfunc_alias=None, simd=No else: dispt = None tds.append(TypeDescription( - t, f=fd, in_=i, out=o, astype=astype, cfunc_alias=cfunc_alias, simd=simdt, dispatch=dispt + t, f=fd, in_=i, out=o, astype=astype, cfunc_alias=cfunc_alias, + simd=simdt, dispatch=dispt )) return tds @@ -287,7 +290,7 @@ def english_upper(s): Ufunc(2, 1, Zero, docstrings.get('numpy.core.umath.add'), 'PyUFunc_AdditionTypeResolver', - TD(notimes_or_obj, simd=[('avx512f', cmplxvec),('avx2', ints)]), + TD(notimes_or_obj, simd=[('avx2', ints)], dispatch=[('loops_arithm_fp', 'fdFD')]), [TypeDescription('M', FullTypeDescr, 'Mm', 'M'), TypeDescription('m', FullTypeDescr, 'mm', 'm'), TypeDescription('M', FullTypeDescr, 'mM', 'M'), @@ -298,7 +301,7 @@ def english_upper(s): Ufunc(2, 1, None, # Zero is only a unit to the right, not the left docstrings.get('numpy.core.umath.subtract'), 'PyUFunc_SubtractionTypeResolver', - TD(ints + inexact, simd=[('avx512f', cmplxvec),('avx2', ints)]), + TD(ints + inexact, simd=[('avx2', ints)], dispatch=[('loops_arithm_fp', 'fdFD')]), [TypeDescription('M', FullTypeDescr, 'Mm', 'M'), TypeDescription('m', FullTypeDescr, 'mm', 'm'), TypeDescription('M', FullTypeDescr, 'MM', 'm'), @@ -309,7 +312,7 @@ def english_upper(s): Ufunc(2, 1, One, docstrings.get('numpy.core.umath.multiply'), 'PyUFunc_MultiplicationTypeResolver', - TD(notimes_or_obj, simd=[('avx512f', cmplxvec),('avx2', ints)]), + TD(notimes_or_obj, simd=[('avx2', ints)], dispatch=[('loops_arithm_fp', 'fdFD')]), [TypeDescription('m', FullTypeDescr, 'mq', 'm'), TypeDescription('m', FullTypeDescr, 'qm', 'm'), TypeDescription('m', FullTypeDescr, 'md', 'm'), @@ -333,10 +336,10 @@ def english_upper(s): Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy.core.umath.true_divide'), 'PyUFunc_TrueDivisionTypeResolver', - TD(flts+cmplx), - [TypeDescription('m', FullTypeDescr, 'mq', 'm'), - TypeDescription('m', FullTypeDescr, 'md', 'm'), - TypeDescription('m', FullTypeDescr, 'mm', 'd'), + TD(flts+cmplx, cfunc_alias='divide', dispatch=[('loops_arithm_fp', 'fd')]), + [TypeDescription('m', FullTypeDescr, 'mq', 'm', cfunc_alias='divide'), + TypeDescription('m', FullTypeDescr, 'md', 'm', cfunc_alias='divide'), + TypeDescription('m', FullTypeDescr, 'mm', 'd', cfunc_alias='divide'), ], TD(O, f='PyNumber_TrueDivide'), ), diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 2ec5e1a64b37..2e020a595c14 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -916,8 +916,10 @@ def generate_umath_c(ext, build_dir): join('src', 'umath', 'funcs.inc.src'), join('src', 'umath', 'simd.inc.src'), join('src', 'umath', 'loops.h.src'), + join('src', 'umath', 'loops_utils.h.src'), join('src', 'umath', 'loops.c.src'), join('src', 'umath', 
'loops_unary_fp.dispatch.c.src'), + join('src', 'umath', 'loops_arithm_fp.dispatch.c.src'), join('src', 'umath', 'matmul.h.src'), join('src', 'umath', 'matmul.c.src'), join('src', 'umath', 'clip.h.src'), diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h index 5c22c6f1c2f8..dbcff8793506 100644 --- a/numpy/core/src/umath/fast_loop_macros.h +++ b/numpy/core/src/umath/fast_loop_macros.h @@ -237,5 +237,117 @@ abs_ptrdiff(char *a, char *b) TYPE io1 = *(TYPE *)iop1; \ BINARY_REDUCE_LOOP_INNER +#define IS_BINARY_STRIDE_ONE(esize, vsize) \ + ((steps[0] == esize) && \ + (steps[1] == esize) && \ + (steps[2] == esize) && \ + (abs_ptrdiff(args[2], args[0]) >= vsize) && \ + (abs_ptrdiff(args[2], args[1]) >= vsize)) + +/* + * stride is equal to element size and input and destination are equal or + * don't overlap within one register. The check of the steps against + * esize also quarantees that steps are >= 0. + */ +#define IS_BLOCKABLE_UNARY(esize, vsize) \ + (steps[0] == (esize) && steps[0] == steps[1] && \ + (npy_is_aligned(args[0], esize) && npy_is_aligned(args[1], esize)) && \ + ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \ + ((abs_ptrdiff(args[1], args[0]) == 0)))) + +/* + * Avoid using SIMD for very large step sizes for several reasons: + * 1) Supporting large step sizes requires use of i64gather/scatter_ps instructions, + * in which case we need two i64gather instructions and an additional vinsertf32x8 + * instruction to load a single zmm register (since one i64gather instruction + * loads into a ymm register). This is not ideal for performance. + * 2) Gather and scatter instructions can be slow when the loads/stores + * cross page boundaries. + * + * We instead rely on i32gather/scatter_ps instructions which use a 32-bit index + * element. The index needs to be < INT_MAX to avoid overflow. MAX_STEP_SIZE + * ensures this. The condition also requires that the input and output arrays + * should have no overlap in memory. 
+ */ +#define IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP \ + ((labs(steps[0]) < MAX_STEP_SIZE) && \ + (labs(steps[1]) < MAX_STEP_SIZE) && \ + (labs(steps[2]) < MAX_STEP_SIZE) && \ + (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ + (nomemoverlap(args[1], steps[1] * dimensions[0], args[2], steps[2] * dimensions[0]))) + +#define IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP \ + ((labs(steps[0]) < MAX_STEP_SIZE) && \ + (labs(steps[1]) < MAX_STEP_SIZE) && \ + (labs(steps[2]) < MAX_STEP_SIZE) && \ + (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ + (nomemoverlap(args[0], steps[0] * dimensions[0], args[1], steps[1] * dimensions[0]))) + +/* + * 1) Output should be contiguous, can handle strided input data + * 2) Input step should be smaller than MAX_STEP_SIZE for performance + * 3) Input and output arrays should have no overlap in memory + */ +#define IS_OUTPUT_BLOCKABLE_UNARY(esizein, esizeout, vsize) \ + ((steps[0] & (esizein-1)) == 0 && \ + steps[1] == (esizeout) && labs(steps[0]) < MAX_STEP_SIZE && \ + (nomemoverlap(args[1], steps[1] * dimensions[0], args[0], steps[0] * dimensions[0]))) + +#define IS_BLOCKABLE_REDUCE(esize, vsize) \ + (steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \ + npy_is_aligned(args[1], (esize)) && \ + npy_is_aligned(args[0], (esize))) + +#define IS_BLOCKABLE_BINARY(esize, vsize) \ + (steps[0] == steps[1] && steps[1] == steps[2] && steps[2] == (esize) && \ + npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \ + npy_is_aligned(args[0], (esize)) && \ + (abs_ptrdiff(args[2], args[0]) >= (vsize) || \ + abs_ptrdiff(args[2], args[0]) == 0) && \ + (abs_ptrdiff(args[2], args[1]) >= (vsize) || \ + abs_ptrdiff(args[2], args[1]) >= 0)) + +#define IS_BLOCKABLE_BINARY_SCALAR1(esize, vsize) \ + (steps[0] == 0 && steps[1] == steps[2] && steps[2] == (esize) && \ + npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \ + ((abs_ptrdiff(args[2], args[1]) >= (vsize)) || \ + (abs_ptrdiff(args[2], args[1]) == 0)) && \ + abs_ptrdiff(args[2], args[0]) >= (esize)) + +#define IS_BLOCKABLE_BINARY_SCALAR2(esize, vsize) \ + (steps[1] == 0 && steps[0] == steps[2] && steps[2] == (esize) && \ + npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[0], (esize)) && \ + ((abs_ptrdiff(args[2], args[0]) >= (vsize)) || \ + (abs_ptrdiff(args[2], args[0]) == 0)) && \ + abs_ptrdiff(args[2], args[1]) >= (esize)) + +#undef abs_ptrdiff + +#define IS_BLOCKABLE_BINARY_BOOL(esize, vsize) \ + (steps[0] == (esize) && steps[0] == steps[1] && steps[2] == (1) && \ + npy_is_aligned(args[1], (esize)) && \ + npy_is_aligned(args[0], (esize))) + +#define IS_BLOCKABLE_BINARY_SCALAR1_BOOL(esize, vsize) \ + (steps[0] == 0 && steps[1] == (esize) && steps[2] == (1) && \ + npy_is_aligned(args[1], (esize))) + +#define IS_BLOCKABLE_BINARY_SCALAR2_BOOL(esize, vsize) \ + (steps[0] == (esize) && steps[1] == 0 && steps[2] == (1) && \ + npy_is_aligned(args[0], (esize))) + +/* align var to alignment */ +#define LOOP_BLOCK_ALIGN_VAR(var, type, alignment)\ + npy_intp i, peel = npy_aligned_block_offset(var, sizeof(type),\ + alignment, n);\ + for(i = 0; i < peel; i++) + +#define LOOP_BLOCKED(type, vsize)\ + for(; i < npy_blocked_end(peel, sizeof(type), vsize, n);\ + i += (vsize / sizeof(type))) + +#define LOOP_BLOCKED_END\ + for (; i < n; i++) + #endif /* _NPY_UMATH_FAST_LOOP_MACROS_H_ */ diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 6403efaeee17..839d2b3ae2bb 
100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1795,86 +1795,6 @@ DOUBLE_log_avx512f(char **args, npy_intp const *dimensions, npy_intp const *step } } -/**begin repeat - * Float types - * #type = npy_float, npy_double, npy_longdouble, npy_float# - * #dtype = npy_float, npy_double, npy_longdouble, npy_half# - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF# - * #c = f, , l, # - * #C = F, , L, # - * #trf = , , , npy_half_to_float# - */ - -/* - * Pairwise summation, rounding error O(lg n) instead of O(n). - * The recursion depth is O(lg n) as well. - * when updating also update similar complex floats summation - */ -static @type@ -pairwise_sum_@TYPE@(char *a, npy_intp n, npy_intp stride) -{ - if (n < 8) { - npy_intp i; - @type@ res = 0.; - - for (i = 0; i < n; i++) { - res += @trf@(*((@dtype@*)(a + i * stride))); - } - return res; - } - else if (n <= PW_BLOCKSIZE) { - npy_intp i; - @type@ r[8], res; - - /* - * sum a block with 8 accumulators - * 8 times unroll reduces blocksize to 16 and allows vectorization with - * avx without changing summation ordering - */ - r[0] = @trf@(*((@dtype@ *)(a + 0 * stride))); - r[1] = @trf@(*((@dtype@ *)(a + 1 * stride))); - r[2] = @trf@(*((@dtype@ *)(a + 2 * stride))); - r[3] = @trf@(*((@dtype@ *)(a + 3 * stride))); - r[4] = @trf@(*((@dtype@ *)(a + 4 * stride))); - r[5] = @trf@(*((@dtype@ *)(a + 5 * stride))); - r[6] = @trf@(*((@dtype@ *)(a + 6 * stride))); - r[7] = @trf@(*((@dtype@ *)(a + 7 * stride))); - - for (i = 8; i < n - (n % 8); i += 8) { - /* small blocksizes seems to mess with hardware prefetch */ - NPY_PREFETCH(a + (i + 512/(npy_intp)sizeof(@dtype@))*stride, 0, 3); - r[0] += @trf@(*((@dtype@ *)(a + (i + 0) * stride))); - r[1] += @trf@(*((@dtype@ *)(a + (i + 1) * stride))); - r[2] += @trf@(*((@dtype@ *)(a + (i + 2) * stride))); - r[3] += @trf@(*((@dtype@ *)(a + (i + 3) * stride))); - r[4] += @trf@(*((@dtype@ *)(a + (i + 4) * stride))); - r[5] += @trf@(*((@dtype@ *)(a + (i + 5) * stride))); - r[6] += @trf@(*((@dtype@ *)(a + (i + 6) * stride))); - r[7] += @trf@(*((@dtype@ *)(a + (i + 7) * stride))); - } - - /* accumulate now to avoid stack spills for single peel loop */ - res = ((r[0] + r[1]) + (r[2] + r[3])) + - ((r[4] + r[5]) + (r[6] + r[7])); - - /* do non multiple of 8 rest */ - for (; i < n; i++) { - res += @trf@(*((@dtype@ *)(a + i * stride))); - } - return res; - } - else { - /* divide by two but avoid non-multiples of unroll factor */ - npy_intp n2 = n / 2; - - n2 -= n2 % 8; - return pairwise_sum_@TYPE@(a, n2, stride) + - pairwise_sum_@TYPE@(a + n2 * stride, n - n2, stride); - } -} - -/**end repeat**/ - /**begin repeat * Float types * #type = npy_float, npy_double, npy_longdouble# @@ -1882,39 +1802,6 @@ pairwise_sum_@TYPE@(char *a, npy_intp n, npy_intp stride) * #c = f, , l# * #C = F, , L# */ - -/**begin repeat1 - * Arithmetic - * # kind = add, subtract, multiply, divide# - * # OP = +, -, *, /# - * # PW = 1, 0, 0, 0# - */ -NPY_NO_EXPORT void -@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - if (IS_BINARY_REDUCE) { -#if @PW@ - @type@ * iop1 = (@type@ *)args[0]; - npy_intp n = dimensions[0]; - - *iop1 @OP@= pairwise_sum_@TYPE@(args[1], n, steps[1]); -#else - BINARY_REDUCE_LOOP(@type@) { - io1 @OP@= *(@type@ *)ip2; - } - *((@type@ *)iop1) = io1; -#endif - } - else if (!run_binary_simd_@kind@_@TYPE@(args, dimensions, steps)) { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = in1 @OP@ in2; - } - 
} -} -/**end repeat1**/ - /**begin repeat1 * #kind = equal, not_equal, less, less_equal, greater, greater_equal, * logical_and, logical_or# @@ -2244,8 +2131,6 @@ NPY_NO_EXPORT void } } -#define @TYPE@_true_divide @TYPE@_divide - /**end repeat**/ /* @@ -2254,6 +2139,38 @@ NPY_NO_EXPORT void ***************************************************************************** */ +/**begin repeat + * Arithmetic + * # kind = add, subtract, multiply, divide# + * # OP = +, -, *, /# + * # PW = 1, 0, 0, 0# + */ +NPY_NO_EXPORT void +LONGDOUBLE_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + if (IS_BINARY_REDUCE) { +#if @PW@ + npy_longdouble * iop1 = (npy_longdouble *)args[0]; + npy_intp n = dimensions[0]; + + *iop1 @OP@= LONGDOUBLE_pairwise_sum(args[1], n, steps[1]); +#else + BINARY_REDUCE_LOOP(npy_longdouble) { + io1 @OP@= *(npy_longdouble *)ip2; + } + *((npy_longdouble *)iop1) = io1; +#endif + } + else { + BINARY_LOOP { + const npy_longdouble in1 = *(npy_longdouble *)ip1; + const npy_longdouble in2 = *(npy_longdouble *)ip2; + *((npy_longdouble *)op1) = in1 @OP@ in2; + } + } +} +/**end repeat**/ + NPY_NO_EXPORT void LONGDOUBLE_reciprocal(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { @@ -2306,7 +2223,7 @@ HALF_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void #if @PW@ npy_intp n = dimensions[0]; - io1 @OP@= pairwise_sum_HALF(args[1], n, steps[1]); + io1 @OP@= HALF_pairwise_sum(args[1], n, steps[1]); #else BINARY_REDUCE_LOOP_INNER { io1 @OP@= npy_half_to_float(*(npy_half *)ip2); @@ -2614,9 +2531,6 @@ HALF_ldexp_long(char **args, npy_intp const *dimensions, npy_intp const *steps, } } -#define HALF_true_divide HALF_divide - - /* ***************************************************************************** ** COMPLEX LOOPS ** @@ -2643,80 +2557,8 @@ HALF_ldexp_long(char **args, npy_intp const *dimensions, npy_intp const *steps, * #SIMD = 1, 1, 0# */ -/* similar to pairwise sum of real floats */ -static void -pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, char * a, npy_intp n, - npy_intp stride) -{ - assert(n % 2 == 0); - if (n < 8) { - npy_intp i; - - *rr = 0.; - *ri = 0.; - for (i = 0; i < n; i += 2) { - *rr += *((@ftype@ *)(a + i * stride + 0)); - *ri += *((@ftype@ *)(a + i * stride + sizeof(@ftype@))); - } - return; - } - else if (n <= PW_BLOCKSIZE) { - npy_intp i; - @ftype@ r[8]; - - /* - * sum a block with 8 accumulators - * 8 times unroll reduces blocksize to 16 and allows vectorization with - * avx without changing summation ordering - */ - r[0] = *((@ftype@ *)(a + 0 * stride)); - r[1] = *((@ftype@ *)(a + 0 * stride + sizeof(@ftype@))); - r[2] = *((@ftype@ *)(a + 2 * stride)); - r[3] = *((@ftype@ *)(a + 2 * stride + sizeof(@ftype@))); - r[4] = *((@ftype@ *)(a + 4 * stride)); - r[5] = *((@ftype@ *)(a + 4 * stride + sizeof(@ftype@))); - r[6] = *((@ftype@ *)(a + 6 * stride)); - r[7] = *((@ftype@ *)(a + 6 * stride + sizeof(@ftype@))); - - for (i = 8; i < n - (n % 8); i += 8) { - /* small blocksizes seems to mess with hardware prefetch */ - NPY_PREFETCH(a + (i + 512/(npy_intp)sizeof(@ftype@))*stride, 0, 3); - r[0] += *((@ftype@ *)(a + (i + 0) * stride)); - r[1] += *((@ftype@ *)(a + (i + 0) * stride + sizeof(@ftype@))); - r[2] += *((@ftype@ *)(a + (i + 2) * stride)); - r[3] += *((@ftype@ *)(a + (i + 2) * stride + sizeof(@ftype@))); - r[4] += *((@ftype@ *)(a + (i + 4) * stride)); - r[5] += *((@ftype@ *)(a + (i + 4) * stride + sizeof(@ftype@))); - r[6] += *((@ftype@ *)(a + (i + 6) * stride)); 
- r[7] += *((@ftype@ *)(a + (i + 6) * stride + sizeof(@ftype@))); - } - - /* accumulate now to avoid stack spills for single peel loop */ - *rr = ((r[0] + r[2]) + (r[4] + r[6])); - *ri = ((r[1] + r[3]) + (r[5] + r[7])); - - /* do non multiple of 8 rest */ - for (; i < n; i+=2) { - *rr += *((@ftype@ *)(a + i * stride + 0)); - *ri += *((@ftype@ *)(a + i * stride + sizeof(@ftype@))); - } - return; - } - else { - /* divide by two but avoid non-multiples of unroll factor */ - @ftype@ rr1, ri1, rr2, ri2; - npy_intp n2 = n / 2; - - n2 -= n2 % 8; - pairwise_sum_@TYPE@(&rr1, &ri1, a, n2, stride); - pairwise_sum_@TYPE@(&rr2, &ri2, a + n2 * stride, n - n2, stride); - *rr = rr1 + rr2; - *ri = ri1 + ri2; - return; - } -} - - +#if !@SIMD@ +// CFLOAT & CDOUBLE defined by 'loops_arithm_fp.dispatch.c.src' /**begin repeat1 * arithmetic * #kind = add, subtract# @@ -2733,7 +2575,7 @@ NPY_NO_EXPORT void @ftype@ * oi = ((@ftype@ *)args[0]) + 1; @ftype@ rr, ri; - pairwise_sum_@TYPE@(&rr, &ri, args[1], n * 2, steps[1] / 2); + @TYPE@_pairwise_sum(&rr, &ri, args[1], n * 2, steps[1] / 2); *or @OP@= rr; *oi @OP@= ri; return; @@ -2763,6 +2605,7 @@ NPY_NO_EXPORT void ((@ftype@ *)op1)[1] = in1r*in2i + in1i*in2r; } } +#endif // !SIMD NPY_NO_EXPORT void @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) @@ -2796,31 +2639,6 @@ NPY_NO_EXPORT void } } -#if @SIMD@ -NPY_NO_EXPORT void -@TYPE@_add_avx512f(char **args, const npy_intp *dimensions, const npy_intp *steps, void *func) -{ - if (IS_BINARY_REDUCE) { - @TYPE@_add(args, dimensions, steps, func); - } - else if (!run_binary_avx512f_add_@TYPE@(args, dimensions, steps)) { - @TYPE@_add(args, dimensions, steps, func); - } -} - -/**begin repeat1 - * arithmetic - * #kind = subtract, multiply# - */ -NPY_NO_EXPORT void -@TYPE@_@kind@_avx512f(char **args, const npy_intp *dimensions, const npy_intp *steps, void *func) -{ - if (!run_binary_avx512f_@kind@_@TYPE@(args, dimensions, steps)) { - @TYPE@_@kind@(args, dimensions, steps, func); - } -} -/**end repeat1**/ -#endif NPY_NO_EXPORT void @TYPE@_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) @@ -3066,8 +2884,6 @@ NPY_NO_EXPORT void } /**end repeat1**/ -#define @TYPE@_true_divide @TYPE@_divide - /**end repeat**/ #undef CGE diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index a0b68d168b19..c15ff8e3b75d 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -185,6 +185,22 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, /**end repeat1**/ /**end repeat**/ +#ifndef NPY_DISABLE_OPTIMIZATION + #include "loops_arithm_fp.dispatch.h" +#endif +/**begin repeat + * #TYPE = FLOAT, DOUBLE# + */ +/**begin repeat1 + * Arithmetic + * # kind = add, subtract, multiply, divide# + * # OP = +, -, *, /# + */ +NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, + (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))) +/**end repeat1**/ +/**end repeat**/ + /**begin repeat * #TYPE = FLOAT, DOUBLE# */ @@ -356,9 +372,6 @@ NPY_NO_EXPORT void NPY_NO_EXPORT void @TYPE@_ldexp_long(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - -#define @TYPE@_true_divide @TYPE@_divide - /**end repeat**/ @@ -367,6 +380,19 @@ NPY_NO_EXPORT void ** COMPLEX LOOPS ** ***************************************************************************** */ +#ifndef NPY_DISABLE_OPTIMIZATION + #include "loops_arithm_fp.dispatch.h" 
+#endif +/**begin repeat + * #TYPE = CFLOAT, CDOUBLE# + */ +/**begin repeat1 + * #kind = add, subtract, multiply# + */ +NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, + (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))) +/**end repeat1**/ +/**end repeat**/ #define CGE(xr,xi,yr,yi) (xr > yr || (xr == yr && xi >= yi)); #define CLE(xr,xi,yr,yi) (xr < yr || (xr == yr && xi <= yi)); @@ -380,26 +406,14 @@ NPY_NO_EXPORT void * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# * #c = f, , l# * #C = F, , L# - * #IFSIMD = 1, 1, 0# */ /**begin repeat1 - * #isa = , _avx512f# - */ - -/**begin repeat2 * arithmetic - * #kind = add, subtract# - * #OP = +, -# + * #kind = add, subtract, multiply# */ - NPY_NO_EXPORT void -C@TYPE@_@kind@@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - -/**end repeat2**/ - -NPY_NO_EXPORT void -C@TYPE@_multiply@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +C@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); /**end repeat1**/ NPY_NO_EXPORT void @@ -480,7 +494,6 @@ C@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, v NPY_NO_EXPORT void C@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); /**end repeat1**/ -#define C@TYPE@_true_divide C@TYPE@_divide /**end repeat**/ @@ -593,10 +606,6 @@ NPY_NO_EXPORT void TIMEDELTA_mm_qm_divmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); /* Special case equivalents to above functions */ - -#define TIMEDELTA_mq_m_true_divide TIMEDELTA_mq_m_divide -#define TIMEDELTA_md_m_true_divide TIMEDELTA_md_m_divide -#define TIMEDELTA_mm_d_true_divide TIMEDELTA_mm_d_divide #define TIMEDELTA_mq_m_floor_divide TIMEDELTA_mq_m_divide #define TIMEDELTA_md_m_floor_divide TIMEDELTA_md_m_divide /* #define TIMEDELTA_mm_d_floor_divide TIMEDELTA_mm_d_divide */ diff --git a/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src b/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src new file mode 100644 index 000000000000..d8c8fdc9e41e --- /dev/null +++ b/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src @@ -0,0 +1,777 @@ +/*@targets + ** $maxopt baseline + ** sse2 avx2 avx512f + **/ +#define _UMATHMODULE +#define _MULTIARRAYMODULE +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#include "simd/simd.h" +#include "loops_utils.h" +#include "loops.h" +#include "lowlevel_strided_loops.h" +// Provides the various *_LOOP macros +#include "fast_loop_macros.h" + +// TODO: replace raw SIMD with NPYV +//############################################################################### +//## Real Single/Double precision +//############################################################################### +/******************************************************************************** + ** Defining the SIMD kernels + ********************************************************************************/ +#ifdef NPY_HAVE_SSE2 +/**begin repeat + * #type = npy_float, npy_double# + * #TYPE = FLOAT, DOUBLE# + * #scalarf = npy_sqrtf, npy_sqrt# + * #c = f, # + * #vtype = __m128, __m128d# + * #vtype256 = __m256, __m256d# + * #vtype512 = __m512, __m512d# + * #vpre = _mm, _mm# + * #vpre256 = _mm256, _mm256# + * #vpre512 = _mm512, _mm512# + * #vsuf = ps, pd# + * #vsufs = ss, sd# + * #nan = NPY_NANF, NPY_NAN# + * #double = 0, 1# + * #cast = _mm_castps_si128, _mm_castpd_si128# + */ +/**begin repeat1 +* 
Arithmetic +* # kind = add, subtract, multiply, divide# +* # OP = +, -, *, /# +* # VOP = add, sub, mul, div# +*/ +static void +sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) +{ +#ifdef NPY_HAVE_AVX512F + const npy_intp vector_size_bytes = 64; + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) + op[i] = ip1[i] @OP@ ip2[i]; + /* lots of specializations, to squeeze out max performance */ + if (npy_is_aligned(&ip1[i], vector_size_bytes) && npy_is_aligned(&ip2[i], vector_size_bytes)) { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); + @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + } + else if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); + @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); + @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); + @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + } +#elif defined NPY_HAVE_AVX2 + const npy_intp vector_size_bytes = 32; + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) + op[i] = ip1[i] @OP@ ip2[i]; + /* lots of specializations, to squeeze out max performance */ + if (npy_is_aligned(&ip1[i], vector_size_bytes) && + npy_is_aligned(&ip2[i], vector_size_bytes)) { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); + @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + } + else if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); + @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); + @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a); + 
@vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); + @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + } +#else + const npy_intp vector_size_bytes = 16; + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) + op[i] = ip1[i] @OP@ ip2[i]; + /* lots of specializations, to squeeze out max performance */ + if (npy_is_aligned(&ip1[i], vector_size_bytes) && + npy_is_aligned(&ip2[i], vector_size_bytes)) { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, a); + @vpre@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); + @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); + @vpre@_store_@vsuf@(&op[i], c); + } + } + } + else if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); + @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); + @vpre@_store_@vsuf@(&op[i], c); + } + } + else if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]); + @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); + @vpre@_store_@vsuf@(&op[i], c); + } + } + else { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, a); + @vpre@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]); + @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); + @vpre@_store_@vsuf@(&op[i], c); + } + } + } +#endif + LOOP_BLOCKED_END { + op[i] = ip1[i] @OP@ ip2[i]; + } +} + +static void +sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) +{ +#ifdef NPY_HAVE_AVX512F + const npy_intp vector_size_bytes = 64; + const @vtype512@ a = @vpre512@_set1_@vsuf@(ip1[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) + op[i] = ip1[0] @OP@ ip2[i]; + if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + + +#elif defined NPY_HAVE_AVX2 + const npy_intp vector_size_bytes = 32; + const @vtype256@ a = @vpre256@_set1_@vsuf@(ip1[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) + op[i] = ip1[0] @OP@ ip2[i]; + if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } +#else + const npy_intp vector_size_bytes = 16; + const @vtype@ a = @vpre@_set1_@vsuf@(ip1[0]); + LOOP_BLOCK_ALIGN_VAR(op, 
@type@, vector_size_bytes) + op[i] = ip1[0] @OP@ ip2[i]; + if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); + @vpre@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); + @vpre@_store_@vsuf@(&op[i], c); + } + } +#endif + LOOP_BLOCKED_END { + op[i] = ip1[0] @OP@ ip2[i]; + } +} + +static void +sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) +{ +#ifdef NPY_HAVE_AVX512F + const npy_intp vector_size_bytes = 64; + const @vtype512@ b = @vpre512@_set1_@vsuf@(ip2[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) + op[i] = ip1[i] @OP@ ip2[0]; + if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + +#elif defined NPY_HAVE_AVX2 + const npy_intp vector_size_bytes = 32; + const @vtype256@ b = @vpre256@_set1_@vsuf@(ip2[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) + op[i] = ip1[i] @OP@ ip2[0]; + if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } +#else + const npy_intp vector_size_bytes = 16; + const @vtype@ b = @vpre@_set1_@vsuf@(ip2[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) + op[i] = ip1[i] @OP@ ip2[0]; + if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); + @vpre@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, vector_size_bytes) { + @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]); + @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); + @vpre@_store_@vsuf@(&op[i], c); + } + } +#endif + LOOP_BLOCKED_END { + op[i] = ip1[i] @OP@ ip2[0]; + } +} + +/**end repeat1**/ +/**end repeat**/ + +#else // NPY_HAVE_SSE2 + +/**begin repeat + * #type = npy_float, npy_double# + * #TYPE = FLOAT, DOUBLE# + * #sfx = f32, f64# + * #CHK = , _F64# + */ +#if NPY_SIMD@CHK@ +/**begin repeat1 +* Arithmetic +* # kind = add, subtract, multiply, divide# +* # OP = +, -, *, /# +* # VOP = add, sub, mul, div# +*/ + +static void +simd_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) +{ + LOOP_BLOCK_ALIGN_VAR(op, @type@, NPY_SIMD_WIDTH) { + op[i] = ip1[i] @OP@ ip2[i]; + } + /* lots of specializations, to squeeze out max performance */ + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, NPY_SIMD_WIDTH) { + npyv_@sfx@ a = npyv_load_@sfx@(&ip1[i]); + npyv_@sfx@ c = npyv_@VOP@_@sfx@(a, a); + npyv_store_@sfx@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, NPY_SIMD_WIDTH) { + npyv_@sfx@ a = npyv_load_@sfx@(&ip1[i]); + npyv_@sfx@ b = npyv_load_@sfx@(&ip2[i]); + npyv_@sfx@ c = npyv_@VOP@_@sfx@(a, b); + npyv_store_@sfx@(&op[i], c); + } + } + 
LOOP_BLOCKED_END { + op[i] = ip1[i] @OP@ ip2[i]; + } +} + +static void +simd_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) +{ + const npyv_@sfx@ v1 = npyv_setall_@sfx@(ip1[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, NPY_SIMD_WIDTH) { + op[i] = ip1[0] @OP@ ip2[i]; + } + LOOP_BLOCKED(@type@, NPY_SIMD_WIDTH) { + npyv_@sfx@ v2 = npyv_load_@sfx@(&ip2[i]); + npyv_@sfx@ v3 = npyv_@VOP@_@sfx@(v1, v2); + npyv_store_@sfx@(&op[i], v3); + } + LOOP_BLOCKED_END { + op[i] = ip1[0] @OP@ ip2[i]; + } +} + +static void +simd_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) +{ + const npyv_@sfx@ v2 = npyv_setall_@sfx@(ip2[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, NPY_SIMD_WIDTH) { + op[i] = ip1[i] @OP@ ip2[0]; + } + LOOP_BLOCKED(@type@, NPY_SIMD_WIDTH) { + npyv_@sfx@ v1 = npyv_load_@sfx@(&ip1[i]); + npyv_@sfx@ v3 = npyv_@VOP@_@sfx@(v1, v2); + npyv_store_@sfx@(&op[i], v3); + } + LOOP_BLOCKED_END { + op[i] = ip1[i] @OP@ ip2[0]; + } +} +/**end repeat1**/ +#endif /* NPY_SIMD@CHK@ */ +/**end repeat**/ +#endif // NPY_HAVE_SSE2 + +/**begin repeat + * Float types + * #type = npy_float, npy_double, npy_longdouble# + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# + * #vector = 1, 1, 0# + * #VECTOR = NPY_SIMD, NPY_SIMD_F64, 0 # + */ +/**begin repeat1 + * Arithmetic + * # kind = add, subtract, multiply, divide# + */ +static NPY_INLINE int +run_binary_simd_@kind@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) +{ +#if @vector@ && defined NPY_HAVE_SSE2 + @type@ * ip1 = (@type@ *)args[0]; + @type@ * ip2 = (@type@ *)args[1]; + @type@ * op = (@type@ *)args[2]; + npy_intp n = dimensions[0]; +#if defined NPY_HAVE_AVX512F + const npy_uintp vector_size_bytes = 64; +#elif defined NPY_HAVE_AVX2 + const npy_uintp vector_size_bytes = 32; +#else + const npy_uintp vector_size_bytes = 32; +#endif + /* argument one scalar */ + if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), vector_size_bytes)) { + sse2_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n); + return 1; + } + /* argument two scalar */ + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), vector_size_bytes)) { + sse2_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n); + return 1; + } + else if (IS_BLOCKABLE_BINARY(sizeof(@type@), vector_size_bytes)) { + sse2_binary_@kind@_@TYPE@(op, ip1, ip2, n); + return 1; + } +#elif @VECTOR@ + @type@ * ip1 = (@type@ *)args[0]; + @type@ * ip2 = (@type@ *)args[1]; + @type@ * op = (@type@ *)args[2]; + npy_intp n = dimensions[0]; + /* argument one scalar */ + if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), NPY_SIMD_WIDTH)) { + simd_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n); + return 1; + } + /* argument two scalar */ + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH)) { + simd_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n); + return 1; + } + else if (IS_BLOCKABLE_BINARY(sizeof(@type@), NPY_SIMD_WIDTH)) { + simd_binary_@kind@_@TYPE@(op, ip1, ip2, n); + return 1; + } +#endif + return 0; +} +/**end repeat1**/ +/**end repeat**/ + +/******************************************************************************** + ** Defining ufunc inner functions + ********************************************************************************/ +/**begin repeat + * Float types + * #type = npy_float, npy_double# + * #TYPE = FLOAT, DOUBLE# + * #c = f, # + * #C = F, # + */ +/**begin repeat1 + * Arithmetic + * # kind = add, subtract, multiply, divide# + * # OP = +, -, *, /# + * # PW = 1, 0, 0, 0# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) +(char **args, 
npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + if (IS_BINARY_REDUCE) { +#if @PW@ + @type@ * iop1 = (@type@ *)args[0]; + npy_intp n = dimensions[0]; + + *iop1 @OP@= @TYPE@_pairwise_sum(args[1], n, steps[1]); +#else + BINARY_REDUCE_LOOP(@type@) { + io1 @OP@= *(@type@ *)ip2; + } + *((@type@ *)iop1) = io1; +#endif + } + else if (!run_binary_simd_@kind@_@TYPE@(args, dimensions, steps)) { + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((@type@ *)op1) = in1 @OP@ in2; + } + } +} +/**end repeat1**/ +/**end repeat**/ + +//############################################################################### +//## Complex Single/Double precision +//############################################################################### +/******************************************************************************** + ** Defining the SIMD kernels + ********************************************************************************/ +#if !defined(_MSC_VER) && defined(NPY_HAVE_AVX512F) + /** + * For somehow MSVC commit aggressive optimization lead + * to raises 'RuntimeWarning: invalid value encountered in multiply' + * + * the issue mainly caused by '_mm512_maskz_loadu_ps', we need to + * investigate about it while moving to NPYV. + */ + #define AVX512F_NOMSVC +#endif + +#ifdef AVX512F_NOMSVC +static NPY_INLINE __mmask16 +avx512_get_full_load_mask_ps(void) +{ + return 0xFFFF; +} + +static NPY_INLINE __mmask8 +avx512_get_full_load_mask_pd(void) +{ + return 0xFF; +} +static NPY_INLINE __m512 +avx512_masked_load_ps(__mmask16 mask, npy_float* addr) +{ + return _mm512_maskz_loadu_ps(mask, (__m512 *)addr); +} + +static NPY_INLINE __m512d +avx512_masked_load_pd(__mmask8 mask, npy_double* addr) +{ + return _mm512_maskz_loadu_pd(mask, (__m512d *)addr); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16 +avx512_get_partial_load_mask_ps(const npy_int num_elem, const npy_int total_elem) +{ + return (0x0001 << num_elem) - 0x0001; +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8 +avx512_get_partial_load_mask_pd(const npy_int num_elem, const npy_int total_elem) +{ + return (0x01 << num_elem) - 0x01; +} +/**begin repeat + * #vsub = ps, pd# + * #type= npy_float, npy_double# + * #epi_vsub = epi32, epi64# + * #vtype = __m512, __m512d# + * #mask = __mmask16, __mmask8# + * #and_const = 0x7fffffff, 0x7fffffffffffffffLL# + * #neg_mask = 0x80000000, 0x8000000000000000# + * #perm_ = 0xb1, 0x55# + * #cmpx_img_mask = 0xAAAA, 0xAA# + * #cmpx_re_mask = 0x5555, 0x55# + * #INF = NPY_INFINITYF, NPY_INFINITY# + * #NAN = NPY_NANF, NPY_NAN# + */ +static @vtype@ +avx512_hadd_@vsub@(const @vtype@ x) +{ + return _mm512_add_@vsub@(x, _mm512_permute_@vsub@(x, @perm_@)); +} + +static @vtype@ +avx512_hsub_@vsub@(const @vtype@ x) +{ + return _mm512_sub_@vsub@(x, _mm512_permute_@vsub@(x, @perm_@)); +} +static NPY_INLINE @vtype@ +avx512_cmul_@vsub@(@vtype@ x1, @vtype@ x2) +{ + // x1 = r1, i1 + // x2 = r2, i2 + @vtype@ x3 = _mm512_permute_@vsub@(x2, @perm_@); // i2, r2 + @vtype@ x12 = _mm512_mul_@vsub@(x1, x2); // r1*r2, i1*i2 + @vtype@ x13 = _mm512_mul_@vsub@(x1, x3); // r1*i2, r2*i1 + @vtype@ outreal = avx512_hsub_@vsub@(x12); // r1*r2 - i1*i2, r1*r2 - i1*i2 + @vtype@ outimg = avx512_hadd_@vsub@(x13); // r1*i2 + i1*r2, r1*i2 + i1*r2 + return _mm512_mask_blend_@vsub@(@cmpx_img_mask@, outreal, outimg); +} +/**end repeat**/ +#endif + +/**begin repeat + * #TYPE = CFLOAT, CDOUBLE# + * #type = npy_float, npy_double# + * #num_lanes = 16, 8# + * 
#vsuffix = ps, pd# + * #epi_vsub = epi32, epi64# + * #mask = __mmask16, __mmask8# + * #vtype = __m512, __m512d# + * #scale = 4, 8# + * #vindextype = __m512i, __m256i# + * #vindexload = _mm512_loadu_si512, _mm256_loadu_si256# + * #storemask = 0xFF, 0xF# + * #IS_FLOAT = 1, 0# + */ +/**begin repeat1 + * #func = add, subtract, multiply# + * #vectorf = _mm512_add, _mm512_sub, avx512_cmul# + */ +#if defined AVX512F_NOMSVC +static NPY_INLINE void +AVX512F_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps) +{ + const npy_intp array_size = dimensions[0]; + npy_intp num_remaining_elements = 2*array_size; + @type@* ip1 = (@type@*) args[0]; + @type@* ip2 = (@type@*) args[1]; + @type@* op = (@type@*) args[2]; + + @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@(); + + while (num_remaining_elements > 0) { + if (num_remaining_elements < @num_lanes@) { + load_mask = avx512_get_partial_load_mask_@vsuffix@( + num_remaining_elements, @num_lanes@); + } + @vtype@ x1, x2; + x1 = avx512_masked_load_@vsuffix@(load_mask, ip1); + x2 = avx512_masked_load_@vsuffix@(load_mask, ip2); + + @vtype@ out = @vectorf@_@vsuffix@(x1, x2); + + _mm512_mask_storeu_@vsuffix@(op, load_mask, out); + + ip1 += @num_lanes@; + ip2 += @num_lanes@; + op += @num_lanes@; + num_remaining_elements -= @num_lanes@; + } +} +#endif // AVX512F_NOMSVC +/**end repeat1**/ +/**end repeat**/ + +/**begin repeat + * #TYPE = CFLOAT, CDOUBLE# + * #type= npy_float, npy_double# + * #esize = 8, 16# + */ +/**begin repeat1 + * #func = add, subtract, multiply# + */ +static NPY_INLINE int +run_binary_avx512f_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps) +{ +#if defined AVX512F_NOMSVC + if (IS_BINARY_STRIDE_ONE(@esize@, 64)) { + AVX512F_@func@_@TYPE@(args, dimensions, steps); + return 1; + } + else + return 0; +#endif + return 0; +} +/**end repeat1**/ +/**end repeat**/ + +/******************************************************************************** + ** Defining ufunc inner functions + ********************************************************************************/ +/**begin repeat + * complex types + * #TYPE = CFLOAT, CDOUBLE# + * #ftype = npy_float, npy_double# + * #c = f, # + * #C = F, # + */ +/**begin repeat1 + * arithmetic + * #kind = add, subtract# + * #OP = +, -# + * #PW = 1, 0# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + // Parenthesis around @PW@ tells clang dead code is intentional + if (IS_BINARY_REDUCE && (@PW@)) { + npy_intp n = dimensions[0]; + @ftype@ * or = ((@ftype@ *)args[0]); + @ftype@ * oi = ((@ftype@ *)args[0]) + 1; + @ftype@ rr, ri; + + @TYPE@_pairwise_sum(&rr, &ri, args[1], n * 2, steps[1] / 2); + *or @OP@= rr; + *oi @OP@= ri; + return; + } + if (!run_binary_avx512f_@kind@_@TYPE@(args, dimensions, steps)) { + BINARY_LOOP { + const @ftype@ in1r = ((@ftype@ *)ip1)[0]; + const @ftype@ in1i = ((@ftype@ *)ip1)[1]; + const @ftype@ in2r = ((@ftype@ *)ip2)[0]; + const @ftype@ in2i = ((@ftype@ *)ip2)[1]; + ((@ftype@ *)op1)[0] = in1r @OP@ in2r; + ((@ftype@ *)op1)[1] = in1i @OP@ in2i; + } + } +} +/**end repeat1**/ + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_multiply) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + if (!run_binary_avx512f_multiply_@TYPE@(args, dimensions, steps)) { + BINARY_LOOP { + const @ftype@ in1r = ((@ftype@ *)ip1)[0]; + const @ftype@ in1i = ((@ftype@ *)ip1)[1]; + const @ftype@ in2r = ((@ftype@ 
*)ip2)[0]; + const @ftype@ in2i = ((@ftype@ *)ip2)[1]; + ((@ftype@ *)op1)[0] = in1r*in2r - in1i*in2i; + ((@ftype@ *)op1)[1] = in1r*in2i + in1i*in2r; + } + } +} +/**end repeat**/ diff --git a/numpy/core/src/umath/loops_utils.h b/numpy/core/src/umath/loops_utils.h deleted file mode 100644 index f5540bdae2fd..000000000000 --- a/numpy/core/src/umath/loops_utils.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _NPY_UMATH_LOOPS_UTILS_H_ -#define _NPY_UMATH_LOOPS_UTILS_H_ - -#include "numpy/npy_common.h" // NPY_FINLINE -/* - * nomemoverlap - returns false if two strided arrays have an overlapping - * region in memory. ip_size/op_size = size of the arrays which can be negative - * indicating negative steps. - */ -NPY_FINLINE npy_bool -nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size) -{ - char *ip_start, *ip_end, *op_start, *op_end; - if (ip_size < 0) { - ip_start = ip + ip_size; - ip_end = ip; - } - else { - ip_start = ip; - ip_end = ip + ip_size; - } - if (op_size < 0) { - op_start = op + op_size; - op_end = op; - } - else { - op_start = op; - op_end = op + op_size; - } - return (ip_start == op_start && op_end == ip_end) || - (ip_start > op_end) || (op_start > ip_end); -} - -// returns true if two strided arrays have an overlapping region in memory -// same as `nomemoverlap()` but requires array length and step sizes -NPY_FINLINE npy_bool -is_mem_overlap(const void *src, npy_intp src_step, const void *dst, npy_intp dst_step, npy_intp len) -{ - return !(nomemoverlap((char*)src, src_step*len, (char*)dst, dst_step*len)); -} - -#endif // _NPY_UMATH_LOOPS_UTILS_H_ diff --git a/numpy/core/src/umath/loops_utils.h.src b/numpy/core/src/umath/loops_utils.h.src new file mode 100644 index 000000000000..dfa790ed9f34 --- /dev/null +++ b/numpy/core/src/umath/loops_utils.h.src @@ -0,0 +1,213 @@ +#ifndef _NPY_UMATH_LOOPS_UTILS_H_ +#define _NPY_UMATH_LOOPS_UTILS_H_ + +#include "numpy/npy_common.h" // NPY_FINLINE +#include "numpy/halffloat.h" // npy_half_to_float +/* + * nomemoverlap - returns false if two strided arrays have an overlapping + * region in memory. ip_size/op_size = size of the arrays which can be negative + * indicating negative steps. 
+ */ +NPY_FINLINE npy_bool +nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size) +{ + char *ip_start, *ip_end, *op_start, *op_end; + if (ip_size < 0) { + ip_start = ip + ip_size; + ip_end = ip; + } + else { + ip_start = ip; + ip_end = ip + ip_size; + } + if (op_size < 0) { + op_start = op + op_size; + op_end = op; + } + else { + op_start = op; + op_end = op + op_size; + } + return (ip_start == op_start && op_end == ip_end) || + (ip_start > op_end) || (op_start > ip_end); +} + +// returns true if two strided arrays have an overlapping region in memory +// same as `nomemoverlap()` but requires array length and step sizes +NPY_FINLINE npy_bool +is_mem_overlap(const void *src, npy_intp src_step, const void *dst, npy_intp dst_step, npy_intp len) +{ + return !(nomemoverlap((char*)src, src_step*len, (char*)dst, dst_step*len)); +} + +/* + * cutoff blocksize for pairwise summation + * decreasing it decreases errors slightly as more pairs are summed but + * also lowers performance, as the inner loop is unrolled eight times it is + * effectively 16 + */ +#define PW_BLOCKSIZE 128 + +/**begin repeat + * Float types + * #type = npy_float, npy_double, npy_longdouble, npy_float# + * #dtype = npy_float, npy_double, npy_longdouble, npy_half# + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF# + * #c = f, , l, # + * #C = F, , L, # + * #trf = , , , npy_half_to_float# + */ + +/* + * Pairwise summation, rounding error O(lg n) instead of O(n). + * The recursion depth is O(lg n) as well. + * when updating also update similar complex floats summation + */ +static NPY_INLINE @type@ +@TYPE@_pairwise_sum(char *a, npy_intp n, npy_intp stride) +{ + if (n < 8) { + npy_intp i; + @type@ res = 0.; + + for (i = 0; i < n; i++) { + res += @trf@(*((@dtype@*)(a + i * stride))); + } + return res; + } + else if (n <= PW_BLOCKSIZE) { + npy_intp i; + @type@ r[8], res; + + /* + * sum a block with 8 accumulators + * 8 times unroll reduces blocksize to 16 and allows vectorization with + * avx without changing summation ordering + */ + r[0] = @trf@(*((@dtype@ *)(a + 0 * stride))); + r[1] = @trf@(*((@dtype@ *)(a + 1 * stride))); + r[2] = @trf@(*((@dtype@ *)(a + 2 * stride))); + r[3] = @trf@(*((@dtype@ *)(a + 3 * stride))); + r[4] = @trf@(*((@dtype@ *)(a + 4 * stride))); + r[5] = @trf@(*((@dtype@ *)(a + 5 * stride))); + r[6] = @trf@(*((@dtype@ *)(a + 6 * stride))); + r[7] = @trf@(*((@dtype@ *)(a + 7 * stride))); + + for (i = 8; i < n - (n % 8); i += 8) { + /* small blocksizes seems to mess with hardware prefetch */ + NPY_PREFETCH(a + (i + 512/(npy_intp)sizeof(@dtype@))*stride, 0, 3); + r[0] += @trf@(*((@dtype@ *)(a + (i + 0) * stride))); + r[1] += @trf@(*((@dtype@ *)(a + (i + 1) * stride))); + r[2] += @trf@(*((@dtype@ *)(a + (i + 2) * stride))); + r[3] += @trf@(*((@dtype@ *)(a + (i + 3) * stride))); + r[4] += @trf@(*((@dtype@ *)(a + (i + 4) * stride))); + r[5] += @trf@(*((@dtype@ *)(a + (i + 5) * stride))); + r[6] += @trf@(*((@dtype@ *)(a + (i + 6) * stride))); + r[7] += @trf@(*((@dtype@ *)(a + (i + 7) * stride))); + } + + /* accumulate now to avoid stack spills for single peel loop */ + res = ((r[0] + r[1]) + (r[2] + r[3])) + + ((r[4] + r[5]) + (r[6] + r[7])); + + /* do non multiple of 8 rest */ + for (; i < n; i++) { + res += @trf@(*((@dtype@ *)(a + i * stride))); + } + return res; + } + else { + /* divide by two but avoid non-multiples of unroll factor */ + npy_intp n2 = n / 2; + + n2 -= n2 % 8; + return @TYPE@_pairwise_sum(a, n2, stride) + + @TYPE@_pairwise_sum(a + n2 * stride, n - n2, stride); + } +} + +/**end repeat**/ + 
+/**begin repeat + * complex types + * #TYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# + * #ftype = npy_float, npy_double, npy_longdouble# + * #c = f, , l# + * #C = F, , L# + * #SIMD = 1, 1, 0# + */ +/* similar to pairwise sum of real floats */ +static NPY_INLINE void +@TYPE@_pairwise_sum(@ftype@ *rr, @ftype@ * ri, char * a, npy_intp n, + npy_intp stride) +{ + assert(n % 2 == 0); + if (n < 8) { + npy_intp i; + + *rr = 0.; + *ri = 0.; + for (i = 0; i < n; i += 2) { + *rr += *((@ftype@ *)(a + i * stride + 0)); + *ri += *((@ftype@ *)(a + i * stride + sizeof(@ftype@))); + } + return; + } + else if (n <= PW_BLOCKSIZE) { + npy_intp i; + @ftype@ r[8]; + + /* + * sum a block with 8 accumulators + * 8 times unroll reduces blocksize to 16 and allows vectorization with + * avx without changing summation ordering + */ + r[0] = *((@ftype@ *)(a + 0 * stride)); + r[1] = *((@ftype@ *)(a + 0 * stride + sizeof(@ftype@))); + r[2] = *((@ftype@ *)(a + 2 * stride)); + r[3] = *((@ftype@ *)(a + 2 * stride + sizeof(@ftype@))); + r[4] = *((@ftype@ *)(a + 4 * stride)); + r[5] = *((@ftype@ *)(a + 4 * stride + sizeof(@ftype@))); + r[6] = *((@ftype@ *)(a + 6 * stride)); + r[7] = *((@ftype@ *)(a + 6 * stride + sizeof(@ftype@))); + + for (i = 8; i < n - (n % 8); i += 8) { + /* small blocksizes seems to mess with hardware prefetch */ + NPY_PREFETCH(a + (i + 512/(npy_intp)sizeof(@ftype@))*stride, 0, 3); + r[0] += *((@ftype@ *)(a + (i + 0) * stride)); + r[1] += *((@ftype@ *)(a + (i + 0) * stride + sizeof(@ftype@))); + r[2] += *((@ftype@ *)(a + (i + 2) * stride)); + r[3] += *((@ftype@ *)(a + (i + 2) * stride + sizeof(@ftype@))); + r[4] += *((@ftype@ *)(a + (i + 4) * stride)); + r[5] += *((@ftype@ *)(a + (i + 4) * stride + sizeof(@ftype@))); + r[6] += *((@ftype@ *)(a + (i + 6) * stride)); + r[7] += *((@ftype@ *)(a + (i + 6) * stride + sizeof(@ftype@))); + } + + /* accumulate now to avoid stack spills for single peel loop */ + *rr = ((r[0] + r[2]) + (r[4] + r[6])); + *ri = ((r[1] + r[3]) + (r[5] + r[7])); + + /* do non multiple of 8 rest */ + for (; i < n; i+=2) { + *rr += *((@ftype@ *)(a + i * stride + 0)); + *ri += *((@ftype@ *)(a + i * stride + sizeof(@ftype@))); + } + return; + } + else { + /* divide by two but avoid non-multiples of unroll factor */ + @ftype@ rr1, ri1, rr2, ri2; + npy_intp n2 = n / 2; + + n2 -= n2 % 8; + @TYPE@_pairwise_sum(&rr1, &ri1, a, n2, stride); + @TYPE@_pairwise_sum(&rr2, &ri2, a + n2 * stride, n - n2, stride); + *rr = rr1 + rr2; + *ri = ri1 + ri2; + return; + } +} +/**end repeat**/ + +#endif // _NPY_UMATH_LOOPS_UTILS_H_ diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index a118fb0d091e..53bb4e0597f9 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -28,7 +28,6 @@ #undef __AVX512F__ #endif #endif -#include "simd/simd.h" #include "loops_utils.h" // nomemoverlap #include #include @@ -52,119 +51,6 @@ */ #define MAX_STEP_SIZE 2097152 -#define IS_BINARY_STRIDE_ONE(esize, vsize) \ - ((steps[0] == esize) && \ - (steps[1] == esize) && \ - (steps[2] == esize) && \ - (abs_ptrdiff(args[2], args[0]) >= vsize) && \ - (abs_ptrdiff(args[2], args[1]) >= vsize)) - -/* - * stride is equal to element size and input and destination are equal or - * don't overlap within one register. The check of the steps against - * esize also quarantees that steps are >= 0. 
- */ -#define IS_BLOCKABLE_UNARY(esize, vsize) \ - (steps[0] == (esize) && steps[0] == steps[1] && \ - (npy_is_aligned(args[0], esize) && npy_is_aligned(args[1], esize)) && \ - ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \ - ((abs_ptrdiff(args[1], args[0]) == 0)))) - -/* - * Avoid using SIMD for very large step sizes for several reasons: - * 1) Supporting large step sizes requires use of i64gather/scatter_ps instructions, - * in which case we need two i64gather instructions and an additional vinsertf32x8 - * instruction to load a single zmm register (since one i64gather instruction - * loads into a ymm register). This is not ideal for performance. - * 2) Gather and scatter instructions can be slow when the loads/stores - * cross page boundaries. - * - * We instead rely on i32gather/scatter_ps instructions which use a 32-bit index - * element. The index needs to be < INT_MAX to avoid overflow. MAX_STEP_SIZE - * ensures this. The condition also requires that the input and output arrays - * should have no overlap in memory. - */ -#define IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[2], steps[2] * dimensions[0]))) - -#define IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[1], steps[1] * dimensions[0]))) - -/* - * 1) Output should be contiguous, can handle strided input data - * 2) Input step should be smaller than MAX_STEP_SIZE for performance - * 3) Input and output arrays should have no overlap in memory - */ -#define IS_OUTPUT_BLOCKABLE_UNARY(esizein, esizeout, vsize) \ - ((steps[0] & (esizein-1)) == 0 && \ - steps[1] == (esizeout) && labs(steps[0]) < MAX_STEP_SIZE && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[0], steps[0] * dimensions[0]))) - -#define IS_BLOCKABLE_REDUCE(esize, vsize) \ - (steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \ - npy_is_aligned(args[1], (esize)) && \ - npy_is_aligned(args[0], (esize))) - -#define IS_BLOCKABLE_BINARY(esize, vsize) \ - (steps[0] == steps[1] && steps[1] == steps[2] && steps[2] == (esize) && \ - npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \ - npy_is_aligned(args[0], (esize)) && \ - (abs_ptrdiff(args[2], args[0]) >= (vsize) || \ - abs_ptrdiff(args[2], args[0]) == 0) && \ - (abs_ptrdiff(args[2], args[1]) >= (vsize) || \ - abs_ptrdiff(args[2], args[1]) >= 0)) - -#define IS_BLOCKABLE_BINARY_SCALAR1(esize, vsize) \ - (steps[0] == 0 && steps[1] == steps[2] && steps[2] == (esize) && \ - npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \ - ((abs_ptrdiff(args[2], args[1]) >= (vsize)) || \ - (abs_ptrdiff(args[2], args[1]) == 0)) && \ - abs_ptrdiff(args[2], args[0]) >= (esize)) - -#define IS_BLOCKABLE_BINARY_SCALAR2(esize, vsize) \ - (steps[1] == 0 && steps[0] == steps[2] && steps[2] == (esize) && \ - npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[0], (esize)) && \ - ((abs_ptrdiff(args[2], args[0]) >= (vsize)) || \ - (abs_ptrdiff(args[2], args[0]) == 0)) && \ - abs_ptrdiff(args[2], args[1]) >= (esize)) - -#undef abs_ptrdiff - -#define 
IS_BLOCKABLE_BINARY_BOOL(esize, vsize) \ - (steps[0] == (esize) && steps[0] == steps[1] && steps[2] == (1) && \ - npy_is_aligned(args[1], (esize)) && \ - npy_is_aligned(args[0], (esize))) - -#define IS_BLOCKABLE_BINARY_SCALAR1_BOOL(esize, vsize) \ - (steps[0] == 0 && steps[1] == (esize) && steps[2] == (1) && \ - npy_is_aligned(args[1], (esize))) - -#define IS_BLOCKABLE_BINARY_SCALAR2_BOOL(esize, vsize) \ - (steps[0] == (esize) && steps[1] == 0 && steps[2] == (1) && \ - npy_is_aligned(args[0], (esize))) - -/* align var to alignment */ -#define LOOP_BLOCK_ALIGN_VAR(var, type, alignment)\ - npy_intp i, peel = npy_aligned_block_offset(var, sizeof(type),\ - alignment, n);\ - for(i = 0; i < peel; i++) - -#define LOOP_BLOCKED(type, vsize)\ - for(; i < npy_blocked_end(peel, sizeof(type), vsize, n);\ - i += (vsize / sizeof(type))) - -#define LOOP_BLOCKED_END\ - for (; i < n; i++) - - /* * Dispatcher functions * decide whether the operation can be vectorized and run it @@ -183,31 +69,6 @@ * #esize = 8, 16# */ -/**begin repeat1 - * #func = add, subtract, multiply# - */ - -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS -static NPY_INLINE NPY_GCC_TARGET_AVX512F void -AVX512F_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps); -#endif - -static NPY_INLINE int -run_binary_avx512f_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps) -{ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS - if (IS_BINARY_STRIDE_ONE(@esize@, 64)) { - AVX512F_@func@_@TYPE@(args, dimensions, steps); - return 1; - } - else - return 0; -#endif - return 0; -} - -/**end repeat1**/ - /**begin repeat1 * #func = square, absolute, conjugate# * #outsize = 1, 2, 1# @@ -507,92 +368,6 @@ run_@name@_simd_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp /**end repeat1**/ -/**begin repeat1 - * Arithmetic - * # kind = add, subtract, multiply, divide# - */ - -#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS - -/* prototypes */ -static void -sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, - npy_intp n); -static void -sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, - npy_intp n); -static void -sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, - npy_intp n); - -#elif @VECTOR@ - -static void -simd_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, - npy_intp n); -static void -simd_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, - npy_intp n); -static void -simd_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, - npy_intp n); - -#endif - -static NPY_INLINE int -run_binary_simd_@kind@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ -#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS - @type@ * ip1 = (@type@ *)args[0]; - @type@ * ip2 = (@type@ *)args[1]; - @type@ * op = (@type@ *)args[2]; - npy_intp n = dimensions[0]; -#if defined __AVX512F__ - const npy_uintp vector_size_bytes = 64; -#elif defined __AVX2__ - const npy_uintp vector_size_bytes = 32; -#else - const npy_uintp vector_size_bytes = 32; -#endif - /* argument one scalar */ - if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), vector_size_bytes)) { - sse2_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n); - return 1; - } - /* argument two scalar */ - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), vector_size_bytes)) { - sse2_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n); - return 1; - } - else 
if (IS_BLOCKABLE_BINARY(sizeof(@type@), vector_size_bytes)) { - sse2_binary_@kind@_@TYPE@(op, ip1, ip2, n); - return 1; - } -#elif @VECTOR@ - @type@ * ip1 = (@type@ *)args[0]; - @type@ * ip2 = (@type@ *)args[1]; - @type@ * op = (@type@ *)args[2]; - npy_intp n = dimensions[0]; - /* argument one scalar */ - if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), NPY_SIMD_WIDTH)) { - simd_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n); - return 1; - } - /* argument two scalar */ - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH)) { - simd_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n); - return 1; - } - else if (IS_BLOCKABLE_BINARY(sizeof(@type@), NPY_SIMD_WIDTH)) { - simd_binary_@kind@_@TYPE@(op, ip1, ip2, n); - return 1; - } -#endif - return 0; -} - -/**end repeat1**/ - /**begin repeat1 * #kind = equal, not_equal, less, less_equal, greater, greater_equal, * logical_and, logical_or# @@ -777,7 +552,6 @@ static NPY_INLINE npy_double sse2_horizontal_@VOP@___m128d(__m128d v) _mm_store_sd(&r, _mm_@VOP@_pd(tmp, v)); /* m(ab) m(bb) */ return r; } - /**end repeat**/ /**begin repeat @@ -797,327 +571,6 @@ static NPY_INLINE npy_double sse2_horizontal_@VOP@___m128d(__m128d v) * #double = 0, 1# * #cast = _mm_castps_si128, _mm_castpd_si128# */ - - -/**begin repeat1 -* Arithmetic -* # kind = add, subtract, multiply, divide# -* # OP = +, -, *, /# -* # VOP = add, sub, mul, div# -*/ - -static void -sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) -{ -#ifdef __AVX512F__ - const npy_intp vector_size_bytes = 64; - LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) - op[i] = ip1[i] @OP@ ip2[i]; - /* lots of specializations, to squeeze out max performance */ - if (npy_is_aligned(&ip1[i], vector_size_bytes) && npy_is_aligned(&ip2[i], vector_size_bytes)) { - if (ip1 == ip2) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); - @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - } - else if (npy_is_aligned(&ip1[i], vector_size_bytes)) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); - @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - else if (npy_is_aligned(&ip2[i], vector_size_bytes)) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); - @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - else { - if (ip1 == ip2) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); - @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - } -#elif defined __AVX2__ - const npy_intp vector_size_bytes = 32; - LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) - op[i] = ip1[i] @OP@ ip2[i]; - /* lots of specializations, to squeeze out max performance */ - if (npy_is_aligned(&ip1[i], 
vector_size_bytes) && - npy_is_aligned(&ip2[i], vector_size_bytes)) { - if (ip1 == ip2) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a); - @vpre256@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); - @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); - @vpre256@_store_@vsuf@(&op[i], c); - } - } - } - else if (npy_is_aligned(&ip1[i], vector_size_bytes)) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); - @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); - @vpre256@_store_@vsuf@(&op[i], c); - } - } - else if (npy_is_aligned(&ip2[i], vector_size_bytes)) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); - @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); - @vpre256@_store_@vsuf@(&op[i], c); - } - } - else { - if (ip1 == ip2) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a); - @vpre256@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); - @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); - @vpre256@_store_@vsuf@(&op[i], c); - } - } - } -#else - LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) - op[i] = ip1[i] @OP@ ip2[i]; - /* lots of specializations, to squeeze out max performance */ - if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES) && - npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) { - if (ip1 == ip2) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); - @vtype@ c = @vpre@_@VOP@_@vsuf@(a, a); - @vpre@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); - @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]); - @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); - @vpre@_store_@vsuf@(&op[i], c); - } - } - } - else if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); - @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]); - @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); - @vpre@_store_@vsuf@(&op[i], c); - } - } - else if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]); - @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]); - @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); - @vpre@_store_@vsuf@(&op[i], c); - } - } - else { - if (ip1 == ip2) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]); - @vtype@ c = @vpre@_@VOP@_@vsuf@(a, a); - @vpre@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]); - @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]); - @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); - @vpre@_store_@vsuf@(&op[i], c); - } - } - } -#endif - LOOP_BLOCKED_END { - op[i] = ip1[i] @OP@ ip2[i]; - } -} - - -static void -sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) -{ -#ifdef __AVX512F__ - const npy_intp vector_size_bytes = 64; - const @vtype512@ a = @vpre512@_set1_@vsuf@(ip1[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) 
- op[i] = ip1[0] @OP@ ip2[i]; - if (npy_is_aligned(&ip2[i], vector_size_bytes)) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - - -#elif defined __AVX2__ - const npy_intp vector_size_bytes = 32; - const @vtype256@ a = @vpre256@_set1_@vsuf@(ip1[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) - op[i] = ip1[0] @OP@ ip2[i]; - if (npy_is_aligned(&ip2[i], vector_size_bytes)) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); - @vpre256@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); - @vpre256@_store_@vsuf@(&op[i], c); - } - } -#else - const @vtype@ a = @vpre@_set1_@vsuf@(ip1[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) - op[i] = ip1[0] @OP@ ip2[i]; - if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]); - @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); - @vpre@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]); - @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); - @vpre@_store_@vsuf@(&op[i], c); - } - } -#endif - LOOP_BLOCKED_END { - op[i] = ip1[0] @OP@ ip2[i]; - } -} - - -static void -sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) -{ -#ifdef __AVX512F__ - const npy_intp vector_size_bytes = 64; - const @vtype512@ b = @vpre512@_set1_@vsuf@(ip2[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) - op[i] = ip1[i] @OP@ ip2[0]; - if (npy_is_aligned(&ip1[i], vector_size_bytes)) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); - @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); - @vpre512@_store_@vsuf@(&op[i], c); - } - } - -#elif defined __AVX2__ - const npy_intp vector_size_bytes = 32; - const @vtype256@ b = @vpre256@_set1_@vsuf@(ip2[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) - op[i] = ip1[i] @OP@ ip2[0]; - if (npy_is_aligned(&ip1[i], vector_size_bytes)) { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); - @vpre256@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, vector_size_bytes) { - @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); - @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); - @vpre256@_store_@vsuf@(&op[i], c); - } - } -#else - const @vtype@ b = @vpre@_set1_@vsuf@(ip2[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) - op[i] = ip1[i] @OP@ ip2[0]; - if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); - @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b); - @vpre@_store_@vsuf@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { - @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]); - @vtype@ c 
= @vpre@_@VOP@_@vsuf@(a, b); - @vpre@_store_@vsuf@(&op[i], c); - } - } -#endif - LOOP_BLOCKED_END { - op[i] = ip1[i] @OP@ ip2[0]; - } -} - -/**end repeat1**/ - /* * compress 4 vectors to 4/8 bytes in op with filled with 0 or 1 * the last vector is passed as a pointer as MSVC 2010 is unable to ignore the @@ -3347,45 +2800,6 @@ AVX512F_log_DOUBLE(npy_double * op, * #IS_FLOAT = 1, 0# */ -/**begin repeat1 - * #func = add, subtract, multiply# - * #vectorf = _mm512_add, _mm512_sub, avx512_cmul# - */ - -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS -static NPY_GCC_OPT_3 NPY_INLINE NPY_GCC_TARGET_AVX512F void -AVX512F_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps) -{ - const npy_intp array_size = dimensions[0]; - npy_intp num_remaining_elements = 2*array_size; - @type@* ip1 = (@type@*) args[0]; - @type@* ip2 = (@type@*) args[1]; - @type@* op = (@type@*) args[2]; - - @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@(); - - while (num_remaining_elements > 0) { - if (num_remaining_elements < @num_lanes@) { - load_mask = avx512_get_partial_load_mask_@vsuffix@( - num_remaining_elements, @num_lanes@); - } - @vtype@ x1, x2; - x1 = avx512_masked_load_@vsuffix@(load_mask, ip1); - x2 = avx512_masked_load_@vsuffix@(load_mask, ip2); - - @vtype@ out = @vectorf@_@vsuffix@(x1, x2); - - _mm512_mask_storeu_@vsuffix@(op, load_mask, out); - - ip1 += @num_lanes@; - ip2 += @num_lanes@; - op += @num_lanes@; - num_remaining_elements -= @num_lanes@; - } -} -#endif -/**end repeat1**/ - /**begin repeat1 * #func = square, conjugate# * #vectorf = avx512_csquare, avx512_conjugate# @@ -3648,86 +3062,5 @@ sse2_@kind@_BOOL(@type@ * op, @type@ * ip, const npy_intp n) /**end repeat**/ #undef VECTOR_SIZE_BYTES -#else /* NPY_HAVE_SSE2_INTRINSICS */ - -/**begin repeat - * #type = npy_float, npy_double# - * #TYPE = FLOAT, DOUBLE# - * #sfx = f32, f64# - * #CHK = , _F64# - */ - -#if NPY_SIMD@CHK@ - -/**begin repeat1 -* Arithmetic -* # kind = add, subtract, multiply, divide# -* # OP = +, -, *, /# -* # VOP = add, sub, mul, div# -*/ - -static void -simd_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) -{ - LOOP_BLOCK_ALIGN_VAR(op, @type@, NPY_SIMD_WIDTH) { - op[i] = ip1[i] @OP@ ip2[i]; - } - /* lots of specializations, to squeeze out max performance */ - if (ip1 == ip2) { - LOOP_BLOCKED(@type@, NPY_SIMD_WIDTH) { - npyv_@sfx@ a = npyv_load_@sfx@(&ip1[i]); - npyv_@sfx@ c = npyv_@VOP@_@sfx@(a, a); - npyv_store_@sfx@(&op[i], c); - } - } - else { - LOOP_BLOCKED(@type@, NPY_SIMD_WIDTH) { - npyv_@sfx@ a = npyv_load_@sfx@(&ip1[i]); - npyv_@sfx@ b = npyv_load_@sfx@(&ip2[i]); - npyv_@sfx@ c = npyv_@VOP@_@sfx@(a, b); - npyv_store_@sfx@(&op[i], c); - } - } - LOOP_BLOCKED_END { - op[i] = ip1[i] @OP@ ip2[i]; - } -} - -static void -simd_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) -{ - const npyv_@sfx@ v1 = npyv_setall_@sfx@(ip1[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, NPY_SIMD_WIDTH) { - op[i] = ip1[0] @OP@ ip2[i]; - } - LOOP_BLOCKED(@type@, NPY_SIMD_WIDTH) { - npyv_@sfx@ v2 = npyv_load_@sfx@(&ip2[i]); - npyv_@sfx@ v3 = npyv_@VOP@_@sfx@(v1, v2); - npyv_store_@sfx@(&op[i], v3); - } - LOOP_BLOCKED_END { - op[i] = ip1[0] @OP@ ip2[i]; - } -} - -static void -simd_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) -{ - const npyv_@sfx@ v2 = npyv_setall_@sfx@(ip2[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, NPY_SIMD_WIDTH) { - op[i] = ip1[i] @OP@ ip2[0]; - } - LOOP_BLOCKED(@type@, 
NPY_SIMD_WIDTH) { - npyv_@sfx@ v1 = npyv_load_@sfx@(&ip1[i]); - npyv_@sfx@ v3 = npyv_@VOP@_@sfx@(v1, v2); - npyv_store_@sfx@(&op[i], v3); - } - LOOP_BLOCKED_END { - op[i] = ip1[i] @OP@ ip2[0]; - } -} -/**end repeat1**/ -#endif /* NPY_SIMD@CHK@ */ -/**end repeat**/ -#endif +#endif /* NPY_HAVE_SSE2_INTRINSICS */ #endif From 350670fd661bba7f8cf8baf436f07b3a284b85be Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 14 Dec 2020 11:00:09 +0800 Subject: [PATCH 0195/1270] remove force avx2 --- numpy/core/src/common/simd/simd.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 7705a48ceee7..8804223c9fef 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -10,11 +10,7 @@ #include "numpy/npy_common.h" #include "npy_cpu_dispatch.h" #include "simd_utils.h" -#ifndef NPY_HAVE_AVX2 - #include - #define NPY_HAVE_AVX - #define NPY_HAVE_AVX2 -#endif + #ifdef __cplusplus extern "C" { #endif From 37db023706843afdd887f7dd22a38a30196ffea6 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 14 Dec 2020 07:52:54 +0000 Subject: [PATCH 0196/1270] MAINT: Bump pytest from 6.0.2 to 6.2.0 Bumps [pytest](https://github.com/pytest-dev/pytest) from 6.0.2 to 6.2.0. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/master/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/6.0.2...6.2.0) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 5166ada16d6d..e038ca473168 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -2,7 +2,7 @@ cython==0.29.21 wheel<=0.35.1 setuptools<49.2.0 hypothesis==5.41.5 -pytest==6.0.2 +pytest==6.2.0 pytz==2020.4 pytest-cov==2.10.1 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' From adc7e31ef831020e158f26efeab647bb27d3f6a3 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 14 Dec 2020 07:53:31 +0000 Subject: [PATCH 0197/1270] MAINT: Update wheel requirement from <=0.35.1 to <0.36.3 Updates the requirements on [wheel](https://github.com/pypa/wheel) to permit the latest version. - [Release notes](https://github.com/pypa/wheel/releases) - [Changelog](https://github.com/pypa/wheel/blob/master/docs/news.rst) - [Commits](https://github.com/pypa/wheel/compare/0.5...0.36.2) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 5166ada16d6d..f640c6ec8410 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,5 +1,5 @@ cython==0.29.21 -wheel<=0.35.1 +wheel<0.36.3 setuptools<49.2.0 hypothesis==5.41.5 pytest==6.0.2 From 3768b8ad352a5595073d1bdb04ea3523affb5732 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 14 Dec 2020 15:56:06 +0800 Subject: [PATCH 0198/1270] get rid of memcpy. 
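
When the output is contiguous, the four partial bitmasks produced by
npyv_tobits_b8 can be merged and written out as whole 64-bit words instead
of being copied one byte at a time through memcpy. A minimal standalone
sketch of the 128-bit (NPY_SIMD_WIDTH == 16) case, where each mask occupies
16 bits (the helper name and plain C types are illustrative only, not the
exact code in pack_inner):

    #include <stdint.h>

    /* Combine four 16-bit byte-masks into one 64-bit word and store it
     * with a single write; the caller guarantees 8-byte alignment. */
    static void
    pack_bits_16x4(uint64_t b0, uint64_t b1, uint64_t b2, uint64_t b3,
                   char *outptr)
    {
        uint64_t *ptr64 = (uint64_t *)outptr;
        ptr64[0] = b0 | (b1 << 16) | (b2 << 32) | (b3 << 48);
    }

Strided or unaligned outputs keep the old byte-wise fallback loop.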
--- numpy/core/src/multiarray/compiled_base.c | 26 ++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 1aa7eceeec71..81eb6e623bba 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1483,7 +1483,7 @@ pack_inner(const char *inptr, char *outptr, npy_intp n_out, npy_intp out_stride, - char order) + PACK_ORDER order) { /* * Loop through the elements of inptr. @@ -1519,10 +1519,26 @@ pack_inner(const char *inptr, bb[1] = npyv_tobits_b8(npyv_cmpneq_u8(v1, v_zero)); bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); - for(int i = 0; i < 4; i++) { - for (int j = 0; j < vstep; j++) { - memcpy(outptr, (char*)&bb[i] + j, 1); - outptr += out_stride; + if(out_stride == 1 && + npy_is_aligned(outptr, sizeof(npy_uint64))) { + npy_uint64 *ptr64 = (npy_uint64*)outptr; + #if NPY_SIMD_WIDTH == 16 + npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); + ptr64[0] = bcomp; + #elif NPY_SIMD_WIDTH == 32 + ptr64[0] = bb[0] | (bb[1] << 32); + ptr64[1] = bb[2] | (bb[3] << 32); + #else + ptr64[0] = bb[0]; ptr64[1] = bb[1]; + ptr64[2] = bb[2]; ptr64[3] = bb[3]; + #endif + outptr += vstepx4; + } else { + for(int i = 0; i < 4; i++) { + for (int j = 0; j < vstep; j++) { + memcpy(outptr, (char*)&bb[i] + j, 1); + outptr += out_stride; + } } } } From ae96df4f897ed07e4190070b8757488b05b1b0df Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 14 Dec 2020 16:20:38 +0800 Subject: [PATCH 0199/1270] use max value to detect deviation --- numpy/core/src/common/simd/avx2/arithmetic.h | 2 +- numpy/core/src/common/simd/avx512/arithmetic.h | 2 +- numpy/core/src/common/simd/neon/arithmetic.h | 2 +- numpy/core/src/common/simd/sse/arithmetic.h | 2 +- numpy/core/src/common/simd/vsx/arithmetic.h | 2 +- numpy/core/tests/test_simd.py | 12 +++++------- 6 files changed, 10 insertions(+), 12 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index a0a03b3c9e85..3a3a82798829 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -118,7 +118,7 @@ #endif // !NPY_HAVE_FMA3 // Horizontal add: Calculates the sum of all vector elements. -NPY_FINLINE int npyv_sum_u32(__m256i a) +NPY_FINLINE npy_uint32 npyv_sum_u32(__m256i a) { __m256i s0 = _mm256_hadd_epi32(a, a); s0 = _mm256_hadd_epi32(s0, s0); diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index 4cb222a67def..6f668f439a89 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -145,7 +145,7 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) * intel compiler/GCC 7.1/Clang 4, we still need to support older GCC. 
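
The expand test now loads lanes close to the integer maximum (see the new
comment in test_simd.py), so mistakes in the widening show up immediately
rather than passing by accident on small ramp values. The horizontal sums
also return npy_uint32 instead of int, presumably so large unsigned results
are not funneled back through a signed type. A rough standalone illustration
(plain C, not NumPy code) of the kind of deviation that near-maximum values
expose:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* four u32 lanes close to UINT32_MAX, mirroring the new test data */
        uint32_t lanes[4] = {UINT32_MAX - 3, UINT32_MAX - 2,
                             UINT32_MAX - 1, UINT32_MAX};
        uint32_t sum = 0;   /* wraps modulo 2**32, which is well defined */
        for (int i = 0; i < 4; i++) {
            sum += lanes[i];
        }
        /* prints 4294967286; squeezing this through a signed int would
         * typically come out as -10 */
        printf("%" PRIu32 "\n", sum);
        return 0;
    }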
***************************/ -NPY_FINLINE int npyv_sum_u32(npyv_u32 a) +NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { __m256i half = _mm256_add_epi32(npyv512_lower_si256(a), npyv512_higher_si256(a)); __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h index e34d25d28994..1c8bde15a318 100644 --- a/numpy/core/src/common/simd/neon/arithmetic.h +++ b/numpy/core/src/common/simd/neon/arithmetic.h @@ -137,7 +137,7 @@ #define npyv_sum_f32 vaddvq_f32 #define npyv_sum_f64 vaddvq_f64 #else - NPY_FINLINE int npyv_sum_u32(npyv_u32 a) + NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { uint32x2_t a0 = vpadd_u32(vget_low_u32(a), vget_high_u32(a)); return (unsigned)vget_lane_u32(vpadd_u32(a0, vget_high_u32(a)),0); diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index 4e5d2583c9fe..faf5685d98d8 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -150,7 +150,7 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b) // Horizontal add: Calculates the sum of all vector elements. -NPY_FINLINE int npyv_sum_u32(__m128i a) +NPY_FINLINE npy_uint32 npyv_sum_u32(__m128i a) { __m128i t = _mm_add_epi32(a, _mm_srli_si128(a, 8)); t = _mm_add_epi32(t, _mm_srli_si128(t, 4)); diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h index ca2f8f1a7a95..1288a52a7b6d 100644 --- a/numpy/core/src/common/simd/vsx/arithmetic.h +++ b/numpy/core/src/common/simd/vsx/arithmetic.h @@ -118,7 +118,7 @@ // Horizontal add: Calculates the sum of all vector elements. -NPY_FINLINE int npyv_sum_u32(npyv_u32 a) +NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { const npyv_u32 rs = vec_add(a, vec_sld(a, a, 8)); return vec_extract(vec_add(rs, vec_sld(rs, rs, 4)), 0); diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 7d4e3db16517..41d65c13f517 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -579,14 +579,12 @@ def test_conversion_boolean(self): assert false_vsfx != true_vsfx def test_conversion_expand(self): - if self.sfx == "u8": - totype = "u16" - elif self.sfx == "u16": - totype = "u32" - else: + if self.sfx not in ("u8", "u16"): return - data = self._data() - expand = getattr(self.npyv, "expand_%s_%s" % (totype, self.sfx)) + totype = self.sfx[0]+str(int(self.sfx[1:])*2) + expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}") + # close enough from the edge to detect any deviation + data = self._data(self._int_max() - self.nlanes) vdata = self.load(data) edata = expand(vdata) # lower half part From 30c01122d2631269fa28f057f6723168b64ca783 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 14 Dec 2020 16:29:11 +0800 Subject: [PATCH 0200/1270] add highest gcc optimization level. 
--- numpy/core/src/multiarray/einsum_sumprod.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index 2ef0ab13b973..86d5b82fc818 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -589,7 +589,7 @@ finish_after_unrolled_loop: goto finish_after_unrolled_loop; } -static void +static NPY_GCC_OPT_3 void @name@_sum_of_products_contig_contig_outstride0_two(int nop, char **dataptr, npy_intp const *NPY_UNUSED(strides), npy_intp count) { From bdb67012974c6da18fb00066e81ff903265b95fb Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 14 Dec 2020 11:42:24 +0200 Subject: [PATCH 0201/1270] TST: ignore pytest warning --- pytest.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pytest.ini b/pytest.ini index 149af04b88c6..dfad538c2327 100644 --- a/pytest.ini +++ b/pytest.ini @@ -14,4 +14,5 @@ filterwarnings = # Matrix PendingDeprecationWarning. ignore:the matrix subclass is not ignore:Importing from numpy.matlib is - +# pytest warning when using PYTHONOPTIMIZE + ignore:assertions not in test modules or plugins:pytest.PytestConfigWarning From e7709ea4ce04299829e3ce6fc28570f248c66fa0 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 14 Dec 2020 15:20:18 +0000 Subject: [PATCH 0202/1270] MAINT: Bump hypothesis from 5.41.5 to 5.43.3 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 5.41.5 to 5.43.3. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-5.41.5...hypothesis-python-5.43.3) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 1f7b229b4f8f..e3fc9ccc6764 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 -hypothesis==5.41.5 +hypothesis==5.43.3 pytest==6.2.0 pytz==2020.4 pytest-cov==2.10.1 From 77e4bf6acc5346266d66c815d7a94d30507e4e48 Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Mon, 14 Dec 2020 10:32:42 -0800 Subject: [PATCH 0203/1270] Replace Numpy with NumPy --- doc/neps/nep-0013-ufunc-overrides.rst | 12 +++---- doc/neps/nep-0023-backwards-compatibility.rst | 2 +- doc/neps/nep-0025-missing-data-3.rst | 16 +++++----- doc/neps/nep-0027-zero-rank-arrarys.rst | 4 +-- doc/neps/nep-0029-deprecation_policy.rst | 32 +++++++++---------- .../nep-0032-remove-financial-functions.rst | 12 +++---- 6 files changed, 39 insertions(+), 39 deletions(-) diff --git a/doc/neps/nep-0013-ufunc-overrides.rst b/doc/neps/nep-0013-ufunc-overrides.rst index 698e45738708..4a647e9d380c 100644 --- a/doc/neps/nep-0013-ufunc-overrides.rst +++ b/doc/neps/nep-0013-ufunc-overrides.rst @@ -46,8 +46,8 @@ right behaviour, hence the change in name.) The ``__array_ufunc__`` as described below requires that any corresponding Python binary operations (``__mul__`` et al.) should be -implemented in a specific way and be compatible with Numpy's ndarray -semantics. Objects that do not satisfy this cannot override any Numpy +implemented in a specific way and be compatible with NumPy's ndarray +semantics. Objects that do not satisfy this cannot override any NumPy ufuncs. 
We do not specify a future-compatible path by which this requirement can be relaxed --- any changes here require corresponding changes in 3rd party code. @@ -132,7 +132,7 @@ However, this behavior is more confusing than useful, and having a :exc:`TypeError` would be preferable. This proposal will *not* resolve the issue with scipy.sparse matrices, -which have multiplication semantics incompatible with numpy arrays. +which have multiplication semantics incompatible with NumPy arrays. However, the aim is to enable writing other custom array types that have strictly ndarray compatible semantics. @@ -246,7 +246,7 @@ three groups: - *Incompatible*: neither above nor below A; types for which no (indirect) upcasting is possible. -Note that the legacy behaviour of numpy ufuncs is to try to convert +Note that the legacy behaviour of NumPy ufuncs is to try to convert unknown objects to :class:`ndarray` via :func:`np.asarray`. This is equivalent to placing :class:`ndarray` above these objects in the graph. Since we above defined :class:`ndarray` to return `NotImplemented` for @@ -454,7 +454,7 @@ implements the following behavior: A class wishing to modify the interaction with :class:`ndarray` in binary operations therefore has two options: -1. Implement ``__array_ufunc__`` and follow Numpy semantics for Python +1. Implement ``__array_ufunc__`` and follow NumPy semantics for Python binary operations (see below). 2. Set ``__array_ufunc__ = None``, and implement Python binary @@ -678,7 +678,7 @@ NA ``abs`` :func:`absolute` Future extensions to other functions ------------------------------------ -Some numpy functions could be implemented as (generalized) Ufunc, in +Some NumPy functions could be implemented as (generalized) Ufunc, in which case it would be possible for them to be overridden by the ``__array_ufunc__`` method. A prime candidate is :func:`~numpy.matmul`, which currently is not a Ufunc, but could be relatively easily be diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index 158f46273f00..c8bd7c180468 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -156,7 +156,7 @@ Removing complete submodules This year there have been suggestions to consider removing some or all of ``numpy.distutils``, ``numpy.f2py``, ``numpy.linalg``, and ``numpy.random``. The motivation was that all these cost maintenance effort, and that they slow -down work on the core of Numpy (ndarrays, dtypes and ufuncs). +down work on the core of NumPy (ndarrays, dtypes and ufuncs). The impact on downstream libraries and users would be very large, and maintenance of these modules would still have to happen. Therefore this is diff --git a/doc/neps/nep-0025-missing-data-3.rst b/doc/neps/nep-0025-missing-data-3.rst index ffe208c98070..81045652e17d 100644 --- a/doc/neps/nep-0025-missing-data-3.rst +++ b/doc/neps/nep-0025-missing-data-3.rst @@ -62,7 +62,7 @@ values, (4) it is compatible with the common practice of using NaN to indicate missingness when working with floating point numbers, (5) the dtype is already a place where "weird things can happen" -- there are a wide variety of dtypes that don't act like ordinary numbers (including structs, Python objects, -fixed-length strings, ...), so code that accepts arbitrary numpy arrays already +fixed-length strings, ...), so code that accepts arbitrary NumPy arrays already has to be prepared to handle these (even if only by checking for them and raising an error). 
Therefore adding yet more new dtypes has less impact on extension authors than if we change the ndarray object itself. @@ -96,7 +96,7 @@ for consistency. General strategy ================ -Numpy already has a general mechanism for defining new dtypes and slotting them +NumPy already has a general mechanism for defining new dtypes and slotting them in so that they're supported by ndarrays, by the casting machinery, by ufuncs, and so on. In principle, we could implement NA-dtypes just using these existing interfaces. But we don't want to do that, because defining all those new ufunc @@ -271,7 +271,7 @@ below. Casting ------- -FIXME: this really needs attention from an expert on numpy's casting rules. But +FIXME: this really needs attention from an expert on NumPy's casting rules. But I can't seem to find the docs that explain how casting loops are looked up and decided between (e.g., if you're casting from dtype A to dtype B, which dtype's loops are used?), so I can't go into details. But those details are tricky and @@ -338,7 +338,7 @@ Printing -------- FIXME: There should be some sort of mechanism by which values which are NA are -automatically repr'ed as NA, but I don't really understand how numpy printing +automatically repr'ed as NA, but I don't really understand how NumPy printing works, so I'll let someone else fill in this section. Indexing @@ -364,10 +364,10 @@ own global singleton.) So for now we stick to scalar indexing just returning Python API for generic NA support ================================= -NumPy will gain a global singleton called numpy.NA, similar to None, but with +NumPy will gain a global singleton called ``numpy.NA``, similar to None, but with semantics reflecting its status as a missing value. In particular, trying to treat it as a boolean will raise an exception, and comparisons with it will -produce numpy.NA instead of True or False. These basics are adopted from the +produce ``numpy.NA`` instead of True or False. These basics are adopted from the behavior of the NA value in the R project. To dig deeper into the ideas, http://en.wikipedia.org/wiki/Ternary_logic#Kleene_logic provides a starting point. @@ -453,8 +453,8 @@ The NEP also contains a proposal for a somewhat elaborate domain-specific-language for describing NA dtypes. I'm not sure how great an idea that is. (I have a bias against using strings as data structures, and find the already existing strings confusing enough as it is -- also, apparently the -NEP version of numpy uses strings like 'f8' when printing dtypes, while my -numpy uses object names like 'float64', so I'm not sure what's going on there. +NEP version of NumPy uses strings like 'f8' when printing dtypes, while my +NumPy uses object names like 'float64', so I'm not sure what's going on there. ``withNA(float64, arg1=value1)`` seems like a more pleasant way to print a dtype than "NA[f8,value1]", at least to me.) But if people want it, then cool. 
diff --git a/doc/neps/nep-0027-zero-rank-arrarys.rst b/doc/neps/nep-0027-zero-rank-arrarys.rst index 2d152234cf44..cb39726759ba 100644 --- a/doc/neps/nep-0027-zero-rank-arrarys.rst +++ b/doc/neps/nep-0027-zero-rank-arrarys.rst @@ -57,7 +57,7 @@ However there are some important differences: Motivation for Array Scalars ---------------------------- -Numpy's design decision to provide 0-d arrays and array scalars in addition to +NumPy's design decision to provide 0-d arrays and array scalars in addition to native python types goes against one of the fundamental python design principles that there should be only one obvious way to do it. In this section we will try to explain why it is necessary to have three different ways to @@ -109,7 +109,7 @@ arrays to scalars were summarized as follows: are something like Python lists (which except for Object arrays they are not). -Numpy implements a solution that is designed to have all the pros and none of the cons above. +NumPy implements a solution that is designed to have all the pros and none of the cons above. Create Python scalar types for all of the 21 types and also inherit from the three that already exist. Define equivalent diff --git a/doc/neps/nep-0029-deprecation_policy.rst b/doc/neps/nep-0029-deprecation_policy.rst index 957674ee6e20..a50afcb98f9d 100644 --- a/doc/neps/nep-0029-deprecation_policy.rst +++ b/doc/neps/nep-0029-deprecation_policy.rst @@ -1,7 +1,7 @@ .. _NEP29: ================================================================================== -NEP 29 — Recommend Python and Numpy version support as a community policy standard +NEP 29 — Recommend Python and NumPy version support as a community policy standard ================================================================================== @@ -124,14 +124,14 @@ Drop Schedule :: On next release, drop support for Python 3.5 (initially released on Sep 13, 2015) - On Jan 07, 2020 drop support for Numpy 1.14 (initially released on Jan 06, 2018) + On Jan 07, 2020 drop support for NumPy 1.14 (initially released on Jan 06, 2018) On Jun 23, 2020 drop support for Python 3.6 (initially released on Dec 23, 2016) - On Jul 23, 2020 drop support for Numpy 1.15 (initially released on Jul 23, 2018) - On Jan 13, 2021 drop support for Numpy 1.16 (initially released on Jan 13, 2019) - On Jul 26, 2021 drop support for Numpy 1.17 (initially released on Jul 26, 2019) - On Dec 22, 2021 drop support for Numpy 1.18 (initially released on Dec 22, 2019) + On Jul 23, 2020 drop support for NumPy 1.15 (initially released on Jul 23, 2018) + On Jan 13, 2021 drop support for NumPy 1.16 (initially released on Jan 13, 2019) + On Jul 26, 2021 drop support for NumPy 1.17 (initially released on Jul 26, 2019) + On Dec 22, 2021 drop support for NumPy 1.18 (initially released on Dec 22, 2019) On Dec 26, 2021 drop support for Python 3.7 (initially released on Jun 27, 2018) - On Jun 21, 2022 drop support for Numpy 1.19 (initially released on Jun 20, 2020) + On Jun 21, 2022 drop support for NumPy 1.19 (initially released on Jun 20, 2020) On Apr 14, 2023 drop support for Python 3.8 (initially released on Oct 14, 2019) @@ -249,18 +249,18 @@ Code to generate support and drop schedule tables :: from datetime import datetime, timedelta - data = """Jan 15, 2017: Numpy 1.12 + data = """Jan 15, 2017: NumPy 1.12 Sep 13, 2015: Python 3.5 Dec 23, 2016: Python 3.6 Jun 27, 2018: Python 3.7 - Jun 07, 2017: Numpy 1.13 - Jan 06, 2018: Numpy 1.14 - Jul 23, 2018: Numpy 1.15 - Jan 13, 2019: Numpy 1.16 - Jul 26, 2019: Numpy 1.17 + Jun 07, 
2017: NumPy 1.13 + Jan 06, 2018: NumPy 1.14 + Jul 23, 2018: NumPy 1.15 + Jan 13, 2019: NumPy 1.16 + Jul 26, 2019: NumPy 1.17 Oct 14, 2019: Python 3.8 - Dec 22, 2019: Numpy 1.18 - Jun 20, 2020: Numpy 1.19 + Dec 22, 2019: NumPy 1.18 + Jun 20, 2020: NumPy 1.19 """ releases = [] @@ -284,7 +284,7 @@ Code to generate support and drop schedule tables :: py_major,py_minor = sorted([int(x) for x in r[2].split('.')] for r in releases if r[1] == 'Python')[-1] minpy = f"{py_major}.{py_minor+1}+" - num_major,num_minor = sorted([int(x) for x in r[2].split('.')] for r in releases if r[1] == 'Numpy')[-1] + num_major,num_minor = sorted([int(x) for x in r[2].split('.')] for r in releases if r[1] == 'NumPy')[-1] minnum = f"{num_major}.{num_minor+1}+" toprint_drop_dates = [''] diff --git a/doc/neps/nep-0032-remove-financial-functions.rst b/doc/neps/nep-0032-remove-financial-functions.rst index 1c3722f46cb3..bf98a7467d2b 100644 --- a/doc/neps/nep-0032-remove-financial-functions.rst +++ b/doc/neps/nep-0032-remove-financial-functions.rst @@ -174,19 +174,19 @@ References and footnotes .. [1] Financial functions, https://numpy.org/doc/1.17/reference/routines.financial.html -.. [2] Numpy-discussion mailing list, "Simple financial functions for NumPy", +.. [2] NumPy-Discussion mailing list, "Simple financial functions for NumPy", https://mail.python.org/pipermail/numpy-discussion/2008-April/032353.html -.. [3] Numpy-discussion mailing list, "add xirr to numpy financial functions?", +.. [3] NumPy-Discussion mailing list, "add xirr to numpy financial functions?", https://mail.python.org/pipermail/numpy-discussion/2009-May/042645.html -.. [4] Numpy-discussion mailing list, "Definitions of pv, fv, nper, pmt, and rate", +.. [4] NumPy-Discussion mailing list, "Definitions of pv, fv, nper, pmt, and rate", https://mail.python.org/pipermail/numpy-discussion/2009-June/043188.html .. [5] Get financial functions out of main namespace, https://github.com/numpy/numpy/issues/2880 -.. [6] Numpy-discussion mailing list, "Deprecation of financial routines", +.. [6] NumPy-Discussion mailing list, "Deprecation of financial routines", https://mail.python.org/pipermail/numpy-discussion/2013-August/067409.html .. [7] ``component: numpy.lib.financial`` issues, @@ -198,7 +198,7 @@ References and footnotes .. [9] Quansight-Labs/python-api-inspect, https://github.com/Quansight-Labs/python-api-inspect/ -.. [10] Numpy-discussion mailing list, "NEP 32: Remove the financial functions +.. [10] NumPy-Discussion mailing list, "NEP 32: Remove the financial functions from NumPy" https://mail.python.org/pipermail/numpy-discussion/2019-September/079965.html @@ -206,7 +206,7 @@ References and footnotes remove the financial functions. https://mail.google.com/mail/u/0/h/1w0mjgixc4rpe/?&th=16d5c38be45f77c4&q=nep+32&v=c&s=q -.. [12] Numpy-discussion mailing list, "Proposal to accept NEP 32: Remove the +.. [12] NumPy-Discussion mailing list, "Proposal to accept NEP 32: Remove the financial functions from NumPy" https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html From a08acd460fbe133b316b5e963a54dfa8208c036e Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 15 Dec 2020 09:49:19 +0800 Subject: [PATCH 0204/1270] add ARMV7 macro define header in order to prevent bus error. 
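
Unaligned 64-bit stores can raise a bus error (SIGBUS) on ARMv7, so the
contiguous fast path in pack_inner only insists on 8-byte alignment there;
targets that tolerate unaligned stores keep taking it unconditionally. A
minimal standalone sketch of the guard (the macro test and helper name are
illustrative, not the exact NumPy definitions):

    #include <stdint.h>

    #if defined(__arm__) && !defined(__aarch64__)
    #define IS_ARMV7 1   /* unaligned 8-byte stores may fault */
    #else
    #define IS_ARMV7 0   /* x86, AArch64, ... tolerate them */
    #endif

    static int
    use_fast_path(const char *outptr, intptr_t out_stride)
    {
        int aligned = ((uintptr_t)outptr % sizeof(uint64_t)) == 0;
        return out_stride == 1 && (!IS_ARMV7 || aligned);
    }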
--- numpy/core/include/numpy/npy_cpu.h | 3 +++ numpy/core/src/multiarray/compiled_base.c | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h index 4dbf9d84e384..88f22a248c9d 100644 --- a/numpy/core/include/numpy/npy_cpu.h +++ b/numpy/core/include/numpy/npy_cpu.h @@ -84,6 +84,9 @@ # error Unknown ARM CPU, please report this to numpy maintainers with \ information about your platform (OS, CPU and compiler) #endif + #if !(defined(__aarch64__) || defined(_M_ARM64)) + #define NPY_CPU_ARMV7 + #endif #elif defined(__sh__) && defined(__LITTLE_ENDIAN__) #define NPY_CPU_SH_LE #elif defined(__sh__) && defined(__BIG_ENDIAN__) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 81eb6e623bba..3b5322c15a2b 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -20,6 +20,11 @@ typedef enum { PACK_ORDER_BIG } PACK_ORDER; +#ifdef NPY_CPU_ARMV7 + #define ISARMV7 1 +#else + #define ISARMV7 0 +#endif /* * Returns -1 if the array is monotonic decreasing, * +1 if the array is monotonic increasing, @@ -1520,7 +1525,7 @@ pack_inner(const char *inptr, bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); if(out_stride == 1 && - npy_is_aligned(outptr, sizeof(npy_uint64))) { + (!ISARMV7 || npy_is_aligned(outptr, sizeof(npy_uint64)))) { npy_uint64 *ptr64 = (npy_uint64*)outptr; #if NPY_SIMD_WIDTH == 16 npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); From a209ef9e4e3265efa3ba1c0f090c501fce82845a Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 15 Dec 2020 16:14:18 +0800 Subject: [PATCH 0205/1270] Unified code style. --- numpy/core/src/multiarray/item_selection.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index dfee03414b55..34ccc3da2eb8 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2132,11 +2132,11 @@ count_nonzero_bytes_384(const npy_uint64 * w) #if NPY_SIMD /* Count the zero bytes between `*d` and `end`, updating `*d` to point to where to keep counting from. */ -static NPY_INLINE npyv_u8 +static NPY_INLINE NPY_GCC_OPT_3 npyv_u8 count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end) { const npyv_u8 vone = npyv_setall_u8(1); - const npyv_u8 vzero = npyv_setall_u8(0); + const npyv_u8 vzero = npyv_zero_u8(); npy_intp n = 0; npyv_u8 vsum8 = npyv_zero_u8(); @@ -2145,7 +2145,7 @@ count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end) vt = npyv_and_u8(vt, vone); vsum8 = npyv_add_u8(vsum8, vt); *d += npyv_nlanes_u8; - n+= npyv_nlanes_u8; + n += npyv_nlanes_u8; } return vsum8; } From 1342aaa49d9544029f1ff1e56b404adfa1bef468 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Tue, 15 Dec 2020 10:09:44 -0800 Subject: [PATCH 0206/1270] DOC: Add more information about poly1d -> polynomial to reference guide (#16164) * Added internal link to polynomial package. Also uncapitalized Polynomial, to mitigate potential confusion of the Polynomial convenience class with the polynomial package. * WIP: Adding transition guide from poly1d to polynomial * WIP: Started adding quickref table for poly transition * WIP: Finish fleshing out transition guide for polynomials * Proofreading transition guide. * Fix traceback in code example for failing doctest. 
* DOC: Remove trailing _ from sphinx links. Was causing a CI failure on building the devdocs * Fix typo. Co-Authored-By: Chris Barnes * DOC: Add note about terminology. Clarify the distinction between polynomial module and polynomial package. Co-authored-by: Chris Barnes --- doc/source/reference/routines.polynomials.rst | 164 ++++++++++++++++-- 1 file changed, 154 insertions(+), 10 deletions(-) diff --git a/doc/source/reference/routines.polynomials.rst b/doc/source/reference/routines.polynomials.rst index e74c5a683b57..da481ae4c602 100644 --- a/doc/source/reference/routines.polynomials.rst +++ b/doc/source/reference/routines.polynomials.rst @@ -9,24 +9,165 @@ of the `numpy.polynomial` package, introduced in NumPy 1.4. Prior to NumPy 1.4, `numpy.poly1d` was the class of choice and it is still available in order to maintain backward compatibility. -However, the newer Polynomial package is more complete than `numpy.poly1d` -and its convenience classes are better behaved in the numpy environment. +However, the newer `polynomial package ` is more complete +and its `convenience classes ` provide a +more consistent, better-behaved interface for working with polynomial +expressions. Therefore :mod:`numpy.polynomial` is recommended for new coding. -Transition notice ------------------ -The various routines in the Polynomial package all deal with -series whose coefficients go from degree zero upward, -which is the *reverse order* of the Poly1d convention. -The easy way to remember this is that indexes -correspond to degree, i.e., coef[i] is the coefficient of the term of -degree i. +.. note:: **Terminology** + The term *polynomial module* refers to the old API defined in + `numpy.lib.polynomial`, which includes the :class:`numpy.poly1d` class and + the polynomial functions prefixed with *poly* accessible from the `numpy` + namespace (e.g. `numpy.polyadd`, `numpy.polyval`, `numpy.polyfit`, etc.). + + The term *polynomial package* refers to the new API definied in + `numpy.polynomial`, which includes the convenience classes for the + different kinds of polynomials (`numpy.polynomial.Polynomial`, + `numpy.polynomial.Chebyshev`, etc.). + +Transitioning from `numpy.poly1d` to `numpy.polynomial` +------------------------------------------------------- + +As noted above, the :class:`poly1d class ` and associated +functions defined in ``numpy.lib.polynomial``, such as `numpy.polyfit` +and `numpy.poly`, are considered legacy and should **not** be used in new +code. +Since NumPy version 1.4, the `numpy.polynomial` package is preferred for +working with polynomials. + +Quick Reference +~~~~~~~~~~~~~~~ + +The following table highlights some of the main differences between the +legacy polynomial module and the polynomial package for common tasks. 
+The `~numpy.polynomial.polynomial.Polynomial` class is imported for brevity:: + + from numpy.polynomial import Polynomial + + ++------------------------+------------------------------+---------------------------------------+ +| **How to...** | Legacy (`numpy.poly1d`) | `numpy.polynomial` | ++------------------------+------------------------------+---------------------------------------+ +| Create a | ``p = np.poly1d([1, 2, 3])`` | ``p = Polynomial([3, 2, 1])`` | +| polynomial object | | | +| from coefficients [1]_ | | | ++------------------------+------------------------------+---------------------------------------+ +| Create a polynomial | ``r = np.poly([-1, 1])`` | ``p = Polynomial.fromroots([-1, 1])`` | +| object from roots | ``p = np.poly1d(r)`` | | ++------------------------+------------------------------+---------------------------------------+ +| Fit a polynomial of | | | +| degree ``deg`` to data | ``np.polyfit(x, y, deg)`` | ``Polynomial.fit(x, y, deg)`` | ++------------------------+------------------------------+---------------------------------------+ + + +.. [1] Note the reversed ordering of the coefficients + +Transition Guide +~~~~~~~~~~~~~~~~ + +There are significant differences between ``numpy.lib.polynomial`` and +`numpy.polynomial`. +The most significant difference is the ordering of the coefficients for the +polynomial expressions. +The various routines in `numpy.polynomial` all +deal with series whose coefficients go from degree zero upward, +which is the *reverse order* of the poly1d convention. +The easy way to remember this is that indices +correspond to degree, i.e., ``coef[i]`` is the coefficient of the term of +degree *i*. + +Though the difference in convention may be confusing, it is straightforward to +convert from the legacy polynomial API to the new. +For example, the following demonstrates how you would convert a `numpy.poly1d` +instance representing the expression :math:`x^{2} + 2x + 3` to a +`~numpy.polynomial.polynomial.Polynomial` instance representing the same +expression:: + + >>> p1d = np.poly1d([1, 2, 3]) + >>> p = np.polynomial.Polynomial(p1d.coef[::-1]) + +In addition to the ``coef`` attribute, polynomials from the polynomial +package also have ``domain`` and ``window`` attributes. +These attributes are most relevant when fitting +polynomials to data, though it should be noted that polynomials with +different ``domain`` and ``window`` attributes are not considered equal, and +can't be mixed in arithmetic:: + + >>> p1 = np.polynomial.Polynomial([1, 2, 3]) + >>> p1 + Polynomial([1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> p2 = np.polynomial.Polynomial([1, 2, 3], domain=[-2, 2]) + >>> p1 == p2 + False + >>> p1 + p2 + Traceback (most recent call last): + ... + TypeError: Domains differ + +See the documentation for the +`convenience classes `_ for further details on +the ``domain`` and ``window`` attributes. + +Another major difference bewteen the legacy polynomial module and the +polynomial package is polynomial fitting. In the old module, fitting was +done via the `~numpy.polyfit` function. In the polynomial package, the +`~numpy.polynomial.polynomial.Polynomial.fit` class method is preferred. For +example, consider a simple linear fit to the following data: + +.. ipython:: python + + rg = np.random.default_rng() + x = np.arange(10) + y = np.arange(10) + rg.standard_normal(10) + +With the legacy polynomial module, a linear fit (i.e. polynomial of degree 1) +could be applied to these data with `~numpy.polyfit`: + +.. 
ipython:: python + + np.polyfit(x, y, deg=1) + +With the new polynomial API, the `~numpy.polynomial.polynomial.Polynomial.fit` +class method is preferred: + +.. ipython:: python + + p_fitted = np.polynomial.Polynomial.fit(x, y, deg=1) + p_fitted + +Note that the coefficients are given *in the scaled domain* defined by the +linear mapping between the ``window`` and ``domain``. +`~numpy.polynomial.polynomial.Polynomial.convert` can be used to get the +coefficients in the unscaled data domain. + +.. ipython:: python + + p_fitted.convert() + +Documentation for the `~numpy.polynomial` Package +------------------------------------------------- + +In addition to standard power series polynomials, the polynomial package +provides several additional kinds of polynomials including Chebyshev, +Hermite (two subtypes), Laguerre, and Legendre polynomials. +Each of these has an associated +`convenience class ` available from the +`numpy.polynomial` namespace that provides a consistent interface for working +with polynomials regardless of their type. .. toctree:: :maxdepth: 1 routines.polynomials.classes + +Documentation pertaining to specific functions defined for each kind of +polynomial individually can be found in the corresponding module documentation: + +.. toctree:: + :maxdepth: 1 + routines.polynomials.polynomial routines.polynomials.chebyshev routines.polynomials.hermite @@ -36,6 +177,9 @@ degree i. routines.polynomials.polyutils +Documentation for Legacy Polynomials +------------------------------------ + .. toctree:: :maxdepth: 2 From 7628292e005fd3d6faf1b77434d2224921d49ad8 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 14 May 2020 14:27:29 -0500 Subject: [PATCH 0207/1270] DOC: Warn when reloading numpy or using numpy in sub-interpreter This adds a warning when the main NumPy module is reloaded with the assumption that in this case objects such as `np.matrix`, `np._NoValue` or exceptions may be cached internally. It also gives a warning when NumPy is imported in a sub-interpreter. --- numpy/__init__.py | 5 +++ numpy/core/src/multiarray/multiarraymodule.c | 39 ++++++++++++++++++++ numpy/tests/test_reloading.py | 14 ++++--- 3 files changed, 53 insertions(+), 5 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 879e8f013fed..a242bb7dfaae 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -389,7 +389,12 @@ def _mac_os_check(): # Note that this will currently only make a difference on Linux core.multiarray._set_madvise_hugepage(use_hugepage) + # Give a warning if NumPy is reloaded or imported on a sub-interpreter + # We do this from python, since the C-module may not be reloaded and + # it is tidier organized. + core.multiarray._multiarray_umath._reload_guard() from ._version import get_versions __version__ = get_versions()['version'] del get_versions + diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 32c5ac0dc20c..870b633ed379 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -4085,6 +4085,42 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) } +static PyObject * +_reload_guard(PyObject *NPY_UNUSED(self)) { + static int initialized = 0; + +#if !defined(PYPY_VERSION) + if (PyThreadState_Get()->interp != PyInterpreterState_Main()) { + if (PyErr_WarnEx(PyExc_UserWarning, + "NumPy was imported from a Python sub-interpreter but " + "NumPy does not properly support sub-interpreters. 
" + "This will likely work for most users but might cause hard to " + "track down issues or subtle bugs. " + "A common user of the rare sub-interpreter feature is wsgi " + "which also allows single-interpreter mode.\n" + "Improvements in the case of bugs are welcome, but is not " + "on the NumPy roadmap, and full support may require " + "significant effort to achieve.", 2) < 0) { + return NULL; + } + /* No need to give the other warning in a sub-interpreter as well... */ + initialized = 1; + Py_RETURN_NONE; + } +#endif + if (initialized) { + if (PyErr_WarnEx(PyExc_UserWarning, + "The NumPy module was reloaded (imported a second time). " + "This can in some cases result in small but subtle issues " + "and is discouraged.", 2) < 0) { + return NULL; + } + } + initialized = 1; + Py_RETURN_NONE; +} + + static struct PyMethodDef array_module_methods[] = { {"_get_implementing_args", (PyCFunction)array__get_implementing_args, @@ -4276,6 +4312,9 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS, NULL}, {"_set_madvise_hugepage", (PyCFunction)_set_madvise_hugepage, METH_O, NULL}, + {"_reload_guard", (PyCFunction)_reload_guard, + METH_NOARGS, + "Give a warning on reload and big warning in sub-interpreters."}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 61ae91b00d55..5c4309f4ab88 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -1,4 +1,4 @@ -from numpy.testing import assert_raises, assert_, assert_equal +from numpy.testing import assert_raises, assert_warns, assert_, assert_equal from numpy.compat import pickle import sys @@ -16,13 +16,15 @@ def test_numpy_reloading(): VisibleDeprecationWarning = np.VisibleDeprecationWarning ModuleDeprecationWarning = np.ModuleDeprecationWarning - reload(np) + with assert_warns(UserWarning): + reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning) assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning) assert_raises(RuntimeError, reload, numpy._globals) - reload(np) + with assert_warns(UserWarning): + reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning) assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning) @@ -45,13 +47,15 @@ def test_full_reimport(): # This is generally unsafe, especially, since we also reload the C-modules. code = textwrap.dedent(r""" import sys + from pytest import warns import numpy as np for k in list(sys.modules.keys()): if "numpy" in k: del sys.modules[k] - import numpy as np + with warns(UserWarning): + import numpy as np """) p = subprocess.run([sys.executable, '-c', code]) - assert p.returncode == 0 + From b5c5ad8e542413d1273c5a27fe14f439cabe869c Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 16 Dec 2020 09:54:05 +0800 Subject: [PATCH 0208/1270] add strong alignment definition.which can be used in other areas. 
--- numpy/core/include/numpy/npy_common.h | 8 ++++++++ numpy/core/include/numpy/npy_cpu.h | 3 --- numpy/core/src/multiarray/compiled_base.c | 7 +------ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h index c8495db8e58f..d5a586c56a39 100644 --- a/numpy/core/include/numpy/npy_common.h +++ b/numpy/core/include/numpy/npy_common.h @@ -10,6 +10,14 @@ #include #endif +// int*, int64* should be propertly aligned on ARMv7 to avoid bus error +#if !defined(NPY_STRONG_ALIGNMENT) && defined(__arm__) && !(defined(__aarch64__) || defined(_M_ARM64)) +#define NPY_STRONG_ALIGNMENT 1 +#endif +#if !defined(NPY_STRONG_ALIGNMENT) +#define NPY_STRONG_ALIGNMENT 0 +#endif + // compile time environment variables #ifndef NPY_RELAXED_STRIDES_CHECKING #define NPY_RELAXED_STRIDES_CHECKING 0 diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h index 88f22a248c9d..4dbf9d84e384 100644 --- a/numpy/core/include/numpy/npy_cpu.h +++ b/numpy/core/include/numpy/npy_cpu.h @@ -84,9 +84,6 @@ # error Unknown ARM CPU, please report this to numpy maintainers with \ information about your platform (OS, CPU and compiler) #endif - #if !(defined(__aarch64__) || defined(_M_ARM64)) - #define NPY_CPU_ARMV7 - #endif #elif defined(__sh__) && defined(__LITTLE_ENDIAN__) #define NPY_CPU_SH_LE #elif defined(__sh__) && defined(__BIG_ENDIAN__) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 3b5322c15a2b..f09a1de32e25 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -20,11 +20,6 @@ typedef enum { PACK_ORDER_BIG } PACK_ORDER; -#ifdef NPY_CPU_ARMV7 - #define ISARMV7 1 -#else - #define ISARMV7 0 -#endif /* * Returns -1 if the array is monotonic decreasing, * +1 if the array is monotonic increasing, @@ -1525,7 +1520,7 @@ pack_inner(const char *inptr, bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); if(out_stride == 1 && - (!ISARMV7 || npy_is_aligned(outptr, sizeof(npy_uint64)))) { + (!NPY_STRONG_ALIGNMENT || npy_is_aligned(outptr, sizeof(npy_uint64)))) { npy_uint64 *ptr64 = (npy_uint64*)outptr; #if NPY_SIMD_WIDTH == 16 npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); From e826c1f2616c72b023145fb4842bfbd1b60ddb3f Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 16 Dec 2020 11:40:44 +0800 Subject: [PATCH 0209/1270] add NPY_GCC_OPT_3 to all count_zero functions. 
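The ``count_zero_bytes_*`` helpers tagged below back the boolean fast path of ``np.count_nonzero``; the flag only affects code generation, not results, as a quick cross-check against a plain reduction illustrates (an illustrative sketch, public API only):

    import numpy as np

    rng = np.random.default_rng(seed=42)
    a = rng.integers(0, 2, size=100_000).astype(np.bool_)

    # The SIMD byte-counting path and a naive reduction must agree.
    assert np.count_nonzero(a) == int(a.sum())
    print(np.count_nonzero(a))
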
--- numpy/core/src/multiarray/item_selection.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 34ccc3da2eb8..abe7f4516a85 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2150,7 +2150,7 @@ count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end) return vsum8; } -static NPY_INLINE npyv_u16 +static NPY_INLINE NPY_GCC_OPT_3 npyv_u16 count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end) { npyv_u16 vsum16 = npyv_zero_u16(); @@ -2164,7 +2164,7 @@ count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end) return vsum16; } -static NPY_INLINE npyv_u32 +static NPY_INLINE NPY_GCC_OPT_3 npyv_u32 count_zero_bytes_u32(const npy_uint8 **d, const npy_uint8 *end) { npyv_u32 vsum32 = npyv_zero_u32(); @@ -2178,7 +2178,7 @@ count_zero_bytes_u32(const npy_uint8 **d, const npy_uint8 *end) return vsum32; } -static NPY_INLINE npy_intp +static NPY_INLINE NPY_GCC_OPT_3 npy_intp count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) { npy_intp zero_count = 0; From 13da291b95fd8a0aad99b1d8ec45a2107526c2e4 Mon Sep 17 00:00:00 2001 From: Rin Arakaki Date: Wed, 16 Dec 2020 16:36:14 +0900 Subject: [PATCH 0210/1270] Remove spaces around ** --- doc/source/user/quickstart.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 4d76340385f7..8719e6eef7d6 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -315,7 +315,7 @@ created and filled with the result. >>> c = a - b >>> c array([20, 29, 38, 47]) - >>> b ** 2 + >>> b**2 array([0, 1, 4, 9]) >>> 10 * np.sin(a) array([ 9.12945251, -9.88031624, 7.4511316 , -2.62374854]) @@ -500,7 +500,7 @@ and other Python sequences. :: - >>> a = np.arange(10) ** 3 + >>> a = np.arange(10)**3 >>> a array([ 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]) >>> a[2] @@ -515,7 +515,7 @@ and other Python sequences. >>> a[::-1] # reversed a array([ 729, 512, 343, 216, 125, 1000, 27, 1000, 1, 1000]) >>> for i in a: - ... print(i ** (1 / 3.)) + ... print(i**(1 / 3.)) ... 9.999999999999998 1.0 @@ -1065,7 +1065,7 @@ Indexing with Arrays of Indices :: - >>> a = np.arange(12) ** 2 # the first 12 square numbers + >>> a = np.arange(12)**2 # the first 12 square numbers >>> i = np.array([1, 1, 3, 8, 5]) # an array of indices >>> a[i] # the elements of `a` at the positions `i` array([ 1, 1, 9, 64, 25]) @@ -1263,8 +1263,8 @@ set `__: ... divtime = maxit + np.zeros(z.shape, dtype=int) ... ... for i in range(maxit): - ... z = z ** 2 + c - ... diverge = z * np.conj(z) > 2 ** 2 # who is diverging + ... z = z**2 + c + ... diverge = z * np.conj(z) > 2**2 # who is diverging ... div_now = diverge & (divtime == maxit) # who is diverging now ... divtime[div_now] = i # note when ... z[diverge] = 2 # avoid diverging too much From b156231e40e280658dd1d6582d1d9734e0f56b09 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 16 Dec 2020 16:23:54 +0800 Subject: [PATCH 0211/1270] add benchmark test case for little order. 
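The new benchmark below times the ``bitorder="little"`` path of ``np.packbits``; the two orders differ only in which end of the byte the first element occupies. A small sketch of both, using the public API:

    import numpy as np

    bits = np.array([1, 0, 1, 1, 0, 0, 1, 0], dtype=np.uint8)

    print(np.packbits(bits))                     # [178] -- default "big", MSB first
    print(np.packbits(bits, bitorder="little"))  # [77]  -- LSB first
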
--- benchmarks/benchmarks/bench_core.py | 3 +++ numpy/core/src/multiarray/compiled_base.c | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 0c2a18c15333..1c028542db04 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -165,6 +165,9 @@ def setup(self, dtype): def time_packbits(self, dtype): np.packbits(self.d) + def time_packbits_little(self, dtype): + np.packbits(self.d, bitorder="little") + def time_packbits_axis0(self, dtype): np.packbits(self.d2, axis=0) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index f09a1de32e25..6ae4dda6bba9 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1502,6 +1502,7 @@ pack_inner(const char *inptr, npy_intp vn_out = n_out - (remain ? 1 : 0); const int vstep = npyv_nlanes_u64; const int vstepx4 = vstep * 4; + const int isAligned = npy_is_aligned(outptr, sizeof(npy_uint64)); vn_out -= (vn_out & (vstep - 1)); for (; index <= vn_out - vstepx4; index += vstepx4, inptr += npyv_nlanes_u8 * 4) { npyv_u8 v0 = npyv_load_u8((const npy_uint8*)inptr); @@ -1520,7 +1521,7 @@ pack_inner(const char *inptr, bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); if(out_stride == 1 && - (!NPY_STRONG_ALIGNMENT || npy_is_aligned(outptr, sizeof(npy_uint64)))) { + (!NPY_STRONG_ALIGNMENT || isAligned)) { npy_uint64 *ptr64 = (npy_uint64*)outptr; #if NPY_SIMD_WIDTH == 16 npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); From 3fc052fc6e64c6306974d4606551cc9b4711039f Mon Sep 17 00:00:00 2001 From: "Michal W. Tarnowski" Date: Wed, 16 Dec 2020 09:39:31 +0100 Subject: [PATCH 0212/1270] DOC: Fix and extend the docstring for np.inner (#18002) * DOC: fix the docstring for np.inner * DOC: extend the docstring for np.inner and add an example * DOC: update numpy/core/multiarray.py Co-authored-by: Eric Wieser * DOC: apply suggestions from code review Co-authored-by: Matti Picus Co-authored-by: Eric Wieser Co-authored-by: Matti Picus --- numpy/core/multiarray.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index f736973def1f..07179a627eec 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -259,12 +259,16 @@ def inner(a, b): Returns ------- out : ndarray - `out.shape = a.shape[:-1] + b.shape[:-1]` + If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` Raises ------ ValueError - If the last dimension of `a` and `b` has different size. + If both `a` and `b` are nonscalar and their last dimensions have + different sizes. 
See Also -------- @@ -284,8 +288,8 @@ def inner(a, b): or explicitly:: - np.inner(a, b)[i0,...,ir-1,j0,...,js-1] - = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) + np.inner(a, b)[i0,...,ir-2,j0,...,js-2] + = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) In addition `a` or `b` may be scalars, in which case:: @@ -300,14 +304,25 @@ def inner(a, b): >>> np.inner(a, b) 2 - A multidimensional example: + Some multidimensional examples: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) - >>> np.inner(a, b) + >>> c = np.inner(a, b) + >>> c.shape + (2, 3) + >>> c array([[ 14, 38, 62], [ 86, 110, 134]]) + >>> a = np.arange(2).reshape((1,1,2)) + >>> b = np.arange(6).reshape((3,2)) + >>> c = np.inner(a, b) + >>> c.shape + (1, 1, 3) + >>> c + array([[[1, 3, 5]]]) + An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) From 4a09b2fe9bc32b33f5411ced65cbec2389e2167f Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 16 Dec 2020 17:30:19 +0800 Subject: [PATCH 0213/1270] only the last accumulation into the 2**32-bit counters needs this adjustment to avoid overflow. --- numpy/core/src/multiarray/item_selection.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index abe7f4516a85..c71c4325a485 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2138,14 +2138,14 @@ count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end) const npyv_u8 vone = npyv_setall_u8(1); const npyv_u8 vzero = npyv_zero_u8(); - npy_intp n = 0; + npy_intp lane_max = 0; npyv_u8 vsum8 = npyv_zero_u8(); - while (*d < end && n <= 0xFE) { + while (*d < end && lane_max <= 0xFE) { npyv_u8 vt = npyv_cvt_u8_b8(npyv_cmpeq_u8(npyv_load_u8(*d), vzero)); vt = npyv_and_u8(vt, vone); vsum8 = npyv_add_u8(vsum8, vt); *d += npyv_nlanes_u8; - n += npyv_nlanes_u8; + lane_max += 1; } return vsum8; } @@ -2154,12 +2154,12 @@ static NPY_INLINE NPY_GCC_OPT_3 npyv_u16 count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end) { npyv_u16 vsum16 = npyv_zero_u16(); - npy_intp n = 0; - while (*d < end && n <= 0xFF00) { + npy_intp lane_max = 0; + while (*d < end && lane_max <= 0xFF00-0xFF) { npyv_u8 vsum8 = count_zero_bytes_u8(d, end); npyv_u16x2 part = npyv_expand_u16_u8(vsum8); vsum16 = npyv_add_u16(vsum16, npyv_add_u16(part.val[0], part.val[1])); - n += 0xFF; + lane_max += 0xFF*2; } return vsum16; } @@ -2168,12 +2168,13 @@ static NPY_INLINE NPY_GCC_OPT_3 npyv_u32 count_zero_bytes_u32(const npy_uint8 **d, const npy_uint8 *end) { npyv_u32 vsum32 = npyv_zero_u32(); - npy_intp n = 0; - while (*d < end && n <= 0xFFFF0000) { + npy_intp lane_max = 0; + // The last accumulation needs to adjustment (2**32-1)/nlanes to avoid overflow. 
+ while (*d < end && lane_max <= (0xFFFF0000-0xFFFF)/npyv_nlanes_u32) { npyv_u16 vsum16 = count_zero_bytes_u16(d, end); npyv_u32x2 part = npyv_expand_u32_u16(vsum16); vsum32 = npyv_add_u32(vsum32, npyv_add_u32(part.val[0], part.val[1])); - n += 0xFFFF; + lane_max += 0xFFFF*2; } return vsum32; } From 768e9e0a3cbe5febf5d7e031d5164b257add6237 Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 16 Dec 2020 13:13:43 +0200 Subject: [PATCH 0214/1270] DOC: fix for doctests --- doc/source/user/quickstart.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 8719e6eef7d6..ab5bb5318cdd 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -177,7 +177,7 @@ The function ``zeros`` creates an array full of zeros, the function ``ones`` creates an array full of ones, and the function ``empty`` creates an array whose initial content is random and depends on the state of the memory. By default, the dtype of the created array is -``float64``. +``float64``, but it can be specified via the key word argument ``dtype``. :: @@ -185,7 +185,7 @@ state of the memory. By default, the dtype of the created array is array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]) - >>> np.ones((2, 3, 4), dtype=np.int16) # dtype can also be specified + >>> np.ones((2, 3, 4), dtype=np.int16) array([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], @@ -193,8 +193,8 @@ state of the memory. By default, the dtype of the created array is [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]], dtype=int16) - >>> np.empty((2, 3)) # uninitialized, the result may vary - array([[3.73603959e-262, 6.02658058e-154, 6.55490914e-260], + >>> np.empty((2, 3)) + array([[3.73603959e-262, 6.02658058e-154, 6.55490914e-260], # may vary [5.30498948e-313, 3.14673309e-307, 1.00000000e+000]]) To create sequences of numbers, NumPy provides the ``arange`` function From 6658f0f179ca22526631735194faa5d2c5559bf8 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Wed, 16 Dec 2020 10:21:30 +0000 Subject: [PATCH 0215/1270] DOC: Add a brief explanation of float printing Also link to the overflow docs here. --- doc/source/reference/arrays.scalars.rst | 24 ++++++++++++++++++++++++ doc/source/user/basics.types.rst | 2 ++ 2 files changed, 26 insertions(+) diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 29438f70f4c9..c3fb1f91b7b9 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -108,6 +108,11 @@ Integer types :members: __init__ :exclude-members: __init__ +.. note:: + + The numpy integer types mirror the behavior of C integers, and can therefore + be subject to :ref:`overflow-errors`. + Signed integer types ++++++++++++++++++++ @@ -169,6 +174,25 @@ Inexact types :members: __init__ :exclude-members: __init__ +.. note:: + + The printing behavior of inexact scalars is to use as few digits as possible + to display the result unambiguously. This means that equal values at + different precisions will display differently:: + + >>> f16 = np.float16("0.1") + >>> f32 = np.float32(f16) + >>> f64 = np.float64(f32) + >>> f16 == f32 == f64 + True + >>> f16, f32, f64 + (0.1, 0.099975586, 0.0999755859375) + + Note that none of these floats hold the exact value :math:`\frac{1}{10}`; + ``f16`` prints as ``0.1`` because it is as close to that value as possible, + whereas the other types do not as they have more precision and therefore have + closer values. 
+ Floating-point types ++++++++++++++++++++ diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index ec2af409ae86..781dd66e525f 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -261,6 +261,8 @@ identical behaviour between arrays and scalars, irrespective of whether the value is inside an array or not. NumPy scalars also have many of the same methods arrays do. +.. _overflow-errors: + Overflow Errors =============== From 0a562f37b849d945b99ee2a212b53901601ae9af Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Wed, 16 Dec 2020 14:24:34 +0000 Subject: [PATCH 0216/1270] Update doc/source/reference/arrays.scalars.rst --- doc/source/reference/arrays.scalars.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index c3fb1f91b7b9..ea1cae23e8ba 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -178,7 +178,7 @@ Inexact types The printing behavior of inexact scalars is to use as few digits as possible to display the result unambiguously. This means that equal values at - different precisions will display differently:: + different precisions may display differently:: >>> f16 = np.float16("0.1") >>> f32 = np.float32(f16) From 1d06fcb709c4819cda0321cfd2a0c5935b340f82 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 17 Dec 2020 07:47:31 +0200 Subject: [PATCH 0217/1270] BLD: update to OpenBLAS 0.3.13 --- tools/openblas_support.py | 82 +-------------------------------------- 1 file changed, 2 insertions(+), 80 deletions(-) diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 50837177b680..dff19274ed49 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -12,81 +12,12 @@ from urllib.request import urlopen, Request from urllib.error import HTTPError -OPENBLAS_V = '0.3.12' -# Temporary build of OpenBLAS to test a fix for dynamic detection of CPU -OPENBLAS_LONG = 'v0.3.12-buffersize20' +OPENBLAS_V = '0.3.13' +OPENBLAS_LONG = 'v0.3.13' BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs' BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download' ARCHITECTURES = ['', 'windows', 'darwin', 'aarch64', 'x86_64', 'i686', 'ppc64le', 's390x'] -sha256_vals = { - "openblas-v0.3.7-527-g79fd006c-win_amd64-gcc_7_1_0.zip": - "7249d68c02e6b6339e06edfeab1fecddf29ee1e67a3afaa77917c320c43de840", - "openblas64_-v0.3.7-527-g79fd006c-win_amd64-gcc_7_1_0.zip": - "6488e0961a5926e47242f63b63b41cfdd661e6f1d267e8e313e397cde4775c17", - "openblas-v0.3.7-527-g79fd006c-win32-gcc_7_1_0.zip": - "5fb0867ca70b1d0fdbf68dd387c0211f26903d74631420e4aabb49e94aa3930d", - "openblas-v0.3.7-527-g79fd006c-macosx_10_9_x86_64-gf_1becaaa.tar.gz": - "69434bd626bbc495da9ce8c36b005d140c75e3c47f94e88c764a199e820f9259", - "openblas64_-v0.3.7-527-g79fd006c-macosx_10_9_x86_64-gf_1becaaa.tar.gz": - "093f6d953e3fa76a86809be67bd1f0b27656671b5a55b233169cfaa43fd63e22", - "openblas-v0.3.7-527-g79fd006c-manylinux2014_aarch64.tar.gz": - "42676c69dc48cd6e412251b39da6b955a5a0e00323ddd77f9137f7c259d35319", - "openblas64_-v0.3.7-527-g79fd006c-manylinux2014_aarch64.tar.gz": - "5aec167af4052cf5e9e3e416c522d9794efabf03a2aea78b9bb3adc94f0b73d8", - "openblas-v0.3.7-527-g79fd006c-manylinux2010_x86_64.tar.gz": - "fa67c6cc29d4cc5c70a147c80526243239a6f95fc3feadcf83a78176cd9c526b", - "openblas64_-v0.3.7-527-g79fd006c-manylinux2010_x86_64.tar.gz": - "9ad34e89a5307dcf5823bf5c020580d0559a0c155fe85b44fc219752e61852b0", - 
"openblas-v0.3.7-527-g79fd006c-manylinux2010_i686.tar.gz": - "0b8595d316c8b7be84ab1f1d5a0c89c1b35f7c987cdaf61d441bcba7ab4c7439", - "openblas-v0.3.7-527-g79fd006c-manylinux2014_ppc64le.tar.gz": - "3e1c7d6472c34e7210e3605be4bac9ddd32f613d44297dc50cf2d067e720c4a9", - "openblas64_-v0.3.7-527-g79fd006c-manylinux2014_ppc64le.tar.gz": - "a0885873298e21297a04be6cb7355a585df4fa4873e436b4c16c0a18fc9073ea", - "openblas-v0.3.7-527-g79fd006c-manylinux2014_s390x.tar.gz": - "79b454320817574e20499d58f05259ed35213bea0158953992b910607b17f240", - "openblas64_-v0.3.7-527-g79fd006c-manylinux2014_s390x.tar.gz": - "9fddbebf5301518fc4a5d2022a61886544a0566868c8c014359a1ee6b17f2814", - "openblas-v0.3.7-527-g79fd006c-manylinux1_i686.tar.gz": - "24fb92684ec4676185fff5c9340f50c3db6075948bcef760e9c715a8974e4680", - "openblas-v0.3.7-527-g79fd006c-manylinux1_x86_64.tar.gz": - "ebb8236b57a1b4075fd5cdc3e9246d2900c133a42482e5e714d1e67af5d00e62", - "openblas-v0.3.10-win_amd64-gcc_7_1_0.zip": - "e5356a2aa4aa7ed9233b2ca199fdd445f55ba227f004ebc63071dfa2426e9b09", - "openblas64_-v0.3.10-win_amd64-gcc_7_1_0.zip": - "aea3f9c8bdfe0b837f0d2739a6c755b12b6838f6c983e4ede71b4e1b576e6e77", - "openblas-v0.3.10-win32-gcc_7_1_0.zip": - "af1ad3172b23f7c6ef2234151a71d3be4d92010dad4dfb25d07cf5a20f009202", - "openblas64_-v0.3.10-macosx_10_9_x86_64-gf_1becaaa.tar.gz": - "38b61c58d63048731d6884fea7b63f8cbd610e85b138c6bac0e39fd77cd4699b", - "openblas-v0.3.10-manylinux2014_aarch64.tar.gz": - "c4444b9836ec26f7772fae02851961bf73177ff2aa436470e56fab8a1ef8d405", - "openblas-v0.3.10-manylinux2010_x86_64.tar.gz": - "cb7988c4a015aece9c49b1169f51c4ac2287fb9aab8114c8ab67792138ffc85e", - "openblas-v0.3.10-manylinux2010_i686.tar.gz": - "dc637801dd80ebd6394ea8b4a97f8858e4224870ea9214de08bebbdddd8e206e", - "openblas-v0.3.10-manylinux1_x86_64.tar.gz": - "ec1f9e9b2a62d5cb9e2634b88ee2da7cb6b07702d5a0e8b190d680a31adfa23a", - "openblas-v0.3.10-manylinux1_i686.tar.gz": - "b13d9d14e6bd452c0fbadb5cd5fda05b98b1e14043edb13ead90694d4cc07f0e", - "openblas-v0.3.10-manylinux2014_ppc64le.tar.gz": - "1cbc8176986099cf0cbb8f64968d5a14880d602d4b3c59a91d75b69b8760cde3", - "openblas-v0.3.10-manylinux2014_s390x.tar.gz": - "fa6722f0b12507ab0a65f38501ed8435b573df0adc0b979f47cdc4c9e9599475", - "openblas-v0.3.10-macosx_10_9_x86_64-gf_1becaaa.tar.gz": - "c6940b5133e687ae7a4f9c7c794f6a6d92b619cf41e591e5db07aab5da118199", - "openblas64_-v0.3.10-manylinux2014_s390x.tar.gz": - "e0347dd6f3f3a27d2f5e76d382e8a4a68e2e92f5f6a10e54ef65c7b14b44d0e8", - "openblas64_-v0.3.10-manylinux2014_ppc64le.tar.gz": - "4b96a51ac767ec0aabb821c61bcd3420e82e987fc93f7e1f85aebb2a845694eb", - "openblas64_-v0.3.10-manylinux2010_x86_64.tar.gz": - "f68fea21fbc73d06b7566057cad2ed8c7c0eb71fabf9ed8a609f86e5bc60ce5e", - "openblas64_-v0.3.10-manylinux2014_aarch64.tar.gz": - "15e6eed8cb0df8b88e52baa136ffe1769c517e9de7bcdfd81ec56420ae1069e9", -} - - IS_32BIT = sys.maxsize < 2**32 @@ -162,15 +93,6 @@ def download_openblas(target, arch, ilp64, is_32bit): data = response.read() # Verify hash key = os.path.basename(filename) - sha256_returned = hashlib.sha256(data).hexdigest() - if 0: - if key not in sha256_vals: - raise ValueError( - f'\nkey "{key}" with hash "{sha256_returned}" not in sha256_vals\n') - sha256_expected = sha256_vals[key] - if sha256_returned != sha256_expected: - # print(f'\nkey "{key}" with hash "{sha256_returned}" mismatch\n') - raise ValueError(f'sha256 hash mismatch for filename {filename}') print("Saving to file", file=sys.stderr) with open(target, 'wb') as fid: fid.write(data) From 
b80411d2eaf6eda8075764359ea32b59fcc6c468 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 17 Dec 2020 14:53:43 +0800 Subject: [PATCH 0218/1270] Optimize the performance of einsum's submodule sum. --- .../core/src/multiarray/einsum_sumprod.c.src | 504 +++--------------- 1 file changed, 78 insertions(+), 426 deletions(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index 86d5b82fc818..03d2d614cf3c 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -94,6 +94,58 @@ * 0*3# */ +#if !@complex@ +static @temptype@ @name@_sum_of_arr(@type@ **data, npy_intp count) +{ + @temptype@ accum = 0; +#if @NPYV_CHK@ // NPYV check for @type@ + /* Use aligned instructions if possible */ + const int is_aligned = EINSUM_IS_ALIGNED(*data); + const int vstep = npyv_nlanes_@sfx@; + npyv_@sfx@ vaccum = npyv_zero_@sfx@(); + const npy_intp vstepx4 = vstep * 4; + + /**begin repeat1 + * #cond = if(is_aligned), else# + * #ld = loada, load# + * #st = storea, store# + */ + @cond@ { + for (; count >= vstepx4; count -= vstepx4, *data += vstepx4) { + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(*data + vstep * @i@); + /**end repeat2**/ + npyv_@sfx@ a01 = npyv_add_@sfx@(a0, a1); + npyv_@sfx@ a23 = npyv_add_@sfx@(a2, a3); + npyv_@sfx@ a0123 = npyv_add_@sfx@(a01, a23); + vaccum = npyv_add_@sfx@(a0123, vaccum); + } + } + /**end repeat1**/ + for (; count > 0; count -= vstep, *data += vstep) { + npyv_@sfx@ a = npyv_load_tillz_@sfx@(*data, count); + vaccum = npyv_add_@sfx@(a, vaccum); + } + accum = npyv_sum_@sfx@(vaccum); + npyv_cleanup(); +#else +#ifndef NPY_DISABLE_OPTIMIZATION + for (; count > 4; count -= 4, *data += 4) { + const @temptype@ a01 = @from@(**data) + @from@(*(*data + 1)); + const @temptype@ a23 = @from@(*(*data + 2)) + @from@(*(*data + 3)); + accum += a01 + a23; + } +#endif // !NPY_DISABLE_OPTIMIZATION + for (; count > 0; --count, *data += 1) { + accum += @from@(**data); + } +#endif // NPYV check for @type@ + return accum; +} +#endif + /**begin repeat1 * #nop = 1, 2, 3, 1000# * #noplabel = one, two, three, any# @@ -657,139 +709,10 @@ static void @name@_sum_of_products_stride0_contig_outstride0_two(int nop, char **dataptr, npy_intp const *NPY_UNUSED(strides), npy_intp count) { - @temptype@ value0 = @from@(*(@type@ *)dataptr[0]); @type@ *data1 = (@type@ *)dataptr[1]; - @temptype@ accum = 0; - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, accum_sse = _mm_setzero_ps(); -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, accum_sse = _mm_setzero_pd(); -#endif - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outstride0_two (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - accum += @from@(data1[@i@]); -/**end repeat2**/ - case 0: - *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum); - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data1)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
- */ - accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data1+@i@)); -/**end repeat2**/ - data1 += 8; - } - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#elif EINSUM_USE_SSE2 && @float64@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data1)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data1+@i@)); -/**end repeat2**/ - data1 += 8; - } - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data1+@i@)); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data1+@i@)); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - accum += @from@(data1[@i@]); -/**end repeat2**/ -#endif - data1 += 8; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#elif EINSUM_USE_SSE2 && @float64@ - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; + @temptype@ value0 = @from@(*(@type@ *)dataptr[0]); + @temptype@ accum = @name@_sum_of_arr(&data1, count); + *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum); } static void @@ -798,135 +721,8 @@ static void { @type@ *data0 = (@type@ *)dataptr[0]; @temptype@ value1 = @from@(*(@type@ *)dataptr[1]); - @temptype@ accum = 0; - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, accum_sse = _mm_setzero_ps(); -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, accum_sse = _mm_setzero_pd(); -#endif - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outstride0_two (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - accum += @from@(data0[@i@]); -/**end repeat2**/ - case 0: - *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum * value1); - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Use 
aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@)); -/**end repeat2**/ - data0 += 8; - } - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#elif EINSUM_USE_SSE2 && @float64@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@)); -/**end repeat2**/ - data0 += 8; - } - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@)); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
- */ - accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@)); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - accum += @from@(data0[@i@]); -/**end repeat2**/ -#endif - data0 += 8; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#elif EINSUM_USE_SSE2 && @float64@ - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; + @temptype@ accum = @name@_sum_of_arr(&data0, count); + *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value1 * accum); } #elif @nop@ == 3 && !@complex@ @@ -1032,175 +828,31 @@ static void @name@_sum_of_products_contig_outstride0_one(int nop, char **dataptr, npy_intp const *strides, npy_intp count) { -#if @complex@ - @temptype@ accum_re = 0, accum_im = 0; - @temptype@ *data0 = (@temptype@ *)dataptr[0]; -#else - @temptype@ accum = 0; - @type@ *data0 = (@type@ *)dataptr[0]; -#endif - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, accum_sse = _mm_setzero_ps(); -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, accum_sse = _mm_setzero_pd(); -#endif - - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_outstride0_one (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: + NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_outstride0_one (%d)\n", (int)count); #if !@complex@ - accum += @from@(data0[@i@]); -#else /* complex */ - accum_re += data0[2*@i@+0]; - accum_im += data0[2*@i@+1]; -#endif -/**end repeat2**/ - case 0: -#if @complex@ - ((@temptype@ *)dataptr[1])[0] += accum_re; - ((@temptype@ *)dataptr[1])[1] += accum_im; + @type@ *data = (@type@ *)dataptr[0]; + @temptype@ accum = @name@_sum_of_arr(&data, count); + *((@type@ *)dataptr[1]) = @to@(accum + @from@(*((@type@ *)dataptr[1]))); #else - *((@type@ *)dataptr[1]) = @to@(accum + - @from@(*((@type@ *)dataptr[1]))); -#endif - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - - _mm_prefetch(data0 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
- */ - accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@)); -/**end repeat2**/ - data0 += 8; - } - - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#elif EINSUM_USE_SSE2 && @float64@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - - _mm_prefetch(data0 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@)); -/**end repeat2**/ - data0 += 8; - } - - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; + @temptype@ accum_re = 0, accum_im = 0; + @temptype@ *data0 = (@temptype@ *)dataptr[0]; +#ifndef NPY_DISABLE_OPTIMIZATION + for (; count > 4; count -= 4, data0 += 4*2) { + const @temptype@ re01 = data0[0] + data0[2]; + const @temptype@ re23 = data0[4] + data0[6]; + const @temptype@ im13 = data0[1] + data0[3]; + const @temptype@ im57 = data0[5] + data0[7]; + accum_re += re01 + re23; + accum_im += im13 + im57; } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ - _mm_prefetch(data0 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@)); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ - _mm_prefetch(data0 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
- */ - accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@)); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ -# if !@complex@ - accum += @from@(data0[@i@]); -# else /* complex */ - accum_re += data0[2*@i@+0]; - accum_im += data0[2*@i@+1]; -# endif -/**end repeat2**/ -#endif - -#if !@complex@ - data0 += 8; -#else - data0 += 8*2; -#endif +#endif // !NPY_DISABLE_OPTIMIZATION + for (; count > 0; --count, data0 += 2) { + accum_re += data0[0]; + accum_im += data0[1]; } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#elif EINSUM_USE_SSE2 && @float64@ - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; + ((@temptype@ *)dataptr[1])[0] += accum_re; + ((@temptype@ *)dataptr[1])[1] += accum_im; +#endif // !@complex@ } #endif /* @nop@ == 1 */ From f3608c32c2d225dfc0ee0107d6310e6f1caef75f Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 17 Dec 2020 15:05:18 +0800 Subject: [PATCH 0219/1270] add NPY_GCC_OPT_3 option. --- numpy/core/src/multiarray/einsum_sumprod.c.src | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index 03d2d614cf3c..5c45e5ec7890 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -705,7 +705,7 @@ static NPY_GCC_OPT_3 void *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum); } -static void +static NPY_GCC_OPT_3 void @name@_sum_of_products_stride0_contig_outstride0_two(int nop, char **dataptr, npy_intp const *NPY_UNUSED(strides), npy_intp count) { @@ -715,7 +715,7 @@ static void *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum); } -static void +static NPY_GCC_OPT_3 void @name@_sum_of_products_contig_stride0_outstride0_two(int nop, char **dataptr, npy_intp const *NPY_UNUSED(strides), npy_intp count) { @@ -824,7 +824,7 @@ static void #if @nop@ == 1 -static void +static NPY_GCC_OPT_3 void @name@_sum_of_products_contig_outstride0_one(int nop, char **dataptr, npy_intp const *strides, npy_intp count) { From c3ffcb33964a708ac8fa1c6ef48c2e7d056a686c Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 17 Dec 2020 19:59:30 +0800 Subject: [PATCH 0220/1270] add ascii art to demonstrate the process and fix the lane_max ranges --- numpy/core/src/multiarray/item_selection.c | 39 ++++++++++++++-------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index c71c4325a485..0bab1eff98b7 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2133,14 +2133,14 @@ count_nonzero_bytes_384(const npy_uint64 * w) /* Count the zero bytes between `*d` and `end`, updating `*d` to point to where to keep counting from. 
*/ static NPY_INLINE NPY_GCC_OPT_3 npyv_u8 -count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end) +count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_count) { const npyv_u8 vone = npyv_setall_u8(1); const npyv_u8 vzero = npyv_zero_u8(); npy_intp lane_max = 0; npyv_u8 vsum8 = npyv_zero_u8(); - while (*d < end && lane_max <= 0xFE) { + while (*d < end && lane_max <= max_count - 1) { npyv_u8 vt = npyv_cvt_u8_b8(npyv_cmpeq_u8(npyv_load_u8(*d), vzero)); vt = npyv_and_u8(vt, vone); vsum8 = npyv_add_u8(vsum8, vt); @@ -2151,41 +2151,54 @@ count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end) } static NPY_INLINE NPY_GCC_OPT_3 npyv_u16 -count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end) +count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end, npy_uint16 max_count) { npyv_u16 vsum16 = npyv_zero_u16(); npy_intp lane_max = 0; - while (*d < end && lane_max <= 0xFF00-0xFF) { - npyv_u8 vsum8 = count_zero_bytes_u8(d, end); + while (*d < end && lane_max <= max_count - 2*NPY_MAX_UINT8) { + npyv_u8 vsum8 = count_zero_bytes_u8(d, end, NPY_MAX_UINT8); npyv_u16x2 part = npyv_expand_u16_u8(vsum8); vsum16 = npyv_add_u16(vsum16, npyv_add_u16(part.val[0], part.val[1])); - lane_max += 0xFF*2; + lane_max += 2*NPY_MAX_UINT8; } return vsum16; } static NPY_INLINE NPY_GCC_OPT_3 npyv_u32 -count_zero_bytes_u32(const npy_uint8 **d, const npy_uint8 *end) +count_zero_bytes_u32(const npy_uint8 **d, const npy_uint8 *end, npy_uint32 max_count) { npyv_u32 vsum32 = npyv_zero_u32(); npy_intp lane_max = 0; - // The last accumulation needs to adjustment (2**32-1)/nlanes to avoid overflow. - while (*d < end && lane_max <= (0xFFFF0000-0xFFFF)/npyv_nlanes_u32) { - npyv_u16 vsum16 = count_zero_bytes_u16(d, end); + while (*d < end && lane_max <= max_count - 2*NPY_MAX_UINT16) { + npyv_u16 vsum16 = count_zero_bytes_u16(d, end, NPY_MAX_UINT16); npyv_u32x2 part = npyv_expand_u32_u16(vsum16); vsum32 = npyv_add_u32(vsum32, npyv_add_u32(part.val[0], part.val[1])); - lane_max += 0xFFFF*2; + lane_max += 2*NPY_MAX_UINT16; } return vsum32; } - +/* + * Counts the number of non-zero values in a raw array. + * The one loop process is as follows(take SSE2 with 128bits vector for example): + * |------------16 lanes---------| + * [vsum8] 255 255 255 ... 255 255 255 255 count_zero_bytes_u8: counting 255*16 elements + * !! + * |------------8 lanes---------| + * [vsum16] 65535 65535 65535 ... 65535 count_zero_bytes_u16: counting 65535*8 elements + * !! + * |------------4 lanes---------| + * [vsum32] 1073741824 ... 1073741824 count_zero_bytes_u32(overflow control): counting 2**32-1 elements + * !! 
+ * 2**32-1 count_zero_bytes +*/ static NPY_INLINE NPY_GCC_OPT_3 npy_intp count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) { npy_intp zero_count = 0; const npy_uint8 *end = d + unrollx; while (d < end) { - npyv_u32 vsum32 = count_zero_bytes_u32(&d, end); + // The npyv_nlanes_u32 factor ensures that the sum of all lanes still fits in a uint32 + npyv_u32 vsum32 = count_zero_bytes_u32(&d, end, NPY_MAX_UINT32 / npyv_nlanes_u32); zero_count += npyv_sum_u32(vsum32); } return unrollx - zero_count; From 5109cbbfcbbc1f61607549484aaf10269b87a261 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 17 Dec 2020 17:35:11 +0300 Subject: [PATCH 0221/1270] DOC: random: add some examples for SeedSequence Based on https://mail.python.org/pipermail/numpy-discussion/2020-December/081323.html --- .../reference/random/bit_generators/index.rst | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/doc/source/reference/random/bit_generators/index.rst b/doc/source/reference/random/bit_generators/index.rst index 315657172fa4..9822dec359b7 100644 --- a/doc/source/reference/random/bit_generators/index.rst +++ b/doc/source/reference/random/bit_generators/index.rst @@ -105,6 +105,44 @@ If you need to generate a good seed "offline", then ``SeedSequence().entropy`` or using ``secrets.randbits(128)`` from the standard library are both convenient ways. +If you need to run several stochastic simulations in parallel, best practice +is to construct a random generator instance for each simulation. +To make sure that the random streams have distinct initial states, you can use +the `spawn` method of `~SeedSequence`. For instance, here we construct a list +of 12 instances: + +.. code-block:: python + + from numpy.random import PCG64, SeedSequence + + # High quality initial entropy + entropy = 0x87351080e25cb0fad77a44a3be03b491 + base_seq = SeedSequence(entropy) + child_seqs = base_seq.spawn(12) # a list of 12 SeedSequences + generators = [PCG64(seq) for seq in child_seqs] + +.. end_block + + +An alternative way is to use the fact that a `~SeedSequence` can be initialized +by a tuple of elements. Here we use a base entropy value and an integer +``worker_id`` + +.. code-block:: python + + from numpy.random import PCG64, SeedSequence + + # High quality initial entropy + entropy = 0x87351080e25cb0fad77a44a3be03b491 + sequences = [SeedSequence((entropy, worker_id)) for worker_id in range(12)] + generators = [PCG64(seq) for seq in sequences] + +.. end_block + +Note that the sequences produced by the latter method will be distinct from +those constructed via `~SeepSequence.spawn`. + + .. 
autosummary:: :toctree: generated/ From 5b63f260933672b7182daf4fb15ffcd15bae68bf Mon Sep 17 00:00:00 2001 From: kumudlakara <55556183+kumudlakara@users.noreply.github.com> Date: Thu, 17 Dec 2020 23:38:22 +0530 Subject: [PATCH 0222/1270] DOC: Doc for deprecate_with_doc (#17852) * Add doc for deprecate_with_doc Co-authored-by: Bas van Beek <43369155+BvB93@users.noreply.github.com> Co-authored-by: Matti Picus --- numpy/lib/tests/test_utils.py | 11 ++++++++++- numpy/lib/utils.py | 27 ++++++++++++++++++++++++++- numpy/tests/test_public_api.py | 2 +- 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index 33951b92ade9..8a877ae69854 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -4,7 +4,7 @@ from numpy.core import arange from numpy.testing import assert_, assert_equal, assert_raises_regex -from numpy.lib import deprecate +from numpy.lib import deprecate, deprecate_with_doc import numpy.lib.utils as utils from io import StringIO @@ -60,6 +60,11 @@ def old_func6(self, x): new_func6 = deprecate(old_func6) +@deprecate_with_doc(msg="Rather use new_func7") +def old_func7(self,x): + return x + + def test_deprecate_decorator(): assert_('deprecated' in old_func.__doc__) @@ -73,6 +78,10 @@ def test_deprecate_fn(): assert_('new_func3' in new_func3.__doc__) +def test_deprecate_with_doc_decorator_message(): + assert_('Rather use new_func7' in old_func7.__doc__) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") @pytest.mark.parametrize('old_func, new_func', [ (old_func4, new_func4), diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 5447608bf5bb..f7e176cf3230 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -193,7 +193,32 @@ def deprecate(*args, **kwargs): else: return _Deprecate(*args, **kwargs) -deprecate_with_doc = lambda msg: _Deprecate(message=msg) + +def deprecate_with_doc(msg): + """ + Deprecates a function and includes the deprecation in its docstring. + + This function is used as a decorator. It returns an object that can be + used to issue a DeprecationWarning, by passing the to-be decorated + function as argument, this adds warning to the to-be decorated function's + docstring and returns the new function object. + + See Also + -------- + deprecate : Decorate a function such that it issues a `DeprecationWarning` + + Parameters + ---------- + msg : str + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + obj : object + + """ + return _Deprecate(message=msg) #-------------------------------------------- diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 1382e1c4b52a..7b2a590c3872 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -40,7 +40,7 @@ def test_numpy_namespace(): 'byte_bounds': 'numpy.lib.utils.byte_bounds', 'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays', 'deprecate': 'numpy.lib.utils.deprecate', - 'deprecate_with_doc': 'numpy.lib.utils.', + 'deprecate_with_doc': 'numpy.lib.utils.deprecate_with_doc', 'disp': 'numpy.lib.function_base.disp', 'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose', 'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap', From 1c0ea7369d908f6c740e6f5fe95e0534938d98d3 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 18 Dec 2020 11:42:10 +0800 Subject: [PATCH 0223/1270] add missing opt 3 flag. 
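This fix, like the larger rewrite in an earlier patch, targets the contiguous sum kernels used by ``np.einsum``. At the Python level those kernels are reached by reductions such as the following (a correctness sanity check, not a benchmark):

    import numpy as np

    a = np.linspace(0.0, 1.0, 1001, dtype=np.float64)

    # 'i->' asks einsum for a plain sum over the single axis.
    total = np.einsum('i->', a)
    assert np.isclose(total, a.sum())
    print(total)   # approximately 500.5
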
--- numpy/core/src/multiarray/einsum_sumprod.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index 5c45e5ec7890..a3d2b127f938 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -95,7 +95,7 @@ */ #if !@complex@ -static @temptype@ @name@_sum_of_arr(@type@ **data, npy_intp count) +static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ **data, npy_intp count) { @temptype@ accum = 0; #if @NPYV_CHK@ // NPYV check for @type@ From 087cb0015f3c474709720c07cd2fa0a3a4770988 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 18 Dec 2020 14:06:11 -0700 Subject: [PATCH 0224/1270] DOC, MAINT: Minor fixes to refguide_check.py documentation. [travis skip] --- tools/refguide_check.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/refguide_check.py b/tools/refguide_check.py index 138e0ece7ed5..f0f6461b77ea 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -19,11 +19,12 @@ Another use of this helper script is to check validity of code samples in docstrings:: - $ python refguide_check.py --doctests ma + $ python tools/refguide_check.py --doctests ma or in RST-based documentations:: - $ python refguide_check.py --rst docs + $ python tools/refguide_check.py --rst doc/source + """ import copy import doctest From ab585e6f8b047a9ca54578eb93e241917227e985 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 18 Dec 2020 12:39:11 +0100 Subject: [PATCH 0225/1270] ENH: Added annotations for `np.core.arrayprint` --- numpy/__init__.pyi | 21 +++--- numpy/core/arrayprint.pyi | 148 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 160 insertions(+), 9 deletions(-) create mode 100644 numpy/core/arrayprint.pyi diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index cf9b3e86ab3d..3165a6319265 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -174,6 +174,18 @@ from numpy.core._ufunc_config import ( _ErrDictOptional, ) +from numpy.core.arrayprint import ( + set_printoptions as set_printoptions, + get_printoptions as get_printoptions, + array2string as array2string, + format_float_scientific as format_float_scientific, + format_float_positional as format_float_positional, + array_repr as array_repr, + array_str as array_str, + set_string_function as set_string_function, + printoptions as printoptions, +) + from numpy.core.numeric import ( zeros_like as zeros_like, ones as ones, @@ -281,10 +293,7 @@ append: Any apply_along_axis: Any apply_over_axes: Any arange: Any -array2string: Any -array_repr: Any array_split: Any -array_str: Any asarray_chkfinite: Any asfarray: Any asmatrix: Any @@ -357,8 +366,6 @@ fliplr: Any flipud: Any float128: Any float_: Any -format_float_positional: Any -format_float_scientific: Any format_parser: Any frombuffer: Any fromfile: Any @@ -368,7 +375,6 @@ fromregex: Any fromstring: Any genfromtxt: Any get_include: Any -get_printoptions: Any geterrobj: Any gradient: Any half: Any @@ -469,7 +475,6 @@ polyint: Any polymul: Any polysub: Any polyval: Any -printoptions: Any product: Any promote_types: Any put_along_axis: Any @@ -495,8 +500,6 @@ savetxt: Any savez: Any savez_compressed: Any select: Any -set_printoptions: Any -set_string_function: Any setdiff1d: Any seterrobj: Any setxor1d: Any diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi new file mode 100644 index 000000000000..0dd1ec0575a2 --- /dev/null +++ b/numpy/core/arrayprint.pyi 
@@ -0,0 +1,148 @@ +import sys +from types import TracebackType +from typing import Any, Optional, Callable, Union, Type + +# Using a private class is by no means ideal, but it is simply a consquence +# of a `contextlib.context` returning an instance of aformentioned class +from contextlib import _GeneratorContextManager + +from numpy import ( + ndarray, + generic, + bool_, + integer, + timedelta64, + datetime64, + floating, + complexfloating, + void, + str_, + bytes_, + longdouble, + clongdouble, +) +from numpy.typing import ArrayLike, _CharLike, _FloatLike + +if sys.version_info > (3, 8): + from typing import Literal, TypedDict +else: + from typing_extensions import Literal, TypedDict + +class _FormatDict(TypedDict, total=False): + bool: Callable[[bool_], str] + int: Callable[[integer[Any]], str] + timedelta: Callable[[timedelta64], str] + datetime: Callable[[datetime64], str] + float: Callable[[floating[Any]], str] + longfloat: Callable[[longdouble], str] + complexfloat: Callable[[complexfloating[Any, Any]], str] + longcomplexfloat: Callable[[clongdouble], str] + void: Callable[[void], str] + numpystr: Callable[[_CharLike], str] + object: Callable[[object], str] + str: Callable[[Any], str] # Unused but still present? + all: Callable[[object], str] + int_kind: Callable[[integer[Any]], str] + float_kind: Callable[[floating[Any]], str] + complex_kind: Callable[[complexfloating[Any, Any]], str] + str_kind: Callable[[_CharLike], str] + +class _FormatOptions(TypedDict): + precision: int + threshold: int + edgeitems: int + linewidth: int + suppress: bool + nanstr: str + infstr: str + formatter: Optional[_FormatDict] + sign: Literal["-", "+", " "] + floatmode: Literal["fixed", "unique", "maxprec", "maxprec_equal"] + legacy: Literal[False, "1.13"] + +def set_printoptions( + precision: Optional[int] = ..., + threshold: Optional[int] = ..., + edgeitems: Optional[int] = ..., + linewidth: Optional[int] = ..., + suppress: Optional[bool] = ..., + nanstr: Optional[str] = ..., + infstr: Optional[str] = ..., + formatter: Optional[_FormatDict] = ..., + sign: Optional[Literal["-", "+", " "]] = ..., + floatmode: Optional[ + Literal["fixed", "unique", "maxprec", "maxprec_equal"] + ] = ..., + *, + legacy: Optional[Literal[False, "1.13"]] = ... +) -> None: ... +def get_printoptions() -> _FormatOptions: ... +def array2string( + a: ndarray[Any, Any], + max_line_width: Optional[int] = ..., + precision: Optional[int] = ..., + suppress_small: Optional[bool] = ..., + separator: str = ..., + prefix: str = ..., + # NOTE With the `style` argument being deprecated, + # all arguments between `formatter` and `suffix` are de facto + # keyworld-only arguments + *, + formatter: Optional[_FormatDict] = ..., + threshold: Optional[int] = ..., + edgeitems: Optional[int] = ..., + sign: Optional[Literal["-", "+", " "]] = ..., + floatmode: Optional[Literal["fixed", "unique", "maxprec", "maxprec_equal"]] = ..., + suffix: str = ..., + legacy: Optional[Literal[False, "1.13"]] = ..., +) -> str: ... +def format_float_scientific( + x: _FloatLike, + precision: Optional[int] = ..., + unique: bool = ..., + trim: Literal["k", ".", "0", "-"] = ..., + sign: bool = ..., + pad_left: Optional[int] = ..., + exp_digits: Optional[int] = ..., +) -> str: ... +def format_float_positional( + x: _FloatLike, + precision: Optional[int] = ..., + unique: bool = ..., + fractional: bool = ..., + trim: Literal["k", ".", "0", "-"] = ..., + sign: bool = ..., + pad_left: Optional[int] = ..., + pad_right: Optional[int] = ..., +) -> str: ... 
+def array_repr( + arr: ndarray[Any, Any], + max_line_width: Optional[int] = ..., + precision: Optional[int] = ..., + suppress_small: Optional[bool] = ..., +) -> str: ... +def array_str( + a: ndarray[Any, Any], + max_line_width: Optional[int] = ..., + precision: Optional[int] = ..., + suppress_small: Optional[bool] = ..., +) -> str: ... +def set_string_function( + f: Optional[Callable[[ndarray[Any, Any]], str]], repr: bool = ... +) -> None: ... +def printoptions( + precision: Optional[int] = ..., + threshold: Optional[int] = ..., + edgeitems: Optional[int] = ..., + linewidth: Optional[int] = ..., + suppress: Optional[bool] = ..., + nanstr: Optional[str] = ..., + infstr: Optional[str] = ..., + formatter: Optional[_FormatDict] = ..., + sign: Optional[Literal["-", "+", " "]] = ..., + floatmode: Optional[ + Literal["fixed", "unique", "maxprec", "maxprec_equal"] + ] = ..., + *, + legacy: Optional[Literal[False, "1.13"]] = ... +) -> _GeneratorContextManager[_FormatOptions]: ... From 76336f1cddc0f62d4ad952785877cd2f4c413911 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 18 Dec 2020 12:39:11 +0100 Subject: [PATCH 0226/1270] DOC: Clarify that `np.array2string` can only take an `ndarray` --- numpy/core/arrayprint.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index ad15304194a8..51bad18ded07 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -524,7 +524,7 @@ def array2string(a, max_line_width=None, precision=None, Parameters ---------- - a : array_like + a : ndarray Input array. max_line_width : int, optional Inserts newlines if text is longer than `max_line_width`. From 2717731451db52351f3dbf4814a1858a668744f0 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 19 Dec 2020 03:53:12 +0100 Subject: [PATCH 0227/1270] TST: Added typing tests for `np.core.arrayprint` --- numpy/typing/tests/data/fail/arrayprint.py | 13 +++++++ numpy/typing/tests/data/pass/arrayprint.py | 37 ++++++++++++++++++++ numpy/typing/tests/data/reveal/arrayprint.py | 19 ++++++++++ 3 files changed, 69 insertions(+) create mode 100644 numpy/typing/tests/data/fail/arrayprint.py create mode 100644 numpy/typing/tests/data/pass/arrayprint.py create mode 100644 numpy/typing/tests/data/reveal/arrayprint.py diff --git a/numpy/typing/tests/data/fail/arrayprint.py b/numpy/typing/tests/data/fail/arrayprint.py new file mode 100644 index 000000000000..86297a0b24a4 --- /dev/null +++ b/numpy/typing/tests/data/fail/arrayprint.py @@ -0,0 +1,13 @@ +from typing import Callable, Any +import numpy as np + +AR: np.ndarray +func1: Callable[[Any], str] +func2: Callable[[np.integer[Any]], str] + +np.array2string(AR, style=None) # E: Unexpected keyword argument +np.array2string(AR, legacy="1.14") # E: incompatible type +np.array2string(AR, sign="*") # E: incompatible type +np.array2string(AR, floatmode="default") # E: incompatible type +np.array2string(AR, formatter={"A": func1}) # E: incompatible type +np.array2string(AR, formatter={"float": func2}) # E: Incompatible types diff --git a/numpy/typing/tests/data/pass/arrayprint.py b/numpy/typing/tests/data/pass/arrayprint.py new file mode 100644 index 000000000000..6c704c755570 --- /dev/null +++ b/numpy/typing/tests/data/pass/arrayprint.py @@ -0,0 +1,37 @@ +import numpy as np + +AR = np.arange(10) +AR.setflags(write=False) + +with np.printoptions(): + np.set_printoptions( + precision=1, + threshold=2, + edgeitems=3, + linewidth=4, + suppress=False, + nanstr="Bob", + infstr="Bill", + formatter={}, + 
sign="+", + floatmode="unique", + ) + np.get_printoptions() + str(AR) + + np.array2string( + AR, + max_line_width=5, + precision=2, + suppress_small=True, + separator=";", + prefix="test", + threshold=5, + floatmode="fixed", + suffix="?", + legacy="1.13", + ) + np.format_float_scientific(1, precision=5) + np.format_float_positional(1, trim="k") + np.array_repr(AR) + np.array_str(AR) diff --git a/numpy/typing/tests/data/reveal/arrayprint.py b/numpy/typing/tests/data/reveal/arrayprint.py new file mode 100644 index 000000000000..e797097ebb94 --- /dev/null +++ b/numpy/typing/tests/data/reveal/arrayprint.py @@ -0,0 +1,19 @@ +from typing import Any, Callable +import numpy as np + +AR: np.ndarray[Any, Any] +func_float: Callable[[np.floating[Any]], str] +func_int: Callable[[np.integer[Any]], str] + +reveal_type(np.get_printoptions()) # E: TypedDict +reveal_type(np.array2string( # E: str + AR, formatter={'float_kind': func_float, 'int_kind': func_int} +)) +reveal_type(np.format_float_scientific(1.0)) # E: str +reveal_type(np.format_float_positional(1)) # E: str +reveal_type(np.array_repr(AR)) # E: str +reveal_type(np.array_str(AR)) # E: str + +reveal_type(np.printoptions()) # E: contextlib._GeneratorContextManager +with np.printoptions() as dct: + reveal_type(dct) # E: TypedDict From 7dce0ed00dd406d03705257514e6fac9fa4e25bb Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Fri, 18 Dec 2020 17:34:41 -0800 Subject: [PATCH 0228/1270] DOC: Parameter name typo axes -> axis in numpy.fft._pocketfft. The parameter name seem to have a typo in both those case and reference axis (and not axes), this is likely due to copy-past as some other functions in this modules use axes (when several indices are required), but other also use `axis` and have the correct spelling. From review it also seem like previous phrasing is unclear so update all similar entries to reflect the new phrasing. --- numpy/fft/_pocketfft.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 2066b95eac2d..bf0e60b6de56 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -160,7 +160,7 @@ def fft(a, n=None, axis=-1, norm=None): Raises ------ IndexError - if `axes` is larger than the last axis of `a`. + If `axis` is not a valid axis of `a`. See Also -------- @@ -272,7 +272,7 @@ def ifft(a, n=None, axis=-1, norm=None): Raises ------ IndexError - If `axes` is larger than the last axis of `a`. + If `axis` is not a valid axis of `a`. See Also -------- @@ -358,7 +358,7 @@ def rfft(a, n=None, axis=-1, norm=None): Raises ------ IndexError - If `axis` is larger than the last axis of `a`. + If `axis` is not a valid axis of `a`. See Also -------- @@ -461,7 +461,7 @@ def irfft(a, n=None, axis=-1, norm=None): Raises ------ IndexError - If `axis` is larger than the last axis of `a`. + If `axis` is not a valid axis of `a`. See Also -------- @@ -556,7 +556,7 @@ def hfft(a, n=None, axis=-1, norm=None): Raises ------ IndexError - If `axis` is larger than the last axis of `a`. + If `axis` is not a valid axis of `a`. See also -------- From 09cbb7495b92a37ddfb5e7d1bc2e9964bc6a0609 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Sun, 13 Dec 2020 17:08:40 -0800 Subject: [PATCH 0229/1270] DOC: Fix a couple of reference to verbatim and vice versa This update a coupe of references (single backticks) that actually are not to verbatim/code (double backticks); and a couple of verbatim to reference when they do actually exists and can be resolved in context. 
I probably missed others; I stayed simple but spotted a few other inconsistencies that I did not fix: - some ``...`` could actually be :math:`...`, but it is not always clear whether that would be better. - some intervals are [``...``], others are ``[...]`` I guess these could be discussed individually; it was mostly the failing references that bothered me. --- numpy/core/code_generators/ufunc_docstrings.py | 12 ++++++------ numpy/lib/polynomial.py | 4 ++-- numpy/lib/scimath.py | 8 ++++---- numpy/polynomial/polynomial.py | 6 +++--- numpy/testing/_private/utils.py | 2 +- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index b7edd2834b43..04181fbc2e92 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -185,7 +185,7 @@ def add_newdoc(place, name, doc): Notes ----- `arccos` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `cos(z) = x`. The convention is to return + many numbers `z` such that ``cos(z) = x``. The convention is to return the angle `z` whose real part lies in `[0, pi]`. For real-valued input data types, `arccos` always returns real output. @@ -193,7 +193,7 @@ def add_newdoc(place, name, doc): it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `arccos` is a complex analytic function that - has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from + has branch cuts ``[-inf, -1]`` and `[1, inf]` and is continuous from above on the former and from below on the latter. The inverse `cos` is also known as `acos` or cos^-1. @@ -245,7 +245,7 @@ def add_newdoc(place, name, doc): ----- `arccosh` is a multivalued function: for each `x` there are infinitely many numbers `z` such that `cosh(z) = x`. The convention is to return the - `z` whose imaginary part lies in `[-pi, pi]` and the real part in + `z` whose imaginary part lies in ``[-pi, pi]`` and the real part in ``[0, inf]``. For real-valued input data types, `arccosh` always returns real output. @@ -406,7 +406,7 @@ def add_newdoc(place, name, doc): it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `arctan` is a complex analytic function that - has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous + has [``1j, infj``] and [``-1j, -infj``] as branch cuts, and is continuous from the left on the former and from the right on the latter. The inverse tangent is also known as `atan` or tan^{-1}. @@ -544,7 +544,7 @@ def add_newdoc(place, name, doc): Notes ----- `arctanh` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `tanh(z) = x`. The convention is to return + many numbers `z` such that ``tanh(z) = x``. The convention is to return the `z` whose imaginary part lies in `[-pi/2, pi/2]`. For real-valued input data types, `arctanh` always returns real output. @@ -765,7 +765,7 @@ def add_newdoc(place, name, doc): Return the ceiling of the input, element-wise. The ceil of the scalar `x` is the smallest integer `i`, such that - `i >= x`. It is often denoted as :math:`\\lceil x \\rceil`. + ``i >= x``. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters ---------- diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index 0fd9bbd79c6a..ea966ffa343b 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -708,8 +708,8 @@ def polyval(p, x): ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` - If `x` is a sequence, then `p(x)` is returned for each element of `x`. - If `x` is another polynomial then the composite polynomial `p(x(t))` + If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``. + If `x` is another polynomial then the composite polynomial ``p(x(t))`` is returned. Parameters diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py index 2b0d38c37f2b..ed9ffd295b90 100644 --- a/numpy/lib/scimath.py +++ b/numpy/lib/scimath.py @@ -572,10 +572,10 @@ def arctanh(x): Compute the inverse hyperbolic tangent of `x`. Return the "principal value" (for a description of this, see - `numpy.arctanh`) of `arctanh(x)`. For real `x` such that - `abs(x) < 1`, this is a real number. If `abs(x) > 1`, or if `x` is + `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that + ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is complex, the result is complex. Finally, `x = 1` returns``inf`` and - `x=-1` returns ``-inf``. + ``x=-1`` returns ``-inf``. Parameters ---------- @@ -597,7 +597,7 @@ def arctanh(x): ----- For an arctanh() that returns ``NAN`` when real `x` is not in the interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does - return +/-inf for `x = +/-1`). + return +/-inf for ``x = +/-1``). Examples -------- diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 1baa7d870881..44784023bfd9 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -156,7 +156,7 @@ def polyfromroots(roots): .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - where the `r_n` are the roots specified in `roots`. If a zero has + where the ``r_n`` are the roots specified in `roots`. If a zero has multiplicity n, then it must appear in `roots` n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear @@ -192,11 +192,11 @@ def polyfromroots(roots): Notes ----- The coefficients are determined by multiplying together linear factors - of the form `(x - r_i)`, i.e. + of the form ``(x - r_i)``, i.e. .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) - where ``n == len(roots) - 1``; note that this implies that `1` is always + where ``n == len(roots) - 1``; note that this implies that ``1`` is always returned for :math:`a_n`. Examples diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e974bbd0911a..b4d42728e3cd 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -481,7 +481,7 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): instead of this function for more consistent floating point comparisons. - The test verifies that the elements of ``actual`` and ``desired`` satisfy. + The test verifies that the elements of `actual` and `desired` satisfy. 
``abs(desired-actual) < 1.5 * 10**(-decimal)`` From 82e5747dd40cbbde9c728452f21fa4716492c81d Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 19 Dec 2020 15:41:38 +0100 Subject: [PATCH 0230/1270] MAINT: Remove any mention of `formatter["str"]` It's been non-functional for the past 8 years (xref https://github.com/numpy/numpy/pull/459) --- numpy/core/arrayprint.py | 12 ++++-------- numpy/core/arrayprint.pyi | 1 - 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 51bad18ded07..94ec8ed3446a 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -146,7 +146,6 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, - 'longcomplexfloat' : composed of two 128-bit floats - 'numpystr' : types `numpy.string_` and `numpy.unicode_` - 'object' : `np.object_` arrays - - 'str' : all other strings Other keys that can be used to set a group of types at once are: @@ -154,7 +153,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, - 'int_kind' : sets 'int' - 'float_kind' : sets 'float' and 'longfloat' - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' + - 'str_kind' : sets 'numpystr' floatmode : str, optional Controls the interpretation of the `precision` option for floating-point types. Can take the following values @@ -375,8 +374,7 @@ def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy, 'timedelta': lambda: TimedeltaFormat(data), 'object': lambda: _object_format, 'void': lambda: str_format, - 'numpystr': lambda: repr_format, - 'str': lambda: str} + 'numpystr': lambda: repr_format} # we need to wrap values in `formatter` in a lambda, so that the interface # is the same as the above values. @@ -398,8 +396,7 @@ def indirect(x): for key in ['complexfloat', 'longcomplexfloat']: formatdict[key] = indirect(formatter['complex_kind']) if 'str_kind' in fkeys: - for key in ['numpystr', 'str']: - formatdict[key] = indirect(formatter['str_kind']) + formatdict['numpystr'] = indirect(formatter['str_kind']) for key in formatdict.keys(): if key in fkeys: formatdict[key] = indirect(formatter[key]) @@ -572,7 +569,6 @@ def array2string(a, max_line_width=None, precision=None, - 'longcomplexfloat' : composed of two 128-bit floats - 'void' : type `numpy.void` - 'numpystr' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings Other keys that can be used to set a group of types at once are: @@ -580,7 +576,7 @@ def array2string(a, max_line_width=None, precision=None, - 'int_kind' : sets 'int' - 'float_kind' : sets 'float' and 'longfloat' - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' + - 'str_kind' : sets 'numpystr' threshold : int, optional Total number of array elements which trigger summarization rather than full repr. diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi index 0dd1ec0575a2..840c8d8dabea 100644 --- a/numpy/core/arrayprint.pyi +++ b/numpy/core/arrayprint.pyi @@ -40,7 +40,6 @@ class _FormatDict(TypedDict, total=False): void: Callable[[void], str] numpystr: Callable[[_CharLike], str] object: Callable[[object], str] - str: Callable[[Any], str] # Unused but still present? 
all: Callable[[object], str] int_kind: Callable[[integer[Any]], str] float_kind: Callable[[floating[Any]], str] From 05fa6e4bc219fb4db0808ef5cf9aed96c8ec0844 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 19 Dec 2020 15:43:34 +0100 Subject: [PATCH 0231/1270] MAINT: Added a type-alias for the `floatmode` annotations Co-Authored-By: Eric Wieser <425260+eric-wieser@users.noreply.github.com> --- numpy/core/arrayprint.pyi | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi index 840c8d8dabea..6aaae0320173 100644 --- a/numpy/core/arrayprint.pyi +++ b/numpy/core/arrayprint.pyi @@ -28,6 +28,8 @@ if sys.version_info > (3, 8): else: from typing_extensions import Literal, TypedDict +_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] + class _FormatDict(TypedDict, total=False): bool: Callable[[bool_], str] int: Callable[[integer[Any]], str] @@ -56,7 +58,7 @@ class _FormatOptions(TypedDict): infstr: str formatter: Optional[_FormatDict] sign: Literal["-", "+", " "] - floatmode: Literal["fixed", "unique", "maxprec", "maxprec_equal"] + floatmode: _FloatMode legacy: Literal[False, "1.13"] def set_printoptions( @@ -69,9 +71,7 @@ def set_printoptions( infstr: Optional[str] = ..., formatter: Optional[_FormatDict] = ..., sign: Optional[Literal["-", "+", " "]] = ..., - floatmode: Optional[ - Literal["fixed", "unique", "maxprec", "maxprec_equal"] - ] = ..., + floatmode: Optional[_FloatMode] = ..., *, legacy: Optional[Literal[False, "1.13"]] = ... ) -> None: ... @@ -83,7 +83,7 @@ def array2string( suppress_small: Optional[bool] = ..., separator: str = ..., prefix: str = ..., - # NOTE With the `style` argument being deprecated, + # NOTE: With the `style` argument being deprecated, # all arguments between `formatter` and `suffix` are de facto # keyworld-only arguments *, @@ -91,7 +91,7 @@ def array2string( threshold: Optional[int] = ..., edgeitems: Optional[int] = ..., sign: Optional[Literal["-", "+", " "]] = ..., - floatmode: Optional[Literal["fixed", "unique", "maxprec", "maxprec_equal"]] = ..., + floatmode: Optional[_FloatMode] = ..., suffix: str = ..., legacy: Optional[Literal[False, "1.13"]] = ..., ) -> str: ... @@ -139,9 +139,7 @@ def printoptions( infstr: Optional[str] = ..., formatter: Optional[_FormatDict] = ..., sign: Optional[Literal["-", "+", " "]] = ..., - floatmode: Optional[ - Literal["fixed", "unique", "maxprec", "maxprec_equal"] - ] = ..., + floatmode: Optional[_FloatMode] = ..., *, legacy: Optional[Literal[False, "1.13"]] = ... ) -> _GeneratorContextManager[_FormatOptions]: ... 
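The annotations added and refined in the patches above (0225, 0227, 0230, 0231) only describe the existing runtime behaviour of ``np.core.arrayprint``. As a hedged illustration, not part of any patch in this series, the sketch below shows the kind of code those stubs are meant to type-check; it mirrors the pass/reveal test cases added in PATCH 0227 and uses only public NumPy functions::

    import numpy as np

    arr = np.linspace(0, 1, 7)

    # printoptions() is a context manager; per the stubs above it yields the
    # current print options, typed as the _FormatOptions TypedDict.
    with np.printoptions(precision=3, floatmode="fixed") as opts:
        print(opts["precision"])                     # 3
        print(np.array2string(arr, separator=", "))  # plain str

    # Both format_float_* helpers take a float-like scalar and return str.
    print(np.format_float_positional(1 / 3, precision=4, trim="k"))
    print(np.format_float_scientific(1 / 3, precision=4))

Under the stubs, a type checker should infer ``str`` for the ``array2string`` and ``format_float_*`` calls above, matching the reveal tests in PATCH 0227.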
From dc3fe03617103814d56941b74f4ebbf9b33cf3b2 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Sat, 19 Dec 2020 07:53:09 -0800 Subject: [PATCH 0232/1270] BUG: make a variable volatile to work around clang compiler bug (#18030) * BUG: make a variable volatile to work around clang compiler bug * Adding comments for relevance * TST: Adding test to check for no overflow warnings in log Fixes #18005 --- numpy/core/src/umath/simd.inc.src | 12 ++++++++++-- numpy/core/tests/test_umath.py | 5 +++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index a118fb0d091e..c2104810f556 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -1549,7 +1549,11 @@ fma_get_exponent(__m256 x) __m256 denormal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_LT_OQ); __m256 normal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_GE_OQ); - __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask); + /* + * It is necessary for temp1 to be volatile, a bug in clang optimizes it out which leads + * to an overflow warning in some cases. See https://github.com/numpy/numpy/issues/18005 + */ + volatile __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask); __m256 temp = _mm256_mul_ps(temp1, two_power_100); x = _mm256_blendv_ps(x, temp, denormal_mask); @@ -1576,7 +1580,11 @@ fma_get_mantissa(__m256 x) __m256 denormal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_LT_OQ); __m256 normal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_GE_OQ); - __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask); + /* + * It is necessary for temp1 to be volatile, a bug in clang optimizes it out which leads + * to an overflow warning in some cases. See https://github.com/numpy/numpy/issues/18005 + */ + volatile __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask); __m256 temp = _mm256_mul_ps(temp1, two_power_100); x = _mm256_blendv_ps(x, temp, denormal_mask); diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 8162e52bd628..bc72aa862a73 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -926,6 +926,11 @@ def test_log_values(self): assert_raises(FloatingPointError, np.log, np.float32(-np.inf)) assert_raises(FloatingPointError, np.log, np.float32(-1.0)) + # See https://github.com/numpy/numpy/issues/18005 + with assert_no_warnings(): + a = np.array(1e9, dtype='float32') + np.log(a) + def test_sincos_values(self): with np.errstate(all='ignore'): x = [np.nan, np.nan, np.nan, np.nan] From bfbe56caeb056456662309b33510f6b5531d3973 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Dec 2020 09:35:05 -0700 Subject: [PATCH 0233/1270] MAINT: Fix typo SeepSequence --- doc/source/reference/random/bit_generators/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/random/bit_generators/index.rst b/doc/source/reference/random/bit_generators/index.rst index 9822dec359b7..6f8cf02ca49d 100644 --- a/doc/source/reference/random/bit_generators/index.rst +++ b/doc/source/reference/random/bit_generators/index.rst @@ -140,7 +140,7 @@ by a tuple of elements. Here we use a base entropy value and an integer .. end_block Note that the sequences produced by the latter method will be distinct from -those constructed via `~SeepSequence.spawn`. +those constructed via `~SeedSequence.spawn`. .. 
autosummary:: From 81bb563172c84eb1eba92b05fa99d902d28af13b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Dec 2020 13:07:40 -0700 Subject: [PATCH 0234/1270] MAINT: Small style fixes. --- numpy/core/code_generators/generate_umath.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index cdd2a84941fa..4e9a2cfec3ef 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -46,7 +46,7 @@ class TypeDescription: If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y instead of PyUFunc_x_x/PyUFunc_xx_x. cfunc_alias : str or none, optional - appended to inner loop C function name, e.g. FLOAT_{cfunc_alias} (see make_arrays) + Appended to inner loop C function name, e.g., FLOAT_{cfunc_alias}. See make_arrays. NOTE: it doesn't support 'astype' simd: list Available SIMD ufunc loops, dispatched at runtime in specified order From c5539458a06e79af6b3a96134de39336f44aa1a8 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 15 Dec 2020 06:08:16 +0000 Subject: [PATCH 0235/1270] BLD, BUG: Fix detecting aarch64 on macOS Co-authored-by: h-vetinari --- .../reference/simd/simd-optimizations.py | 8 +- numpy/distutils/ccompiler_opt.py | 144 +++++++++--------- numpy/distutils/tests/test_ccompiler_opt.py | 2 +- .../tests/test_ccompiler_opt_conf.py | 4 +- 4 files changed, 77 insertions(+), 81 deletions(-) diff --git a/doc/source/reference/simd/simd-optimizations.py b/doc/source/reference/simd/simd-optimizations.py index 5d6da50e3aff..a78302db5e89 100644 --- a/doc/source/reference/simd/simd-optimizations.py +++ b/doc/source/reference/simd/simd-optimizations.py @@ -8,7 +8,7 @@ from numpy.distutils.ccompiler_opt import CCompilerOpt class FakeCCompilerOpt(CCompilerOpt): - fake_info = "" + fake_info = ("arch", "compiler", "extra_args") # disable caching no need for it conf_nocache = True def __init__(self, *args, **kwargs): @@ -101,7 +101,7 @@ def features_table_sections(name, ftable=None, gtable=None, tab_size=4): return content def features_table(arch, cc="gcc", pretty_name=None, **kwargs): - FakeCCompilerOpt.fake_info = arch + cc + FakeCCompilerOpt.fake_info = (arch, cc, '') ccopt = FakeCCompilerOpt(cpu_baseline="max") features = ccopt.cpu_baseline_names() ftable = ccopt.gen_features_table(features, **kwargs) @@ -112,12 +112,12 @@ def features_table(arch, cc="gcc", pretty_name=None, **kwargs): return features_table_sections(pretty_name, ftable, gtable, **kwargs) def features_table_diff(arch, cc, cc_vs="gcc", pretty_name=None, **kwargs): - FakeCCompilerOpt.fake_info = arch + cc + FakeCCompilerOpt.fake_info = (arch, cc, '') ccopt = FakeCCompilerOpt(cpu_baseline="max") fnames = ccopt.cpu_baseline_names() features = {f:ccopt.feature_implies(f) for f in fnames} - FakeCCompilerOpt.fake_info = arch + cc_vs + FakeCCompilerOpt.fake_info = (arch, cc_vs, '') ccopt_vs = FakeCCompilerOpt(cpu_baseline="max") fnames_vs = ccopt_vs.cpu_baseline_names() features_vs = {f:ccopt_vs.feature_implies(f) for f in fnames_vs} diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index 20dbb5c00ddb..0e0219609810 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -579,45 +579,41 @@ def dist_test(self, source, flags): return test def dist_info(self): - """Return a string containing all environment information, required - by the abstract class '_CCompiler' to discovering the platform - environment, 
also used as a cache factor in order to detect - any changes from outside. + """ + Return a tuple containing info about (platform, compiler, extra_args), + required by the abstract class '_CCompiler' for discovering the + platform environment. This is also used as a cache factor in order + to detect any changes happening from outside. """ if hasattr(self, "_dist_info"): return self._dist_info - # play it safe - cc_info = "" - compiler = getattr(self._ccompiler, "compiler", None) - if compiler is not None: - if isinstance(compiler, str): - cc_info += compiler - elif hasattr(compiler, "__iter__"): - cc_info += ' '.join(compiler) - # in case if 'compiler' attribute doesn't provide anything - cc_type = getattr(self._ccompiler, "compiler_type", "") - if cc_type in ("intelem", "intelemw", "mingw64"): - cc_info += "x86_64" + + cc_type = getattr(self._ccompiler, "compiler_type", '') + if cc_type in ("intelem", "intelemw"): + platform = "x86_64" elif cc_type in ("intel", "intelw", "intele"): - cc_info += "x86" - elif cc_type in ("msvc", "mingw32"): - import platform - if platform.architecture()[0] == "32bit": - cc_info += "x86" + platform = "x86" + else: + from distutils.util import get_platform + platform = get_platform() + + cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) + if not cc_type or cc_type == "unix": + if hasattr(cc_info, "__iter__"): + compiler = cc_info[0] else: - cc_info += "x86_64" + compiler = str(cc_info) else: - # the last hope, too bad for cross-compiling - import platform - cc_info += platform.machine() + compiler = cc_type - cc_info += cc_type - cflags = os.environ.get("CFLAGS", "") - if cflags not in cc_info: - cc_info += cflags + if hasattr(cc_info, "__iter__") and len(cc_info) > 1: + extra_args = ' '.join(cc_info[1:]) + else: + extra_args = os.environ.get("CFLAGS", "") + extra_args += os.environ.get("CPPFLAGS", "") - self._dist_info = cc_info - return cc_info + self._dist_info = (platform, compiler, extra_args) + return self._dist_info @staticmethod def dist_error(*args): @@ -893,57 +889,56 @@ class _CCompiler(object): def __init__(self): if hasattr(self, "cc_is_cached"): return - to_detect = ( - # attr regex - ( - ("cc_on_x64", "^(x|x86_|amd)64"), - ("cc_on_x86", "^(x86|i386|i686)"), - ("cc_on_ppc64le", "^(powerpc|ppc)64(el|le)"), - ("cc_on_ppc64", "^(powerpc|ppc)64"), - ("cc_on_armhf", "^arm"), - ("cc_on_aarch64", "^aarch64"), - # priority is given to first of string - # if it fail we search in the rest, due - # to append platform.machine() at the end, - # check method 'dist_info()' for more clarification. 
- ("cc_on_x64", ".*(x|x86_|amd)64.*"), - ("cc_on_x86", ".*(x86|i386|i686).*"), - ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*"), - ("cc_on_ppc64", ".*(powerpc|ppc)64.*"), - ("cc_on_armhf", ".*arm.*"), - ("cc_on_aarch64", ".*aarch64.*"), - # undefined platform - ("cc_on_noarch", ""), - ), - ( - ("cc_is_gcc", r".*(gcc|gnu\-g).*"), - ("cc_is_clang", ".*clang.*"), - ("cc_is_iccw", ".*(intelw|intelemw|iccw).*"), # intel msvc like - ("cc_is_icc", ".*(intel|icc).*"), # intel unix like - ("cc_is_msvc", ".*msvc.*"), - ("cc_is_nocc", ""), - ), - (("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*"),), - (("cc_has_native", ".*(-march=native|-xHost|/QxHost).*"),), - # in case if the class run with -DNPY_DISABLE_OPTIMIZATION - (("cc_noopt", ".*DISABLE_OPT.*"),), + # attr regex + detect_arch = ( + ("cc_on_x64", ".*(x|x86_|amd)64.*"), + ("cc_on_x86", ".*(win32|x86|i386|i686).*"), + ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*"), + ("cc_on_ppc64", ".*(powerpc|ppc)64.*"), + ("cc_on_aarch64", ".*(aarch64|arm64).*"), + ("cc_on_armhf", ".*arm.*"), + # undefined platform + ("cc_on_noarch", ""), + ) + detect_compiler = ( + ("cc_is_gcc", r".*(gcc|gnu\-g).*"), + ("cc_is_clang", ".*clang.*"), + ("cc_is_iccw", ".*(intelw|intelemw|iccw).*"), # intel msvc like + ("cc_is_icc", ".*(intel|icc).*"), # intel unix like + ("cc_is_msvc", ".*msvc.*"), + # undefined compiler will be treat it as gcc + ("cc_is_nocc", ""), + ) + detect_args = ( + ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*"), + ("cc_has_native", ".*(-march=native|-xHost|/QxHost).*"), + # in case if the class run with -DNPY_DISABLE_OPTIMIZATION + ("cc_noopt", ".*DISABLE_OPT.*"), ) - for section in to_detect: - for attr, rgex in section: - setattr(self, attr, False) dist_info = self.dist_info() - for section in to_detect: + platform, compiler_info, extra_args = dist_info + # set False to all attrs + for section in (detect_arch, detect_compiler, detect_args): for attr, rgex in section: - if rgex and not re.match(rgex, dist_info, re.IGNORECASE): + setattr(self, attr, False) + + for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): + for attr, rgex in detect: + if rgex and not re.match(rgex, searchin, re.IGNORECASE): continue setattr(self, attr, True) break + for attr, rgex in detect_args: + if rgex and not re.match(rgex, extra_args, re.IGNORECASE): + continue + setattr(self, attr, True) + if self.cc_on_noarch: self.dist_log( - "unable to detect CPU arch via compiler info, " - "optimization is disabled \ninfo << %s >> " % dist_info, + "unable to detect CPU architecture which lead to disable the optimization. " + f"check dist_info:<<\n{dist_info}\n>>", stderr=True ) self.cc_noopt = True @@ -958,8 +953,9 @@ def __init__(self): but still has the same gcc optimization flags. """ self.dist_log( - "unable to detect compiler name via info <<\n%s\n>> " - "treating it as a gcc" % dist_info, + "unable to detect compiler type which leads to treating it as GCC. " + "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." 
+ f"check dist_info:<<\n{dist_info}\n>>", stderr=True ) self.cc_is_gcc = True diff --git a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py index a789be1ea99b..287a683c8d63 100644 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ b/numpy/distutils/tests/test_ccompiler_opt.py @@ -82,7 +82,7 @@ def setup(self): self._opt = None def nopt(self, *args, **kwargs): - FakeCCompilerOpt.fake_info = self.arch + '_' + self.cc + FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") return FakeCCompilerOpt(*args, **kwargs) def opt(self): diff --git a/numpy/distutils/tests/test_ccompiler_opt_conf.py b/numpy/distutils/tests/test_ccompiler_opt_conf.py index 244748e5889a..09c1fad40c54 100644 --- a/numpy/distutils/tests/test_ccompiler_opt_conf.py +++ b/numpy/distutils/tests/test_ccompiler_opt_conf.py @@ -19,7 +19,7 @@ ) class FakeCCompilerOpt(CCompilerOpt): - fake_info = "" + fake_info = ("arch", "compiler", "extra_args") def __init__(self, *args, **kwargs): CCompilerOpt.__init__(self, None, **kwargs) def dist_compile(self, sources, flags, **kwargs): @@ -169,7 +169,7 @@ def setup(self): def test_features(self): for arch, compilers in arch_compilers.items(): for cc in compilers: - FakeCCompilerOpt.fake_info = arch + cc + FakeCCompilerOpt.fake_info = (arch, cc, "") _TestConfFeatures() if is_standalone: From 3944f409884b50c3cb0cf101b54e622e6125a850 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sun, 20 Dec 2020 02:05:29 +0000 Subject: [PATCH 0236/1270] BLD, MAINT: add platform info to the final optimization report --- numpy/distutils/ccompiler_opt.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index 0e0219609810..f695ebb9267a 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -2300,19 +2300,25 @@ def generate_dispatch_header(self, header_path): def report(self, full=False): report = [] + platform_rows = [] baseline_rows = [] dispatch_rows = [] + report.append(("Platform", platform_rows)) + report.append(("", "")) report.append(("CPU baseline", baseline_rows)) report.append(("", "")) report.append(("CPU dispatch", dispatch_rows)) + ########## platform ########## + platform_rows.append(("Architecture", ( + "unsupported" if self.cc_on_noarch else self.cc_march) + )) + platform_rows.append(("Compiler", ( + "unix-like" if self.cc_is_nocc else self.cc_name) + )) ########## baseline ########## if self.cc_noopt: - baseline_rows.append(( - "Requested", "optimization disabled %s" % ( - "(unsupported arch)" if self.cc_on_noarch else "" - ) - )) + baseline_rows.append(("Requested", "optimization disabled")) else: baseline_rows.append(("Requested", repr(self._requested_baseline))) @@ -2333,11 +2339,7 @@ def report(self, full=False): ########## dispatch ########## if self.cc_noopt: - dispatch_rows.append(( - "Requested", "optimization disabled %s" % ( - "(unsupported arch)" if self.cc_on_noarch else "" - ) - )) + baseline_rows.append(("Requested", "optimization disabled")) else: dispatch_rows.append(("Requested", repr(self._requested_dispatch))) From f5579746f07a4f40d8b9391336fac0ee79659d22 Mon Sep 17 00:00:00 2001 From: Ryan Polley Date: Sun, 20 Dec 2020 20:45:36 -0600 Subject: [PATCH 0237/1270] DEC: deprecate the decorators in np.testing.dec Since the decorators in np.testing.dec are intended for nose test framework support, deprecate them by updating the docstring and adding a warning when they are used on functions --- 
numpy/testing/_private/decorators.py | 41 ++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/numpy/testing/_private/decorators.py b/numpy/testing/_private/decorators.py index 4c87d1a4919c..a18b3aa2ec72 100644 --- a/numpy/testing/_private/decorators.py +++ b/numpy/testing/_private/decorators.py @@ -14,6 +14,7 @@ """ import collections.abc +import warnings from .utils import SkipTest, assert_warns, HAS_REFCOUNT @@ -23,6 +24,10 @@ def slow(t): """ + .. deprecated:: 1.20 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + Label a test as 'slow'. The exact definition of a slow test is obviously both subjective and @@ -52,12 +57,18 @@ def test_big(self): print('Big, slow test') """ + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) t.slow = True return t def setastest(tf=True): """ + .. deprecated:: 1.20 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + Signals to nose that this function is or is not a test. Parameters @@ -84,6 +95,8 @@ def func_with_test_in_name(arg1, arg2): pass """ + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) def set_test(t): t.__test__ = tf return t @@ -91,6 +104,10 @@ def set_test(t): def skipif(skip_condition, msg=None): """ + .. deprecated:: 1.20 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + Make function raise SkipTest exception if a given condition is true. If the condition is a callable, it is used at runtime to dynamically @@ -123,6 +140,9 @@ def skip_decorator(f): # import time overhead at actual test-time. import nose + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + # Allow for both boolean or callable skip conditions. if isinstance(skip_condition, collections.abc.Callable): skip_val = lambda: skip_condition() @@ -167,6 +187,10 @@ def skipper_gen(*args, **kwargs): def knownfailureif(fail_condition, msg=None): """ + .. deprecated:: 1.20 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + Make function raise KnownFailureException exception if given condition is true. If the condition is a callable, it is used at runtime to dynamically @@ -195,6 +219,9 @@ def knownfailureif(fail_condition, msg=None): function in order to transmit function name, and various other metadata. """ + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + if msg is None: msg = 'Test skipped due to known failure' @@ -221,6 +248,10 @@ def knownfailer(*args, **kwargs): def deprecated(conditional=True): """ + .. deprecated:: 1.20 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. 
+ Please use the nose2 or pytest frameworks instead. + Filter deprecation warnings while running the test suite. This decorator can be used to filter DeprecationWarning's, to avoid @@ -249,6 +280,9 @@ def deprecate_decorator(f): # import time overhead at actual test-time. import nose + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + def _deprecated_imp(*args, **kwargs): # Poor man's replacement for the with statement with assert_warns(DeprecationWarning): @@ -267,6 +301,10 @@ def _deprecated_imp(*args, **kwargs): def parametrize(vars, input): """ + .. deprecated:: 1.20 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + Pytest compatibility class. This implements the simplest level of pytest.mark.parametrize for use in nose as an aid in making the transition to pytest. It achieves that by adding a dummy var parameter and ignoring @@ -279,6 +317,9 @@ def parametrize(vars, input): """ from .parameterized import parameterized + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + return parameterized(input) _needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") From 7fb6a0bf6064c5a06440ea188a281e2bb8c49e61 Mon Sep 17 00:00:00 2001 From: Ryan Polley Date: Sun, 20 Dec 2020 21:06:04 -0600 Subject: [PATCH 0238/1270] DEC: update deprecation version to 1.21 and add date comments --- numpy/testing/_private/decorators.py | 30 +++++++++++++++++----------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/numpy/testing/_private/decorators.py b/numpy/testing/_private/decorators.py index a18b3aa2ec72..cb49d9a73473 100644 --- a/numpy/testing/_private/decorators.py +++ b/numpy/testing/_private/decorators.py @@ -24,7 +24,7 @@ def slow(t): """ - .. deprecated:: 1.20 + .. deprecated:: 1.21 This decorator is retained for compatibility with the nose testing framework, which is being phased out. Please use the nose2 or pytest frameworks instead. @@ -57,15 +57,16 @@ def test_big(self): print('Big, slow test') """ + # Numpy 1.21, 2020-12-20 warnings.warn('the np.testing.dec decorators are included for nose support, and are ' - 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) t.slow = True return t def setastest(tf=True): """ - .. deprecated:: 1.20 + .. deprecated:: 1.21 This decorator is retained for compatibility with the nose testing framework, which is being phased out. Please use the nose2 or pytest frameworks instead. @@ -95,8 +96,9 @@ def func_with_test_in_name(arg1, arg2): pass """ + # Numpy 1.21, 2020-12-20 warnings.warn('the np.testing.dec decorators are included for nose support, and are ' - 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) def set_test(t): t.__test__ = tf return t @@ -104,7 +106,7 @@ def set_test(t): def skipif(skip_condition, msg=None): """ - .. deprecated:: 1.20 + .. 
deprecated:: 1.21 This decorator is retained for compatibility with the nose testing framework, which is being phased out. Please use the nose2 or pytest frameworks instead. @@ -140,8 +142,9 @@ def skip_decorator(f): # import time overhead at actual test-time. import nose + # Numpy 1.21, 2020-12-20 warnings.warn('the np.testing.dec decorators are included for nose support, and are ' - 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) # Allow for both boolean or callable skip conditions. if isinstance(skip_condition, collections.abc.Callable): @@ -187,7 +190,7 @@ def skipper_gen(*args, **kwargs): def knownfailureif(fail_condition, msg=None): """ - .. deprecated:: 1.20 + .. deprecated:: 1.21 This decorator is retained for compatibility with the nose testing framework, which is being phased out. Please use the nose2 or pytest frameworks instead. @@ -219,8 +222,9 @@ def knownfailureif(fail_condition, msg=None): function in order to transmit function name, and various other metadata. """ + # Numpy 1.21, 2020-12-20 warnings.warn('the np.testing.dec decorators are included for nose support, and are ' - 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) if msg is None: msg = 'Test skipped due to known failure' @@ -248,7 +252,7 @@ def knownfailer(*args, **kwargs): def deprecated(conditional=True): """ - .. deprecated:: 1.20 + .. deprecated:: 1.21 This decorator is retained for compatibility with the nose testing framework, which is being phased out. Please use the nose2 or pytest frameworks instead. @@ -280,8 +284,9 @@ def deprecate_decorator(f): # import time overhead at actual test-time. import nose + # Numpy 1.21, 2020-12-20 warnings.warn('the np.testing.dec decorators are included for nose support, and are ' - 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) def _deprecated_imp(*args, **kwargs): # Poor man's replacement for the with statement @@ -301,7 +306,7 @@ def _deprecated_imp(*args, **kwargs): def parametrize(vars, input): """ - .. deprecated:: 1.20 + .. deprecated:: 1.21 This decorator is retained for compatibility with the nose testing framework, which is being phased out. Please use the nose2 or pytest frameworks instead. @@ -317,8 +322,9 @@ def parametrize(vars, input): """ from .parameterized import parameterized + # Numpy 1.21, 2020-12-20 warnings.warn('the np.testing.dec decorators are included for nose support, and are ' - 'deprecated since NumPy v1.20. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + 'deprecated since NumPy v1.21. 
Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) return parameterized(input) From b05294c169438d4e230374f0e9677682d84c0094 Mon Sep 17 00:00:00 2001 From: Ryan Polley Date: Sun, 20 Dec 2020 23:25:19 -0600 Subject: [PATCH 0239/1270] TST: move np.testing.dec tests to test_deprecations.py --- numpy/core/tests/test_deprecations.py | 214 ++++++++++++++++++++++++- numpy/testing/tests/test_decorators.py | 210 ------------------------ 2 files changed, 213 insertions(+), 211 deletions(-) delete mode 100644 numpy/testing/tests/test_decorators.py diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index a67fe62c3d9b..f6f3a3d42df0 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -13,7 +13,7 @@ import numpy as np from numpy.testing import ( - assert_raises, assert_warns, assert_, assert_array_equal + assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, KnownFailureException ) from numpy.core._multiarray_tests import fromstring_null_term_c_api @@ -785,3 +785,215 @@ class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase): def test_deprecated(self): ctor = np.core.multiarray.scalar self.assert_deprecated(lambda: ctor(np.dtype("O"), 1)) + +try: + with warnings.catch_warnings(): + warnings.simplefilter("always") + import nose # noqa: F401 +except ImportError: + HAVE_NOSE = False +else: + HAVE_NOSE = True + + +@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose") +class TestNoseDecoratorsDeprecated(_DeprecationTestCase): + class DidntSkipException(Exception): + pass + + def test_slow(self): + def _test_slow(): + @np.testing.dec.slow + def slow_func(x, y, z): + pass + + assert_(slow_func.slow) + self.assert_deprecated(_test_slow) + + def test_setastest(self): + def _test_setastest(): + @np.testing.dec.setastest() + def f_default(a): + pass + + @np.testing.dec.setastest(True) + def f_istest(a): + pass + + @np.testing.dec.setastest(False) + def f_isnottest(a): + pass + + assert_(f_default.__test__) + assert_(f_istest.__test__) + assert_(not f_isnottest.__test__) + self.assert_deprecated(_test_setastest, num=3) + + def test_skip_functions_hardcoded(self): + def _test_skip_functions_hardcoded(): + @np.testing.dec.skipif(True) + def f1(x): + raise self.DidntSkipException + + try: + f1('a') + except self.DidntSkipException: + raise Exception('Failed to skip') + except SkipTest().__class__: + pass + + @np.testing.dec.skipif(False) + def f2(x): + raise self.DidntSkipException + + try: + f2('a') + except self.DidntSkipException: + pass + except SkipTest().__class__: + raise Exception('Skipped when not expected to') + self.assert_deprecated(_test_skip_functions_hardcoded, num=2) + + def test_skip_functions_callable(self): + def _test_skip_functions_callable(): + def skip_tester(): + return skip_flag == 'skip me!' + + @np.testing.dec.skipif(skip_tester) + def f1(x): + raise self.DidntSkipException + + try: + skip_flag = 'skip me!' + f1('a') + except self.DidntSkipException: + raise Exception('Failed to skip') + except SkipTest().__class__: + pass + + @np.testing.dec.skipif(skip_tester) + def f2(x): + raise self.DidntSkipException + + try: + skip_flag = 'five is right out!' 
+ f2('a') + except self.DidntSkipException: + pass + except SkipTest().__class__: + raise Exception('Skipped when not expected to') + self.assert_deprecated(_test_skip_functions_callable, num=2) + + def test_skip_generators_hardcoded(self): + def _test_skip_generators_hardcoded(): + @np.testing.dec.knownfailureif(True, "This test is known to fail") + def g1(x): + yield from range(x) + + try: + for j in g1(10): + pass + except KnownFailureException().__class__: + pass + else: + raise Exception('Failed to mark as known failure') + + @np.testing.dec.knownfailureif(False, "This test is NOT known to fail") + def g2(x): + yield from range(x) + raise self.DidntSkipException('FAIL') + + try: + for j in g2(10): + pass + except KnownFailureException().__class__: + raise Exception('Marked incorrectly as known failure') + except self.DidntSkipException: + pass + self.assert_deprecated(_test_skip_generators_hardcoded, num=2) + + def test_skip_generators_callable(self): + def _test_skip_generators_callable(): + def skip_tester(): + return skip_flag == 'skip me!' + + @np.testing.dec.knownfailureif(skip_tester, "This test is known to fail") + def g1(x): + yield from range(x) + + try: + skip_flag = 'skip me!' + for j in g1(10): + pass + except KnownFailureException().__class__: + pass + else: + raise Exception('Failed to mark as known failure') + + @np.testing.dec.knownfailureif(skip_tester, "This test is NOT known to fail") + def g2(x): + yield from range(x) + raise self.DidntSkipException('FAIL') + + try: + skip_flag = 'do not skip' + for j in g2(10): + pass + except KnownFailureException().__class__: + raise Exception('Marked incorrectly as known failure') + except self.DidntSkipException: + pass + self.assert_deprecated(_test_skip_generators_callable, num=2) + + def test_deprecated(self): + def _test_deprecated(): + @np.testing.dec.deprecated(True) + def non_deprecated_func(): + pass + + @np.testing.dec.deprecated() + def deprecated_func(): + import warnings + warnings.warn("TEST: deprecated func", DeprecationWarning) + + @np.testing.dec.deprecated() + def deprecated_func2(): + import warnings + warnings.warn("AHHHH") + raise ValueError + + @np.testing.dec.deprecated() + def deprecated_func3(): + import warnings + warnings.warn("AHHHH") + + # marked as deprecated, but does not raise DeprecationWarning + assert_raises(AssertionError, non_deprecated_func) + # should be silent + deprecated_func() + with warnings.catch_warnings(record=True): + warnings.simplefilter("always") # do not propagate unrelated warnings + # fails if deprecated decorator just disables test. See #1453. + assert_raises(ValueError, deprecated_func2) + # warning is not a DeprecationWarning + assert_raises(AssertionError, deprecated_func3) + self.assert_deprecated(_test_deprecated, num=4) + + def test_parametrize(self): + def _test_parametrize(): + # dec.parametrize assumes that it is being run by nose. Because + # we are running under pytest, we need to explicitly check the + # results. 
+ @np.testing.dec.parametrize('base, power, expected', + [(1, 1, 1), + (2, 1, 2), + (2, 2, 4)]) + def check_parametrize(base, power, expected): + assert_(base**power == expected) + + count = 0 + for test in check_parametrize(): + test[0](*test[1:]) + count += 1 + assert_(count == 3) + self.assert_deprecated(_test_parametrize) diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py deleted file mode 100644 index b60d6dfbce90..000000000000 --- a/numpy/testing/tests/test_decorators.py +++ /dev/null @@ -1,210 +0,0 @@ -""" -Test the decorators from ``testing.decorators``. - -""" -import warnings -import pytest - -from numpy.testing import ( - assert_, assert_raises, dec, SkipTest, KnownFailureException, - ) - - -try: - with warnings.catch_warnings(): - warnings.simplefilter("always") - import nose # noqa: F401 -except ImportError: - HAVE_NOSE = False -else: - HAVE_NOSE = True - - -@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose") -class TestNoseDecorators: - # These tests are run in a class for simplicity while still - # getting a report on each, skipped or success. - - class DidntSkipException(Exception): - pass - - def test_slow(self): - @dec.slow - def slow_func(x, y, z): - pass - - assert_(slow_func.slow) - - def test_setastest(self): - @dec.setastest() - def f_default(a): - pass - - @dec.setastest(True) - def f_istest(a): - pass - - @dec.setastest(False) - def f_isnottest(a): - pass - - assert_(f_default.__test__) - assert_(f_istest.__test__) - assert_(not f_isnottest.__test__) - - def test_skip_functions_hardcoded(self): - @dec.skipif(True) - def f1(x): - raise self.DidntSkipException - - try: - f1('a') - except self.DidntSkipException: - raise Exception('Failed to skip') - except SkipTest().__class__: - pass - - @dec.skipif(False) - def f2(x): - raise self.DidntSkipException - - try: - f2('a') - except self.DidntSkipException: - pass - except SkipTest().__class__: - raise Exception('Skipped when not expected to') - - def test_skip_functions_callable(self): - def skip_tester(): - return skip_flag == 'skip me!' - - @dec.skipif(skip_tester) - def f1(x): - raise self.DidntSkipException - - try: - skip_flag = 'skip me!' - f1('a') - except self.DidntSkipException: - raise Exception('Failed to skip') - except SkipTest().__class__: - pass - - @dec.skipif(skip_tester) - def f2(x): - raise self.DidntSkipException - - try: - skip_flag = 'five is right out!' - f2('a') - except self.DidntSkipException: - pass - except SkipTest().__class__: - raise Exception('Skipped when not expected to') - - def test_skip_generators_hardcoded(self): - @dec.knownfailureif(True, "This test is known to fail") - def g1(x): - yield from range(x) - - try: - for j in g1(10): - pass - except KnownFailureException().__class__: - pass - else: - raise Exception('Failed to mark as known failure') - - @dec.knownfailureif(False, "This test is NOT known to fail") - def g2(x): - yield from range(x) - raise self.DidntSkipException('FAIL') - - try: - for j in g2(10): - pass - except KnownFailureException().__class__: - raise Exception('Marked incorrectly as known failure') - except self.DidntSkipException: - pass - - def test_skip_generators_callable(self): - def skip_tester(): - return skip_flag == 'skip me!' - - @dec.knownfailureif(skip_tester, "This test is known to fail") - def g1(x): - yield from range(x) - - try: - skip_flag = 'skip me!' 
- for j in g1(10): - pass - except KnownFailureException().__class__: - pass - else: - raise Exception('Failed to mark as known failure') - - @dec.knownfailureif(skip_tester, "This test is NOT known to fail") - def g2(x): - yield from range(x) - raise self.DidntSkipException('FAIL') - - try: - skip_flag = 'do not skip' - for j in g2(10): - pass - except KnownFailureException().__class__: - raise Exception('Marked incorrectly as known failure') - except self.DidntSkipException: - pass - - def test_deprecated(self): - @dec.deprecated(True) - def non_deprecated_func(): - pass - - @dec.deprecated() - def deprecated_func(): - import warnings - warnings.warn("TEST: deprecated func", DeprecationWarning) - - @dec.deprecated() - def deprecated_func2(): - import warnings - warnings.warn("AHHHH") - raise ValueError - - @dec.deprecated() - def deprecated_func3(): - import warnings - warnings.warn("AHHHH") - - # marked as deprecated, but does not raise DeprecationWarning - assert_raises(AssertionError, non_deprecated_func) - # should be silent - deprecated_func() - with warnings.catch_warnings(record=True): - warnings.simplefilter("always") # do not propagate unrelated warnings - # fails if deprecated decorator just disables test. See #1453. - assert_raises(ValueError, deprecated_func2) - # warning is not a DeprecationWarning - assert_raises(AssertionError, deprecated_func3) - - def test_parametrize(self): - # dec.parametrize assumes that it is being run by nose. Because - # we are running under pytest, we need to explicitly check the - # results. - @dec.parametrize('base, power, expected', - [(1, 1, 1), - (2, 1, 2), - (2, 2, 4)]) - def check_parametrize(base, power, expected): - assert_(base**power == expected) - - count = 0 - for test in check_parametrize(): - test[0](*test[1:]) - count += 1 - assert_(count == 3) From 36d0f17184628377ffa4bfac98a0346d6d10a7da Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 21 Dec 2020 07:30:09 +0000 Subject: [PATCH 0240/1270] MAINT: Bump pytest from 6.2.0 to 6.2.1 Bumps [pytest](https://github.com/pytest-dev/pytest) from 6.2.0 to 6.2.1. 
- [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/master/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/6.2.0...6.2.1) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index e3fc9ccc6764..2a4f64ca5a71 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -2,7 +2,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 hypothesis==5.43.3 -pytest==6.2.0 +pytest==6.2.1 pytz==2020.4 pytest-cov==2.10.1 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' From 0e7ca712891b07341fcd6a01b9834807fd12e49c Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 21 Dec 2020 12:21:16 +0200 Subject: [PATCH 0241/1270] TST: add back sdist test run --- .github/workflows/build_test.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index ee43561bfc9b..ce0c492e6f98 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -199,3 +199,18 @@ jobs: - uses: ./.github/actions + sdist: + needs: smoke_test + runs-on: ubuntu-latest + env: + USE_SDIST: 1 + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + fetch-depth: 0 + - uses: actions/setup-python@v2 + with: + python-version: ${{ env.PYTHON_VERSION }} + - uses: ./.github/actions + From ac8d17df37d199f9ce664c1e5c7207491b9a6e25 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 21 Dec 2020 13:02:59 +0200 Subject: [PATCH 0242/1270] BLD,DOC: pin sphinx to 3.3.1 --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 0650e0a58d3b..0e47198b3a8c 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx>=3.2 +sphinx==3.3.1 numpydoc==1.1.0 ipython scipy From bf1eb1b2965adc75d87e28621b5490dc45768ff0 Mon Sep 17 00:00:00 2001 From: Faris A Chugthai <20028782+farisachugthai@users.noreply.github.com> Date: Mon, 21 Dec 2020 10:01:32 -0500 Subject: [PATCH 0243/1270] DOC: Update TESTS.rst.txt Use python3 style print statements in examples. 
--- doc/TESTS.rst.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index d1af7017bc03..0cb24684fc7f 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -145,7 +145,7 @@ can label it with ``pytest.mark.slow``:: @pytest.mark.slow def test_big(self): - print 'Big, slow test' + print('Big, slow test') Similarly for methods:: @@ -162,21 +162,21 @@ name; thus:: def setup(): """Module-level setup""" - print 'doing setup' + print('doing setup') def teardown(): """Module-level teardown""" - print 'doing teardown' + print('doing teardown') class TestMe: def setup(): """Class-level setup""" - print 'doing setup' + print('doing setup') def teardown(): """Class-level teardown""" - print 'doing teardown' + print('doing teardown') Setup and teardown functions to functions and methods are known as "fixtures", From 864848ebc58d4eb3a5e79ce1ce3c0f07bf77e8f3 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 26 Nov 2020 17:15:26 +0100 Subject: [PATCH 0244/1270] MAINT: Add aliases for commonly used `ArrayLike` objects --- numpy/typing/__init__.py | 18 +++++++- numpy/typing/_array_like.py | 85 ++++++++++++++++++++++++++++++++++--- 2 files changed, 96 insertions(+), 7 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index d9d9557bf465..aec2d460af21 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -219,9 +219,25 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _ScalarLike, _VoidLike, ) -from ._array_like import _SupportsArray, ArrayLike from ._shape import _Shape, _ShapeLike from ._dtype_like import _SupportsDType, _VoidDTypeLike, DTypeLike +from ._array_like import ( + ArrayLike, + _ArrayLike, + _NestedSequence, + _SupportsArray, + _ArrayLikeBool, + _ArrayLikeUInt, + _ArrayLikeInt, + _ArrayLikeFloat, + _ArrayLikeComplex, + _ArrayLikeTD64, + _ArrayLikeDT64, + _ArrayLikeObject, + _ArrayLikeVoid, + _ArrayLikeStr, + _ArrayLikeBytes, +) if __doc__ is not None: from ._add_docstring import _docstrings diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index 63b67b33ab22..d6473442c37b 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -3,8 +3,22 @@ import sys from typing import Any, overload, Sequence, TYPE_CHECKING, Union, TypeVar -from numpy import ndarray, dtype -from ._scalars import _ScalarLike +from numpy import ( + ndarray, + dtype, + generic, + bool_, + unsignedinteger, + integer, + floating, + complexfloating, + timedelta64, + datetime64, + object_, + void, + str_, + bytes_, +) from ._dtype_like import DTypeLike if sys.version_info >= (3, 8): @@ -18,6 +32,7 @@ else: HAVE_PROTOCOL = True +_T = TypeVar("_T") _DType = TypeVar("_DType", bound="dtype[Any]") if TYPE_CHECKING or HAVE_PROTOCOL: @@ -30,6 +45,24 @@ def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... else: _SupportsArray = Any +# TODO: Wait for support for recursive types +_NestedSequence = Union[ + _T, + Sequence[_T], + Sequence[Sequence[_T]], + Sequence[Sequence[Sequence[_T]]], + Sequence[Sequence[Sequence[Sequence[_T]]]], +] +_RecursiveSequence = Sequence[Sequence[Sequence[Sequence[Sequence[Any]]]]] + +# A union representing array-like objects; consists of two typevars: +# One representing types that can be parametrized w.r.t. 
`np.dtype` +# and another one for the rest +_ArrayLike = Union[ + _NestedSequence[_SupportsArray[_DType]], + _NestedSequence[_T], +] + # TODO: support buffer protocols once # # https://bugs.python.org/issue27501 @@ -38,8 +71,48 @@ def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... # # https://github.com/python/typing/issues/593 ArrayLike = Union[ - _ScalarLike, - Sequence[_ScalarLike], - Sequence[Sequence[Any]], # TODO: Wait for support for recursive types - "_SupportsArray[Any]", + _RecursiveSequence, + _ArrayLike[ + "dtype[Any]", + Union[bool, int, float, complex, str, bytes] + ], +] + +# `ArrayLike`: array-like objects that can be coerced into `X` +# given the casting rules `same_kind` +_ArrayLikeBool = _ArrayLike[ + "dtype[bool_]", + bool, +] +_ArrayLikeUInt = _ArrayLike[ + "dtype[Union[bool_, unsignedinteger[Any]]]", + bool, +] +_ArrayLikeInt = _ArrayLike[ + "dtype[Union[bool_, integer[Any]]]", + Union[bool, int], +] +_ArrayLikeFloat = _ArrayLike[ + "dtype[Union[bool_, integer[Any], floating[Any]]]", + Union[bool, int, float], +] +_ArrayLikeComplex = _ArrayLike[ + "dtype[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]", + Union[bool, int, float, complex], +] +_ArrayLikeTD64 = _ArrayLike[ + "dtype[Union[bool_, integer[Any], timedelta64]]", + Union[bool, int], +] +_ArrayLikeDT64 = _NestedSequence[_SupportsArray["dtype[datetime64]"]] +_ArrayLikeObject = _NestedSequence[_SupportsArray["dtype[object_]"]] + +_ArrayLikeVoid = _NestedSequence[_SupportsArray["dtype[void]"]] +_ArrayLikeStr = _ArrayLike[ + "dtype[str_]", + str, +] +_ArrayLikeBytes = _ArrayLike[ + "dtype[bytes_]", + bytes, ] From 50dce51e9fcbf5d80bfdc1bfe2b41fc9a9e9f9cc Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 21 Dec 2020 16:46:25 -0600 Subject: [PATCH 0245/1270] BUG: Fix concatenation when the output is "S" or "U" Previously, the dtype was used, this now assumes that we want to cast to a string of (unknown) length. This is a simplified version of what happens in `np.array()` or `arr.astype()` (it does never inspect the values, e.g. for object casts). This is more complex as I would like, and with the refactor of ResultType and similar can be cleaned up a bit more hopefully. Note that currently, object to "S" or "U" casts simply return length 64 strings, but with the new version, this will be an error (although the error message probably needs improvement). This is a behaviour inherited from other places however. --- numpy/core/src/multiarray/convert_datatype.c | 67 ++++++++++++++++++++ numpy/core/src/multiarray/convert_datatype.h | 4 ++ numpy/core/src/multiarray/multiarraymodule.c | 57 ++++++----------- numpy/core/tests/test_shape_base.py | 28 +++++++- 4 files changed, 116 insertions(+), 40 deletions(-) diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index f9dd35a73e18..5d5b69bd5c5b 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -871,6 +871,73 @@ PyArray_CastDescrToDType(PyArray_Descr *descr, PyArray_DTypeMeta *given_DType) } +/* + * Helper to find the target descriptor for multiple arrays given an input + * one that may be a DType class (e.g. "U" or "S"). + * Works with arrays, since that is what `concatenate` works with. However, + * unlike `np.array(...)` or `arr.astype()` we will never inspect the array's + * content, which means that object arrays can only be cast to strings if a + * fixed width is provided (same for string -> generic datetime). 
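+ *
+ * As a Python-level illustration (mirroring the tests added with this
+ * change, not a new guarantee): ``np.concatenate(([0.], [1]), dtype="S",
+ * casting="unsafe")`` resolves the output descriptor through promotion,
+ * i.e. to ``np.promote_types("float64", "S")`` == ``dtype("S32")``.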
+ * + * As this function uses `PyArray_ExtractDTypeAndDescriptor`, it should + * eventually be refactored to move the step to an earlier point. + */ +NPY_NO_EXPORT PyArray_Descr * +PyArray_FindConcatenationDescriptor( + npy_intp n, PyArrayObject **arrays, PyObject *requested_dtype) +{ + if (requested_dtype == NULL) { + return PyArray_ResultType(n, arrays, 0, NULL); + } + + PyArray_DTypeMeta *common_dtype; + PyArray_Descr *result = NULL; + if (PyArray_ExtractDTypeAndDescriptor( + requested_dtype, &result, &common_dtype) < 0) { + return NULL; + } + if (result != NULL) { + if (result->subarray != NULL) { + PyErr_Format(PyExc_TypeError, + "The dtype `%R` is not a valid dtype for concatenation " + "since it is a subarray dtype (the subarray dimensions " + "would be added as array dimensions).", result); + Py_DECREF(result); + return NULL; + } + goto finish; + } + assert(n > 0); /* concatenate requires at least one array input. */ + PyArray_Descr *descr = PyArray_DESCR(arrays[0]); + result = PyArray_CastDescrToDType(descr, common_dtype); + if (result == NULL || n == 1) { + goto finish; + } + /* + * This could short-cut a bit, calling `common_instance` directly and/or + * returning the `default_descr()` directly. Avoiding that (for now) as + * it would duplicate code from `PyArray_PromoteTypes`. + */ + for (npy_intp i = 1; i < n; i++) { + descr = PyArray_DESCR(arrays[i]); + PyArray_Descr *curr = PyArray_CastDescrToDType(descr, common_dtype); + if (curr == NULL) { + Py_SETREF(result, NULL); + goto finish; + } + Py_SETREF(result, PyArray_PromoteTypes(result, curr)); + Py_DECREF(curr); + if (result == NULL) { + goto finish; + } + } + + finish: + Py_DECREF(common_dtype); + return result; +} + + /** * This function defines the common DType operator. * diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h index cc1930f77db2..97006b952543 100644 --- a/numpy/core/src/multiarray/convert_datatype.h +++ b/numpy/core/src/multiarray/convert_datatype.h @@ -49,6 +49,10 @@ npy_set_invalid_cast_error( NPY_NO_EXPORT PyArray_Descr * PyArray_CastDescrToDType(PyArray_Descr *descr, PyArray_DTypeMeta *given_DType); +NPY_NO_EXPORT PyArray_Descr * +PyArray_FindConcatenationDescriptor( + npy_intp n, PyArrayObject **arrays, PyObject *requested_dtype); + NPY_NO_EXPORT int PyArray_AddCastingImplmentation(PyBoundArrayMethodObject *meth); diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 32c5ac0dc20c..c2c139798a18 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -448,17 +448,10 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, /* Get the priority subtype for the array */ PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays); - - if (dtype == NULL) { - /* Get the resulting dtype from combining all the arrays */ - dtype = (PyArray_Descr *)PyArray_ResultType( - narrays, arrays, 0, NULL); - if (dtype == NULL) { - return NULL; - } - } - else { - Py_INCREF(dtype); + PyArray_Descr *descr = PyArray_FindConcatenationDescriptor( + narrays, arrays, (PyObject *)dtype); + if (descr == NULL) { + return NULL; } /* @@ -467,7 +460,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, * resolution rules matching that of the NpyIter. 
*/ PyArray_CreateMultiSortedStridePerm(narrays, arrays, ndim, strideperm); - s = dtype->elsize; + s = descr->elsize; for (idim = ndim-1; idim >= 0; --idim) { int iperm = strideperm[idim]; strides[iperm] = s; @@ -475,17 +468,13 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, } /* Allocate the array for the result. This steals the 'dtype' reference. */ - ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, - dtype, - ndim, - shape, - strides, - NULL, - 0, - NULL); + ret = (PyArrayObject *)PyArray_NewFromDescr_int( + subtype, descr, ndim, shape, strides, NULL, 0, NULL, + NULL, 0, 1); if (ret == NULL) { return NULL; } + assert(PyArray_DESCR(ret) == descr); } /* @@ -575,32 +564,22 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, /* Get the priority subtype for the array */ PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays); - if (dtype == NULL) { - /* Get the resulting dtype from combining all the arrays */ - dtype = (PyArray_Descr *)PyArray_ResultType( - narrays, arrays, 0, NULL); - if (dtype == NULL) { - return NULL; - } - } - else { - Py_INCREF(dtype); + PyArray_Descr *descr = PyArray_FindConcatenationDescriptor( + narrays, arrays, (PyObject *)dtype); + if (descr == NULL) { + return NULL; } - stride = dtype->elsize; + stride = descr->elsize; /* Allocate the array for the result. This steals the 'dtype' reference. */ - ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, - dtype, - 1, - &shape, - &stride, - NULL, - 0, - NULL); + ret = (PyArrayObject *)PyArray_NewFromDescr_int( + subtype, descr, 1, &shape, &stride, NULL, 0, NULL, + NULL, 0, 1); if (ret == NULL) { return NULL; } + assert(PyArray_DESCR(ret) == descr); } /* diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py index 4e56ace90fb6..9922c91731f2 100644 --- a/numpy/core/tests/test_shape_base.py +++ b/numpy/core/tests/test_shape_base.py @@ -343,7 +343,7 @@ def test_bad_out_shape(self): concatenate((a, b), out=np.empty(4)) @pytest.mark.parametrize("axis", [None, 0]) - @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"]) + @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"]) @pytest.mark.parametrize("casting", ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) def test_out_and_dtype(self, axis, out_dtype, casting): @@ -369,6 +369,32 @@ def test_out_and_dtype(self, axis, out_dtype, casting): with assert_raises(TypeError): concatenate(to_concat, out=out, dtype=out_dtype, axis=axis) + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"]) + @pytest.mark.parametrize("arrs", + [([0.],), ([0.], [1]), ([0], ["string"], [1.])]) + def test_dtype_with_promotion(self, arrs, string_dt, axis): + # Note that U0 and S0 should be deprecated eventually and changed to + # actually give the empty string result (together with `np.array`) + res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe") + assert res.dtype == np.promote_types("d", string_dt) + + @pytest.mark.parametrize("axis", [None, 0]) + def test_string_dtype_does_not_inspect(self, axis): + # The error here currently depends on NPY_USE_NEW_CASTINGIMPL as + # the new version rejects using the "default string length" of 64. + # The new behaviour is better, `np.array()` and `arr.astype()` would + # have to be used instead. 
(currently only raises due to unsafe cast) + with pytest.raises((ValueError, TypeError)): + np.concatenate(([None], [1]), dtype="S", axis=axis) + with pytest.raises((ValueError, TypeError)): + np.concatenate(([None], [1]), dtype="U", axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + def test_subarray_error(self, axis): + with pytest.raises(TypeError, match=".*subarray dtype"): + np.concatenate(([1], [1]), dtype="(2,)i", axis=axis) + def test_stack(): # non-iterable input From c5daaf0683f360143b16bfe6e0ce83c50f5cb757 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 22 Dec 2020 09:44:08 +0800 Subject: [PATCH 0246/1270] remove one inner loops in order to save more cycles. --- numpy/core/src/multiarray/item_selection.c | 50 +++++++++++----------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 0bab1eff98b7..77fff5eb47c3 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2141,6 +2141,7 @@ count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_cou npy_intp lane_max = 0; npyv_u8 vsum8 = npyv_zero_u8(); while (*d < end && lane_max <= max_count - 1) { + // we count zeros because `cmpeq` cheaper than `cmpneq` for most archs npyv_u8 vt = npyv_cvt_u8_b8(npyv_cmpeq_u8(npyv_load_u8(*d), vzero)); vt = npyv_and_u8(vt, vone); vsum8 = npyv_add_u8(vsum8, vt); @@ -2150,46 +2151,39 @@ count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_cou return vsum8; } -static NPY_INLINE NPY_GCC_OPT_3 npyv_u16 +static NPY_INLINE NPY_GCC_OPT_3 npyv_u16x2 count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end, npy_uint16 max_count) { - npyv_u16 vsum16 = npyv_zero_u16(); + npyv_u16x2 vsum16; + vsum16.val[0] = vsum16.val[1] = npyv_zero_u16(); npy_intp lane_max = 0; - while (*d < end && lane_max <= max_count - 2*NPY_MAX_UINT8) { + while (*d < end && lane_max <= max_count - NPY_MAX_UINT8) { npyv_u8 vsum8 = count_zero_bytes_u8(d, end, NPY_MAX_UINT8); npyv_u16x2 part = npyv_expand_u16_u8(vsum8); - vsum16 = npyv_add_u16(vsum16, npyv_add_u16(part.val[0], part.val[1])); - lane_max += 2*NPY_MAX_UINT8; + vsum16.val[0] = npyv_add_u16(vsum16.val[0], part.val[0]); + vsum16.val[1] = npyv_add_u16(vsum16.val[1], part.val[1]); + lane_max += NPY_MAX_UINT8; } return vsum16; } -static NPY_INLINE NPY_GCC_OPT_3 npyv_u32 -count_zero_bytes_u32(const npy_uint8 **d, const npy_uint8 *end, npy_uint32 max_count) -{ - npyv_u32 vsum32 = npyv_zero_u32(); - npy_intp lane_max = 0; - while (*d < end && lane_max <= max_count - 2*NPY_MAX_UINT16) { - npyv_u16 vsum16 = count_zero_bytes_u16(d, end, NPY_MAX_UINT16); - npyv_u32x2 part = npyv_expand_u32_u16(vsum16); - vsum32 = npyv_add_u32(vsum32, npyv_add_u32(part.val[0], part.val[1])); - lane_max += 2*NPY_MAX_UINT16; - } - return vsum32; -} /* * Counts the number of non-zero values in a raw array. - * The one loop process is as follows(take SSE2 with 128bits vector for example): + * The one loop process is shown below(take SSE2 with 128bits vector for example): * |------------16 lanes---------| - * [vsum8] 255 255 255 ... 255 255 255 255 count_zero_bytes_u8: counting 255*16 elements + *[vsum8] 255 255 255 ... 255 255 255 255 count_zero_bytes_u8: counting 255*16 elements * !! * |------------8 lanes---------| - * [vsum16] 65535 65535 65535 ... 65535 count_zero_bytes_u16: counting 65535*8 elements + *[vsum16] 65535 65535 65535 ... 
65535 count_zero_bytes_u16: counting (2*16-1)*16 elements + * 65535 65535 65535 ... 65535 * !! * |------------4 lanes---------| - * [vsum32] 1073741824 ... 1073741824 count_zero_bytes_u32(overflow control): counting 2**32-1 elements + *[sum_32_0] 65535 65535 65535 65535 count_nonzero_bytes + * 65535 65535 65535 65535 + *[sum_32_1] 65535 65535 65535 65535 + * 65535 65535 65535 65535 * !! - * 2**32-1 count_zero_bytes + * (2*16-1)*16 */ static NPY_INLINE NPY_GCC_OPT_3 npy_intp count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) @@ -2197,9 +2191,13 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) npy_intp zero_count = 0; const npy_uint8 *end = d + unrollx; while (d < end) { - // The npyv_nlanes_u32 factor ensures that the sum of all lanes still fits in a uint32 - npyv_u32 vsum32 = count_zero_bytes_u32(&d, end, NPY_MAX_UINT32 / npyv_nlanes_u32); - zero_count += npyv_sum_u32(vsum32); + npyv_u16x2 vsum16 = count_zero_bytes_u16(&d, end, NPY_MAX_UINT16); + npyv_u32x2 sum_32_0 = npyv_expand_u32_u16(vsum16.val[0]); + npyv_u32x2 sum_32_1 = npyv_expand_u32_u16(vsum16.val[1]); + zero_count += npyv_sum_u32(npyv_add_u32( + npyv_add_u32(sum_32_0.val[0], sum_32_0.val[1]), + npyv_add_u32(sum_32_1.val[0], sum_32_1.val[1]) + )); } return unrollx - zero_count; } From ab12dc87f4717fc0fdeca314cd32cd5924928595 Mon Sep 17 00:00:00 2001 From: daaawx Date: Tue, 22 Dec 2020 15:46:24 +0100 Subject: [PATCH 0247/1270] DOC: Update examples in stack docstrings (#18054) The numbers chosen in the stacking examples can make them difficult to interpret. I suggest using different numbers to make it unambiguous. --- numpy/core/shape_base.py | 42 ++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index e4dc30d4c86e..e90358ba5f53 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -258,20 +258,20 @@ def vstack(tup): Examples -------- >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) + >>> b = np.array([4, 5, 6]) >>> np.vstack((a,b)) array([[1, 2, 3], - [2, 3, 4]]) + [4, 5, 6]]) >>> a = np.array([[1], [2], [3]]) - >>> b = np.array([[2], [3], [4]]) + >>> b = np.array([[4], [5], [6]]) >>> np.vstack((a,b)) array([[1], [2], [3], - [2], - [3], - [4]]) + [4], + [5], + [6]]) """ if not overrides.ARRAY_FUNCTION_ENABLED: @@ -321,15 +321,15 @@ def hstack(tup): Examples -------- >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.hstack((a,b)) - array([1, 2, 3, 2, 3, 4]) + array([1, 2, 3, 4, 5, 6]) >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) + >>> b = np.array([[4],[5],[6]]) >>> np.hstack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1, 4], + [2, 5], + [3, 6]]) """ if not overrides.ARRAY_FUNCTION_ENABLED: @@ -403,15 +403,15 @@ def stack(arrays, axis=0, out=None): (3, 4, 10) >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) + >>> b = np.array([4, 5, 6]) >>> np.stack((a, b)) array([[1, 2, 3], - [2, 3, 4]]) + [4, 5, 6]]) >>> np.stack((a, b), axis=-1) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1, 4], + [2, 5], + [3, 6]]) """ if not overrides.ARRAY_FUNCTION_ENABLED: @@ -786,9 +786,9 @@ def block(arrays): array([1, 2, 3]) >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) + >>> b = np.array([4, 5, 6]) >>> np.block([a, b, 10]) # hstack([a, b, 10]) - array([ 1, 2, 3, 2, 3, 4, 10]) + array([ 1, 2, 3, 4, 5, 6, 10]) >>> A = np.ones((2, 2), int) >>> B = 2 * A @@ -799,10 +799,10 @@ def block(arrays): With a list of 
depth 2, `block` can be used in place of `vstack`: >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) + >>> b = np.array([4, 5, 6]) >>> np.block([[a], [b]]) # vstack([a, b]) array([[1, 2, 3], - [2, 3, 4]]) + [4, 5, 6]]) >>> A = np.ones((2, 2), int) >>> B = 2 * A From dd25816ccb9701af74895c95d3f0f2d31abbdf01 Mon Sep 17 00:00:00 2001 From: Ryan Polley Date: Tue, 22 Dec 2020 10:15:20 -0600 Subject: [PATCH 0248/1270] TST: add stacklevel parameter to test calls to warnings.warn For whatever reason the build system doesn't like the test calls to warnings.warn with no stacklevel parameter inside the tests that I moved. --- numpy/core/tests/test_deprecations.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index f6f3a3d42df0..25cd3d22949a 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -954,18 +954,18 @@ def non_deprecated_func(): @np.testing.dec.deprecated() def deprecated_func(): import warnings - warnings.warn("TEST: deprecated func", DeprecationWarning) + warnings.warn("TEST: deprecated func", DeprecationWarning, stacklevel=1) @np.testing.dec.deprecated() def deprecated_func2(): import warnings - warnings.warn("AHHHH") + warnings.warn("AHHHH", stacklevel=1) raise ValueError @np.testing.dec.deprecated() def deprecated_func3(): import warnings - warnings.warn("AHHHH") + warnings.warn("AHHHH", stacklevel=1) # marked as deprecated, but does not raise DeprecationWarning assert_raises(AssertionError, non_deprecated_func) From a5285349805c5e794700a3dfcb378b391c53b0c8 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 16 Nov 2020 19:08:49 +0100 Subject: [PATCH 0249/1270] ENH: Added a mypy plugin for handling platform-specific `np.number` precisions --- numpy/__init__.pyi | 85 +++++++++++++---------- numpy/typing/__init__.py | 34 +++++++++ numpy/typing/_nbit.py | 16 +++++ numpy/typing/mypy_plugin.py | 58 ++++++++++++++++ numpy/typing/tests/data/mypy.ini | 2 +- numpy/typing/tests/data/pass/scalars.py | 35 ++++++++++ numpy/typing/tests/data/reveal/scalars.py | 36 ++++++++++ 7 files changed, 229 insertions(+), 37 deletions(-) create mode 100644 numpy/typing/_nbit.py create mode 100644 numpy/typing/mypy_plugin.py diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 3165a6319265..6df10d7a7568 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -21,10 +21,24 @@ from numpy.typing import ( _SupportsDType, _VoidDTypeLike, NBitBase, + _256Bit, + _128Bit, + _96Bit, + _80Bit, _64Bit, _32Bit, _16Bit, _8Bit, + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitInt, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, ) from numpy.typing._callable import ( _BoolOp, @@ -311,29 +325,22 @@ broadcast_to: Any busday_count: Any busday_offset: Any busdaycalendar: Any -byte: Any byte_bounds: Any bytes0: Any c_: Any can_cast: Any cast: Any -cdouble: Any -cfloat: Any chararray: Any -clongdouble: Any -clongfloat: Any column_stack: Any common_type: Any compare_chararrays: Any complex256: Any -complex_: Any concatenate: Any conj: Any copy: Any copyto: Any corrcoef: Any cov: Any -csingle: Any cumproduct: Any datetime_as_string: Any datetime_data: Any @@ -349,7 +356,6 @@ digitize: Any disp: Any divide: Any dot: Any -double: Any dsplit: Any dstack: Any ediff1d: Any @@ -365,7 +371,6 @@ flip: Any fliplr: Any flipud: Any float128: Any -float_: Any format_parser: Any frombuffer: Any fromfile: Any @@ -377,7 +382,6 @@ genfromtxt: 
Any get_include: Any geterrobj: Any gradient: Any -half: Any hamming: Any hanning: Any histogram: Any @@ -393,12 +397,8 @@ index_exp: Any info: Any inner: Any insert: Any -int0: Any -int_: Any -intc: Any interp: Any intersect1d: Any -intp: Any is_busday: Any iscomplex: Any iscomplexobj: Any @@ -415,10 +415,6 @@ lexsort: Any load: Any loads: Any loadtxt: Any -longcomplex: Any -longdouble: Any -longfloat: Any -longlong: Any lookfor: Any mafromtxt: Any mask_indices: Any @@ -504,11 +500,8 @@ setdiff1d: Any seterrobj: Any setxor1d: Any shares_memory: Any -short: Any show_config: Any sinc: Any -single: Any -singlecomplex: Any sort_complex: Any source: Any split: Any @@ -527,18 +520,11 @@ triu_indices_from: Any typeDict: Any typecodes: Any typename: Any -ubyte: Any -uint: Any -uint0: Any -uintc: Any -uintp: Any -ulonglong: Any union1d: Any unique: Any unpackbits: Any unravel_index: Any unwrap: Any -ushort: Any vander: Any vdot: Any vectorize: Any @@ -1853,6 +1839,14 @@ int16 = signedinteger[_16Bit] int32 = signedinteger[_32Bit] int64 = signedinteger[_64Bit] +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int0 = signedinteger[_NBitIntP] +int_ = signedinteger[_NBitInt] +longlong = signedinteger[_NBitLongLong] + class timedelta64(generic): def __init__( self, @@ -1917,6 +1911,14 @@ uint16 = unsignedinteger[_16Bit] uint32 = unsignedinteger[_32Bit] uint64 = unsignedinteger[_64Bit] +ubyte = unsignedinteger[_NBitByte] +ushort = unsignedinteger[_NBitShort] +uintc = unsignedinteger[_NBitIntC] +uintp = unsignedinteger[_NBitIntP] +uint0 = unsignedinteger[_NBitIntP] +uint = unsignedinteger[_NBitInt] +ulonglong = unsignedinteger[_NBitLongLong] + class inexact(number[_NBit_co]): ... # type: ignore _IntType = TypeVar("_IntType", bound=integer) @@ -1945,6 +1947,13 @@ float16 = floating[_16Bit] float32 = floating[_32Bit] float64 = floating[_64Bit] +half = floating[_NBitHalf] +single = floating[_NBitSingle] +double = floating[_NBitDouble] +float_ = floating[_NBitDouble] +longdouble = floating[_NBitLongDouble] +longfloat = floating[_NBitLongDouble] + # The main reason for `complexfloating` having two typevars is cosmetic. # It is used to clarify why `complex128`s precision is `_64Bit`, the latter # describing the two 64 bit floats representing its real and imaginary component @@ -1972,6 +1981,15 @@ class complexfloating(inexact[_NBit_co], Generic[_NBit_co, _NBit_co2]): complex64 = complexfloating[_32Bit, _32Bit] complex128 = complexfloating[_64Bit, _64Bit] +csingle = complexfloating[_NBitSingle, _NBitSingle] +singlecomplex = complexfloating[_NBitSingle, _NBitSingle] +cdouble = complexfloating[_NBitDouble, _NBitDouble] +complex_ = complexfloating[_NBitDouble, _NBitDouble] +cfloat = complexfloating[_NBitDouble, _NBitDouble] +clongdouble = complexfloating[_NBitLongDouble, _NBitLongDouble] +clongfloat = complexfloating[_NBitLongDouble, _NBitLongDouble] +longcomplex = complexfloating[_NBitLongDouble, _NBitLongDouble] + class flexible(generic): ... # type: ignore class void(flexible): @@ -2007,15 +2025,10 @@ class str_(character, str): self, __value: bytes, encoding: str = ..., errors: str = ... ) -> None: ... 
-unicode_ = str0 = str_ +unicode_ = str_ +str0 = str_ -# TODO(alan): Platform dependent types -# longcomplex, longdouble, longfloat -# bytes, short, intc, intp, longlong -# half, single, double, longdouble -# uint_, int_, float_, complex_ -# float128, complex256 -# float96 +# TODO: Platform dependent types: float128, complex256, float96 def array( object: object, diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index d9d9557bf465..694046609054 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -19,6 +19,28 @@ .. _typing-extensions: https://pypi.org/project/typing-extensions/ +Mypy plugin +----------- + +A mypy_ plugin is available for automatically assigning the (platform-dependent) +precisions of certain `~numpy.number` subclasses, including the likes of +`~numpy.int_`, `~numpy.intp` and `~numpy.longlong`. See the documentation on +:ref:`scalar types ` for a comprehensive overview +of the affected classes. + +Note that while usage of the plugin is completely optional, without it the +precision of above-mentioned classes will be inferred as `~typing.Any`. + +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + +.. _mypy: http://mypy-lang.org/ +.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html + Differences from the runtime NumPy API -------------------------------------- @@ -207,6 +229,18 @@ class _8Bit(_16Bit): ... # type: ignore[misc] # Clean up the namespace del TYPE_CHECKING, final, List +from ._nbit import ( + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitInt, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, +) from ._scalars import ( _CharLike, _BoolLike, diff --git a/numpy/typing/_nbit.py b/numpy/typing/_nbit.py new file mode 100644 index 000000000000..b8d35db4f594 --- /dev/null +++ b/numpy/typing/_nbit.py @@ -0,0 +1,16 @@ +"""A module with the precisions of platform-specific `~numpy.number`s.""" + +from typing import Any + +# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin +_NBitByte = Any +_NBitShort = Any +_NBitIntC = Any +_NBitIntP = Any +_NBitInt = Any +_NBitLongLong = Any + +_NBitHalf = Any +_NBitSingle = Any +_NBitDouble = Any +_NBitLongDouble = Any diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py new file mode 100644 index 000000000000..023ab5fd18f3 --- /dev/null +++ b/numpy/typing/mypy_plugin.py @@ -0,0 +1,58 @@ +"""A module containing `numpy`-specific plugins for mypy.""" + +import typing as t + +import numpy as np + +import mypy.types +from mypy.types import Type +from mypy.plugin import Plugin, AnalyzeTypeContext + +HookFunc = t.Callable[[AnalyzeTypeContext], Type] + + +def _get_precision_dict() -> t.Dict[str, str]: + names = [ + ("_NBitByte", np.byte), + ("_NBitShort", np.short), + ("_NBitIntC", np.intc), + ("_NBitIntP", np.intp), + ("_NBitInt", np.int_), + ("_NBitLongLong", np.longlong), + + ("_NBitHalf", np.half), + ("_NBitSingle", np.single), + ("_NBitDouble", np.double), + ("_NBitLongDouble", np.longdouble), + ] + ret = {} + for name, typ in names: + n: int = 8 * typ().dtype.alignment + ret[f'numpy.typing._nbit.{name}'] = f"numpy._{n}Bit" + return ret + + +#: A dictionary mapping type-aliases in `numpy.typing._nbit` to +#: concrete `numpy.typing.NBitBase` subclasses. 
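+#: For example, on a typical 64-bit platform ``np.intp().dtype.alignment``
+#: is 8, so ``"numpy.typing._nbit._NBitIntP"`` maps to ``"numpy._64Bit"``
+#: (illustration; the exact values are platform dependent).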
+_PRECISION_DICT = _get_precision_dict() + + +def _hook(ctx: AnalyzeTypeContext) -> Type: + """Replace a type-alias with a concrete ``NBitBase`` subclass.""" + typ, _, api = ctx + name = typ.name.split(".")[-1] + name_new = _PRECISION_DICT[f"numpy.typing._nbit.{name}"] + return api.named_type(name_new) + + +class _NumpyPlugin(Plugin): + """A plugin for assigning platform-specific `numpy.number` precisions.""" + + def get_type_analyze_hook(self, fullname: str) -> t.Optional[HookFunc]: + if fullname in _PRECISION_DICT: + return _hook + return None + + +def plugin(version: str) -> t.Type[_NumpyPlugin]: + return _NumpyPlugin diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index 91d93588af15..35cfbec89ec2 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -1,5 +1,5 @@ [mypy] -mypy_path = ../../.. +plugins = numpy.typing.mypy_plugin [mypy-numpy] ignore_errors = True diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index b7f7880e4561..2f2643e8eb33 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -163,3 +163,38 @@ def __float__(self) -> float: c16.squeeze() c16.byteswap() c16.transpose() + +# Aliases +np.str0() + +np.byte() +np.short() +np.intc() +np.intp() +np.int0() +np.int_() +np.longlong() + +np.ubyte() +np.ushort() +np.uintc() +np.uintp() +np.uint0() +np.uint() +np.ulonglong() + +np.half() +np.single() +np.double() +np.float_() +np.longdouble() +np.longfloat() + +np.csingle() +np.singlecomplex() +np.cdouble() +np.complex_() +np.cfloat() +np.clongdouble() +np.clongfloat() +np.longcomplex() diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py index e887e302d4f4..841b1473d3a9 100644 --- a/numpy/typing/tests/data/reveal/scalars.py +++ b/numpy/typing/tests/data/reveal/scalars.py @@ -26,3 +26,39 @@ reveal_type(np.unicode_('foo')) # E: numpy.str_ reveal_type(np.str0('foo')) # E: numpy.str_ + +# Aliases +reveal_type(np.unicode_()) # E: numpy.str_ +reveal_type(np.str0()) # E: numpy.str_ + +reveal_type(np.byte()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.short()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.intc()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.intp()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.int0()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.int_()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.longlong()) # E: numpy.signedinteger[numpy.typing._ + +reveal_type(np.ubyte()) # E: numpy.unsignedinteger[numpy.typing._ +reveal_type(np.ushort()) # E: numpy.unsignedinteger[numpy.typing._ +reveal_type(np.uintc()) # E: numpy.unsignedinteger[numpy.typing._ +reveal_type(np.uintp()) # E: numpy.unsignedinteger[numpy.typing._ +reveal_type(np.uint0()) # E: numpy.unsignedinteger[numpy.typing._ +reveal_type(np.uint()) # E: numpy.unsignedinteger[numpy.typing._ +reveal_type(np.ulonglong()) # E: numpy.unsignedinteger[numpy.typing._ + +reveal_type(np.half()) # E: numpy.floating[numpy.typing._ +reveal_type(np.single()) # E: numpy.floating[numpy.typing._ +reveal_type(np.double()) # E: numpy.floating[numpy.typing._ +reveal_type(np.float_()) # E: numpy.floating[numpy.typing._ +reveal_type(np.longdouble()) # E: numpy.floating[numpy.typing._ +reveal_type(np.longfloat()) # E: numpy.floating[numpy.typing._ + +reveal_type(np.csingle()) # E: numpy.complexfloating[numpy.typing._ +reveal_type(np.singlecomplex()) # E: 
numpy.complexfloating[numpy.typing._ +reveal_type(np.cdouble()) # E: numpy.complexfloating[numpy.typing._ +reveal_type(np.complex_()) # E: numpy.complexfloating[numpy.typing._ +reveal_type(np.cfloat()) # E: numpy.complexfloating[numpy.typing._ +reveal_type(np.clongdouble()) # E: numpy.complexfloating[numpy.typing._ +reveal_type(np.clongfloat()) # E: numpy.complexfloating[numpy.typing._ +reveal_type(np.longcomplex()) # E: numpy.complexfloating[numpy.typing._ From edacfda3da401c56f1eb32d58de08581e03f84d6 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 16 Nov 2020 19:08:49 +0100 Subject: [PATCH 0250/1270] ENH: Centralize all `DTypeLike` string aliases in `numpy.typing` --- numpy/__init__.pyi | 372 ++++++++++++------------------------ numpy/typing/__init__.py | 41 ++++ numpy/typing/_char_codes.py | 115 +++++++++++ 3 files changed, 282 insertions(+), 246 deletions(-) create mode 100644 numpy/typing/_char_codes.py diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6df10d7a7568..514f4dc4a1cf 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -40,6 +40,47 @@ from numpy.typing import ( _NBitDouble, _NBitLongDouble, ) +from numpy.typing import ( + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, +) from numpy.typing._callable import ( _BoolOp, _BoolBitOp, @@ -555,269 +596,108 @@ class dtype(Generic[_DTypeScalar]): # # so we have to make sure the overloads for the narrowest type is # first. + # Builtin types @overload - def __new__( - cls, - dtype: Union[ - Type[bool], - Literal[ - "?", - "=?", - "?", - "bool", - "bool_", - ], - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[bool_]: ... + def __new__(cls, dtype: Type[bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... @overload - def __new__( - cls, - dtype: Literal[ - "uint8", - "u1", - "=u1", - "u1", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[uint8]: ... + def __new__(cls, dtype: Type[int], align: bool = ..., copy: bool = ...) -> dtype[int_]: ... @overload - def __new__( - cls, - dtype: Literal[ - "uint16", - "u2", - "=u2", - "u2", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[uint16]: ... + def __new__(cls, dtype: Optional[Type[float]], align: bool = ..., copy: bool = ...) -> dtype[float_]: ... @overload - def __new__( - cls, - dtype: Literal[ - "uint32", - "u4", - "=u4", - "u4", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[uint32]: ... + def __new__(cls, dtype: Type[complex], align: bool = ..., copy: bool = ...) -> dtype[complex_]: ... @overload - def __new__( - cls, - dtype: Literal[ - "uint64", - "u8", - "=u8", - "u8", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[uint64]: ... + def __new__(cls, dtype: Type[str], align: bool = ..., copy: bool = ...) -> dtype[str_]: ... @overload - def __new__( - cls, - dtype: Literal[ - "int8", - "i1", - "=i1", - "i1", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[int8]: ... 
+ def __new__(cls, dtype: Type[bytes], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... + + # `unsignedinteger` string-based representations @overload - def __new__( - cls, - dtype: Literal[ - "int16", - "i2", - "=i2", - "i2", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[int16]: ... + def __new__(cls, dtype: _UInt8Codes, align: bool = ..., copy: bool = ...) -> dtype[uint8]: ... @overload - def __new__( - cls, - dtype: Literal[ - "int32", - "i4", - "=i4", - "i4", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[int32]: ... + def __new__(cls, dtype: _UInt16Codes, align: bool = ..., copy: bool = ...) -> dtype[uint16]: ... @overload - def __new__( - cls, - dtype: Literal[ - "int64", - "i8", - "=i8", - "i8", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[int64]: ... - # "int"/int resolve to int_, which is system dependent and as of - # now untyped. Long-term we'll do something fancier here. + def __new__(cls, dtype: _UInt32Codes, align: bool = ..., copy: bool = ...) -> dtype[uint32]: ... @overload - def __new__( - cls, - dtype: Union[Type[int], Literal["int"]], - align: bool = ..., - copy: bool = ..., - ) -> dtype: ... + def __new__(cls, dtype: _UInt64Codes, align: bool = ..., copy: bool = ...) -> dtype[uint64]: ... @overload - def __new__( - cls, - dtype: Literal[ - "float16", - "f4", - "=f4", - "f4", - "e", - "=e", - "e", - "half", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[float16]: ... + def __new__(cls, dtype: _UByteCodes, align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ... @overload - def __new__( - cls, - dtype: Literal[ - "float32", - "f4", - "=f4", - "f4", - "f", - "=f", - "f", - "single", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[float32]: ... + def __new__(cls, dtype: _UShortCodes, align: bool = ..., copy: bool = ...) -> dtype[ushort]: ... @overload - def __new__( - cls, - dtype: Union[ - None, - Type[float], - Literal[ - "float64", - "f8", - "=f8", - "f8", - "d", - "d", - "float", - "double", - "float_", - ], - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[float64]: ... + def __new__(cls, dtype: _UIntCCodes, align: bool = ..., copy: bool = ...) -> dtype[uintc]: ... @overload - def __new__( - cls, - dtype: Literal[ - "complex64", - "c8", - "=c8", - "c8", - "F", - "=F", - "F", - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[complex64]: ... + def __new__(cls, dtype: _UIntPCodes, align: bool = ..., copy: bool = ...) -> dtype[uintp]: ... @overload - def __new__( - cls, - dtype: Union[ - Type[complex], - Literal[ - "complex128", - "c16", - "=c16", - "c16", - "D", - "=D", - "D", - ], - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[complex128]: ... + def __new__(cls, dtype: _UIntCodes, align: bool = ..., copy: bool = ...) -> dtype[uint]: ... @overload - def __new__( - cls, - dtype: Union[ - Type[bytes], - Literal[ - "S", - "=S", - "S", - "bytes", - "bytes_", - "bytes0", - ], - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[bytes_]: ... + def __new__(cls, dtype: _ULongLongCodes, align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ... + + # `signedinteger` string-based representations @overload - def __new__( - cls, - dtype: Union[ - Type[str], - Literal[ - "U", - "=U", - # U intentionally not included; they are not - # the same dtype and which one dtype("U") translates - # to is platform-dependent. - "str", - "str_", - "str0", - ], - ], - align: bool = ..., - copy: bool = ..., - ) -> dtype[str_]: ... 
+ def __new__(cls, dtype: _Int8Codes, align: bool = ..., copy: bool = ...) -> dtype[int8]: ... + @overload + def __new__(cls, dtype: _Int16Codes, align: bool = ..., copy: bool = ...) -> dtype[int16]: ... + @overload + def __new__(cls, dtype: _Int32Codes, align: bool = ..., copy: bool = ...) -> dtype[int32]: ... + @overload + def __new__(cls, dtype: _Int64Codes, align: bool = ..., copy: bool = ...) -> dtype[int64]: ... + @overload + def __new__(cls, dtype: _ByteCodes, align: bool = ..., copy: bool = ...) -> dtype[byte]: ... + @overload + def __new__(cls, dtype: _ShortCodes, align: bool = ..., copy: bool = ...) -> dtype[short]: ... + @overload + def __new__(cls, dtype: _IntCCodes, align: bool = ..., copy: bool = ...) -> dtype[intc]: ... + @overload + def __new__(cls, dtype: _IntPCodes, align: bool = ..., copy: bool = ...) -> dtype[intp]: ... + @overload + def __new__(cls, dtype: _IntCodes, align: bool = ..., copy: bool = ...) -> dtype[int_]: ... + @overload + def __new__(cls, dtype: _LongLongCodes, align: bool = ..., copy: bool = ...) -> dtype[longlong]: ... + + # `floating` string-based representations + @overload + def __new__(cls, dtype: _Float16Codes, align: bool = ..., copy: bool = ...) -> dtype[float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes, align: bool = ..., copy: bool = ...) -> dtype[float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes, align: bool = ..., copy: bool = ...) -> dtype[float64]: ... + @overload + def __new__(cls, dtype: _HalfCodes, align: bool = ..., copy: bool = ...) -> dtype[half]: ... + @overload + def __new__(cls, dtype: _SingleCodes, align: bool = ..., copy: bool = ...) -> dtype[single]: ... + @overload + def __new__(cls, dtype: _DoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[double]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ... + + # `complexfloating` string-based representations + @overload + def __new__(cls, dtype: _Complex64Codes, align: bool = ..., copy: bool = ...) -> dtype[complex64]: ... + @overload + def __new__(cls, dtype: _Complex128Codes, align: bool = ..., copy: bool = ...) -> dtype[complex128]: ... + @overload + def __new__(cls, dtype: _CSingleCodes, align: bool = ..., copy: bool = ...) -> dtype[csingle]: ... + @overload + def __new__(cls, dtype: _CDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[cdouble]: ... + @overload + def __new__(cls, dtype: _CLongDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[clongdouble]: ... + + # Miscellaneous string-based representations + @overload + def __new__(cls, dtype: _BoolCodes, align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... + @overload + def __new__(cls, dtype: _TD64Codes, align: bool = ..., copy: bool = ...) -> dtype[timedelta64]: ... + @overload + def __new__(cls, dtype: _DT64Codes, align: bool = ..., copy: bool = ...) -> dtype[datetime64]: ... + @overload + def __new__(cls, dtype: _StrCodes, align: bool = ..., copy: bool = ...) -> dtype[str_]: ... + @overload + def __new__(cls, dtype: _BytesCodes, align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... + @overload + def __new__(cls, dtype: _VoidCodes, align: bool = ..., copy: bool = ...) -> dtype[void]: ... + @overload + def __new__(cls, dtype: _ObjectCodes, align: bool = ..., copy: bool = ...) -> dtype[object_]: ... 
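+
+    # A few consequences of the overloads above, as a type checker using
+    # these stubs would infer them (illustrative, not exhaustive):
+    #   np.dtype("i8")          -> dtype[int64]
+    #   np.dtype("longdouble")  -> dtype[longdouble]
+    #   np.dtype("datetime64")  -> dtype[datetime64]
+    #   np.dtype("S")           -> dtype[bytes_]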
+ # dtype of a dtype is the same dtype @overload def __new__( diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 694046609054..9f8e27573ba2 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -241,6 +241,47 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _NBitDouble, _NBitLongDouble, ) +from ._char_codes import ( + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, +) from ._scalars import ( _CharLike, _BoolLike, diff --git a/numpy/typing/_char_codes.py b/numpy/typing/_char_codes.py new file mode 100644 index 000000000000..7e5b82cfaf73 --- /dev/null +++ b/numpy/typing/_char_codes.py @@ -0,0 +1,115 @@ +import sys +from typing import Any, TYPE_CHECKING + +if sys.version_info >= (3, 8): + from typing import Literal + HAVE_LITERAL = True +else: + try: + from typing_extensions import Literal + except ImportError: + HAVE_LITERAL = False + else: + HAVE_LITERAL = True + +if TYPE_CHECKING or HAVE_LITERAL: + _BoolCodes = Literal["?", "=?", "?", "bool", "bool_", "bool8"] + + _UInt8Codes = Literal["uint8", "u1", "=u1", "u1"] + _UInt16Codes = Literal["uint16", "u2", "=u2", "u2"] + _UInt32Codes = Literal["uint32", "u4", "=u4", "u4"] + _UInt64Codes = Literal["uint64", "u8", "=u8", "u8"] + + _Int8Codes = Literal["int8", "i1", "=i1", "i1"] + _Int16Codes = Literal["int16", "i2", "=i2", "i2"] + _Int32Codes = Literal["int32", "i4", "=i4", "i4"] + _Int64Codes = Literal["int64", "i8", "=i8", "i8"] + + _Float16Codes = Literal["float16", "f2", "=f2", "f2"] + _Float32Codes = Literal["float32", "f4", "=f4", "f4"] + _Float64Codes = Literal["float64", "f8", "=f8", "f8"] + + _Complex64Codes = Literal["complex64", "c8", "=c8", "c8"] + _Complex128Codes = Literal["complex128", "c16", "=c16", "c16"] + + _ByteCodes = Literal["byte", "b", "=b", "b"] + _ShortCodes = Literal["short", "h", "=h", "h"] + _IntCCodes = Literal["intc", "i", "=i", "i"] + _IntPCodes = Literal["intp", "int0", "p", "=p", "p"] + _IntCodes = Literal["long", "int", "int_", "l", "=l", "l"] + _LongLongCodes = Literal["longlong", "q", "=q", "q"] + + _UByteCodes = Literal["ubyte", "B", "=B", "B"] + _UShortCodes = Literal["ushort", "H", "=H", "H"] + _UIntCCodes = Literal["uintc", "I", "=I", "I"] + _UIntPCodes = Literal["uintp", "uint0", "P", "=P", "P"] + _UIntCodes = Literal["uint", "L", "=L", "L"] + _ULongLongCodes = Literal["ulonglong", "Q", "=Q", "Q"] + + _HalfCodes = Literal["half", "e", "=e", "e"] + _SingleCodes = Literal["single", "f", "=f", "f"] + _DoubleCodes = Literal["double" "float", "float_", "d", "=d", "d"] + _LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "g"] + + _CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "F"] + _CDoubleCodes = Literal["cdouble" "complex", "complex_", "cfloat", "D", "=D", "D"] + _CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "G"] + + _DT64Codes = Literal["datetime64", "M", "=M", "M"] + _TD64Codes = Literal["timedelta64", "m", "=m", "m"] + + _StrCodes = 
Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "U"] + _BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "S"] + _VoidCodes = Literal["void", "void0", "V", "=V", "V"] + _ObjectCodes = Literal["object", "object_", "O", "=O", "O"] + +else: + _BoolCodes = Any + + _UInt8Codes = Any + _UInt16Codes = Any + _UInt32Codes = Any + _UInt64Codes = Any + + _Int8Codes = Any + _Int16Codes = Any + _Int32Codes = Any + _Int64Codes = Any + + _Float16Codes = Any + _Float32Codes = Any + _Float64Codes = Any + + _Complex64Codes = Any + _Complex128Codes = Any + + _ByteCodes = Any + _ShortCodes = Any + _IntCCodes = Any + _IntPCodes = Any + _IntCodes = Any + _LongLongCodes = Any + + _UByteCodes = Any + _UShortCodes = Any + _UIntCCodes = Any + _UIntPCodes = Any + _UIntCodes = Any + _ULongLongCodes = Any + + _HalfCodes = Any + _SingleCodes = Any + _DoubleCodes = Any + _LongDoubleCodes = Any + + _CSingleCodes = Any + _CDoubleCodes = Any + _CLongDoubleCodes = Any + + _DT64Codes = Any + _TD64Codes = Any + + _StrCodes = Any + _BytesCodes = Any + _VoidCodes = Any + _ObjectCodes = Any From 0daed8701f941945a10daf4969843a346c933baf Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 24 Nov 2020 22:55:23 +0100 Subject: [PATCH 0251/1270] ENH: Use `intp` as return type for all appropiate functions Since `intp` is now annotated it can be used in place of `signedinteger[Any]` --- numpy/__init__.pyi | 8 +++---- numpy/core/fromnumeric.pyi | 13 +++++----- numpy/typing/tests/data/reveal/fromnumeric.py | 24 +++++++++---------- .../typing/tests/data/reveal/ndarray_misc.py | 12 +++++----- 4 files changed, 29 insertions(+), 28 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 514f4dc4a1cf..656048173c7d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -970,21 +970,21 @@ class _ArrayOrScalarCommon: keepdims: bool = ..., ) -> _NdArraySubClass: ... @overload - def argmax(self, axis: None = ..., out: None = ...) -> signedinteger: ... + def argmax(self, axis: None = ..., out: None = ...) -> intp: ... @overload def argmax( self, axis: _ShapeLike = ..., out: None = ... - ) -> Union[signedinteger, ndarray]: ... + ) -> Union[ndarray, intp]: ... @overload def argmax( self, axis: Optional[_ShapeLike] = ..., out: _NdArraySubClass = ... ) -> _NdArraySubClass: ... @overload - def argmin(self, axis: None = ..., out: None = ...) -> signedinteger: ... + def argmin(self, axis: None = ..., out: None = ...) -> intp: ... @overload def argmin( self, axis: _ShapeLike = ..., out: None = ... - ) -> Union[signedinteger, ndarray]: ... + ) -> Union[ndarray, intp]: ... @overload def argmin( self, axis: Optional[_ShapeLike] = ..., out: _NdArraySubClass = ... diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi index 66eb3bfb892f..3b147e1d732f 100644 --- a/numpy/core/fromnumeric.pyi +++ b/numpy/core/fromnumeric.pyi @@ -6,6 +6,7 @@ from numpy import ( ndarray, number, integer, + intp, bool_, generic, _OrderKACF, @@ -130,7 +131,7 @@ def argpartition( axis: Optional[int] = ..., kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., -) -> integer: ... +) -> intp: ... @overload def argpartition( a: _ScalarBuiltin, @@ -160,24 +161,24 @@ def argsort( order: Union[None, str, Sequence[str]] = ..., ) -> ndarray: ... @overload -def argmax(a: ArrayLike, axis: None = ..., out: Optional[ndarray] = ...) -> integer: ... +def argmax(a: ArrayLike, axis: None = ..., out: Optional[ndarray] = ...) -> intp: ... @overload def argmax( a: ArrayLike, axis: int = ..., out: Optional[ndarray] = ... 
-) -> Union[integer, ndarray]: ... +) -> Union[ndarray, intp]: ... @overload -def argmin(a: ArrayLike, axis: None = ..., out: Optional[ndarray] = ...) -> integer: ... +def argmin(a: ArrayLike, axis: None = ..., out: Optional[ndarray] = ...) -> intp: ... @overload def argmin( a: ArrayLike, axis: int = ..., out: Optional[ndarray] = ... -) -> Union[integer, ndarray]: ... +) -> Union[ndarray, intp]: ... @overload def searchsorted( a: ArrayLike, v: _Scalar, side: _SortSide = ..., sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array -) -> integer: ... +) -> intp: ... @overload def searchsorted( a: ArrayLike, diff --git a/numpy/typing/tests/data/reveal/fromnumeric.py b/numpy/typing/tests/data/reveal/fromnumeric.py index 2972fa1afaab..2d823b2e2cfc 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.py +++ b/numpy/typing/tests/data/reveal/fromnumeric.py @@ -66,8 +66,8 @@ reveal_type(np.partition(A, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.partition(B, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.argpartition(a, 0)) # E: numpy.integer[Any] -reveal_type(np.argpartition(b, 0)) # E: numpy.integer[Any] +reveal_type(np.argpartition(a, 0)) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.argpartition(b, 0)) # E: numpy.signedinteger[numpy.typing._ reveal_type(np.argpartition(c, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.argpartition(A, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.argpartition(B, 0)) # E: numpy.ndarray[Any, Any] @@ -78,18 +78,18 @@ reveal_type(np.argsort(A, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.argsort(B, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.argmax(A)) # E: numpy.integer[Any] -reveal_type(np.argmax(B)) # E: numpy.integer[Any] -reveal_type(np.argmax(A, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray[Any, Any]] -reveal_type(np.argmax(B, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray[Any, Any]] +reveal_type(np.argmax(A)) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.argmax(B)) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.argmax(A, axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ +reveal_type(np.argmax(B, axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ -reveal_type(np.argmin(A)) # E: numpy.integer[Any] -reveal_type(np.argmin(B)) # E: numpy.integer[Any] -reveal_type(np.argmin(A, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray[Any, Any]] -reveal_type(np.argmin(B, axis=0)) # E: Union[numpy.integer[Any], numpy.ndarray[Any, Any]] +reveal_type(np.argmin(A)) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.argmin(B)) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.argmin(A, axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ +reveal_type(np.argmin(B, axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ -reveal_type(np.searchsorted(A[0], 0)) # E: numpy.integer[Any] -reveal_type(np.searchsorted(B[0], 0)) # E: numpy.integer[Any] +reveal_type(np.searchsorted(A[0], 0)) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.searchsorted(B[0], 0)) # E: numpy.signedinteger[numpy.typing._ reveal_type(np.searchsorted(A[0], [0])) # E: numpy.ndarray[Any, Any] reveal_type(np.searchsorted(B[0], [0])) # E: numpy.ndarray[Any, Any] diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py index 3e640b3ba8cd..df6cd4586e0a 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.py +++ b/numpy/typing/tests/data/reveal/ndarray_misc.py @@ 
-26,14 +26,14 @@ class SubClass(np.ndarray): ... reveal_type(A.any(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] reveal_type(A.any(out=B)) # E: SubClass -reveal_type(f8.argmax()) # E: numpy.signedinteger[Any] -reveal_type(A.argmax()) # E: numpy.signedinteger[Any] -reveal_type(A.argmax(axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.argmax()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(A.argmax()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(A.argmax(axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ reveal_type(A.argmax(out=B)) # E: SubClass -reveal_type(f8.argmin()) # E: numpy.signedinteger[Any] -reveal_type(A.argmin()) # E: numpy.signedinteger[Any] -reveal_type(A.argmin(axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.argmin()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(A.argmin()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(A.argmin(axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ reveal_type(A.argmin(out=B)) # E: SubClass reveal_type(f8.argsort()) # E: numpy.ndarray[Any, Any] From 133cf30ad1f34d1d6990133502c5a30209b51e6f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 24 Nov 2020 22:55:33 +0100 Subject: [PATCH 0252/1270] ENH: Take the precision of `np.int_` into account arithmetic operations --- numpy/typing/_callable.py | 30 ++++++++++--------- numpy/typing/tests/data/reveal/arithmetic.py | 24 +++++++-------- numpy/typing/tests/data/reveal/bitwise_ops.py | 20 ++++++------- numpy/typing/tests/data/reveal/mod.py | 24 +++++++-------- 4 files changed, 50 insertions(+), 48 deletions(-) diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 831921fd7922..8f464cc755ce 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -29,11 +29,13 @@ unsignedinteger, signedinteger, int8, + int_, floating, float64, complexfloating, complex128, ) +from ._nbit import _NBitInt from ._scalars import ( _BoolLike, _IntLike, @@ -72,7 +74,7 @@ class _BoolOp(Protocol[_GenericType_co]): @overload def __call__(self, __other: _BoolLike) -> _GenericType_co: ... @overload # platform dependent - def __call__(self, __other: int) -> signedinteger[Any]: ... + def __call__(self, __other: int) -> int_: ... @overload def __call__(self, __other: float) -> float64: ... @overload @@ -84,7 +86,7 @@ class _BoolBitOp(Protocol[_GenericType_co]): @overload def __call__(self, __other: _BoolLike) -> _GenericType_co: ... @overload # platform dependent - def __call__(self, __other: int) -> signedinteger[Any]: ... + def __call__(self, __other: int) -> int_: ... @overload def __call__(self, __other: _IntType) -> _IntType: ... @@ -93,7 +95,7 @@ class _BoolSub(Protocol): @overload def __call__(self, __other: bool) -> NoReturn: ... @overload # platform dependent - def __call__(self, __other: int) -> signedinteger[Any]: ... + def __call__(self, __other: int) -> int_: ... @overload def __call__(self, __other: float) -> float64: ... @overload @@ -113,7 +115,7 @@ class _BoolMod(Protocol): @overload def __call__(self, __other: _BoolLike) -> int8: ... @overload # platform dependent - def __call__(self, __other: int) -> signedinteger[Any]: ... + def __call__(self, __other: int) -> int_: ... @overload def __call__(self, __other: float) -> float64: ... @overload @@ -125,7 +127,7 @@ class _BoolDivMod(Protocol): @overload def __call__(self, __other: _BoolLike) -> _2Tuple[int8]: ... 
@overload # platform dependent - def __call__(self, __other: int) -> _2Tuple[signedinteger[Any]]: ... + def __call__(self, __other: int) -> _2Tuple[int_]: ... @overload def __call__(self, __other: float) -> _2Tuple[float64]: ... @overload @@ -143,7 +145,7 @@ class _IntTrueDiv(Protocol[_NBit_co]): @overload def __call__(self, __other: bool) -> floating[_NBit_co]: ... @overload - def __call__(self, __other: int) -> floating[Any]: ... + def __call__(self, __other: int) -> floating[_NBitInt]: ... @overload def __call__(self, __other: float) -> float64: ... @overload @@ -212,7 +214,7 @@ class _SignedIntOp(Protocol[_NBit_co]): @overload def __call__(self, __other: bool) -> signedinteger[_NBit_co]: ... @overload - def __call__(self, __other: int) -> signedinteger[Any]: ... + def __call__(self, __other: int) -> signedinteger[Union[_NBit_co, _NBitInt]]: ... @overload def __call__(self, __other: float) -> float64: ... @overload @@ -226,7 +228,7 @@ class _SignedIntBitOp(Protocol[_NBit_co]): @overload def __call__(self, __other: bool) -> signedinteger[_NBit_co]: ... @overload - def __call__(self, __other: int) -> signedinteger[Any]: ... + def __call__(self, __other: int) -> signedinteger[Union[_NBit_co, _NBitInt]]: ... @overload def __call__( self, __other: signedinteger[_NBit] @@ -236,7 +238,7 @@ class _SignedIntMod(Protocol[_NBit_co]): @overload def __call__(self, __other: bool) -> signedinteger[_NBit_co]: ... @overload - def __call__(self, __other: int) -> signedinteger[Any]: ... + def __call__(self, __other: int) -> signedinteger[Union[_NBit_co, _NBitInt]]: ... @overload def __call__(self, __other: float) -> float64: ... @overload @@ -248,7 +250,7 @@ class _SignedIntDivMod(Protocol[_NBit_co]): @overload def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit_co]]: ... @overload - def __call__(self, __other: int) -> _2Tuple[signedinteger[Any]]: ... + def __call__(self, __other: int) -> _2Tuple[signedinteger[Union[_NBit_co, _NBitInt]]]: ... @overload def __call__(self, __other: float) -> _2Tuple[float64]: ... @overload @@ -260,7 +262,7 @@ class _FloatOp(Protocol[_NBit_co]): @overload def __call__(self, __other: bool) -> floating[_NBit_co]: ... @overload - def __call__(self, __other: int) -> floating[Any]: ... + def __call__(self, __other: int) -> floating[Union[_NBit_co, _NBitInt]]: ... @overload def __call__(self, __other: float) -> float64: ... @overload @@ -274,7 +276,7 @@ class _FloatMod(Protocol[_NBit_co]): @overload def __call__(self, __other: bool) -> floating[_NBit_co]: ... @overload - def __call__(self, __other: int) -> floating[Any]: ... + def __call__(self, __other: int) -> floating[Union[_NBit_co, _NBitInt]]: ... @overload def __call__(self, __other: float) -> float64: ... @overload @@ -286,7 +288,7 @@ class _FloatDivMod(Protocol[_NBit_co]): @overload def __call__(self, __other: bool) -> _2Tuple[floating[_NBit_co]]: ... @overload - def __call__(self, __other: int) -> _2Tuple[floating[Any]]: ... + def __call__(self, __other: int) -> _2Tuple[floating[Union[_NBit_co, _NBitInt]]]: ... @overload def __call__(self, __other: float) -> _2Tuple[float64]: ... @overload @@ -298,7 +300,7 @@ class _ComplexOp(Protocol[_NBit_co]): @overload def __call__(self, __other: bool) -> complexfloating[_NBit_co, _NBit_co]: ... @overload - def __call__(self, __other: int) -> complexfloating[Any, Any]: ... + def __call__(self, __other: int) -> complexfloating[Union[_NBit_co, _NBitInt], Union[_NBit_co, _NBitInt]]: ... @overload def __call__(self, __other: Union[float, complex]) -> complex128: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.py index 20310e691647..de88602f3265 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.py +++ b/numpy/typing/tests/data/reveal/arithmetic.py @@ -126,7 +126,7 @@ reveal_type(c16 + b) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(c16 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(c16 + f) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + i) # E: numpy.complexfloating[Any, Any] +reveal_type(c16 + i) # E: numpy.complexfloating[numpy.typing._ reveal_type(c16 + AR) # E: Any reveal_type(c16 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] @@ -139,7 +139,7 @@ reveal_type(b + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(c + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(f + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(i + c16) # E: numpy.complexfloating[Any, Any] +reveal_type(i + c16) # E: numpy.complexfloating[numpy.typing._ reveal_type(AR + c16) # E: Any reveal_type(c8 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] @@ -152,7 +152,7 @@ reveal_type(c8 + b) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] reveal_type(c8 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(c8 + f) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c8 + i) # E: numpy.complexfloating[Any, Any] +reveal_type(c8 + i) # E: numpy.complexfloating[numpy.typing._ reveal_type(c8 + AR) # E: Any reveal_type(c16 + c8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] @@ -165,7 +165,7 @@ reveal_type(b + c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] reveal_type(c + c8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(f + c8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(i + c8) # E: numpy.complexfloating[Any, Any] +reveal_type(i + c8) # E: numpy.complexfloating[numpy.typing._ reveal_type(AR + c8) # E: Any # Float @@ -178,7 +178,7 @@ reveal_type(f8 + b) # E: numpy.floating[numpy.typing._64Bit] reveal_type(f8 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(f8 + f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 + i) # E: numpy.floating[Any] +reveal_type(f8 + i) # E: numpy.floating[numpy.typing._ reveal_type(f8 + AR) # E: Any reveal_type(f8 + f8) # E: numpy.floating[numpy.typing._64Bit] @@ -189,7 +189,7 @@ reveal_type(b + f8) # E: numpy.floating[numpy.typing._64Bit] reveal_type(c + f8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(f + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i + f8) # E: numpy.floating[Any] +reveal_type(i + f8) # E: numpy.floating[numpy.typing._ reveal_type(AR + f8) # E: Any reveal_type(f4 + f8) # E: numpy.floating[numpy.typing._64Bit] @@ -200,7 +200,7 @@ reveal_type(f4 + b) # E: numpy.floating[numpy.typing._32Bit] reveal_type(f4 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(f4 + f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f4 + i) # E: numpy.floating[Any] +reveal_type(f4 + i) # E: numpy.floating[numpy.typing._ reveal_type(f4 + AR) # E: Any reveal_type(f8 + f4) # E: 
numpy.floating[numpy.typing._64Bit] @@ -211,7 +211,7 @@ reveal_type(b + f4) # E: umpy.floating[numpy.typing._32Bit] reveal_type(c + f4) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(f + f4) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i + f4) # E: numpy.floating[Any] +reveal_type(i + f4) # E: numpy.floating[numpy.typing._ reveal_type(AR + f4) # E: Any # Int @@ -224,7 +224,7 @@ reveal_type(i8 + b) # E: numpy.signedinteger[numpy.typing._64Bit] reveal_type(i8 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(i8 + f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i8 + i) # E: numpy.signedinteger[Any] +reveal_type(i8 + i) # E: numpy.signedinteger[numpy.typing._ reveal_type(i8 + AR) # E: Any reveal_type(u8 + u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] @@ -245,7 +245,7 @@ reveal_type(b + i8) # E: numpy.signedinteger[numpy.typing._64Bit] reveal_type(c + i8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(f + i8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i + i8) # E: numpy.signedinteger[Any] +reveal_type(i + i8) # E: numpy.signedinteger[numpy.typing._ reveal_type(AR + i8) # E: Any reveal_type(u8 + u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] @@ -260,7 +260,7 @@ reveal_type(i4 + i8) # E: numpy.signedinteger[numpy.typing._64Bit] reveal_type(i4 + i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i4 + i) # E: numpy.signedinteger[Any] +reveal_type(i4 + i) # E: numpy.signedinteger[numpy.typing._ reveal_type(i4 + b_) # E: numpy.signedinteger[numpy.typing._32Bit] reveal_type(i4 + b) # E: numpy.signedinteger[numpy.typing._32Bit] reveal_type(i4 + AR) # E: Any @@ -276,7 +276,7 @@ reveal_type(i8 + i4) # E: numpy.signedinteger[numpy.typing._64Bit] reveal_type(i4 + i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i + i4) # E: numpy.signedinteger[Any] +reveal_type(i + i4) # E: numpy.signedinteger[numpy.typing._ reveal_type(b_ + i4) # E: numpy.signedinteger[numpy.typing._32Bit] reveal_type(b + i4) # E: numpy.signedinteger[numpy.typing._32Bit] reveal_type(AR + i4) # E: Any diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.py b/numpy/typing/tests/data/reveal/bitwise_ops.py index cb9131a96938..af49244834f9 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.py +++ b/numpy/typing/tests/data/reveal/bitwise_ops.py @@ -39,11 +39,11 @@ reveal_type(i8 ^ i4) # E: numpy.signedinteger[numpy.typing._64Bit] reveal_type(i8 & i4) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 << i) # E: numpy.signedinteger[Any] -reveal_type(i8 >> i) # E: numpy.signedinteger[Any] -reveal_type(i8 | i) # E: numpy.signedinteger[Any] -reveal_type(i8 ^ i) # E: numpy.signedinteger[Any] -reveal_type(i8 & i) # E: numpy.signedinteger[Any] +reveal_type(i8 << i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(i8 >> i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(i8 | i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(i8 ^ i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(i8 & i) # E: numpy.signedinteger[numpy.typing._ reveal_type(i8 << b_) # E: numpy.signedinteger[numpy.typing._64Bit] reveal_type(i8 >> b_) # E: numpy.signedinteger[numpy.typing._64Bit] @@ -117,11 +117,11 @@ reveal_type(b_ ^ b) # E: numpy.bool_ reveal_type(b_ & b) # E: numpy.bool_ -reveal_type(b_ << i) # E: numpy.signedinteger[Any] -reveal_type(b_ >> i) # E: numpy.signedinteger[Any] -reveal_type(b_ | i) # E: numpy.signedinteger[Any] -reveal_type(b_ ^ i) # E: 
numpy.signedinteger[Any] -reveal_type(b_ & i) # E: numpy.signedinteger[Any] +reveal_type(b_ << i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(b_ >> i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(b_ | i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(b_ ^ i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(b_ & i) # E: numpy.signedinteger[numpy.typing._ reveal_type(~i8) # E: numpy.signedinteger[numpy.typing._64Bit] reveal_type(~i4) # E: numpy.signedinteger[numpy.typing._32Bit] diff --git a/numpy/typing/tests/data/reveal/mod.py b/numpy/typing/tests/data/reveal/mod.py index 4292041f8302..3330cf175922 100644 --- a/numpy/typing/tests/data/reveal/mod.py +++ b/numpy/typing/tests/data/reveal/mod.py @@ -34,7 +34,7 @@ # Bool reveal_type(b_ % b) # E: numpy.signedinteger[numpy.typing._8Bit] -reveal_type(b_ % i) # E: numpy.signedinteger[Any] +reveal_type(b_ % i) # E: numpy.signedinteger[numpy.typing._ reveal_type(b_ % f) # E: numpy.floating[numpy.typing._64Bit] reveal_type(b_ % b_) # E: numpy.signedinteger[numpy.typing._8Bit] reveal_type(b_ % i8) # E: numpy.signedinteger[numpy.typing._64Bit] @@ -43,7 +43,7 @@ reveal_type(b_ % AR) # E: Any reveal_type(divmod(b_, b)) # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(divmod(b_, i)) # E: Tuple[numpy.signedinteger[Any], numpy.signedinteger[Any]] +reveal_type(divmod(b_, i)) # E: Tuple[numpy.signedinteger[numpy.typing._ reveal_type(divmod(b_, f)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] reveal_type(divmod(b_, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]] reveal_type(divmod(b_, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] @@ -52,7 +52,7 @@ reveal_type(divmod(b_, AR)) # E: Tuple[Any, Any] reveal_type(b % b_) # E: numpy.signedinteger[numpy.typing._8Bit] -reveal_type(i % b_) # E: numpy.signedinteger[Any] +reveal_type(i % b_) # E: numpy.signedinteger[numpy.typing._ reveal_type(f % b_) # E: numpy.floating[numpy.typing._64Bit] reveal_type(b_ % b_) # E: numpy.signedinteger[numpy.typing._8Bit] reveal_type(i8 % b_) # E: numpy.signedinteger[numpy.typing._64Bit] @@ -61,7 +61,7 @@ reveal_type(AR % b_) # E: Any reveal_type(divmod(b, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(divmod(i, b_)) # E: Tuple[numpy.signedinteger[Any], numpy.signedinteger[Any]] +reveal_type(divmod(i, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._ reveal_type(divmod(f, b_)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] reveal_type(divmod(b_, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]] reveal_type(divmod(i8, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] @@ -72,7 +72,7 @@ # int reveal_type(i8 % b) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 % i) # E: numpy.signedinteger[Any] +reveal_type(i8 % i) # E: numpy.signedinteger[numpy.typing._ reveal_type(i8 % f) # E: numpy.floating[numpy.typing._64Bit] reveal_type(i8 % i8) # E: numpy.signedinteger[numpy.typing._64Bit] reveal_type(i8 % f8) # E: numpy.floating[numpy.typing._64Bit] @@ -83,7 +83,7 @@ reveal_type(i8 % AR) # E: Any reveal_type(divmod(i8, b)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(i8, i)) # E: 
Tuple[numpy.signedinteger[Any], numpy.signedinteger[Any]] +reveal_type(divmod(i8, i)) # E: Tuple[numpy.signedinteger[numpy.typing._ reveal_type(divmod(i8, f)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] reveal_type(divmod(i8, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] reveal_type(divmod(i8, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] @@ -94,7 +94,7 @@ reveal_type(divmod(i8, AR)) # E: Tuple[Any, Any] reveal_type(b % i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i % i8) # E: numpy.signedinteger[Any] +reveal_type(i % i8) # E: numpy.signedinteger[numpy.typing._ reveal_type(f % i8) # E: numpy.floating[numpy.typing._64Bit] reveal_type(i8 % i8) # E: numpy.signedinteger[numpy.typing._64Bit] reveal_type(f8 % i8) # E: numpy.floating[numpy.typing._64Bit] @@ -105,7 +105,7 @@ reveal_type(AR % i8) # E: Any reveal_type(divmod(b, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(i, i8)) # E: Tuple[numpy.signedinteger[Any], numpy.signedinteger[Any]] +reveal_type(divmod(i, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._ reveal_type(divmod(f, i8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] reveal_type(divmod(i8, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] reveal_type(divmod(f8, i8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] @@ -118,14 +118,14 @@ # float reveal_type(f8 % b) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 % i) # E: numpy.floating[Any] +reveal_type(f8 % i) # E: numpy.floating[numpy.typing._ reveal_type(f8 % f) # E: numpy.floating[numpy.typing._64Bit] reveal_type(i8 % f4) # E: numpy.floating[numpy.typing._64Bit] reveal_type(f4 % f4) # E: numpy.floating[numpy.typing._32Bit] reveal_type(f8 % AR) # E: Any reveal_type(divmod(f8, b)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(f8, i)) # E: Tuple[numpy.floating[Any], numpy.floating[Any]] +reveal_type(divmod(f8, i)) # E: Tuple[numpy.floating[numpy.typing._ reveal_type(divmod(f8, f)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] reveal_type(divmod(f8, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] reveal_type(divmod(f8, f4)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] @@ -133,7 +133,7 @@ reveal_type(divmod(f8, AR)) # E: Tuple[Any, Any] reveal_type(b % f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i % f8) # E: numpy.floating[Any] +reveal_type(i % f8) # E: numpy.floating[numpy.typing._ reveal_type(f % f8) # E: numpy.floating[numpy.typing._64Bit] reveal_type(f8 % f8) # E: numpy.floating[numpy.typing._64Bit] reveal_type(f8 % f8) # E: numpy.floating[numpy.typing._64Bit] @@ -141,7 +141,7 @@ reveal_type(AR % f8) # E: Any reveal_type(divmod(b, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(i, f8)) # E: Tuple[numpy.floating[Any], numpy.floating[Any]] +reveal_type(divmod(i, f8)) # E: Tuple[numpy.floating[numpy.typing._ reveal_type(divmod(f, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] reveal_type(divmod(f8, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], 
numpy.floating[numpy.typing._64Bit]] reveal_type(divmod(f4, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] From dd7e9ad06a2696378f1b501ad44bb3a5094e5a25 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 25 Nov 2020 12:14:56 +0100 Subject: [PATCH 0253/1270] DOC,STY: Fixed a few typos and the likes --- numpy/typing/__init__.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 9f8e27573ba2..0d9c17d10c32 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -91,7 +91,7 @@ >>> x.dtype = np.bool_ This sort of mutation is not allowed by the types. Users who want to -write statically typed code should insted use the `numpy.ndarray.view` +write statically typed code should instead use the `numpy.ndarray.view` method to create a view of the array with a different dtype. DTypeLike @@ -104,15 +104,15 @@ >>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)}) -Although this is valid Numpy code, the type checker will complain about it, +Although this is valid NumPy code, the type checker will complain about it, since its usage is discouraged. Please see : :ref:`Data type objects ` -Number Precision +Number precision ~~~~~~~~~~~~~~~~ The precision of `numpy.number` subclasses is treated as a covariant generic -parameter (see :class:`~NBitBase`), simplifying the annoting of proccesses +parameter (see :class:`~NBitBase`), simplifying the annotating of processes involving precision-based casting. .. code-block:: python @@ -171,7 +171,7 @@ class NBitBase: An object representing `numpy.number` precision during static type checking. Used exclusively for the purpose static type checking, `NBitBase` - represents the base of a hierachieral set of subclasses. + represents the base of a hierarchical set of subclasses. Each subsequent subclass is herein used for representing a lower level of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. 
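The covariant ``NBitBase`` parameter documented above is what allows precision-preserving operations to be annotated generically. As a minimal illustrative sketch (the ``add`` helper below is hypothetical and not part of any patch in this series), a function annotated this way lets mypy infer ``floating[_64Bit]`` for two ``float64`` arguments and ``floating[_32Bit]`` for two ``float32`` arguments:

.. code-block:: python

    from __future__ import annotations
    from typing import TypeVar

    import numpy as np
    import numpy.typing as npt

    # T may stand for any precision in the NBitBase hierarchy
    T = TypeVar("T", bound=npt.NBitBase)

    def add(a: np.floating[T], b: np.floating[T]) -> np.floating[T]:
        # both operands share the precision T, so the result keeps it
        return a + b

With the ``numpy.typing.mypy_plugin`` enabled (see the release note later in this series), platform-dependent types such as ``np.int_`` also resolve to a concrete precision rather than ``Any``.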
From 0917df2953ebe716c45c92671cb652eacd7dd8fd Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 25 Nov 2020 13:51:58 +0100 Subject: [PATCH 0254/1270] DOC: Add a docstring to the mypy plugin entry-point --- numpy/typing/mypy_plugin.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 023ab5fd18f3..5110e3707506 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -55,4 +55,5 @@ def get_type_analyze_hook(self, fullname: str) -> t.Optional[HookFunc]: def plugin(version: str) -> t.Type[_NumpyPlugin]: + """An entry-point for mypy.""" return _NumpyPlugin From fd17d1eb73b93facafac84ecc24f3117de5247b4 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 25 Nov 2020 13:59:33 +0100 Subject: [PATCH 0255/1270] TST: Add `numpy.typing.mypy_plugin` to the list of public modules --- numpy/tests/test_public_api.py | 1 + numpy/typing/mypy_plugin.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 7b2a590c3872..69430eeda089 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -170,6 +170,7 @@ def test_NPY_NO_EXPORT(): "random", "testing", "typing", + "typing.mypy_plugin", "version", ]] diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 5110e3707506..3418701675e5 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -8,6 +8,8 @@ from mypy.types import Type from mypy.plugin import Plugin, AnalyzeTypeContext +__all__: t.List[str] = [] + HookFunc = t.Callable[[AnalyzeTypeContext], Type] From 8685cf3ad8faa3dc2828be59fa45351456f55aee Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 25 Nov 2020 15:24:43 +0100 Subject: [PATCH 0256/1270] MAINT,TST: Fixed a failure due platform-dependent precisions --- numpy/typing/tests/data/reveal/dtype.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/typing/tests/data/reveal/dtype.py b/numpy/typing/tests/data/reveal/dtype.py index 626a15270a25..f786637dc9fd 100644 --- a/numpy/typing/tests/data/reveal/dtype.py +++ b/numpy/typing/tests/data/reveal/dtype.py @@ -15,15 +15,15 @@ reveal_type(np.dtype("str")) # E: numpy.dtype[numpy.str_] # Python types -reveal_type(np.dtype(complex)) # E: numpy.dtype[numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit]] -reveal_type(np.dtype(float)) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(np.dtype(int)) # E: numpy.dtype +reveal_type(np.dtype(complex)) # E: numpy.dtype[numpy.complexfloating[numpy.typing._ +reveal_type(np.dtype(float)) # E: numpy.dtype[numpy.floating[numpy.typing._ +reveal_type(np.dtype(int)) # E: numpy.dtype[numpy.signedinteger[numpy.typing._ reveal_type(np.dtype(bool)) # E: numpy.dtype[numpy.bool_] reveal_type(np.dtype(str)) # E: numpy.dtype[numpy.str_] reveal_type(np.dtype(bytes)) # E: numpy.dtype[numpy.bytes_] # Special case for None -reveal_type(np.dtype(None)) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(np.dtype(None)) # E: numpy.dtype[numpy.floating[numpy.typing._ # Dtypes of dtypes reveal_type(np.dtype(np.dtype(np.float64))) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]] From 92e549fc1c02e86363daf9de5945d2fb5259951c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 25 Nov 2020 15:27:14 +0100 Subject: [PATCH 0257/1270] MAINT: Import `mypy` via a try/except approach Fixes an issue where the `pypy` tests would fail --- numpy/typing/mypy_plugin.py | 38 
++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 3418701675e5..505a01a0bb1d 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -4,14 +4,17 @@ import numpy as np -import mypy.types -from mypy.types import Type -from mypy.plugin import Plugin, AnalyzeTypeContext +try: + import mypy.types + from mypy.types import Type + from mypy.plugin import Plugin, AnalyzeTypeContext + _HookFunc = t.Callable[[AnalyzeTypeContext], Type] + MYPY_EX: t.Optional[ModuleNotFoundError] = None +except ModuleNotFoundError as ex: + MYPY_EX = ex __all__: t.List[str] = [] -HookFunc = t.Callable[[AnalyzeTypeContext], Type] - def _get_precision_dict() -> t.Dict[str, str]: names = [ @@ -39,7 +42,7 @@ def _get_precision_dict() -> t.Dict[str, str]: _PRECISION_DICT = _get_precision_dict() -def _hook(ctx: AnalyzeTypeContext) -> Type: +def _hook(ctx: "AnalyzeTypeContext") -> "Type": """Replace a type-alias with a concrete ``NBitBase`` subclass.""" typ, _, api = ctx name = typ.name.split(".")[-1] @@ -47,15 +50,20 @@ def _hook(ctx: AnalyzeTypeContext) -> Type: return api.named_type(name_new) -class _NumpyPlugin(Plugin): - """A plugin for assigning platform-specific `numpy.number` precisions.""" +if MYPY_EX is None: + class _NumpyPlugin(Plugin): + """A plugin for assigning platform-specific `numpy.number` precisions.""" - def get_type_analyze_hook(self, fullname: str) -> t.Optional[HookFunc]: - if fullname in _PRECISION_DICT: - return _hook - return None + def get_type_analyze_hook(self, fullname: str) -> t.Optional[_HookFunc]: + if fullname in _PRECISION_DICT: + return _hook + return None + def plugin(version: str) -> t.Type[_NumpyPlugin]: + """An entry-point for mypy.""" + return _NumpyPlugin -def plugin(version: str) -> t.Type[_NumpyPlugin]: - """An entry-point for mypy.""" - return _NumpyPlugin +else: + def plugin(version: str) -> t.Type["_NumpyPlugin"]: + """An entry-point for mypy.""" + raise MYPY_EX From 9fa57fd482e113c089cc9aae8ac4200ac877455e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 27 Nov 2020 12:27:56 +0100 Subject: [PATCH 0258/1270] ENH: Add unit support to the `datetime64`/`timedelta64` character codes --- numpy/typing/_char_codes.py | 72 +++++++++++++++++++++++++++++++++---- 1 file changed, 66 insertions(+), 6 deletions(-) diff --git a/numpy/typing/_char_codes.py b/numpy/typing/_char_codes.py index 7e5b82cfaf73..143644e88de5 100644 --- a/numpy/typing/_char_codes.py +++ b/numpy/typing/_char_codes.py @@ -55,14 +55,74 @@ _CDoubleCodes = Literal["cdouble" "complex", "complex_", "cfloat", "D", "=D", "D"] _CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "G"] - _DT64Codes = Literal["datetime64", "M", "=M", "M"] - _TD64Codes = Literal["timedelta64", "m", "=m", "m"] - _StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "U"] _BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "S"] _VoidCodes = Literal["void", "void0", "V", "=V", "V"] _ObjectCodes = Literal["object", "object_", "O", "=O", "O"] + _DT64Codes = Literal[ + "datetime64", "=datetime64", "datetime64", + "datetime64[Y]", "=datetime64[Y]", "datetime64[Y]", + "datetime64[M]", "=datetime64[M]", "datetime64[M]", + "datetime64[W]", "=datetime64[W]", "datetime64[W]", + "datetime64[D]", "=datetime64[D]", "datetime64[D]", + "datetime64[h]", "=datetime64[h]", "datetime64[h]", + "datetime64[m]", "=datetime64[m]", "datetime64[m]", + "datetime64[s]", 
"=datetime64[s]", "datetime64[s]", + "datetime64[ms]", "=datetime64[ms]", "datetime64[ms]", + "datetime64[us]", "=datetime64[us]", "datetime64[us]", + "datetime64[ns]", "=datetime64[ns]", "datetime64[ns]", + "datetime64[ps]", "=datetime64[ps]", "datetime64[ps]", + "datetime64[fs]", "=datetime64[fs]", "datetime64[fs]", + "datetime64[as]", "=datetime64[as]", "datetime64[as]", + "M", "=M", "M", + "M8", "=M8", "M8", + "M8[Y]", "=M8[Y]", "M8[Y]", + "M8[M]", "=M8[M]", "M8[M]", + "M8[W]", "=M8[W]", "M8[W]", + "M8[D]", "=M8[D]", "M8[D]", + "M8[h]", "=M8[h]", "M8[h]", + "M8[m]", "=M8[m]", "M8[m]", + "M8[s]", "=M8[s]", "M8[s]", + "M8[ms]", "=M8[ms]", "M8[ms]", + "M8[us]", "=M8[us]", "M8[us]", + "M8[ns]", "=M8[ns]", "M8[ns]", + "M8[ps]", "=M8[ps]", "M8[ps]", + "M8[fs]", "=M8[fs]", "M8[fs]", + "M8[as]", "=M8[as]", "M8[as]", + ] + _TD64Codes = Literal[ + "timedelta64", "=timedelta64", "timedelta64", + "timedelta64[Y]", "=timedelta64[Y]", "timedelta64[Y]", + "timedelta64[M]", "=timedelta64[M]", "timedelta64[M]", + "timedelta64[W]", "=timedelta64[W]", "timedelta64[W]", + "timedelta64[D]", "=timedelta64[D]", "timedelta64[D]", + "timedelta64[h]", "=timedelta64[h]", "timedelta64[h]", + "timedelta64[m]", "=timedelta64[m]", "timedelta64[m]", + "timedelta64[s]", "=timedelta64[s]", "timedelta64[s]", + "timedelta64[ms]", "=timedelta64[ms]", "timedelta64[ms]", + "timedelta64[us]", "=timedelta64[us]", "timedelta64[us]", + "timedelta64[ns]", "=timedelta64[ns]", "timedelta64[ns]", + "timedelta64[ps]", "=timedelta64[ps]", "timedelta64[ps]", + "timedelta64[fs]", "=timedelta64[fs]", "timedelta64[fs]", + "timedelta64[as]", "=timedelta64[as]", "timedelta64[as]", + "m", "=m", "m", + "m8", "=m8", "m8", + "m8[Y]", "=m8[Y]", "m8[Y]", + "m8[M]", "=m8[M]", "m8[M]", + "m8[W]", "=m8[W]", "m8[W]", + "m8[D]", "=m8[D]", "m8[D]", + "m8[h]", "=m8[h]", "m8[h]", + "m8[m]", "=m8[m]", "m8[m]", + "m8[s]", "=m8[s]", "m8[s]", + "m8[ms]", "=m8[ms]", "m8[ms]", + "m8[us]", "=m8[us]", "m8[us]", + "m8[ns]", "=m8[ns]", "m8[ns]", + "m8[ps]", "=m8[ps]", "m8[ps]", + "m8[fs]", "=m8[fs]", "m8[fs]", + "m8[as]", "=m8[as]", "m8[as]", + ] + else: _BoolCodes = Any @@ -106,10 +166,10 @@ _CDoubleCodes = Any _CLongDoubleCodes = Any - _DT64Codes = Any - _TD64Codes = Any - _StrCodes = Any _BytesCodes = Any _VoidCodes = Any _ObjectCodes = Any + + _DT64Codes = Any + _TD64Codes = Any From ba4ad04b356aac976cf7940d8885c66cdab0b706 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 30 Nov 2020 14:09:57 +0100 Subject: [PATCH 0259/1270] DOC: Add a comment about truncating and sub-string matching Co-Authored-By: Eric Wieser <425260+eric-wieser@users.noreply.github.com> --- numpy/typing/tests/data/reveal/arithmetic.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.py index de88602f3265..4d07e8dac069 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.py +++ b/numpy/typing/tests/data/reveal/arithmetic.py @@ -126,6 +126,9 @@ reveal_type(c16 + b) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(c16 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] reveal_type(c16 + f) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] + +# note this comment is deliberate truncated as the result varies by platform, +# and the numpy `reveal` tests use substring matching reveal_type(c16 + i) # E: numpy.complexfloating[numpy.typing._ reveal_type(c16 + AR) # E: Any From 4b27d3a54d7c8dd4d2ae502f85a15d8bc529ba86 Mon Sep 17 
00:00:00 2001 From: Bas van Beek Date: Fri, 11 Dec 2020 21:45:00 +0100 Subject: [PATCH 0260/1270] Update mypy_plugin.py --- numpy/typing/mypy_plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 505a01a0bb1d..bdd5c50f3389 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -32,7 +32,7 @@ def _get_precision_dict() -> t.Dict[str, str]: ] ret = {} for name, typ in names: - n: int = 8 * typ().dtype.alignment + n: int = 8 * typ().dtype.itemsize ret[f'numpy.typing._nbit.{name}'] = f"numpy._{n}Bit" return ret From 75c532dd95d7233de5a4f57554a88657380dc1ec Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 11 Dec 2020 22:09:06 +0100 Subject: [PATCH 0261/1270] REL: Added a release note --- .../upcoming_changes/17843.new_feature.rst | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 doc/release/upcoming_changes/17843.new_feature.rst diff --git a/doc/release/upcoming_changes/17843.new_feature.rst b/doc/release/upcoming_changes/17843.new_feature.rst new file mode 100644 index 000000000000..d2b9d2adc84c --- /dev/null +++ b/doc/release/upcoming_changes/17843.new_feature.rst @@ -0,0 +1,22 @@ +Added a mypy plugin for handling platform-specific `numpy.number` precisions +---------------------------------------------------------------------------- + +A mypy_ plugin is now available for automatically assigning the (platform-dependent) +precisions of certain `~numpy.number` subclasses, including the likes of +`~numpy.int_`, `~numpy.intp` and `~numpy.longlong`. See the documentation on +:ref:`scalar types ` for a comprehensive overview +of the affected classes. + +Note that while usage of the plugin is completely optional, without it the +precision of above-mentioned classes will be inferred as `~typing.Any`. + +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + + +.. _mypy: http://mypy-lang.org/ +.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html From f66d5dbb02f576541133f1108d73ae81f8a8f42f Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 22 Dec 2020 21:11:50 +0100 Subject: [PATCH 0262/1270] BLD: ensure we give the right error message for old Python versions Before this change, it would give a random syntax error somewhere in `versioneer`. --- setup.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index e54328e064af..5f4b553c94bb 100755 --- a/setup.py +++ b/setup.py @@ -24,9 +24,18 @@ import subprocess import textwrap import warnings -import versioneer import builtins + +# Python supported version checks. Keep right after stdlib imports to ensure we +# get a sensible error for older Python versions +if sys.version_info[:2] < (3, 7): + raise RuntimeError("Python version >= 3.7 required.") + + +import versioneer + + # This is a bit hackish: we are setting a global variable so that the main # numpy __init__ can detect if it is being loaded by the setup routine, to # avoid attempting to load components that aren't built yet. 
While ugly, it's @@ -41,10 +50,6 @@ MAJOR, MINOR, MICRO = FULLVERSION.split('.')[:3] VERSION = '{}.{}.{}'.format(MAJOR, MINOR, MICRO) -# Python supported version checks -if sys.version_info[:2] < (3, 7): - raise RuntimeError("Python version >= 3.7 required.") - # The first version not in the `Programming Language :: Python :: ...` classifiers above if sys.version_info >= (3, 10): fmt = "NumPy {} may not yet support Python {}.{}." From 150d459e0cf7ce8c92c971260e4aa88cbae43a2c Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 16 Nov 2020 21:20:57 +0000 Subject: [PATCH 0263/1270] ENH, SIMD: Add new NPYV intrinsics pack(1) - add bitwise logical operations for boolean vectors - add round conversion for float vectors - add NAN test for float vectors - add conditional addition and subtraction - add #definition NPY_SIMD_FMA3 to check FUSED native support - add testing cases for all of the above --- numpy/core/src/_simd/_simd.dispatch.c.src | 87 +++++++++++++ numpy/core/src/_simd/_simd_easyintrin.inc | 30 +++++ numpy/core/src/common/simd/avx2/avx2.h | 5 + numpy/core/src/common/simd/avx2/conversion.h | 8 ++ numpy/core/src/common/simd/avx2/operators.h | 22 ++++ numpy/core/src/common/simd/avx512/avx512.h | 2 + .../core/src/common/simd/avx512/conversion.h | 8 ++ numpy/core/src/common/simd/avx512/maskop.h | 54 ++++++++ numpy/core/src/common/simd/avx512/operators.h | 63 ++++++++++ numpy/core/src/common/simd/emulate_maskop.h | 44 +++++++ numpy/core/src/common/simd/neon/conversion.h | 20 +++ numpy/core/src/common/simd/neon/neon.h | 5 + numpy/core/src/common/simd/neon/operators.h | 25 ++++ numpy/core/src/common/simd/simd.h | 7 ++ numpy/core/src/common/simd/sse/conversion.h | 8 ++ numpy/core/src/common/simd/sse/operators.h | 22 ++++ numpy/core/src/common/simd/sse/sse.h | 6 +- numpy/core/src/common/simd/vsx/conversion.h | 50 ++++++++ numpy/core/src/common/simd/vsx/operators.h | 18 +++ numpy/core/src/common/simd/vsx/vsx.h | 1 + numpy/core/tests/test_simd.py | 118 +++++++++++++++++- 21 files changed, 598 insertions(+), 5 deletions(-) create mode 100644 numpy/core/src/common/simd/avx512/maskop.h create mode 100644 numpy/core/src/common/simd/emulate_maskop.h diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index e3dbcdece581..eaff81338ec3 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -372,15 +372,57 @@ SIMD_IMPL_INTRIN_1(@intrin@_@sfx@, v@sfx@, v@sfx@) /**end repeat1**/ #endif +/*************************** + * Mask operations + ***************************/ +/**begin repeat1 + * #intrin = ifadd, ifsub# + */ + SIMD_IMPL_INTRIN_4(@intrin@_@sfx@, v@sfx@, v@bsfx@, v@sfx@, v@sfx@, v@sfx@) +/**end repeat1**/ + #endif // simd_sup /**end repeat**/ /************************************************************************* * Variant ************************************************************************/ SIMD_IMPL_INTRIN_0N(cleanup) + +/************************************************************************* + * A special section for f32/f64 intrinsics outside the main repeater + ************************************************************************/ +/*************************** + * Operators + ***************************/ +// check special cases +SIMD_IMPL_INTRIN_1(notnan_f32, vb32, vf32) +#if NPY_SIMD_F64 + SIMD_IMPL_INTRIN_1(notnan_f64, vb64, vf64) +#endif +/*************************** + * Conversions + ***************************/ +// round to nearest integer (assume even) 
+SIMD_IMPL_INTRIN_1(round_s32_f32, vs32, vf32) +#if NPY_SIMD_F64 + SIMD_IMPL_INTRIN_2(round_s32_f64, vs32, vf64, vf64) +#endif + /************************************************************************* * A special section for boolean intrinsics outside the main repeater ************************************************************************/ +/*************************** + * Operators + ***************************/ +// Logical +/**begin repeat + * #bsfx = b8, b16, b32, b64# + */ +SIMD_IMPL_INTRIN_2(and_@bsfx@, v@bsfx@, v@bsfx@, v@bsfx@) +SIMD_IMPL_INTRIN_2(or_@bsfx@, v@bsfx@, v@bsfx@, v@bsfx@) +SIMD_IMPL_INTRIN_2(xor_@bsfx@, v@bsfx@, v@bsfx@, v@bsfx@) +SIMD_IMPL_INTRIN_1(not_@bsfx@, v@bsfx@, v@bsfx@) +/**end repeat**/ /*************************** * Conversions ***************************/ @@ -534,6 +576,16 @@ SIMD_INTRIN_DEF(sum_@sfx@) SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ #endif + +/*************************** + * Mask operations + ***************************/ +/**begin repeat1 + * #intrin = ifadd, ifsub# + */ + SIMD_INTRIN_DEF(@intrin@_@sfx@) +/**end repeat1**/ + #endif // simd_sup /**end repeat**/ /************************************************************************* @@ -541,9 +593,41 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@) ************************************************************************/ SIMD_INTRIN_DEF(cleanup) +/************************************************************************* + * A special section for f32/f64 intrinsics outside the main repeater + ************************************************************************/ +/*************************** + * Operators + ***************************/ +// check special cases +SIMD_INTRIN_DEF(notnan_f32) +#if NPY_SIMD_F64 + SIMD_INTRIN_DEF(notnan_f64) +#endif +/*************************** + * Conversions + ***************************/ +// round to nearest integer (assume even) +SIMD_INTRIN_DEF(round_s32_f32) +#if NPY_SIMD_F64 + SIMD_INTRIN_DEF(round_s32_f64) +#endif + /************************************************************************* * A special section for boolean intrinsics outside the main repeater ************************************************************************/ +/*************************** + * Operators + ***************************/ +// Logical +/**begin repeat + * #bsfx = b8, b16, b32, b64# + */ +SIMD_INTRIN_DEF(and_@bsfx@) +SIMD_INTRIN_DEF(or_@bsfx@) +SIMD_INTRIN_DEF(xor_@bsfx@) +SIMD_INTRIN_DEF(not_@bsfx@) +/**end repeat**/ /*************************** * Conversions ***************************/ @@ -590,6 +674,9 @@ NPY_CPU_DISPATCH_CURFX(simd_create_module)(void) if (PyModule_AddIntConstant(m, "simd_f64", NPY_SIMD_F64)) { goto err; } + if (PyModule_AddIntConstant(m, "simd_fma3", NPY_SIMD_FMA3)) { + goto err; + } if (PyModule_AddIntConstant(m, "simd_width", NPY_SIMD_WIDTH)) { goto err; } diff --git a/numpy/core/src/_simd/_simd_easyintrin.inc b/numpy/core/src/_simd/_simd_easyintrin.inc index 54e7ccf01f50..f83d7a286b93 100644 --- a/numpy/core/src/_simd/_simd_easyintrin.inc +++ b/numpy/core/src/_simd/_simd_easyintrin.inc @@ -123,6 +123,36 @@ }; \ return simd_arg_to_obj(&ret); \ } + +#define SIMD_IMPL_INTRIN_4(NAME, RET, IN0, IN1, IN2, IN3) \ + static PyObject *simd__intrin_##NAME \ + (PyObject* NPY_UNUSED(self), PyObject *args) \ + { \ + simd_arg arg1 = {.dtype = simd_data_##IN0}; \ + simd_arg arg2 = {.dtype = simd_data_##IN1}; \ + simd_arg arg3 = {.dtype = simd_data_##IN2}; \ + simd_arg arg4 = {.dtype = simd_data_##IN3}; \ + if (!PyArg_ParseTuple( \ + args, "O&O&O&O&:"NPY_TOSTRING(NAME), \ 
+ simd_arg_converter, &arg1, \ + simd_arg_converter, &arg2, \ + simd_arg_converter, &arg3, \ + simd_arg_converter, &arg4 \ + )) return NULL; \ + simd_data data = {.RET = npyv_##NAME( \ + arg1.data.IN0, arg2.data.IN1, \ + arg3.data.IN2, arg4.data.IN3 \ + )}; \ + simd_arg_free(&arg1); \ + simd_arg_free(&arg2); \ + simd_arg_free(&arg3); \ + simd_arg_free(&arg4); \ + simd_arg ret = { \ + .data = data, .dtype = simd_data_##RET \ + }; \ + return simd_arg_to_obj(&ret); \ + } + /** * Helper macros for repeating and expand a certain macro. * Mainly used for converting a scalar to an immediate constant. diff --git a/numpy/core/src/common/simd/avx2/avx2.h b/numpy/core/src/common/simd/avx2/avx2.h index 6f0d3c0d97da..bcd90b110a8a 100644 --- a/numpy/core/src/common/simd/avx2/avx2.h +++ b/numpy/core/src/common/simd/avx2/avx2.h @@ -5,6 +5,11 @@ #define NPY_SIMD 256 #define NPY_SIMD_WIDTH 32 #define NPY_SIMD_F64 1 +#ifdef NPY_HAVE_FMA3 + #define NPY_SIMD_FMA3 1 // native support +#else + #define NPY_SIMD_FMA3 0 // fast emulated +#endif // Enough limit to allow us to use _mm256_i32gather_* #define NPY_SIMD_MAXLOAD_STRIDE32 (0x7fffffff / 8) diff --git a/numpy/core/src/common/simd/avx2/conversion.h b/numpy/core/src/common/simd/avx2/conversion.h index f72678b548a4..dc6b187669c1 100644 --- a/numpy/core/src/common/simd/avx2/conversion.h +++ b/numpy/core/src/common/simd/avx2/conversion.h @@ -43,4 +43,12 @@ NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) { return (npy_uint8)_mm256_movemask_pd(_mm256_castsi256_pd(a)); } +// round to nearest integer (assuming even) +#define npyv_round_s32_f32 _mm256_cvtps_epi32 +NPY_FINLINE npyv_s32 npyv_round_s32_f64(npyv_f64 a, npyv_f64 b) +{ + __m128i lo = _mm256_cvtpd_epi32(a), hi = _mm256_cvtpd_epi32(b); + return _mm256_inserti128_si256(_mm256_castsi128_si256(lo), hi, 1); +} + #endif // _NPY_SIMD_AVX2_CVT_H diff --git a/numpy/core/src/common/simd/avx2/operators.h b/numpy/core/src/common/simd/avx2/operators.h index c1d30413fe5e..5fc7719e916d 100644 --- a/numpy/core/src/common/simd/avx2/operators.h +++ b/numpy/core/src/common/simd/avx2/operators.h @@ -61,6 +61,10 @@ NPY_FINLINE __m256i npyv_shr_s64(__m256i a, int c) #define npyv_and_s64 _mm256_and_si256 #define npyv_and_f32 _mm256_and_ps #define npyv_and_f64 _mm256_and_pd +#define npyv_and_b8 _mm256_and_si256 +#define npyv_and_b16 _mm256_and_si256 +#define npyv_and_b32 _mm256_and_si256 +#define npyv_and_b64 _mm256_and_si256 // OR #define npyv_or_u8 _mm256_or_si256 @@ -73,6 +77,10 @@ NPY_FINLINE __m256i npyv_shr_s64(__m256i a, int c) #define npyv_or_s64 _mm256_or_si256 #define npyv_or_f32 _mm256_or_ps #define npyv_or_f64 _mm256_or_pd +#define npyv_or_b8 _mm256_or_si256 +#define npyv_or_b16 _mm256_or_si256 +#define npyv_or_b32 _mm256_or_si256 +#define npyv_or_b64 _mm256_or_si256 // XOR #define npyv_xor_u8 _mm256_xor_si256 @@ -85,6 +93,10 @@ NPY_FINLINE __m256i npyv_shr_s64(__m256i a, int c) #define npyv_xor_s64 _mm256_xor_si256 #define npyv_xor_f32 _mm256_xor_ps #define npyv_xor_f64 _mm256_xor_pd +#define npyv_xor_b8 _mm256_xor_si256 +#define npyv_xor_b16 _mm256_xor_si256 +#define npyv_xor_b32 _mm256_xor_si256 +#define npyv_xor_b64 _mm256_xor_si256 // NOT #define npyv_not_u8(A) _mm256_xor_si256(A, _mm256_set1_epi32(-1)) @@ -97,6 +109,10 @@ NPY_FINLINE __m256i npyv_shr_s64(__m256i a, int c) #define npyv_not_s64 npyv_not_u8 #define npyv_not_f32(A) _mm256_xor_ps(A, _mm256_castsi256_ps(_mm256_set1_epi32(-1))) #define npyv_not_f64(A) _mm256_xor_pd(A, 
_mm256_castsi256_pd(_mm256_set1_epi32(-1))) +#define npyv_not_b8 npyv_not_u8 +#define npyv_not_b16 npyv_not_u8 +#define npyv_not_b32 npyv_not_u8 +#define npyv_not_b64 npyv_not_u8 /*************************** * Comparison @@ -197,4 +213,10 @@ NPY_FINLINE __m256i npyv_cmpge_u32(__m256i a, __m256i b) #define npyv_cmpge_f32(A, B) _mm256_castps_si256(_mm256_cmp_ps(A, B, _CMP_GE_OQ)) #define npyv_cmpge_f64(A, B) _mm256_castpd_si256(_mm256_cmp_pd(A, B, _CMP_GE_OQ)) +// check special cases +NPY_FINLINE npyv_b32 npyv_notnan_f32(npyv_f32 a) +{ return _mm256_castps_si256(_mm256_cmp_ps(a, a, _CMP_ORD_Q)); } +NPY_FINLINE npyv_b64 npyv_notnan_f64(npyv_f64 a) +{ return _mm256_castpd_si256(_mm256_cmp_pd(a, a, _CMP_ORD_Q)); } + #endif // _NPY_SIMD_AVX2_OPERATORS_H diff --git a/numpy/core/src/common/simd/avx512/avx512.h b/numpy/core/src/common/simd/avx512/avx512.h index 2de33765a97c..f38686834cfb 100644 --- a/numpy/core/src/common/simd/avx512/avx512.h +++ b/numpy/core/src/common/simd/avx512/avx512.h @@ -4,6 +4,7 @@ #define NPY_SIMD 512 #define NPY_SIMD_WIDTH 64 #define NPY_SIMD_F64 1 +#define NPY_SIMD_FMA3 1 // native support // Enough limit to allow us to use _mm512_i32gather_* and _mm512_i32scatter_* #define NPY_SIMD_MAXLOAD_STRIDE32 (0x7fffffff / 16) #define NPY_SIMD_MAXSTORE_STRIDE32 (0x7fffffff / 16) @@ -73,3 +74,4 @@ typedef struct { __m512d val[3]; } npyv_f64x3; #include "conversion.h" #include "arithmetic.h" #include "math.h" +#include "maskop.h" diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index 6ad299dd51da..1d71d7b49612 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -82,4 +82,12 @@ NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) { return (npy_uint8)a; } +// round to nearest integer (assuming even) +#define npyv_round_s32_f32 _mm512_cvtps_epi32 +NPY_FINLINE npyv_s32 npyv_round_s32_f64(npyv_f64 a, npyv_f64 b) +{ + __m256i lo = _mm512_cvtpd_epi32(a), hi = _mm512_cvtpd_epi32(b); + return npyv512_combine_si256(lo, hi); +} + #endif // _NPY_SIMD_AVX512_CVT_H diff --git a/numpy/core/src/common/simd/avx512/maskop.h b/numpy/core/src/common/simd/avx512/maskop.h new file mode 100644 index 000000000000..d1c188390a11 --- /dev/null +++ b/numpy/core/src/common/simd/avx512/maskop.h @@ -0,0 +1,54 @@ +#ifndef NPY_SIMD + #error "Not a standalone header, use simd/simd.h instead" +#endif + +#ifndef _NPY_SIMD_AVX512_MASKOP_H +#define _NPY_SIMD_AVX512_MASKOP_H + +/** + * Implements conditional addition and subtraction. + * e.g. npyv_ifadd_f32(m, a, b, c) -> m ? a + b : c + * e.g. npyv_ifsub_f32(m, a, b, c) -> m ? 
a - b : c + */ +#define NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(SFX, BSFX) \ + NPY_FINLINE npyv_##SFX npyv_ifadd_##SFX \ + (npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \ + { \ + npyv_##SFX add = npyv_add_##SFX(a, b); \ + return npyv_select_##SFX(m, add, c); \ + } \ + NPY_FINLINE npyv_##SFX npyv_ifsub_##SFX \ + (npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \ + { \ + npyv_##SFX sub = npyv_sub_##SFX(a, b); \ + return npyv_select_##SFX(m, sub, c); \ + } + +#define NPYV_IMPL_AVX512_MASK_ADDSUB(SFX, BSFX, ZSFX) \ + NPY_FINLINE npyv_##SFX npyv_ifadd_##SFX \ + (npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \ + { return _mm512_mask_add_##ZSFX(c, m, a, b); } \ + NPY_FINLINE npyv_##SFX npyv_ifsub_##SFX \ + (npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \ + { return _mm512_mask_sub_##ZSFX(c, m, a, b); } + +#ifdef NPY_HAVE_AVX512BW + NPYV_IMPL_AVX512_MASK_ADDSUB(u8, b8, epi8) + NPYV_IMPL_AVX512_MASK_ADDSUB(s8, b8, epi8) + NPYV_IMPL_AVX512_MASK_ADDSUB(u16, b16, epi16) + NPYV_IMPL_AVX512_MASK_ADDSUB(s16, b16, epi16) +#else + NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(u8, b8) + NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(s8, b8) + NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(u16, b16) + NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(s16, b16) +#endif + +NPYV_IMPL_AVX512_MASK_ADDSUB(u32, b32, epi32) +NPYV_IMPL_AVX512_MASK_ADDSUB(s32, b32, epi32) +NPYV_IMPL_AVX512_MASK_ADDSUB(u64, b64, epi64) +NPYV_IMPL_AVX512_MASK_ADDSUB(s64, b64, epi64) +NPYV_IMPL_AVX512_MASK_ADDSUB(f32, b32, ps) +NPYV_IMPL_AVX512_MASK_ADDSUB(f64, b64, pd) + +#endif // _NPY_SIMD_AVX512_MASKOP_H diff --git a/numpy/core/src/common/simd/avx512/operators.h b/numpy/core/src/common/simd/avx512/operators.h index f76ea5e2d6c5..5f1771770c5b 100644 --- a/numpy/core/src/common/simd/avx512/operators.h +++ b/numpy/core/src/common/simd/avx512/operators.h @@ -90,6 +90,20 @@ NPYV_IMPL_AVX512_FROM_SI512_PS_2ARG(npyv_and_f32, _mm512_and_si512) NPYV_IMPL_AVX512_FROM_SI512_PD_2ARG(npyv_and_f64, _mm512_and_si512) #endif +#ifdef NPY_HAVE_AVX512BW_MASK + #define npyv_and_b8 _kand_mask64 + #define npyv_and_b16 _kand_mask32 +#elif defined(NPY_HAVE_AVX512BW) + NPY_FINLINE npyv_b8 npyv_and_b8(npyv_b8 a, npyv_b8 b) + { return a & b; } + NPY_FINLINE npyv_b16 npyv_and_b16(npyv_b16 a, npyv_b16 b) + { return a & b; } +#else + #define npyv_and_b8 _mm512_and_si512 + #define npyv_and_b16 _mm512_and_si512 +#endif +#define npyv_and_b32 _mm512_kand +#define npyv_and_b64 _mm512_kand // OR #define npyv_or_u8 _mm512_or_si512 @@ -107,6 +121,20 @@ NPYV_IMPL_AVX512_FROM_SI512_PS_2ARG(npyv_or_f32, _mm512_or_si512) NPYV_IMPL_AVX512_FROM_SI512_PD_2ARG(npyv_or_f64, _mm512_or_si512) #endif +#ifdef NPY_HAVE_AVX512BW_MASK + #define npyv_or_b8 _kor_mask64 + #define npyv_or_b16 _kor_mask32 +#elif defined(NPY_HAVE_AVX512BW) + NPY_FINLINE npyv_b8 npyv_or_b8(npyv_b8 a, npyv_b8 b) + { return a | b; } + NPY_FINLINE npyv_b16 npyv_or_b16(npyv_b16 a, npyv_b16 b) + { return a | b; } +#else + #define npyv_or_b8 _mm512_or_si512 + #define npyv_or_b16 _mm512_or_si512 +#endif +#define npyv_or_b32 _mm512_kor +#define npyv_or_b64 _mm512_kor // XOR #define npyv_xor_u8 _mm512_xor_si512 @@ -124,6 +152,20 @@ NPYV_IMPL_AVX512_FROM_SI512_PS_2ARG(npyv_xor_f32, _mm512_xor_si512) NPYV_IMPL_AVX512_FROM_SI512_PD_2ARG(npyv_xor_f64, _mm512_xor_si512) #endif +#ifdef NPY_HAVE_AVX512BW_MASK + #define npyv_xor_b8 _kxor_mask64 + #define npyv_xor_b16 _kxor_mask32 +#elif defined(NPY_HAVE_AVX512BW) + NPY_FINLINE npyv_b8 npyv_xor_b8(npyv_b8 a, npyv_b8 b) + { return a ^ b; } + NPY_FINLINE npyv_b16 
npyv_xor_b16(npyv_b16 a, npyv_b16 b) + { return a ^ b; } +#else + #define npyv_xor_b8 _mm512_xor_si512 + #define npyv_xor_b16 _mm512_xor_si512 +#endif +#define npyv_xor_b32 _mm512_kxor +#define npyv_xor_b64 _mm512_kxor // NOT #define npyv_not_u8(A) _mm512_xor_si512(A, _mm512_set1_epi32(-1)) @@ -141,6 +183,21 @@ #define npyv_not_f32(A) _mm512_castsi512_ps(npyv_not_u32(_mm512_castps_si512(A))) #define npyv_not_f64(A) _mm512_castsi512_pd(npyv_not_u64(_mm512_castpd_si512(A))) #endif +#ifdef NPY_HAVE_AVX512BW_MASK + #define npyv_not_b8 _knot_mask64 + #define npyv_not_b16 _knot_mask32 +#elif defined(NPY_HAVE_AVX512BW) + NPY_FINLINE npyv_b8 npyv_not_b8(npyv_b8 a) + { return ~a; } + NPY_FINLINE npyv_b16 npyv_not_b16(npyv_b16 a) + { return ~a; } +#else + #define npyv_not_b8 npyv_not_u8 + #define npyv_not_b16 npyv_not_u8 +#endif +#define npyv_not_b32 _mm512_knot +#define npyv_not_b64 _mm512_knot + /*************************** * Comparison @@ -256,4 +313,10 @@ #define npyv_cmpge_f32(A, B) _mm512_cmp_ps_mask(A, B, _CMP_GE_OQ) #define npyv_cmpge_f64(A, B) _mm512_cmp_pd_mask(A, B, _CMP_GE_OQ) +// check special cases +NPY_FINLINE npyv_b32 npyv_notnan_f32(npyv_f32 a) +{ return _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q); } +NPY_FINLINE npyv_b64 npyv_notnan_f64(npyv_f64 a) +{ return _mm512_cmp_pd_mask(a, a, _CMP_ORD_Q); } + #endif // _NPY_SIMD_AVX512_OPERATORS_H diff --git a/numpy/core/src/common/simd/emulate_maskop.h b/numpy/core/src/common/simd/emulate_maskop.h new file mode 100644 index 000000000000..7e7446bc56ef --- /dev/null +++ b/numpy/core/src/common/simd/emulate_maskop.h @@ -0,0 +1,44 @@ +/** + * This header is used internaly by all current supported SIMD extention, + * execpt for AVX512. + */ +#ifndef NPY_SIMD + #error "Not a standalone header, use simd/simd.h instead" +#endif + +#ifndef _NPY_SIMD_EMULATE_MASKOP_H +#define _NPY_SIMD_EMULATE_MASKOP_H + +/** + * Implements conditional addition and subtraction. + * e.g. npyv_ifadd_f32(mask, a, b, c) -> mask ? a + b : c + * e.g. npyv_ifsub_f32(mask, a, b, c) -> mask ? 
a - b : c + */ +#define NPYV_IMPL_EMULATE_MASK_ADDSUB(SFX, BSFX) \ + NPY_FINLINE npyv_##SFX npyv_ifadd_##SFX \ + (npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \ + { \ + npyv_##SFX add = npyv_add_##SFX(a, b); \ + return npyv_select_##SFX(m, add, c); \ + } \ + NPY_FINLINE npyv_##SFX npyv_ifsub_##SFX \ + (npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \ + { \ + npyv_##SFX sub = npyv_sub_##SFX(a, b); \ + return npyv_select_##SFX(m, sub, c); \ + } + +NPYV_IMPL_EMULATE_MASK_ADDSUB(u8, b8) +NPYV_IMPL_EMULATE_MASK_ADDSUB(s8, b8) +NPYV_IMPL_EMULATE_MASK_ADDSUB(u16, b16) +NPYV_IMPL_EMULATE_MASK_ADDSUB(s16, b16) +NPYV_IMPL_EMULATE_MASK_ADDSUB(u32, b32) +NPYV_IMPL_EMULATE_MASK_ADDSUB(s32, b32) +NPYV_IMPL_EMULATE_MASK_ADDSUB(u64, b64) +NPYV_IMPL_EMULATE_MASK_ADDSUB(s64, b64) +NPYV_IMPL_EMULATE_MASK_ADDSUB(f32, b32) +#if NPY_SIMD_F64 + NPYV_IMPL_EMULATE_MASK_ADDSUB(f64, b64) +#endif + +#endif // _NPY_SIMD_EMULATE_MASKOP_H diff --git a/numpy/core/src/common/simd/neon/conversion.h b/numpy/core/src/common/simd/neon/conversion.h index f9840b1cb710..f92910b666dc 100644 --- a/numpy/core/src/common/simd/neon/conversion.h +++ b/numpy/core/src/common/simd/neon/conversion.h @@ -71,4 +71,24 @@ NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) return vgetq_lane_u64(bit, 0) | ((int)vgetq_lane_u64(bit, 1) << 1); } +// round to nearest integer +#if NPY_SIMD_F64 + #define npyv_round_s32_f32 vcvtnq_s32_f32 + NPY_FINLINE npyv_s32 npyv_round_s32_f64(npyv_f64 a, npyv_f64 b) + { + npyv_s64 lo = vcvtnq_s64_f64(a), hi = vcvtnq_s64_f64(b); + return vcombine_s32(vmovn_s64(lo), vmovn_s64(hi)); + } +#else + NPY_FINLINE npyv_s32 npyv_round_s32_f32(npyv_f32 a) + { + // halves will be rounded up. it's very costly + // to obey IEEE standard on arm7. tests should pass +-1 difference + const npyv_u32 sign = vdupq_n_u32(0x80000000); + const npyv_f32 half = vdupq_n_f32(0.5f); + npyv_f32 sign_half = vbslq_f32(sign, a, half); + return vcvtq_s32_f32(vaddq_f32(a, sign_half)); + } +#endif + #endif // _NPY_SIMD_NEON_CVT_H diff --git a/numpy/core/src/common/simd/neon/neon.h b/numpy/core/src/common/simd/neon/neon.h index c8ddc92add01..e6f6a732478a 100644 --- a/numpy/core/src/common/simd/neon/neon.h +++ b/numpy/core/src/common/simd/neon/neon.h @@ -10,6 +10,11 @@ #else #define NPY_SIMD_F64 0 #endif +#ifdef NPY_HAVE_NEON_VFPV4 + #define NPY_SIMD_FMA3 1 // native support +#else + #define NPY_SIMD_FMA3 0 // HW emulated +#endif typedef uint8x16_t npyv_u8; typedef int8x16_t npyv_s8; diff --git a/numpy/core/src/common/simd/neon/operators.h b/numpy/core/src/common/simd/neon/operators.h index c1ad4ba12a6f..280c5e0da42a 100644 --- a/numpy/core/src/common/simd/neon/operators.h +++ b/numpy/core/src/common/simd/neon/operators.h @@ -58,6 +58,10 @@ vreinterpretq_f32_u8(vandq_u8(vreinterpretq_u8_f32(A), vreinterpretq_u8_f32(B))) #define npyv_and_f64(A, B) \ vreinterpretq_f64_u8(vandq_u8(vreinterpretq_u8_f64(A), vreinterpretq_u8_f64(B))) +#define npyv_and_b8 vandq_u8 +#define npyv_and_b16 vandq_u16 +#define npyv_and_b32 vandq_u32 +#define npyv_and_b64 vandq_u64 // OR #define npyv_or_u8 vorrq_u8 @@ -72,6 +76,11 @@ vreinterpretq_f32_u8(vorrq_u8(vreinterpretq_u8_f32(A), vreinterpretq_u8_f32(B))) #define npyv_or_f64(A, B) \ vreinterpretq_f64_u8(vorrq_u8(vreinterpretq_u8_f64(A), vreinterpretq_u8_f64(B))) +#define npyv_or_b8 vorrq_u8 +#define npyv_or_b16 vorrq_u16 +#define npyv_or_b32 vorrq_u32 +#define npyv_or_b64 vorrq_u64 + // XOR #define npyv_xor_u8 veorq_u8 @@ -86,6 +95,10 @@ vreinterpretq_f32_u8(veorq_u8(vreinterpretq_u8_f32(A), 
vreinterpretq_u8_f32(B))) #define npyv_xor_f64(A, B) \ vreinterpretq_f64_u8(veorq_u8(vreinterpretq_u8_f64(A), vreinterpretq_u8_f64(B))) +#define npyv_xor_b8 veorq_u8 +#define npyv_xor_b16 veorq_u16 +#define npyv_xor_b32 veorq_u32 +#define npyv_xor_b64 veorq_u64 // NOT #define npyv_not_u8 vmvnq_u8 @@ -98,6 +111,10 @@ #define npyv_not_s64(A) vreinterpretq_s64_u8(vmvnq_u8(vreinterpretq_u8_s64(A))) #define npyv_not_f32(A) vreinterpretq_f32_u8(vmvnq_u8(vreinterpretq_u8_f32(A))) #define npyv_not_f64(A) vreinterpretq_f64_u8(vmvnq_u8(vreinterpretq_u8_f64(A))) +#define npyv_not_b8 vmvnq_u8 +#define npyv_not_b16 vmvnq_u16 +#define npyv_not_b32 vmvnq_u32 +#define npyv_not_b64 npyv_not_u64 /*************************** * Comparison @@ -215,4 +232,12 @@ #define npyv_cmple_f32(A, B) npyv_cmpge_f32(B, A) #define npyv_cmple_f64(A, B) npyv_cmpge_f64(B, A) +// check special cases +NPY_FINLINE npyv_b32 npyv_notnan_f32(npyv_f32 a) +{ return vceqq_f32(a, a); } +#if NPY_SIMD_F64 + NPY_FINLINE npyv_b64 npyv_notnan_f64(npyv_f64 a) + { return vceqq_f64(a, a); } +#endif + #endif // _NPY_SIMD_NEON_OPERATORS_H diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 8804223c9fef..d6c14228d7a4 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -48,7 +48,14 @@ typedef double npyv_lanetype_f64; #define NPY_SIMD 0 #define NPY_SIMD_WIDTH 0 #define NPY_SIMD_F64 0 + #define NPY_SIMD_FMA3 0 #endif + +// enable emulated mask operations for all SIMD extension except for AVX512 +#if !defined(NPY_HAVE_AVX512F) && NPY_SIMD && NPY_SIMD < 512 + #include "emulate_maskop.h" +#endif + /** * Some SIMD extensions currently(AVX2, AVX512F) require (de facto) * a maximum number of strides sizes when dealing with non-contiguous memory access. 
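
For reference, the emulated mask operations pulled in above (emulate_maskop.h, included for every SIMD extension except AVX512, which can rely on its mask registers instead) amount to computing the full add/sub and then doing a lane-wise select. Below is a minimal scalar sketch of that semantics, assuming a single hypothetical 32-bit lane rather than the real NPYV vector types; ifadd_lane/ifsub_lane are illustrative names, not NPYV intrinsics.

#include <stdio.h>
#include <stdint.h>

/* One-lane model of npyv_ifadd / npyv_ifsub: mask ? a OP b : c.
 * The real emulation computes npyv_add/npyv_sub on whole vectors first,
 * then picks per lane with npyv_select. */
static int32_t ifadd_lane(int mask, int32_t a, int32_t b, int32_t c)
{ return mask ? a + b : c; }

static int32_t ifsub_lane(int mask, int32_t a, int32_t b, int32_t c)
{ return mask ? a - b : c; }

int main(void)
{
    /* mask set: result of the operation; mask clear: fallback value c */
    printf("%d %d\n", (int)ifadd_lane(1, 2, 3, 7), (int)ifadd_lane(0, 2, 3, 7)); /* 5 7 */
    printf("%d %d\n", (int)ifsub_lane(1, 2, 3, 7), (int)ifsub_lane(0, 2, 3, 7)); /* -1 7 */
    return 0;
}

This per-lane behaviour is exactly what test_mask_conditional in the test_simd.py changes further below verifies, once with an all-true mask and once with an all-false mask.
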
diff --git a/numpy/core/src/common/simd/sse/conversion.h b/numpy/core/src/common/simd/sse/conversion.h index ab4beea96db6..d690ec313f0f 100644 --- a/numpy/core/src/common/simd/sse/conversion.h +++ b/numpy/core/src/common/simd/sse/conversion.h @@ -42,4 +42,12 @@ NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) { return (npy_uint8)_mm_movemask_pd(_mm_castsi128_pd(a)); } +// round to nearest integer (assuming even) +#define npyv_round_s32_f32 _mm_cvtps_epi32 +NPY_FINLINE npyv_s32 npyv_round_s32_f64(npyv_f64 a, npyv_f64 b) +{ + __m128i lo = _mm_cvtpd_epi32(a), hi = _mm_cvtpd_epi32(b); + return _mm_unpacklo_epi64(lo, hi); +} + #endif // _NPY_SIMD_SSE_CVT_H diff --git a/numpy/core/src/common/simd/sse/operators.h b/numpy/core/src/common/simd/sse/operators.h index 6e32ca4fd9b3..51c84fb4e9d9 100644 --- a/numpy/core/src/common/simd/sse/operators.h +++ b/numpy/core/src/common/simd/sse/operators.h @@ -62,6 +62,10 @@ NPY_FINLINE __m128i npyv_shr_s64(__m128i a, int c) #define npyv_and_s64 _mm_and_si128 #define npyv_and_f32 _mm_and_ps #define npyv_and_f64 _mm_and_pd +#define npyv_and_b8 _mm_and_si128 +#define npyv_and_b16 _mm_and_si128 +#define npyv_and_b32 _mm_and_si128 +#define npyv_and_b64 _mm_and_si128 // OR #define npyv_or_u8 _mm_or_si128 @@ -74,6 +78,10 @@ NPY_FINLINE __m128i npyv_shr_s64(__m128i a, int c) #define npyv_or_s64 _mm_or_si128 #define npyv_or_f32 _mm_or_ps #define npyv_or_f64 _mm_or_pd +#define npyv_or_b8 _mm_or_si128 +#define npyv_or_b16 _mm_or_si128 +#define npyv_or_b32 _mm_or_si128 +#define npyv_or_b64 _mm_or_si128 // XOR #define npyv_xor_u8 _mm_xor_si128 @@ -86,6 +94,10 @@ NPY_FINLINE __m128i npyv_shr_s64(__m128i a, int c) #define npyv_xor_s64 _mm_xor_si128 #define npyv_xor_f32 _mm_xor_ps #define npyv_xor_f64 _mm_xor_pd +#define npyv_xor_b8 _mm_xor_si128 +#define npyv_xor_b16 _mm_xor_si128 +#define npyv_xor_b32 _mm_xor_si128 +#define npyv_xor_b64 _mm_xor_si128 // NOT #define npyv_not_u8(A) _mm_xor_si128(A, _mm_set1_epi32(-1)) @@ -98,6 +110,10 @@ NPY_FINLINE __m128i npyv_shr_s64(__m128i a, int c) #define npyv_not_s64 npyv_not_u8 #define npyv_not_f32(A) _mm_xor_ps(A, _mm_castsi128_ps(_mm_set1_epi32(-1))) #define npyv_not_f64(A) _mm_xor_pd(A, _mm_castsi128_pd(_mm_set1_epi32(-1))) +#define npyv_not_b8 npyv_not_u8 +#define npyv_not_b16 npyv_not_u8 +#define npyv_not_b32 npyv_not_u8 +#define npyv_not_b64 npyv_not_u8 /*************************** * Comparison @@ -255,4 +271,10 @@ NPY_FINLINE __m128i npyv_shr_s64(__m128i a, int c) #define npyv_cmpge_f32(a, b) _mm_castps_si128(_mm_cmpge_ps(a, b)) #define npyv_cmpge_f64(a, b) _mm_castpd_si128(_mm_cmpge_pd(a, b)) +// check special cases +NPY_FINLINE npyv_b32 npyv_notnan_f32(npyv_f32 a) +{ return _mm_castps_si128(_mm_cmpord_ps(a, a)); } +NPY_FINLINE npyv_b64 npyv_notnan_f64(npyv_f64 a) +{ return _mm_castpd_si128(_mm_cmpord_pd(a, a)); } + #endif // _NPY_SIMD_SSE_OPERATORS_H diff --git a/numpy/core/src/common/simd/sse/sse.h b/numpy/core/src/common/simd/sse/sse.h index 132d3d347a5d..dc0b62f7394e 100644 --- a/numpy/core/src/common/simd/sse/sse.h +++ b/numpy/core/src/common/simd/sse/sse.h @@ -5,7 +5,11 @@ #define NPY_SIMD 128 #define NPY_SIMD_WIDTH 16 #define NPY_SIMD_F64 1 - +#if defined(NPY_HAVE_FMA3) || defined(NPY_HAVE_FMA4) + #define NPY_SIMD_FMA3 1 // native support +#else + #define NPY_SIMD_FMA3 0 // fast emulated +#endif typedef __m128i npyv_u8; typedef __m128i npyv_s8; typedef __m128i npyv_u16; diff --git a/numpy/core/src/common/simd/vsx/conversion.h 
b/numpy/core/src/common/simd/vsx/conversion.h index 5803e1cdd7a0..72fe10e69111 100644 --- a/numpy/core/src/common/simd/vsx/conversion.h +++ b/numpy/core/src/common/simd/vsx/conversion.h @@ -51,4 +51,54 @@ NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) return vec_extract(bit, 0) | (int)vec_extract(bit, 1) << 1; } +// truncate compatible with all compilers(internal use for now) +NPY_FINLINE npyv_s32 npyv__trunc_s32_f32(npyv_f32 a) +{ +#ifdef __IBMC__ + return vec_cts(a, 0); +#elif defined(__clang__) + /** + * old versions of CLANG doesn't support %x in the inline asm template + * which fixes register number when using any of the register constraints wa, wd, wf. + * therefore, we count on built-in functions. + */ + return __builtin_convertvector(a, npyv_s32); +#else // gcc + npyv_s32 ret; + __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (ret) : "wa" (a)); + return ret; +#endif +} +NPY_FINLINE npyv_s32 npyv__trunc_s32_f64(npyv_f64 a, npyv_f64 b) +{ +#ifdef __IBMC__ + const npyv_u8 seq_even = npyv_set_u8(0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27); + // unfortunately, XLC missing asm register vsx fixer + // hopefully, xlc can optimize around big-endian compatibility + npyv_s32 lo_even = vec_cts(a, 0); + npyv_s32 hi_even = vec_cts(b, 0); + return vec_perm(lo_even, hi_even, seq_even); +#else + const npyv_u8 seq_odd = npyv_set_u8(4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31); + #ifdef __clang__ + // __builtin_convertvector doesn't support this conversion on wide range of versions + // fortunately, almost all versions have direct builtin of 'xvcvdpsxws' + npyv_s32 lo_odd = __builtin_vsx_xvcvdpsxws(a); + npyv_s32 hi_odd = __builtin_vsx_xvcvdpsxws(b); + #else // gcc + npyv_s32 lo_odd, hi_odd; + __asm__ ("xvcvdpsxws %x0,%x1" : "=wa" (lo_odd) : "wa" (a)); + __asm__ ("xvcvdpsxws %x0,%x1" : "=wa" (hi_odd) : "wa" (b)); + #endif + return vec_perm(lo_odd, hi_odd, seq_odd); +#endif +} + +// round to nearest integer (assuming even) +NPY_FINLINE npyv_s32 npyv_round_s32_f32(npyv_f32 a) +{ return npyv__trunc_s32_f32(vec_rint(a)); } + +NPY_FINLINE npyv_s32 npyv_round_s32_f64(npyv_f64 a, npyv_f64 b) +{ return npyv__trunc_s32_f64(vec_rint(a), vec_rint(b)); } + #endif // _NPY_SIMD_VSX_CVT_H diff --git a/numpy/core/src/common/simd/vsx/operators.h b/numpy/core/src/common/simd/vsx/operators.h index ca020d9e0e8c..230610129eb9 100644 --- a/numpy/core/src/common/simd/vsx/operators.h +++ b/numpy/core/src/common/simd/vsx/operators.h @@ -56,6 +56,10 @@ #define npyv_and_s64 vec_and #define npyv_and_f32 vec_and #define npyv_and_f64 vec_and +#define npyv_and_b8 vec_and +#define npyv_and_b16 vec_and +#define npyv_and_b32 vec_and +#define npyv_and_b64 vec_and // OR #define npyv_or_u8 vec_or @@ -68,6 +72,10 @@ #define npyv_or_s64 vec_or #define npyv_or_f32 vec_or #define npyv_or_f64 vec_or +#define npyv_or_b8 vec_or +#define npyv_or_b16 vec_or +#define npyv_or_b32 vec_or +#define npyv_or_b64 vec_or // XOR #define npyv_xor_u8 vec_xor @@ -80,6 +88,10 @@ #define npyv_xor_s64 vec_xor #define npyv_xor_f32 vec_xor #define npyv_xor_f64 vec_xor +#define npyv_xor_b8 vec_xor +#define npyv_xor_b16 vec_xor +#define npyv_xor_b32 vec_xor +#define npyv_xor_b64 vec_xor // NOT // note: we implement npyv_not_b*(boolen types) for internal use*/ @@ -213,4 +225,10 @@ NPY_FINLINE npyv_f64 npyv_not_f64(npyv_f64 a) #define npyv_cmple_f32(A, B) npyv_cmpge_f32(B, A) #define npyv_cmple_f64(A, B) npyv_cmpge_f64(B, A) +// check special cases +NPY_FINLINE npyv_b32 npyv_notnan_f32(npyv_f32 a) +{ return vec_cmpeq(a, a); } +NPY_FINLINE 
npyv_b64 npyv_notnan_f64(npyv_f64 a) +{ return vec_cmpeq(a, a); } + #endif // _NPY_SIMD_VSX_OPERATORS_H diff --git a/numpy/core/src/common/simd/vsx/vsx.h b/numpy/core/src/common/simd/vsx/vsx.h index 27dde98e7c8a..769f5a08f4cc 100644 --- a/numpy/core/src/common/simd/vsx/vsx.h +++ b/numpy/core/src/common/simd/vsx/vsx.h @@ -5,6 +5,7 @@ #define NPY_SIMD 128 #define NPY_SIMD_WIDTH 16 #define NPY_SIMD_F64 1 +#define NPY_SIMD_FMA3 1 // native support typedef __vector unsigned char npyv_u8; typedef __vector signed char npyv_s8; diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 196003cdd3e3..2f378667db72 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -1,13 +1,16 @@ # NOTE: Please avoid the use of numpy.testing since NPYV intrinsics # may be involved in their functionality. -import pytest, math +import pytest, math, re from numpy.core._simd import targets +from numpy.core._multiarray_umath import __cpu_baseline__ class _Test_Utility: - # submodule of the desired SIMD extention, e.g. targets["AVX512F"] + # submodule of the desired SIMD extension, e.g. targets["AVX512F"] npyv = None # the current data type suffix e.g. 's8' sfx = None + # target name can be 'baseline' or one or more of CPU features + target_name = None def __getattr__(self, attr): """ @@ -92,6 +95,14 @@ def _nan(self): v = self.npyv.setall_u32(0x7fc00000) return self.npyv.reinterpret_f32_u32(v)[0] + def _cpu_features(self): + target = self.target_name + if target == "baseline": + target = __cpu_baseline__ + else: + target = target.split('__') # multi-target separator + return ' '.join(target) + class _SIMD_BOOL(_Test_Utility): """ To test all boolean vector types at once @@ -110,6 +121,32 @@ def _load_b(self, data): cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}") return cvt(load(data)) + def test_operators_logical(self): + """ + Logical operations for boolean types. + Test intrinsics: + npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX + """ + data_a = self._data() + data_b = self._data(reverse=True) + vdata_a = self._load_b(data_a) + vdata_b = self._load_b(data_b) + + data_and = [a & b for a, b in zip(data_a, data_b)] + vand = getattr(self, "and")(vdata_a, vdata_b) + assert vand == data_and + + data_or = [a | b for a, b in zip(data_a, data_b)] + vor = getattr(self, "or")(vdata_a, vdata_b) + assert vor == data_or + + data_xor = [a ^ b for a, b in zip(data_a, data_b)] + vxor = getattr(self, "xor")(vdata_a, vdata_b) + assert vxor == data_xor + + vnot = getattr(self, "not")(vdata_a) + assert vnot == data_b + def test_tobits(self): data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)]) for data in (self._data(), self._data(reverse=True)): @@ -164,6 +201,46 @@ def test_arithmetic_subadd_saturated(self): subs = self.subs(vdata_a, vdata_b) assert subs == data_subs +class _SIMD_FP32(_Test_Utility): + """ + To only test single precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. + Test intrinics: + npyv_round_s32_##SFX + """ + features = self._cpu_features() + if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features): + # very costly to emulate nearest even on Armv7 + # instead we round halves to up. e.g. 
0.5 -> 1, -0.5 -> -1 + _round = lambda v: int(v + (0.5 if v >= 0 else -0.5)) + else: + _round = round + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + data_round = [_round(x) for x in vdata_a] + vround = self.round_s32(vdata_a) + assert vround == data_round + +class _SIMD_FP64(_Test_Utility): + """ + To only test double precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. + Test intrinics: + npyv_round_s32_##SFX + """ + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + vdata_b = self.mul(vdata_a, self.setall(-1.5)) + data_round = [round(x) for x in list(vdata_a) + list(vdata_b)] + vround = self.round_s32(vdata_a, vdata_b) + assert vround == data_round + class _SIMD_FP(_Test_Utility): """ To test all float vector types at once @@ -247,6 +324,14 @@ def test_reciprocal(self): recip = self.recip(vdata) assert recip == data_recip + def test_special_cases(self): + """ + Compare Not NaN. Test intrinics: + npyv_notnan_##SFX + """ + nnan = self.notnan(self.setall(self._nan())) + assert nnan == [0]*self.nlanes + class _SIMD_ALL(_Test_Utility): """ To test all vector types at once @@ -440,7 +525,7 @@ def test_misc(self): vsetf = self.setf(10, *data_a) assert vsetf == data_a - # We're testing the sainty of _simd's type-vector, + # We're testing the sanity of _simd's type-vector, # reinterpret* intrinsics itself are tested via compiler # during the build of _simd module sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64", "f32"] @@ -632,6 +717,29 @@ def test_arithmetic_reduce_sum(self): vsum = self.sum(vdata) assert vsum == data_sum + def test_mask_conditional(self): + """ + Conditional addition and subtraction for all supported data types. 
+ Test intrinics: + npyv_ifadd_##SFX, npyv_ifsub_##SFX + """ + vdata_a = self.load(self._data()) + vdata_b = self.load(self._data(reverse=True)) + true_mask = self.cmpeq(self.zero(), self.zero()) + false_mask = self.cmpneq(self.zero(), self.zero()) + + data_sub = self.sub(vdata_b, vdata_a) + ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b) + assert ifsub == data_sub + ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b) + assert ifsub == vdata_b + + data_add = self.add(vdata_b, vdata_a) + ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b) + assert ifadd == data_add + ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b) + assert ifadd == vdata_b + bool_sfx = ("b8", "b16", "b32", "b64") int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64") fp_sfx = ("f32", "f64") @@ -640,6 +748,8 @@ def test_arithmetic_reduce_sum(self): bool_sfx: _SIMD_BOOL, int_sfx : _SIMD_INT, fp_sfx : _SIMD_FP, + ("f32",): _SIMD_FP32, + ("f64",): _SIMD_FP64, all_sfx : _SIMD_ALL } for target_name, npyv in targets.items(): @@ -664,7 +774,7 @@ def test_arithmetic_reduce_sum(self): for sfx in sfxes: skip_m = skip_sfx.get(sfx, skip) inhr = (cls,) - attr = dict(npyv=targets[target_name], sfx=sfx) + attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name) tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr) if skip_m: pytest.mark.skip(reason=skip_m)(tcls) From e5428f24f0d24080ac01e26b3b99672f9e1cc93e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 30 Nov 2020 17:14:13 +0100 Subject: [PATCH 0264/1270] TST: Allow mypy output types to be specified via aliases --- numpy/typing/tests/test_typing.py | 82 +++++++++++++++++++++++++++++-- 1 file changed, 79 insertions(+), 3 deletions(-) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 90de4fd6dc41..361688c5d194 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -3,9 +3,12 @@ import os import re from collections import defaultdict -from typing import Optional +from typing import Optional, IO, Dict, List import pytest +import numpy as np +from numpy.typing.mypy_plugin import _PRECISION_DICT + try: from mypy import api except ImportError: @@ -123,6 +126,79 @@ def _test_fail(path: str, error: str, expected_error: Optional[str], lineno: int raise AssertionError(_FAIL_MSG2.format(lineno, expected_error, error)) +def _construct_format_dict(): + dct = {k.split(".")[-1]: v.replace("numpy", "numpy.typing") for + k, v in _PRECISION_DICT.items()} + + return { + "uint8": "numpy.unsignedinteger[numpy.typing._8Bit]", + "uint16": "numpy.unsignedinteger[numpy.typing._16Bit]", + "uint32": "numpy.unsignedinteger[numpy.typing._32Bit]", + "uint64": "numpy.unsignedinteger[numpy.typing._64Bit]", + "int8": "numpy.signedinteger[numpy.typing._8Bit]", + "int16": "numpy.signedinteger[numpy.typing._16Bit]", + "int32": "numpy.signedinteger[numpy.typing._32Bit]", + "int64": "numpy.signedinteger[numpy.typing._64Bit]", + "float16": "numpy.floating[numpy.typing._16Bit]", + "float32": "numpy.floating[numpy.typing._32Bit]", + "float64": "numpy.floating[numpy.typing._64Bit]", + "complex64": "numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit]", + "complex128": "numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit]", + + "ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]", + "ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]", + "uintc": f"numpy.unsignedinteger[{dct['_NBitIntC']}]", + "uintp": 
f"numpy.unsignedinteger[{dct['_NBitIntP']}]", + "uint": f"numpy.unsignedinteger[{dct['_NBitInt']}]", + "ulonglong": f"numpy.unsignedinteger[{dct['_NBitLongLong']}]", + "byte": f"numpy.signedinteger[{dct['_NBitByte']}]", + "short": f"numpy.signedinteger[{dct['_NBitShort']}]", + "intc": f"numpy.signedinteger[{dct['_NBitIntC']}]", + "intp": f"numpy.signedinteger[{dct['_NBitIntP']}]", + "int_": f"numpy.signedinteger[{dct['_NBitInt']}]", + "longlong": f"numpy.signedinteger[{dct['_NBitLongLong']}]", + + "half": f"numpy.floating[{dct['_NBitHalf']}]", + "single": f"numpy.floating[{dct['_NBitSingle']}]", + "double": f"numpy.floating[{dct['_NBitDouble']}]", + "longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]", + "csingle": f"numpy.complexfloating[{dct['_NBitSingle']}, {dct['_NBitSingle']}]", + "cdouble": f"numpy.complexfloating[{dct['_NBitDouble']}, {dct['_NBitDouble']}]", + "clongdouble": f"numpy.complexfloating[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]", + + # numpy.typing + "_NBitInt": dct['_NBitInt'], + } + + +#: A dictionary with all supported format keys (as keys) +#: and matching values +FORMAT_DICT: Dict[str, str] = _construct_format_dict() + + +def _parse_reveals(file: IO[str]) -> List[str]: + """Extract and parse all ``" # E: "`` comments from the passed file-like object. + + All format keys will be substituted for their respective value from `FORMAT_DICT`, + *e.g.* ``"{float64}"`` becomes ``"numpy.floating[numpy.typing._64Bit]"``. + """ + string = file.read().replace("*", "") + + # Grab all `# E:`-based comments + comments_array = np.char.partition(string.split("\n"), sep=" # E: ")[:, 2] + comments = "/n".join(comments_array) + + # Only search for the `{*}` pattern within comments, + # otherwise there is the risk of accidently grabbing dictionaries and sets + key_set = set(re.findall(r"\{(.*?)\}", comments)) + kwargs = { + k: FORMAT_DICT.get(k, f"") for k in key_set + } + fmt_str = comments.format(**kwargs) + + return fmt_str.split("/n") + + @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR)) @@ -138,7 +214,7 @@ def test_reveal(path): ]) with open(path) as fin: - lines = fin.read().replace('*', '').split("\n") + lines = _parse_reveals(fin) stdout_list = stdout.replace('*', '').split("\n") for error_line in stdout_list: @@ -155,7 +231,7 @@ def test_reveal(path): lineno = int(match.group('lineno')) - 1 assert "Revealed type is" in error_line - marker = lines[lineno].split("# E:")[-1].strip() + marker = lines[lineno] _test_reveal(path, marker, error_line, 1 + lineno) From 47246d2f3867d3d8999d540e2b14dce795c87f45 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 30 Nov 2020 17:14:49 +0100 Subject: [PATCH 0265/1270] TST: Use type-aliases for describing `np.number` subclasses --- numpy/typing/tests/data/reveal/arithmetic.py | 386 +++++++++--------- numpy/typing/tests/data/reveal/bitwise_ops.py | 136 +++--- numpy/typing/tests/data/reveal/dtype.py | 22 +- numpy/typing/tests/data/reveal/fromnumeric.py | 42 +- numpy/typing/tests/data/reveal/mod.py | 176 ++++---- .../tests/data/reveal/nbit_base_example.py | 8 +- .../typing/tests/data/reveal/ndarray_misc.py | 20 +- numpy/typing/tests/data/reveal/scalars.py | 76 ++-- 8 files changed, 432 insertions(+), 434 deletions(-) diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.py index 4d07e8dac069..8574df9365ee 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.py +++ 
b/numpy/typing/tests/data/reveal/arithmetic.py @@ -25,36 +25,36 @@ # unary ops -reveal_type(-c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(-c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(-f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(-f4) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(-i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(-i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(-u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(-u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] +reveal_type(-c16) # E: {complex128} +reveal_type(-c8) # E: {complex64} +reveal_type(-f8) # E: {float64} +reveal_type(-f4) # E: {float32} +reveal_type(-i8) # E: {int64} +reveal_type(-i4) # E: {int32} +reveal_type(-u8) # E: {uint64} +reveal_type(-u4) # E: {uint32} reveal_type(-td) # E: numpy.timedelta64 reveal_type(-AR) # E: Any -reveal_type(+c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(+c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(+f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(+f4) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(+i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(+i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(+u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(+u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] +reveal_type(+c16) # E: {complex128} +reveal_type(+c8) # E: {complex64} +reveal_type(+f8) # E: {float64} +reveal_type(+f4) # E: {float32} +reveal_type(+i8) # E: {int64} +reveal_type(+i4) # E: {int32} +reveal_type(+u8) # E: {uint64} +reveal_type(+u4) # E: {uint32} reveal_type(+td) # E: numpy.timedelta64 reveal_type(+AR) # E: Any -reveal_type(abs(c16)) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(abs(c8)) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(abs(f8)) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(abs(f4)) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(abs(i8)) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(abs(i4)) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(abs(u8)) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(abs(u4)) # E: numpy.unsignedinteger[numpy.typing._32Bit] +reveal_type(abs(c16)) # E: {float64} +reveal_type(abs(c8)) # E: {float32} +reveal_type(abs(f8)) # E: {float64} +reveal_type(abs(f4)) # E: {float32} +reveal_type(abs(i8)) # E: {int64} +reveal_type(abs(i4)) # E: {int32} +reveal_type(abs(u8)) # E: {uint64} +reveal_type(abs(u4)) # E: {uint32} reveal_type(abs(td)) # E: numpy.timedelta64 reveal_type(abs(b_)) # E: numpy.bool_ reveal_type(abs(AR)) # E: Any @@ -81,214 +81,212 @@ reveal_type(td / f) # E: numpy.timedelta64 reveal_type(td / f4) # E: numpy.timedelta64 reveal_type(td / f8) # E: numpy.timedelta64 -reveal_type(td / td) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(td // td) # E: numpy.signedinteger[numpy.typing._64Bit] +reveal_type(td / td) # E: {float64} +reveal_type(td // td) # E: {int64} # boolean -reveal_type(b_ / b) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / i) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / i8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / i4) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / u8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / u4) # E: 
numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / f4) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(b_ / c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(b_ / c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(b_ / c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] - -reveal_type(b / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i8 / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i4 / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(u8 / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(u4 / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 / b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f4 / b_) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(c / b_) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 / b_) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c8 / b_) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] +reveal_type(b_ / b) # E: {float64} +reveal_type(b_ / b_) # E: {float64} +reveal_type(b_ / i) # E: {float64} +reveal_type(b_ / i8) # E: {float64} +reveal_type(b_ / i4) # E: {float64} +reveal_type(b_ / u8) # E: {float64} +reveal_type(b_ / u4) # E: {float64} +reveal_type(b_ / f) # E: {float64} +reveal_type(b_ / f8) # E: {float64} +reveal_type(b_ / f4) # E: {float32} +reveal_type(b_ / c) # E: {complex128} +reveal_type(b_ / c16) # E: {complex128} +reveal_type(b_ / c8) # E: {complex64} + +reveal_type(b / b_) # E: {float64} +reveal_type(b_ / b_) # E: {float64} +reveal_type(i / b_) # E: {float64} +reveal_type(i8 / b_) # E: {float64} +reveal_type(i4 / b_) # E: {float64} +reveal_type(u8 / b_) # E: {float64} +reveal_type(u4 / b_) # E: {float64} +reveal_type(f / b_) # E: {float64} +reveal_type(f8 / b_) # E: {float64} +reveal_type(f4 / b_) # E: {float32} +reveal_type(c / b_) # E: {complex128} +reveal_type(c16 / b_) # E: {complex128} +reveal_type(c8 / b_) # E: {complex64} # Complex -reveal_type(c16 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + f8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + i8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + c8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + f4) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + i4) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + b_) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + b) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c16 + f) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] - -# note this comment is deliberate truncated as the result varies by platform, -# and the numpy `reveal` tests use substring matching -reveal_type(c16 + i) # E: numpy.complexfloating[numpy.typing._ +reveal_type(c16 + c16) # E: {complex128} 
+reveal_type(c16 + f8) # E: {complex128} +reveal_type(c16 + i8) # E: {complex128} +reveal_type(c16 + c8) # E: {complex128} +reveal_type(c16 + f4) # E: {complex128} +reveal_type(c16 + i4) # E: {complex128} +reveal_type(c16 + b_) # E: {complex128} +reveal_type(c16 + b) # E: {complex128} +reveal_type(c16 + c) # E: {complex128} +reveal_type(c16 + f) # E: {complex128} + +reveal_type(c16 + i) # E: {complex128} reveal_type(c16 + AR) # E: Any -reveal_type(c16 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f8 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(i8 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c8 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f4 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(i4 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(b_ + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(b + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(i + c16) # E: numpy.complexfloating[numpy.typing._ +reveal_type(c16 + c16) # E: {complex128} +reveal_type(f8 + c16) # E: {complex128} +reveal_type(i8 + c16) # E: {complex128} +reveal_type(c8 + c16) # E: {complex128} +reveal_type(f4 + c16) # E: {complex128} +reveal_type(i4 + c16) # E: {complex128} +reveal_type(b_ + c16) # E: {complex128} +reveal_type(b + c16) # E: {complex128} +reveal_type(c + c16) # E: {complex128} +reveal_type(f + c16) # E: {complex128} +reveal_type(i + c16) # E: {complex128} reveal_type(AR + c16) # E: Any -reveal_type(c8 + c16) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c8 + f8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c8 + i8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c8 + c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(c8 + f4) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(c8 + i4) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(c8 + b_) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(c8 + b) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(c8 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c8 + f) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c8 + i) # E: numpy.complexfloating[numpy.typing._ +reveal_type(c8 + c16) # E: {complex128} +reveal_type(c8 + f8) # E: {complex128} +reveal_type(c8 + i8) # E: {complex128} +reveal_type(c8 + c8) # E: {complex64} +reveal_type(c8 + f4) # E: {complex64} +reveal_type(c8 + i4) # E: {complex64} +reveal_type(c8 + b_) # E: {complex64} +reveal_type(c8 + b) # E: {complex64} +reveal_type(c8 + c) # E: {complex128} +reveal_type(c8 + f) # E: {complex128} +reveal_type(c8 + i) # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}] reveal_type(c8 + AR) # E: Any -reveal_type(c16 + c8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f8 + c8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] 
-reveal_type(i8 + c8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(c8 + c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(f4 + c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(i4 + c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(b_ + c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(b + c8) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(c + c8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f + c8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(i + c8) # E: numpy.complexfloating[numpy.typing._ +reveal_type(c16 + c8) # E: {complex128} +reveal_type(f8 + c8) # E: {complex128} +reveal_type(i8 + c8) # E: {complex128} +reveal_type(c8 + c8) # E: {complex64} +reveal_type(f4 + c8) # E: {complex64} +reveal_type(i4 + c8) # E: {complex64} +reveal_type(b_ + c8) # E: {complex64} +reveal_type(b + c8) # E: {complex64} +reveal_type(c + c8) # E: {complex128} +reveal_type(f + c8) # E: {complex128} +reveal_type(i + c8) # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}] reveal_type(AR + c8) # E: Any # Float -reveal_type(f8 + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 + i8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 + f4) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 + i4) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 + b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 + b) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f8 + f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 + i) # E: numpy.floating[numpy.typing._ +reveal_type(f8 + f8) # E: {float64} +reveal_type(f8 + i8) # E: {float64} +reveal_type(f8 + f4) # E: {float64} +reveal_type(f8 + i4) # E: {float64} +reveal_type(f8 + b_) # E: {float64} +reveal_type(f8 + b) # E: {float64} +reveal_type(f8 + c) # E: {complex128} +reveal_type(f8 + f) # E: {float64} +reveal_type(f8 + i) # E: {float64} reveal_type(f8 + AR) # E: Any -reveal_type(f8 + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i8 + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f4 + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i4 + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(c + f8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i + f8) # E: numpy.floating[numpy.typing._ +reveal_type(f8 + f8) # E: {float64} +reveal_type(i8 + f8) # E: {float64} +reveal_type(f4 + f8) # E: {float64} +reveal_type(i4 + f8) # E: {float64} +reveal_type(b_ + f8) # E: {float64} +reveal_type(b + f8) # E: {float64} +reveal_type(c + f8) # E: {complex128} +reveal_type(f + f8) # E: {float64} +reveal_type(i + f8) # E: {float64} reveal_type(AR + f8) # E: Any -reveal_type(f4 + f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f4 + i8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f4 + f4) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(f4 + i4) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(f4 + b_) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(f4 + b) # E: 
numpy.floating[numpy.typing._32Bit] -reveal_type(f4 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f4 + f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f4 + i) # E: numpy.floating[numpy.typing._ +reveal_type(f4 + f8) # E: {float64} +reveal_type(f4 + i8) # E: {float64} +reveal_type(f4 + f4) # E: {float32} +reveal_type(f4 + i4) # E: {float32} +reveal_type(f4 + b_) # E: {float32} +reveal_type(f4 + b) # E: {float32} +reveal_type(f4 + c) # E: {complex128} +reveal_type(f4 + f) # E: {float64} +reveal_type(f4 + i) # E: numpy.floating[{_NBitInt}] reveal_type(f4 + AR) # E: Any -reveal_type(f8 + f4) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i8 + f4) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f4 + f4) # E: umpy.floating[numpy.typing._32Bit] -reveal_type(i4 + f4) # E: umpy.floating[numpy.typing._32Bit] -reveal_type(b_ + f4) # E: umpy.floating[numpy.typing._32Bit] -reveal_type(b + f4) # E: umpy.floating[numpy.typing._32Bit] -reveal_type(c + f4) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f + f4) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i + f4) # E: numpy.floating[numpy.typing._ +reveal_type(f8 + f4) # E: {float64} +reveal_type(i8 + f4) # E: {float64} +reveal_type(f4 + f4) # E: {float32} +reveal_type(i4 + f4) # E: {float32} +reveal_type(b_ + f4) # E: {float32} +reveal_type(b + f4) # E: {float32} +reveal_type(c + f4) # E: {complex128} +reveal_type(f + f4) # E: {float64} +reveal_type(i + f4) # E: numpy.floating[{_NBitInt}] reveal_type(AR + f4) # E: Any # Int -reveal_type(i8 + i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 + u8) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(i8 + i4) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 + u4) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(i8 + b_) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 + b) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(i8 + f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i8 + i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(i8 + i8) # E: {int64} +reveal_type(i8 + u8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i8 + i4) # E: {int64} +reveal_type(i8 + u4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i8 + b_) # E: {int64} +reveal_type(i8 + b) # E: {int64} +reveal_type(i8 + c) # E: {complex128} +reveal_type(i8 + f) # E: {float64} +reveal_type(i8 + i) # E: {int64} reveal_type(i8 + AR) # E: Any -reveal_type(u8 + u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 + i4) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(u8 + u4) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 + b_) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 + b) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 + c) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(u8 + f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(u8 + i) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] +reveal_type(u8 + u8) # E: {uint64} +reveal_type(u8 + i4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u8 + u4) # E: {uint64} +reveal_type(u8 + b_) # E: {uint64} +reveal_type(u8 + b) # E: {uint64} +reveal_type(u8 + c) # E: 
{complex128} +reveal_type(u8 + f) # E: {float64} +reveal_type(u8 + i) # E: Union[numpy.signedinteger[Any], {float64}] reveal_type(u8 + AR) # E: Any -reveal_type(i8 + i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(u8 + i8) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(i4 + i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(u4 + i8) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(b_ + i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(b + i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(c + i8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f + i8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i + i8) # E: numpy.signedinteger[numpy.typing._ +reveal_type(i8 + i8) # E: {int64} +reveal_type(u8 + i8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i4 + i8) # E: {int64} +reveal_type(u4 + i8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(b_ + i8) # E: {int64} +reveal_type(b + i8) # E: {int64} +reveal_type(c + i8) # E: {complex128} +reveal_type(f + i8) # E: {float64} +reveal_type(i + i8) # E: {int64} reveal_type(AR + i8) # E: Any -reveal_type(u8 + u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(i4 + u8) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(u4 + u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(b_ + u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(b + u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(c + u8) # E: numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit] -reveal_type(f + u8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i + u8) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] +reveal_type(u8 + u8) # E: {uint64} +reveal_type(i4 + u8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u4 + u8) # E: {uint64} +reveal_type(b_ + u8) # E: {uint64} +reveal_type(b + u8) # E: {uint64} +reveal_type(c + u8) # E: {complex128} +reveal_type(f + u8) # E: {float64} +reveal_type(i + u8) # E: Union[numpy.signedinteger[Any], {float64}] reveal_type(AR + u8) # E: Any -reveal_type(i4 + i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i4 + i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i4 + i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(i4 + b_) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i4 + b) # E: numpy.signedinteger[numpy.typing._32Bit] +reveal_type(i4 + i8) # E: {int64} +reveal_type(i4 + i4) # E: {int32} +reveal_type(i4 + i) # E: {int_} +reveal_type(i4 + b_) # E: {int32} +reveal_type(i4 + b) # E: {int32} reveal_type(i4 + AR) # E: Any -reveal_type(u4 + i8) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(u4 + i4) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(u4 + u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u4 + u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] -reveal_type(u4 + i) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(u4 + b_) # E: numpy.unsignedinteger[numpy.typing._32Bit] -reveal_type(u4 + b) # E: numpy.unsignedinteger[numpy.typing._32Bit] +reveal_type(u4 + i8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u4 + i4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u4 + u8) # E: {uint64} +reveal_type(u4 + u4) # E: 
{uint32} +reveal_type(u4 + i) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u4 + b_) # E: {uint32} +reveal_type(u4 + b) # E: {uint32} reveal_type(u4 + AR) # E: Any -reveal_type(i8 + i4) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i4 + i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i + i4) # E: numpy.signedinteger[numpy.typing._ -reveal_type(b_ + i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(b + i4) # E: numpy.signedinteger[numpy.typing._32Bit] +reveal_type(i8 + i4) # E: {int64} +reveal_type(i4 + i4) # E: {int32} +reveal_type(i + i4) # E: {int_} +reveal_type(b_ + i4) # E: {int32} +reveal_type(b + i4) # E: {int32} reveal_type(AR + i4) # E: Any -reveal_type(i8 + u4) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(i4 + u4) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] -reveal_type(u8 + u4) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u4 + u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] -reveal_type(b_ + u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] -reveal_type(b + u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] -reveal_type(i + u4) # E: Union[numpy.signedinteger[Any], numpy.floating[numpy.typing._64Bit]] +reveal_type(i8 + u4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i4 + u4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u8 + u4) # E: {uint64} +reveal_type(u4 + u4) # E: {uint32} +reveal_type(b_ + u4) # E: {uint32} +reveal_type(b + u4) # E: {uint32} +reveal_type(i + u4) # E: Union[numpy.signedinteger[Any], {float64}] reveal_type(AR + u4) # E: Any diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.py b/numpy/typing/tests/data/reveal/bitwise_ops.py index af49244834f9..6b9969568c8e 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.py +++ b/numpy/typing/tests/data/reveal/bitwise_ops.py @@ -15,11 +15,11 @@ AR.setflags(write=False) -reveal_type(i8 << i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 >> i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 | i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 ^ i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 & i8) # E: numpy.signedinteger[numpy.typing._64Bit] +reveal_type(i8 << i8) # E: {int64} +reveal_type(i8 >> i8) # E: {int64} +reveal_type(i8 | i8) # E: {int64} +reveal_type(i8 ^ i8) # E: {int64} +reveal_type(i8 & i8) # E: {int64} reveal_type(i8 << AR) # E: Any reveal_type(i8 >> AR) # E: Any @@ -27,41 +27,41 @@ reveal_type(i8 ^ AR) # E: Any reveal_type(i8 & AR) # E: Any -reveal_type(i4 << i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i4 >> i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i4 | i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i4 ^ i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i4 & i4) # E: numpy.signedinteger[numpy.typing._32Bit] - -reveal_type(i8 << i4) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 >> i4) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 | i4) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 ^ i4) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 & i4) # E: numpy.signedinteger[numpy.typing._64Bit] - -reveal_type(i8 << i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(i8 >> i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(i8 | i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(i8 ^ i) # E: numpy.signedinteger[numpy.typing._ 
-reveal_type(i8 & i) # E: numpy.signedinteger[numpy.typing._ - -reveal_type(i8 << b_) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 >> b_) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 | b_) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 ^ b_) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 & b_) # E: numpy.signedinteger[numpy.typing._64Bit] - -reveal_type(i8 << b) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 >> b) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 | b) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 ^ b) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 & b) # E: numpy.signedinteger[numpy.typing._64Bit] - -reveal_type(u8 << u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 >> u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 | u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 ^ u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 & u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] +reveal_type(i4 << i4) # E: {int32} +reveal_type(i4 >> i4) # E: {int32} +reveal_type(i4 | i4) # E: {int32} +reveal_type(i4 ^ i4) # E: {int32} +reveal_type(i4 & i4) # E: {int32} + +reveal_type(i8 << i4) # E: {int64} +reveal_type(i8 >> i4) # E: {int64} +reveal_type(i8 | i4) # E: {int64} +reveal_type(i8 ^ i4) # E: {int64} +reveal_type(i8 & i4) # E: {int64} + +reveal_type(i8 << i) # E: {int64} +reveal_type(i8 >> i) # E: {int64} +reveal_type(i8 | i) # E: {int64} +reveal_type(i8 ^ i) # E: {int64} +reveal_type(i8 & i) # E: {int64} + +reveal_type(i8 << b_) # E: {int64} +reveal_type(i8 >> b_) # E: {int64} +reveal_type(i8 | b_) # E: {int64} +reveal_type(i8 ^ b_) # E: {int64} +reveal_type(i8 & b_) # E: {int64} + +reveal_type(i8 << b) # E: {int64} +reveal_type(i8 >> b) # E: {int64} +reveal_type(i8 | b) # E: {int64} +reveal_type(i8 ^ b) # E: {int64} +reveal_type(i8 & b) # E: {int64} + +reveal_type(u8 << u8) # E: {uint64} +reveal_type(u8 >> u8) # E: {uint64} +reveal_type(u8 | u8) # E: {uint64} +reveal_type(u8 ^ u8) # E: {uint64} +reveal_type(u8 & u8) # E: {uint64} reveal_type(u8 << AR) # E: Any reveal_type(u8 >> AR) # E: Any @@ -69,11 +69,11 @@ reveal_type(u8 ^ AR) # E: Any reveal_type(u8 & AR) # E: Any -reveal_type(u4 << u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] -reveal_type(u4 >> u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] -reveal_type(u4 | u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] -reveal_type(u4 ^ u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] -reveal_type(u4 & u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] +reveal_type(u4 << u4) # E: {uint32} +reveal_type(u4 >> u4) # E: {uint32} +reveal_type(u4 | u4) # E: {uint32} +reveal_type(u4 ^ u4) # E: {uint32} +reveal_type(u4 & u4) # E: {uint32} reveal_type(u4 << i4) # E: numpy.signedinteger[Any] reveal_type(u4 >> i4) # E: numpy.signedinteger[Any] @@ -87,20 +87,20 @@ reveal_type(u4 ^ i) # E: numpy.signedinteger[Any] reveal_type(u4 & i) # E: numpy.signedinteger[Any] -reveal_type(u8 << b_) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 >> b_) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 | b_) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 ^ b_) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 & b_) # E: numpy.unsignedinteger[numpy.typing._64Bit] +reveal_type(u8 << b_) # E: {uint64} +reveal_type(u8 >> b_) # E: {uint64} +reveal_type(u8 | b_) # E: {uint64} +reveal_type(u8 ^ b_) # E: 
{uint64} +reveal_type(u8 & b_) # E: {uint64} -reveal_type(u8 << b) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 >> b) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 | b) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 ^ b) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(u8 & b) # E: numpy.unsignedinteger[numpy.typing._64Bit] +reveal_type(u8 << b) # E: {uint64} +reveal_type(u8 >> b) # E: {uint64} +reveal_type(u8 | b) # E: {uint64} +reveal_type(u8 ^ b) # E: {uint64} +reveal_type(u8 & b) # E: {uint64} -reveal_type(b_ << b_) # E: numpy.signedinteger[numpy.typing._8Bit] -reveal_type(b_ >> b_) # E: numpy.signedinteger[numpy.typing._8Bit] +reveal_type(b_ << b_) # E: {int8} +reveal_type(b_ >> b_) # E: {int8} reveal_type(b_ | b_) # E: numpy.bool_ reveal_type(b_ ^ b_) # E: numpy.bool_ reveal_type(b_ & b_) # E: numpy.bool_ @@ -111,21 +111,21 @@ reveal_type(b_ ^ AR) # E: Any reveal_type(b_ & AR) # E: Any -reveal_type(b_ << b) # E: numpy.signedinteger[numpy.typing._8Bit] -reveal_type(b_ >> b) # E: numpy.signedinteger[numpy.typing._8Bit] +reveal_type(b_ << b) # E: {int8} +reveal_type(b_ >> b) # E: {int8} reveal_type(b_ | b) # E: numpy.bool_ reveal_type(b_ ^ b) # E: numpy.bool_ reveal_type(b_ & b) # E: numpy.bool_ -reveal_type(b_ << i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(b_ >> i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(b_ | i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(b_ ^ i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(b_ & i) # E: numpy.signedinteger[numpy.typing._ +reveal_type(b_ << i) # E: {int_} +reveal_type(b_ >> i) # E: {int_} +reveal_type(b_ | i) # E: {int_} +reveal_type(b_ ^ i) # E: {int_} +reveal_type(b_ & i) # E: {int_} -reveal_type(~i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(~i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(~u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(~u4) # E: numpy.unsignedinteger[numpy.typing._32Bit] +reveal_type(~i8) # E: {int64} +reveal_type(~i4) # E: {int32} +reveal_type(~u8) # E: {uint64} +reveal_type(~u4) # E: {uint32} reveal_type(~b_) # E: numpy.bool_ reveal_type(~AR) # E: Any diff --git a/numpy/typing/tests/data/reveal/dtype.py b/numpy/typing/tests/data/reveal/dtype.py index f786637dc9fd..b30a0ad6ebe3 100644 --- a/numpy/typing/tests/data/reveal/dtype.py +++ b/numpy/typing/tests/data/reveal/dtype.py @@ -2,31 +2,31 @@ dtype_obj: np.dtype[np.str_] -reveal_type(np.dtype(np.float64)) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(np.dtype(np.int64)) # E: numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(np.dtype(np.float64)) # E: numpy.dtype[{float64}] +reveal_type(np.dtype(np.int64)) # E: numpy.dtype[{int64}] # String aliases -reveal_type(np.dtype("float64")) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(np.dtype("float32")) # E: numpy.dtype[numpy.floating[numpy.typing._32Bit]] -reveal_type(np.dtype("int64")) # E: numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(np.dtype("int32")) # E: numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(np.dtype("float64")) # E: numpy.dtype[{float64}] +reveal_type(np.dtype("float32")) # E: numpy.dtype[{float32}] +reveal_type(np.dtype("int64")) # E: numpy.dtype[{int64}] +reveal_type(np.dtype("int32")) # E: numpy.dtype[{int32}] reveal_type(np.dtype("bool")) # E: numpy.dtype[numpy.bool_] reveal_type(np.dtype("bytes")) # E: numpy.dtype[numpy.bytes_] reveal_type(np.dtype("str")) # E: 
numpy.dtype[numpy.str_] # Python types -reveal_type(np.dtype(complex)) # E: numpy.dtype[numpy.complexfloating[numpy.typing._ -reveal_type(np.dtype(float)) # E: numpy.dtype[numpy.floating[numpy.typing._ -reveal_type(np.dtype(int)) # E: numpy.dtype[numpy.signedinteger[numpy.typing._ +reveal_type(np.dtype(complex)) # E: numpy.dtype[{cdouble}] +reveal_type(np.dtype(float)) # E: numpy.dtype[{double}] +reveal_type(np.dtype(int)) # E: numpy.dtype[{int_}] reveal_type(np.dtype(bool)) # E: numpy.dtype[numpy.bool_] reveal_type(np.dtype(str)) # E: numpy.dtype[numpy.str_] reveal_type(np.dtype(bytes)) # E: numpy.dtype[numpy.bytes_] # Special case for None -reveal_type(np.dtype(None)) # E: numpy.dtype[numpy.floating[numpy.typing._ +reveal_type(np.dtype(None)) # E: numpy.dtype[{double}] # Dtypes of dtypes -reveal_type(np.dtype(np.dtype(np.float64))) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(np.dtype(np.dtype(np.float64))) # E: numpy.dtype[{float64}] # Parameterized dtypes reveal_type(np.dtype("S8")) # E: numpy.dtype diff --git a/numpy/typing/tests/data/reveal/fromnumeric.py b/numpy/typing/tests/data/reveal/fromnumeric.py index 2d823b2e2cfc..2b58f019f2b3 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.py +++ b/numpy/typing/tests/data/reveal/fromnumeric.py @@ -13,7 +13,7 @@ d = np.array(1.0, dtype=np.float32) # writeable reveal_type(np.take(a, 0)) # E: numpy.bool_ -reveal_type(np.take(b, 0)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(np.take(b, 0)) # E: {float32} reveal_type( np.take(c, 0) # E: Union[numpy.generic, datetime.datetime, datetime.timedelta] ) @@ -66,8 +66,8 @@ reveal_type(np.partition(A, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.partition(B, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.argpartition(a, 0)) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.argpartition(b, 0)) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.argpartition(a, 0)) # E: {intp} +reveal_type(np.argpartition(b, 0)) # E: {intp} reveal_type(np.argpartition(c, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.argpartition(A, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.argpartition(B, 0)) # E: numpy.ndarray[Any, Any] @@ -78,18 +78,18 @@ reveal_type(np.argsort(A, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.argsort(B, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.argmax(A)) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.argmax(B)) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.argmax(A, axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ -reveal_type(np.argmax(B, axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ +reveal_type(np.argmax(A)) # E: {intp} +reveal_type(np.argmax(B)) # E: {intp} +reveal_type(np.argmax(A, axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] +reveal_type(np.argmax(B, axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] -reveal_type(np.argmin(A)) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.argmin(B)) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.argmin(A, axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ -reveal_type(np.argmin(B, axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ +reveal_type(np.argmin(A)) # E: {intp} +reveal_type(np.argmin(B)) # E: {intp} +reveal_type(np.argmin(A, axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] +reveal_type(np.argmin(B, axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] -reveal_type(np.searchsorted(A[0], 0)) # E: 
numpy.signedinteger[numpy.typing._ -reveal_type(np.searchsorted(B[0], 0)) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.searchsorted(A[0], 0)) # E: {intp} +reveal_type(np.searchsorted(B[0], 0)) # E: {intp} reveal_type(np.searchsorted(A[0], [0])) # E: numpy.ndarray[Any, Any] reveal_type(np.searchsorted(B[0], [0])) # E: numpy.ndarray[Any, Any] @@ -100,7 +100,7 @@ reveal_type(np.resize(B, (5, 5))) # E: numpy.ndarray[Any, Any] reveal_type(np.squeeze(a)) # E: numpy.bool_ -reveal_type(np.squeeze(b)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(np.squeeze(b)) # E: {float32} reveal_type(np.squeeze(c)) # E: numpy.ndarray[Any, Any] reveal_type(np.squeeze(A)) # E: numpy.ndarray[Any, Any] reveal_type(np.squeeze(B)) # E: numpy.ndarray[Any, Any] @@ -136,13 +136,13 @@ reveal_type(np.compress([True], B)) # E: numpy.ndarray[Any, Any] reveal_type(np.clip(a, 0, 1.0)) # E: numpy.number[Any] -reveal_type(np.clip(b, -1, 1)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(np.clip(b, -1, 1)) # E: {float32} reveal_type(np.clip(c, 0, 1)) # E: numpy.number[Any] reveal_type(np.clip(A, 0, 1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.clip(B, 0, 1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.sum(a)) # E: numpy.number[Any] -reveal_type(np.sum(b)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(np.sum(b)) # E: {float32} reveal_type(np.sum(c)) # E: numpy.number[Any] reveal_type(np.sum(A)) # E: numpy.number[Any] reveal_type(np.sum(B)) # E: numpy.number[Any] @@ -176,7 +176,7 @@ reveal_type(np.cumsum(B)) # E: numpy.ndarray[Any, Any] reveal_type(np.ptp(a)) # E: numpy.number[Any] -reveal_type(np.ptp(b)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(np.ptp(b)) # E: {float32} reveal_type(np.ptp(c)) # E: numpy.number[Any] reveal_type(np.ptp(A)) # E: numpy.number[Any] reveal_type(np.ptp(B)) # E: numpy.number[Any] @@ -186,7 +186,7 @@ reveal_type(np.ptp(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.amax(a)) # E: numpy.number[Any] -reveal_type(np.amax(b)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(np.amax(b)) # E: {float32} reveal_type(np.amax(c)) # E: numpy.number[Any] reveal_type(np.amax(A)) # E: numpy.number[Any] reveal_type(np.amax(B)) # E: numpy.number[Any] @@ -196,7 +196,7 @@ reveal_type(np.amax(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.amin(a)) # E: numpy.number[Any] -reveal_type(np.amin(b)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(np.amin(b)) # E: {float32} reveal_type(np.amin(c)) # E: numpy.number[Any] reveal_type(np.amin(A)) # E: numpy.number[Any] reveal_type(np.amin(B)) # E: numpy.number[Any] @@ -206,7 +206,7 @@ reveal_type(np.amin(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(np.prod(a)) # E: numpy.number[Any] -reveal_type(np.prod(b)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(np.prod(b)) # E: {float32} reveal_type(np.prod(c)) # E: numpy.number[Any] reveal_type(np.prod(A)) # E: numpy.number[Any] reveal_type(np.prod(B)) # E: numpy.number[Any] @@ -236,7 +236,7 @@ reveal_type(np.size(B)) # E: int reveal_type(np.around(a)) # E: numpy.number[Any] -reveal_type(np.around(b)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(np.around(b)) # E: {float32} reveal_type(np.around(c)) # E: numpy.number[Any] reveal_type(np.around(A)) # E: numpy.ndarray[Any, Any] reveal_type(np.around(B)) # E: numpy.ndarray[Any, Any] diff --git a/numpy/typing/tests/data/reveal/mod.py 
b/numpy/typing/tests/data/reveal/mod.py index 3330cf175922..989ef99fd002 100644 --- a/numpy/typing/tests/data/reveal/mod.py +++ b/numpy/typing/tests/data/reveal/mod.py @@ -27,123 +27,123 @@ reveal_type(AR2 % td) # E: Any reveal_type(td % AR2) # E: Any -reveal_type(divmod(td, td)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.timedelta64] +reveal_type(divmod(td, td)) # E: Tuple[{int64}, numpy.timedelta64] reveal_type(divmod(AR2, td)) # E: Tuple[Any, Any] reveal_type(divmod(td, AR2)) # E: Tuple[Any, Any] # Bool -reveal_type(b_ % b) # E: numpy.signedinteger[numpy.typing._8Bit] -reveal_type(b_ % i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(b_ % f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ % b_) # E: numpy.signedinteger[numpy.typing._8Bit] -reveal_type(b_ % i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(b_ % u8) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(b_ % f8) # E: numpy.floating[numpy.typing._64Bit] +reveal_type(b_ % b) # E: {int8} +reveal_type(b_ % i) # E: {int_} +reveal_type(b_ % f) # E: {float64} +reveal_type(b_ % b_) # E: {int8} +reveal_type(b_ % i8) # E: {int64} +reveal_type(b_ % u8) # E: {uint64} +reveal_type(b_ % f8) # E: {float64} reveal_type(b_ % AR) # E: Any -reveal_type(divmod(b_, b)) # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(divmod(b_, i)) # E: Tuple[numpy.signedinteger[numpy.typing._ -reveal_type(divmod(b_, f)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(b_, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(divmod(b_, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(b_, u8)) # E: Tuple[numpy.unsignedinteger[numpy.typing._64Bit], numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(divmod(b_, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] +reveal_type(divmod(b_, b)) # E: Tuple[{int8}, {int8}] +reveal_type(divmod(b_, i)) # E: Tuple[{int_}, {int_}] +reveal_type(divmod(b_, f)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}] +reveal_type(divmod(b_, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(b_, u8)) # E: Tuple[{uint64}, {uint64}] +reveal_type(divmod(b_, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(b_, AR)) # E: Tuple[Any, Any] -reveal_type(b % b_) # E: numpy.signedinteger[numpy.typing._8Bit] -reveal_type(i % b_) # E: numpy.signedinteger[numpy.typing._ -reveal_type(f % b_) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(b_ % b_) # E: numpy.signedinteger[numpy.typing._8Bit] -reveal_type(i8 % b_) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(u8 % b_) # E: numpy.unsignedinteger[numpy.typing._64Bit] -reveal_type(f8 % b_) # E: numpy.floating[numpy.typing._64Bit] +reveal_type(b % b_) # E: {int8} +reveal_type(i % b_) # E: {int_} +reveal_type(f % b_) # E: {float64} +reveal_type(b_ % b_) # E: {int8} +reveal_type(i8 % b_) # E: {int64} +reveal_type(u8 % b_) # E: {uint64} +reveal_type(f8 % b_) # E: {float64} reveal_type(AR % b_) # E: Any -reveal_type(divmod(b, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(divmod(i, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._ -reveal_type(divmod(f, b_)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] 
-reveal_type(divmod(b_, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(divmod(i8, b_)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(u8, b_)) # E: Tuple[numpy.unsignedinteger[numpy.typing._64Bit], numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(divmod(f8, b_)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] +reveal_type(divmod(b, b_)) # E: Tuple[{int8}, {int8}] +reveal_type(divmod(i, b_)) # E: Tuple[{int_}, {int_}] +reveal_type(divmod(f, b_)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}] +reveal_type(divmod(i8, b_)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(u8, b_)) # E: Tuple[{uint64}, {uint64}] +reveal_type(divmod(f8, b_)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(AR, b_)) # E: Tuple[Any, Any] # int -reveal_type(i8 % b) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 % i) # E: numpy.signedinteger[numpy.typing._ -reveal_type(i8 % f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i8 % i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i8 % f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i4 % i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i4 % f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i4 % i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(i4 % f4) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(i8 % b) # E: {int64} +reveal_type(i8 % i) # E: {int64} +reveal_type(i8 % f) # E: {float64} +reveal_type(i8 % i8) # E: {int64} +reveal_type(i8 % f8) # E: {float64} +reveal_type(i4 % i8) # E: {int64} +reveal_type(i4 % f8) # E: {float64} +reveal_type(i4 % i4) # E: {int32} +reveal_type(i4 % f4) # E: {float32} reveal_type(i8 % AR) # E: Any -reveal_type(divmod(i8, b)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(i8, i)) # E: Tuple[numpy.signedinteger[numpy.typing._ -reveal_type(divmod(i8, f)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(i8, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(i8, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(i8, i4)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(i8, f4)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(i4, i4)) # E: Tuple[numpy.signedinteger[numpy.typing._32Bit], numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(divmod(i4, f4)) # E: Tuple[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._32Bit]] +reveal_type(divmod(i8, b)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i8, i)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i8, f)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i8, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i8, i4)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i8, f4)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] +reveal_type(divmod(i4, f4)) # E: Tuple[{float32}, {float32}] reveal_type(divmod(i8, AR)) # E: Tuple[Any, Any] -reveal_type(b % i8) # E: 
numpy.signedinteger[numpy.typing._64Bit] -reveal_type(i % i8) # E: numpy.signedinteger[numpy.typing._ -reveal_type(f % i8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i8 % i8) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(f8 % i8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i8 % i4) # E: numpy.signedinteger[numpy.typing._64Bit] -reveal_type(f8 % i4) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i4 % i4) # E: numpy.signedinteger[numpy.typing._32Bit] -reveal_type(f4 % i4) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(b % i8) # E: {int64} +reveal_type(i % i8) # E: {int64} +reveal_type(f % i8) # E: {float64} +reveal_type(i8 % i8) # E: {int64} +reveal_type(f8 % i8) # E: {float64} +reveal_type(i8 % i4) # E: {int64} +reveal_type(f8 % i4) # E: {float64} +reveal_type(i4 % i4) # E: {int32} +reveal_type(f4 % i4) # E: {float32} reveal_type(AR % i8) # E: Any -reveal_type(divmod(b, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(i, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._ -reveal_type(divmod(f, i8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(i8, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(f8, i8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(i4, i8)) # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(divmod(f4, i8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(i4, i4)) # E: Tuple[numpy.signedinteger[numpy.typing._32Bit], numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(divmod(f4, i4)) # E: Tuple[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._32Bit]] +reveal_type(divmod(b, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(f, i8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(f8, i8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i4, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(f4, i8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] +reveal_type(divmod(f4, i4)) # E: Tuple[{float32}, {float32}] reveal_type(divmod(AR, i8)) # E: Tuple[Any, Any] # float -reveal_type(f8 % b) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 % i) # E: numpy.floating[numpy.typing._ -reveal_type(f8 % f) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i8 % f4) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f4 % f4) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(f8 % b) # E: {float64} +reveal_type(f8 % i) # E: {float64} +reveal_type(f8 % f) # E: {float64} +reveal_type(i8 % f4) # E: {float64} +reveal_type(f4 % f4) # E: {float32} reveal_type(f8 % AR) # E: Any -reveal_type(divmod(f8, b)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(f8, i)) # E: Tuple[numpy.floating[numpy.typing._ -reveal_type(divmod(f8, f)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(f8, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(f8, f4)) # E: Tuple[numpy.floating[numpy.typing._64Bit], 
numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(f4, f4)) # E: Tuple[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._32Bit]] +reveal_type(divmod(f8, b)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, i)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, f)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, f4)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] reveal_type(divmod(f8, AR)) # E: Tuple[Any, Any] -reveal_type(b % f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(i % f8) # E: numpy.floating[numpy.typing._ -reveal_type(f % f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 % f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f8 % f8) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(f4 % f4) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(b % f8) # E: {float64} +reveal_type(i % f8) # E: {float64} +reveal_type(f % f8) # E: {float64} +reveal_type(f8 % f8) # E: {float64} +reveal_type(f8 % f8) # E: {float64} +reveal_type(f4 % f4) # E: {float32} reveal_type(AR % f8) # E: Any -reveal_type(divmod(b, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(i, f8)) # E: Tuple[numpy.floating[numpy.typing._ -reveal_type(divmod(f, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(f8, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(f4, f8)) # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]] -reveal_type(divmod(f4, f4)) # E: Tuple[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._32Bit]] +reveal_type(divmod(b, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f4, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] reveal_type(divmod(AR, f8)) # E: Tuple[Any, Any] diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.py b/numpy/typing/tests/data/reveal/nbit_base_example.py index 0c4c53f9b1f1..99fb71560a24 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.py +++ b/numpy/typing/tests/data/reveal/nbit_base_example.py @@ -12,7 +12,7 @@ def add(a: np.floating[T], b: np.integer[T]) -> np.floating[T]: f8: np.float64 f4: np.float32 -reveal_type(add(f8, i8)) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(add(f4, i8)) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(add(f8, i4)) # E: numpy.floating[numpy.typing._64Bit] -reveal_type(add(f4, i4)) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(add(f8, i8)) # E: {float64} +reveal_type(add(f4, i8)) # E: {float64} +reveal_type(add(f8, i4)) # E: {float64} +reveal_type(add(f4, i4)) # E: {float32} diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py index df6cd4586e0a..0f7345358db6 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.py +++ b/numpy/typing/tests/data/reveal/ndarray_misc.py @@ -26,14 +26,14 @@ class SubClass(np.ndarray): ... 
reveal_type(A.any(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] reveal_type(A.any(out=B)) # E: SubClass -reveal_type(f8.argmax()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(A.argmax()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(A.argmax(axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ +reveal_type(f8.argmax()) # E: {intp} +reveal_type(A.argmax()) # E: {intp} +reveal_type(A.argmax(axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] reveal_type(A.argmax(out=B)) # E: SubClass -reveal_type(f8.argmin()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(A.argmin()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(A.argmin(axis=0)) # E: Union[numpy.ndarray[Any, Any], numpy.signedinteger[numpy.typing._ +reveal_type(f8.argmin()) # E: {intp} +reveal_type(A.argmin()) # E: {intp} +reveal_type(A.argmin(axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] reveal_type(A.argmin(out=B)) # E: SubClass reveal_type(f8.argsort()) # E: numpy.ndarray[Any, Any] @@ -53,11 +53,11 @@ class SubClass(np.ndarray): ... reveal_type(A.compress([0])) # E: numpy.ndarray[Any, Any] reveal_type(A.compress([0], out=B)) # E: SubClass -reveal_type(f8.conj()) # E: numpy.floating[numpy.typing._64Bit] +reveal_type(f8.conj()) # E: {float64} reveal_type(A.conj()) # E: numpy.ndarray[Any, Any] reveal_type(B.conj()) # E: SubClass -reveal_type(f8.conjugate()) # E: numpy.floating[numpy.typing._64Bit] +reveal_type(f8.conjugate()) # E: {float64} reveal_type(A.conjugate()) # E: numpy.ndarray[Any, Any] reveal_type(B.conjugate()) # E: SubClass @@ -87,7 +87,7 @@ class SubClass(np.ndarray): ... reveal_type(A.min(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.min(out=B)) # E: SubClass -reveal_type(f8.newbyteorder()) # E: numpy.floating[numpy.typing._64Bit] +reveal_type(f8.newbyteorder()) # E: {float64} reveal_type(A.newbyteorder()) # E: numpy.ndarray[Any, Any] reveal_type(B.newbyteorder('|')) # E: SubClass @@ -103,7 +103,7 @@ class SubClass(np.ndarray): ... 
reveal_type(A.ptp(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] reveal_type(A.ptp(out=B)) # E: SubClass -reveal_type(f8.round()) # E: numpy.floating[numpy.typing._64Bit] +reveal_type(f8.round()) # E: {float64} reveal_type(A.round()) # E: numpy.ndarray[Any, Any] reveal_type(A.round(out=B)) # E: SubClass diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py index 841b1473d3a9..faa7ac3d2883 100644 --- a/numpy/typing/tests/data/reveal/scalars.py +++ b/numpy/typing/tests/data/reveal/scalars.py @@ -2,11 +2,11 @@ x = np.complex64(3 + 2j) -reveal_type(x.real) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(x.imag) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(x.real) # E: {float32} +reveal_type(x.imag) # E: {float32} -reveal_type(x.real.real) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(x.real.imag) # E: numpy.floating[numpy.typing._32Bit] +reveal_type(x.real.real) # E: {float32} +reveal_type(x.real.imag) # E: {float32} reveal_type(x.itemsize) # E: int reveal_type(x.shape) # E: Tuple[] @@ -15,14 +15,14 @@ reveal_type(x.ndim) # E: Literal[0] reveal_type(x.size) # E: Literal[1] -reveal_type(x.squeeze()) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(x.byteswap()) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] -reveal_type(x.transpose()) # E: numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit] +reveal_type(x.squeeze()) # E: {complex64} +reveal_type(x.byteswap()) # E: {complex64} +reveal_type(x.transpose()) # E: {complex64} -reveal_type(x.dtype) # E: numpy.dtype[numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit]] +reveal_type(x.dtype) # E: numpy.dtype[{complex64}] -reveal_type(np.complex64().real) # E: numpy.floating[numpy.typing._32Bit] -reveal_type(np.complex128().imag) # E: numpy.floating[numpy.typing._64Bit] +reveal_type(np.complex64().real) # E: {float32} +reveal_type(np.complex128().imag) # E: {float64} reveal_type(np.unicode_('foo')) # E: numpy.str_ reveal_type(np.str0('foo')) # E: numpy.str_ @@ -31,34 +31,34 @@ reveal_type(np.unicode_()) # E: numpy.str_ reveal_type(np.str0()) # E: numpy.str_ -reveal_type(np.byte()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.short()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.intc()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.intp()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.int0()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.int_()) # E: numpy.signedinteger[numpy.typing._ -reveal_type(np.longlong()) # E: numpy.signedinteger[numpy.typing._ +reveal_type(np.byte()) # E: {byte} +reveal_type(np.short()) # E: {short} +reveal_type(np.intc()) # E: {intc} +reveal_type(np.intp()) # E: {intp} +reveal_type(np.int0()) # E: {intp} +reveal_type(np.int_()) # E: {int_} +reveal_type(np.longlong()) # E: {longlong} -reveal_type(np.ubyte()) # E: numpy.unsignedinteger[numpy.typing._ -reveal_type(np.ushort()) # E: numpy.unsignedinteger[numpy.typing._ -reveal_type(np.uintc()) # E: numpy.unsignedinteger[numpy.typing._ -reveal_type(np.uintp()) # E: numpy.unsignedinteger[numpy.typing._ -reveal_type(np.uint0()) # E: numpy.unsignedinteger[numpy.typing._ -reveal_type(np.uint()) # E: numpy.unsignedinteger[numpy.typing._ -reveal_type(np.ulonglong()) # E: numpy.unsignedinteger[numpy.typing._ +reveal_type(np.ubyte()) # E: {ubyte} +reveal_type(np.ushort()) # E: {ushort} +reveal_type(np.uintc()) # E: {uintc} +reveal_type(np.uintp()) # E: {uintp} 
+reveal_type(np.uint0()) # E: {uintp} +reveal_type(np.uint()) # E: {uint} +reveal_type(np.ulonglong()) # E: {ulonglong} -reveal_type(np.half()) # E: numpy.floating[numpy.typing._ -reveal_type(np.single()) # E: numpy.floating[numpy.typing._ -reveal_type(np.double()) # E: numpy.floating[numpy.typing._ -reveal_type(np.float_()) # E: numpy.floating[numpy.typing._ -reveal_type(np.longdouble()) # E: numpy.floating[numpy.typing._ -reveal_type(np.longfloat()) # E: numpy.floating[numpy.typing._ +reveal_type(np.half()) # E: {half} +reveal_type(np.single()) # E: {single} +reveal_type(np.double()) # E: {double} +reveal_type(np.float_()) # E: {double} +reveal_type(np.longdouble()) # E: {longdouble} +reveal_type(np.longfloat()) # E: {longdouble} -reveal_type(np.csingle()) # E: numpy.complexfloating[numpy.typing._ -reveal_type(np.singlecomplex()) # E: numpy.complexfloating[numpy.typing._ -reveal_type(np.cdouble()) # E: numpy.complexfloating[numpy.typing._ -reveal_type(np.complex_()) # E: numpy.complexfloating[numpy.typing._ -reveal_type(np.cfloat()) # E: numpy.complexfloating[numpy.typing._ -reveal_type(np.clongdouble()) # E: numpy.complexfloating[numpy.typing._ -reveal_type(np.clongfloat()) # E: numpy.complexfloating[numpy.typing._ -reveal_type(np.longcomplex()) # E: numpy.complexfloating[numpy.typing._ +reveal_type(np.csingle()) # E: {csingle} +reveal_type(np.singlecomplex()) # E: {csingle} +reveal_type(np.cdouble()) # E: {cdouble} +reveal_type(np.complex_()) # E: {cdouble} +reveal_type(np.cfloat()) # E: {cdouble} +reveal_type(np.clongdouble()) # E: {clongdouble} +reveal_type(np.clongfloat()) # E: {clongdouble} +reveal_type(np.longcomplex()) # E: {clongdouble} From 9ed8d5dd3dfe7b7945ec2ac5d7c37f1b8c4e4ab7 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 23 Dec 2020 11:46:59 +0800 Subject: [PATCH 0266/1270] passing pointer not the address. 
--- .../core/src/multiarray/einsum_sumprod.c.src | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index a3d2b127f938..88b73759837f 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -95,12 +95,12 @@ */ #if !@complex@ -static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ **data, npy_intp count) +static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count) { @temptype@ accum = 0; #if @NPYV_CHK@ // NPYV check for @type@ /* Use aligned instructions if possible */ - const int is_aligned = EINSUM_IS_ALIGNED(*data); + const int is_aligned = EINSUM_IS_ALIGNED(data); const int vstep = npyv_nlanes_@sfx@; npyv_@sfx@ vaccum = npyv_zero_@sfx@(); const npy_intp vstepx4 = vstep * 4; @@ -111,11 +111,11 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ **data, npy_intp count) * #st = storea, store# */ @cond@ { - for (; count >= vstepx4; count -= vstepx4, *data += vstepx4) { + for (; count >= vstepx4; count -= vstepx4, data += vstepx4) { /**begin repeat2 * #i = 0, 1, 2, 3# */ - npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(*data + vstep * @i@); + npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(data + vstep * @i@); /**end repeat2**/ npyv_@sfx@ a01 = npyv_add_@sfx@(a0, a1); npyv_@sfx@ a23 = npyv_add_@sfx@(a2, a3); @@ -124,22 +124,22 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ **data, npy_intp count) } } /**end repeat1**/ - for (; count > 0; count -= vstep, *data += vstep) { - npyv_@sfx@ a = npyv_load_tillz_@sfx@(*data, count); + for (; count > 0; count -= vstep, data += vstep) { + npyv_@sfx@ a = npyv_load_tillz_@sfx@(data, count); vaccum = npyv_add_@sfx@(a, vaccum); } accum = npyv_sum_@sfx@(vaccum); npyv_cleanup(); #else #ifndef NPY_DISABLE_OPTIMIZATION - for (; count > 4; count -= 4, *data += 4) { - const @temptype@ a01 = @from@(**data) + @from@(*(*data + 1)); - const @temptype@ a23 = @from@(*(*data + 2)) + @from@(*(*data + 3)); + for (; count > 4; count -= 4, data += 4) { + const @temptype@ a01 = @from@(*data) + @from@(*(data + 1)); + const @temptype@ a23 = @from@(*(data + 2)) + @from@(*(data + 3)); accum += a01 + a23; } #endif // !NPY_DISABLE_OPTIMIZATION - for (; count > 0; --count, *data += 1) { - accum += @from@(**data); + for (; count > 0; --count, data += 1) { + accum += @from@(*data); } #endif // NPYV check for @type@ return accum; @@ -711,7 +711,7 @@ static NPY_GCC_OPT_3 void { @type@ *data1 = (@type@ *)dataptr[1]; @temptype@ value0 = @from@(*(@type@ *)dataptr[0]); - @temptype@ accum = @name@_sum_of_arr(&data1, count); + @temptype@ accum = @name@_sum_of_arr(data1, count); *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum); } @@ -721,7 +721,7 @@ static NPY_GCC_OPT_3 void { @type@ *data0 = (@type@ *)dataptr[0]; @temptype@ value1 = @from@(*(@type@ *)dataptr[1]); - @temptype@ accum = @name@_sum_of_arr(&data0, count); + @temptype@ accum = @name@_sum_of_arr(data0, count); *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value1 * accum); } @@ -831,7 +831,7 @@ static NPY_GCC_OPT_3 void NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_outstride0_one (%d)\n", (int)count); #if !@complex@ @type@ *data = (@type@ *)dataptr[0]; - @temptype@ accum = @name@_sum_of_arr(&data, count); + @temptype@ accum = @name@_sum_of_arr(data, count); *((@type@ *)dataptr[1]) = @to@(accum + @from@(*((@type@ *)dataptr[1]))); #else @temptype@ accum_re = 0, accum_im = 0; From 
b3681b6af3be4220a6f380c0f34f63f77aaf4b07 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 23 Dec 2020 17:07:53 +0800 Subject: [PATCH 0267/1270] Add docstrings to indicate which intrinsics are tested. --- numpy/core/tests/test_simd.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 825e61298e95..71356f81258e 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -664,6 +664,11 @@ def test_conversion_boolean(self): assert false_vsfx != true_vsfx def test_conversion_expand(self): + """ + Test expand intrinics: + npyv_expand_u16_u8 + npyv_expand_u32_u16 + """ if self.sfx not in ("u8", "u16"): return totype = self.sfx[0]+str(int(self.sfx[1:])*2) @@ -722,6 +727,12 @@ def test_arithmetic_div(self): assert div == data_div def test_arithmetic_reduce_sum(self): + """ + Test reduce sum intrinics: + npyv_sum_u32 + npyv_sum_f32 + npyv_sum_f64 + """ if self.sfx not in ("u32", "f32", "f64"): return # reduce sum From c016f636e175910a2a419bbbfed2944911a10d64 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 23 Dec 2020 17:15:59 +0800 Subject: [PATCH 0268/1270] simplify the index related code. --- numpy/core/src/multiarray/einsum_sumprod.c.src | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index 88b73759837f..d1b76de4e437 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -133,12 +133,12 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count) #else #ifndef NPY_DISABLE_OPTIMIZATION for (; count > 4; count -= 4, data += 4) { - const @temptype@ a01 = @from@(*data) + @from@(*(data + 1)); - const @temptype@ a23 = @from@(*(data + 2)) + @from@(*(data + 3)); + const @temptype@ a01 = @from@(*data) + @from@(data[1]); + const @temptype@ a23 = @from@(data[2]) + @from@(data[3]); accum += a01 + a23; } #endif // !NPY_DISABLE_OPTIMIZATION - for (; count > 0; --count, data += 1) { + for (; count > 0; --count, data++) { accum += @from@(*data); } #endif // NPYV check for @type@ From 6c65d01705dffb2b285531813dc40cbc71799d8d Mon Sep 17 00:00:00 2001 From: Ronan Lamy Date: Thu, 24 Dec 2020 18:23:46 +0100 Subject: [PATCH 0269/1270] MAINT: Remove obsolete workaround to set ndarray.__hash__ = None In Python 3, a type that sets tp_richcompare but leaves tp_hash null is always unhashable. In Python 2, such types used to inherit tp_hash from their bases. --- numpy/core/src/multiarray/arrayobject.c | 4 ---- numpy/core/src/multiarray/multiarraymodule.c | 7 ------- 2 files changed, 11 deletions(-) diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index a2474d79fcf5..1326140d5f32 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -1754,10 +1754,6 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = { .tp_as_number = &array_as_number, .tp_as_sequence = &array_as_sequence, .tp_as_mapping = &array_as_mapping, - /* - * The tp_hash slot will be set PyObject_HashNotImplemented when the - * module is loaded. 
- */ .tp_str = (reprfunc)array_str, .tp_as_buffer = &array_as_buffer, .tp_flags =(Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE), diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index e10fe39bd954..dfd27a0bcef3 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -4556,13 +4556,6 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } - /* - * Before calling PyType_Ready, initialize the tp_hash slot in - * PyArray_Type to work around mingw32 not being able initialize - * static structure slots with functions from the Python C_API. - */ - PyArray_Type.tp_hash = PyObject_HashNotImplemented; - if (PyType_Ready(&PyUFunc_Type) < 0) { goto err; } From 54c37a37dc205ed4d4850d36a7f9aa5d482aa7ba Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Fri, 25 Dec 2020 10:32:41 -0700 Subject: [PATCH 0270/1270] MAINT: crackfortran regex simplify * remove extraneous character class markers used in `crackline_re_1`: `\w` and `=` on their own have no benefit to character class `[]` inclusion * `name_match` has a character class that can be simplified because `\w` metacharacter already encompasses the digit metacharacter and the underscore --- numpy/f2py/crackfortran.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 2e95e45969e5..2177c97cd93c 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -636,7 +636,7 @@ def _simplifyargs(argsline): a.append(n) return ','.join(a) -crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+[\w]*\b)\s*[=].*', re.I) +crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) def crackline(line, reset=0): @@ -2605,7 +2605,7 @@ def analyzevars(block): params = get_parameters(vars, get_useparameters(block)) dep_matches = {} - name_match = re.compile(r'\w[\w\d_$]*').match + name_match = re.compile(r'\w[\w$]*').match for v in list(vars.keys()): m = name_match(v) if m: From b16288698742e7593db61cf9a618e2d03de6b36e Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sun, 18 Oct 2020 12:03:17 +0000 Subject: [PATCH 0271/1270] SIMD: Replace raw SIMD of sin/cos with NPYV The new code improves the performance of non-contiguous memory access for the output array without any reduction in performance. For PPC64LE the performance increased by 2-3.0, and 1.5-2.0 on aarch64. 
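Concretely, the non-contiguous output case is an ordinary ufunc call whose `out` argument is a strided view. A minimal sketch of both cases (the sizes and the stride of 4 here are illustrative only, not a benchmark configuration):

    import numpy as np

    N = 10000
    x = np.linspace(0.0, 100.0, N, dtype=np.float32)

    # Contiguous output: the destination is written with unit stride.
    dst = np.empty(N, dtype=np.float32)
    np.sin(x, out=dst)

    # Non-contiguous output: only every 4th element of the backing
    # buffer is written, so the output stride (sdst) is 4 elements.
    buf = np.empty(4 * N, dtype=np.float32)
    np.sin(x, out=buf[::4])

Both calls compute the same values; only the memory access pattern of the store differs.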
--- .gitignore | 1 + numpy/core/code_generators/generate_umath.py | 4 +- numpy/core/include/numpy/npy_math.h | 9 - numpy/core/setup.py | 1 + numpy/core/src/umath/loops.c.src | 26 +- numpy/core/src/umath/loops.h.src | 13 +- .../umath/loops_trigonometric.dispatch.c.src | 230 +++++++++++++++++ numpy/core/src/umath/loops_utils.h.src | 11 + numpy/core/src/umath/npy_simd_data.h | 16 -- numpy/core/src/umath/simd.inc.src | 238 ------------------ 10 files changed, 259 insertions(+), 290 deletions(-) create mode 100644 numpy/core/src/umath/loops_trigonometric.dispatch.c.src diff --git a/.gitignore b/.gitignore index b0fa037a6647..5a5e464cca8f 100644 --- a/.gitignore +++ b/.gitignore @@ -218,3 +218,4 @@ numpy/core/src/_simd/_simd_inc.h # umath module numpy/core/src/umath/loops_unary_fp.dispatch.c numpy/core/src/umath/loops_arithm_fp.dispatch.c +numpy/core/src/umath/loops_trigonometric.dispatch.c diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 4e9a2cfec3ef..6ee8031cb98d 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -676,7 +676,7 @@ def english_upper(s): docstrings.get('numpy.core.umath.cos'), None, TD('e', f='cos', astype={'e':'f'}), - TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]), + TD('f', dispatch=[('loops_trigonometric', 'f')]), TD('fdg' + cmplx, f='cos'), TD(P, f='cos'), ), @@ -685,7 +685,7 @@ def english_upper(s): docstrings.get('numpy.core.umath.sin'), None, TD('e', f='sin', astype={'e':'f'}), - TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]), + TD('f', dispatch=[('loops_trigonometric', 'f')]), TD('fdg' + cmplx, f='sin'), TD(P, f='sin'), ), diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h index 7d71c36ccd3b..f32e298f081f 100644 --- a/numpy/core/include/numpy/npy_math.h +++ b/numpy/core/include/numpy/npy_math.h @@ -150,15 +150,6 @@ NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); -/* - * avx function has a common API for both sin & cos. 
This enum is used to - * distinguish between the two - */ -typedef enum { - npy_compute_sin, - npy_compute_cos -} NPY_TRIG_OP; - /* * C99 double math funcs */ diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 2e020a595c14..1042a1c4520d 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -929,6 +929,7 @@ def generate_umath_c(ext, build_dir): join('src', 'umath', 'scalarmath.c.src'), join('src', 'umath', 'ufunc_type_resolution.c'), join('src', 'umath', 'override.c'), + join('src', 'umath', 'loops_trigonometric.dispatch.c.src'), ] umath_deps = [ diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 839d2b3ae2bb..ba538d2ab81a 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1658,8 +1658,8 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void /**end repeat**/ /**begin repeat - * #func = sin, cos, exp, log# - * #scalarf = npy_sinf, npy_cosf, npy_expf, npy_logf# + * #func = exp, log# + * #scalarf = npy_expf, npy_logf# */ NPY_NO_EXPORT NPY_GCC_OPT_3 void @@ -1749,28 +1749,6 @@ FLOAT_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *step /**end repeat1**/ -/**begin repeat1 - * #func = cos, sin# - * #enum = npy_compute_cos, npy_compute_sin# - * #scalarf = npy_cosf, npy_sinf# - */ - -NPY_NO_EXPORT NPY_GCC_OPT_3 void -FLOAT_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - if (!run_unary_@isa@_sincos_FLOAT(args, dimensions, steps, @enum@)) { - UNARY_LOOP { -#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS - @ISA@_sincos_FLOAT((npy_float *)op1, (npy_float *)ip1, 1, steps[0], @enum@); -#else - const npy_float in1 = *(npy_float *)ip1; - *(npy_float *)op1 = @scalarf@(in1); -#endif - } - } -} - -/**end repeat1**/ /**end repeat**/ NPY_NO_EXPORT NPY_GCC_OPT_3 void diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index c15ff8e3b75d..d73c9fa7f4fc 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -225,8 +225,19 @@ DOUBLE_log(char **args, npy_intp const *dimensions, npy_intp const *steps, void NPY_NO_EXPORT void DOUBLE_log_avx512f(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +#ifndef NPY_DISABLE_OPTIMIZATION + #include "loops_trigonometric.dispatch.h" +#endif +/**begin repeat + * #func = sin, cos# + */ +NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void FLOAT_@func@, ( + char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func) +)) +/**end repeat**/ + /**begin repeat - * #func = sin, cos, exp, log# + * #func = exp, log# */ NPY_NO_EXPORT void FLOAT_@func@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); diff --git a/numpy/core/src/umath/loops_trigonometric.dispatch.c.src b/numpy/core/src/umath/loops_trigonometric.dispatch.c.src new file mode 100644 index 000000000000..8c2c83e7c998 --- /dev/null +++ b/numpy/core/src/umath/loops_trigonometric.dispatch.c.src @@ -0,0 +1,230 @@ +/*@targets + ** $maxopt baseline + ** (avx2 fma3) avx512f + ** vsx2 + ** neon_vfpv4 + **/ +#include "numpy/npy_math.h" +#include "simd/simd.h" +#include "loops_utils.h" +#include "loops.h" +/* + * TODO: + * - use vectorized version of Payne-Hanek style reduction for large elements or + * when there's no native FUSED support instead of fallback to libc + */ +#if NPY_SIMD_FMA3 // native support +/* + * Vectorized Cody-Waite range reduction technique + * Performs the reduction step x* = x - y*C in three steps: + * 1) x* 
= x - y*c1 + * 2) x* = x - y*c2 + * 3) x* = x - y*c3 + * c1, c2 are exact floating points, c3 = C - c1 - c2 simulates higher precision + */ +NPY_FINLINE npyv_f32 +simd_range_reduction_f32(npyv_f32 x, npyv_f32 y, npyv_f32 c1, npyv_f32 c2, npyv_f32 c3) +{ + npyv_f32 reduced_x = npyv_muladd_f32(y, c1, x); + reduced_x = npyv_muladd_f32(y, c2, reduced_x); + reduced_x = npyv_muladd_f32(y, c3, reduced_x); + return reduced_x; +} +/* + * Approximate cosine algorithm for x \in [-PI/4, PI/4] + * Maximum ULP across all 32-bit floats = 0.875 + */ +NPY_FINLINE npyv_f32 +simd_cosine_poly_f32(npyv_f32 x2) +{ + const npyv_f32 invf8 = npyv_setall_f32(0x1.98e616p-16f); + const npyv_f32 invf6 = npyv_setall_f32(-0x1.6c06dcp-10f); + const npyv_f32 invf4 = npyv_setall_f32(0x1.55553cp-05f); + const npyv_f32 invf2 = npyv_setall_f32(-0x1.000000p-01f); + const npyv_f32 invf0 = npyv_setall_f32(0x1.000000p+00f); + + npyv_f32 r = npyv_muladd_f32(invf8, x2, invf6); + r = npyv_muladd_f32(r, x2, invf4); + r = npyv_muladd_f32(r, x2, invf2); + r = npyv_muladd_f32(r, x2, invf0); + return r; +} +/* + * Approximate sine algorithm for x \in [-PI/4, PI/4] + * Maximum ULP across all 32-bit floats = 0.647 + * Polynomial approximation based on unpublished work by T. Myklebust + */ +NPY_FINLINE npyv_f32 +simd_sine_poly_f32(npyv_f32 x, npyv_f32 x2) +{ + const npyv_f32 invf9 = npyv_setall_f32(0x1.7d3bbcp-19f); + const npyv_f32 invf7 = npyv_setall_f32(-0x1.a06bbap-13f); + const npyv_f32 invf5 = npyv_setall_f32(0x1.11119ap-07f); + const npyv_f32 invf3 = npyv_setall_f32(-0x1.555556p-03f); + + npyv_f32 r = npyv_muladd_f32(invf9, x2, invf7); + r = npyv_muladd_f32(r, x2, invf5); + r = npyv_muladd_f32(r, x2, invf3); + r = npyv_muladd_f32(r, x2, npyv_zero_f32()); + r = npyv_muladd_f32(r, x, x); + return r; +} +/* + * Vectorized approximate sine/cosine algorithms: The following code is a + * vectorized version of the algorithm presented here: + * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751 + * (1) Load data in registers and generate mask for elements that are + * within range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f, + * 117435.992f] for sine. + * (2) For elements within range, perform range reduction using Cody-Waite's + * method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4]. + * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k = + * int(y). + * (4) For elements outside that range, Cody-Waite reduction performs poorly + * leading to catastrophic cancellation. We compute cosine by calling glibc in + * a scalar fashion. + * (5) Vectorized implementation has a max ULP of 1.49 and performs at least + * 5-7x(x86) - 2.5-3x(Power) - 1-2x(Arm) faster than scalar implementations + * when magnitude of all elements in the array < 71476.0625f (117435.992f for sine). + * Worst case performance is when all the elements are large leading to about 1-2% reduction in + * performance. 
+ */ +typedef enum +{ + SIMD_COMPUTE_SIN, + SIMD_COMPUTE_COS +} SIMD_TRIG_OP; + +static void SIMD_MSVC_NOINLINE +simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, + npy_intp len, SIMD_TRIG_OP trig_op) +{ + // Load up frequently used constants + const npyv_f32 zerosf = npyv_zero_f32(); + const npyv_s32 ones = npyv_setall_s32(1); + const npyv_s32 twos = npyv_setall_s32(2); + const npyv_f32 two_over_pi = npyv_setall_f32(0x1.45f306p-1f); + const npyv_f32 codyw_pio2_highf = npyv_setall_f32(-0x1.921fb0p+00f); + const npyv_f32 codyw_pio2_medf = npyv_setall_f32(-0x1.5110b4p-22f); + const npyv_f32 codyw_pio2_lowf = npyv_setall_f32(-0x1.846988p-48f); + const npyv_f32 rint_cvt_magic = npyv_setall_f32(0x1.800000p+23f); + // Cody-Waite's range + float max_codi = 117435.992f; + if (trig_op == SIMD_COMPUTE_COS) { + max_codi = 71476.0625f; + } + const npyv_f32 max_cody = npyv_setall_f32(max_codi); + const int vstep = npyv_nlanes_f32; + + for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { + npyv_f32 x_in; + if (ssrc == 1) { + x_in = npyv_load_tillz_f32(src, len); + } else { + x_in = npyv_loadn_tillz_f32(src, ssrc, len); + } + npyv_b32 simd_mask = npyv_cmple_f32(npyv_abs_f32(x_in), max_cody); + npy_uint64 simd_maski = npyv_tobits_b32(simd_mask); + /* + * For elements outside of this range, Cody-Waite's range reduction + * becomes inaccurate and we will call libc to compute cosine for + * these numbers + */ + if (simd_maski != 0) { + npyv_b32 nnan_mask = npyv_notnan_f32(x_in); + npyv_f32 x = npyv_select_f32(npyv_and_b32(nnan_mask, simd_mask), x_in, zerosf); + + npyv_f32 quadrant = npyv_mul_f32(x, two_over_pi); + // round to nearest, -0.0f -> +0.0f, and |a| must be <= 0x1.0p+22 + quadrant = npyv_add_f32(quadrant, rint_cvt_magic); + quadrant = npyv_sub_f32(quadrant, rint_cvt_magic); + + // Cody-Waite's range reduction algorithm + npyv_f32 reduced_x = simd_range_reduction_f32( + x, quadrant, codyw_pio2_highf, codyw_pio2_medf, codyw_pio2_lowf + ); + npyv_f32 reduced_x2 = npyv_square_f32(reduced_x); + + // compute cosine and sine + npyv_f32 cos = simd_cosine_poly_f32(reduced_x2); + npyv_f32 sin = simd_sine_poly_f32(reduced_x, reduced_x2); + + npyv_s32 iquadrant = npyv_round_s32_f32(quadrant); + if (trig_op == SIMD_COMPUTE_COS) { + iquadrant = npyv_add_s32(iquadrant, ones); + } + // blend sin and cos based on the quadrant + npyv_b32 sine_mask = npyv_cmpeq_s32(npyv_and_s32(iquadrant, ones), npyv_zero_s32()); + cos = npyv_select_f32(sine_mask, sin, cos); + + // multiply by -1 for appropriate elements + npyv_b32 negate_mask = npyv_cmpeq_s32(npyv_and_s32(iquadrant, twos), twos); + cos = npyv_ifsub_f32(negate_mask, zerosf, cos, cos); + cos = npyv_select_f32(nnan_mask, cos, npyv_setall_f32(NPY_NANF)); + + if (sdst == 1) { + npyv_store_till_f32(dst, len, cos); + } else { + npyv_storen_till_f32(dst, sdst, len, cos); + } + } + if (simd_maski != ((1 << vstep) - 1)) { + float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[npyv_nlanes_f32]; + npyv_storea_f32(ip_fback, x_in); + + // process elements using libc for large elements + if (trig_op == SIMD_COMPUTE_COS) { + for (unsigned i = 0; i < npyv_nlanes_f32; ++i) { + if ((simd_maski >> i) & 1) { + continue; + } + dst[sdst*i] = npy_cosf(ip_fback[i]); + } + } + else { + for (unsigned i = 0; i < npyv_nlanes_f32; ++i) { + if ((simd_maski >> i) & 1) { + continue; + } + dst[sdst*i] = npy_sinf(ip_fback[i]); + } + } + } + } + npyv_cleanup(); +} +#endif // NPY_SIMD_FMA3 + +/**begin repeat + * #func = cos, sin# + * #enum = SIMD_COMPUTE_COS, 
SIMD_COMPUTE_SIN# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_@func@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ + const float *src = (float*)args[0]; + float *dst = (float*)args[1]; + + const int lsize = sizeof(src[0]); + const npy_intp ssrc = steps[0] / lsize; + const npy_intp sdst = steps[1] / lsize; + npy_intp len = dimensions[0]; + assert(steps[0] % lsize == 0 && steps[1] % lsize == 0); +#if NPY_SIMD_FMA3 + if (is_mem_overlap(src, steps[0], dst, steps[1], len) || + !npyv_loadable_stride_f32(ssrc) || !npyv_storable_stride_f32(sdst) + ) { + for (; len > 0; --len, src += ssrc, dst += sdst) { + simd_sincos_f32(src, 1, dst, 1, 1, @enum@); + } + } else { + simd_sincos_f32(src, ssrc, dst, sdst, len, @enum@); + } +#else + for (; len > 0; --len, src += ssrc, dst += sdst) { + const float src0 = *src; + *dst = npy_@func@f(src0); + } +#endif +} +/**end repeat**/ diff --git a/numpy/core/src/umath/loops_utils.h.src b/numpy/core/src/umath/loops_utils.h.src index dfa790ed9f34..1a2a5a32ba20 100644 --- a/numpy/core/src/umath/loops_utils.h.src +++ b/numpy/core/src/umath/loops_utils.h.src @@ -3,6 +3,17 @@ #include "numpy/npy_common.h" // NPY_FINLINE #include "numpy/halffloat.h" // npy_half_to_float + +/** + * Old versions of MSVC causes ambiguous link errors when we deal with large SIMD kernels + * which lead to break the build, probably releated to the following bug: + * https://developercommunity.visualstudio.com/content/problem/415095/internal-compiler-error-with-perfectly-forwarded-r.html + */ +#if defined(_MSC_VER) && _MSC_VER < 1916 + #define SIMD_MSVC_NOINLINE __declspec(noinline) +#else + #define SIMD_MSVC_NOINLINE +#endif /* * nomemoverlap - returns false if two strided arrays have an overlapping * region in memory. 
ip_size/op_size = size of the arrays which can be negative diff --git a/numpy/core/src/umath/npy_simd_data.h b/numpy/core/src/umath/npy_simd_data.h index 45487d0a8553..be9288affc7e 100644 --- a/numpy/core/src/umath/npy_simd_data.h +++ b/numpy/core/src/umath/npy_simd_data.h @@ -119,22 +119,6 @@ static npy_uint64 EXP_Table_tail[32] = { #define NPY_COEFF_Q3_LOGf 9.864942958519418960339e-01f #define NPY_COEFF_Q4_LOGf 1.546476374983906719538e-01f #define NPY_COEFF_Q5_LOGf 5.875095403124574342950e-03f -/* - * Constants used in vector implementation of sinf/cosf(x) - */ -#define NPY_TWO_O_PIf 0x1.45f306p-1f -#define NPY_CODY_WAITE_PI_O_2_HIGHf -0x1.921fb0p+00f -#define NPY_CODY_WAITE_PI_O_2_MEDf -0x1.5110b4p-22f -#define NPY_CODY_WAITE_PI_O_2_LOWf -0x1.846988p-48f -#define NPY_COEFF_INVF0_COSINEf 0x1.000000p+00f -#define NPY_COEFF_INVF2_COSINEf -0x1.000000p-01f -#define NPY_COEFF_INVF4_COSINEf 0x1.55553cp-05f -#define NPY_COEFF_INVF6_COSINEf -0x1.6c06dcp-10f -#define NPY_COEFF_INVF8_COSINEf 0x1.98e616p-16f -#define NPY_COEFF_INVF3_SINEf -0x1.555556p-03f -#define NPY_COEFF_INVF5_SINEf 0x1.11119ap-07f -#define NPY_COEFF_INVF7_SINEf -0x1.a06bbap-13f -#define NPY_COEFF_INVF9_SINEf 0x1.7d3bbcp-19f /* * Lookup table of log(c_k) diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 3d4e6de8729b..e667639860f5 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -271,25 +271,6 @@ run_unary_@isa@_@func@_FLOAT(char **args, npy_intp const *dimensions, npy_intp c /**end repeat1**/ -#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS -static NPY_INLINE void -@ISA@_sincos_FLOAT(npy_float *, npy_float *, const npy_intp n, const npy_intp steps, NPY_TRIG_OP); -#endif - -static NPY_INLINE int -run_unary_@isa@_sincos_FLOAT(char **args, npy_intp const *dimensions, npy_intp const *steps, NPY_TRIG_OP my_trig_op) -{ -#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS - if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), sizeof(npy_float), @REGISTER_SIZE@)) { - @ISA@_sincos_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0], my_trig_op); - return 1; - } - else - return 0; -#endif - return 0; -} - /**end repeat**/ #if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS @@ -975,19 +956,6 @@ fma_invert_mask_pd(__m256i ymask) return _mm256_andnot_si256(ymask, _mm256_set1_epi32(0xFFFFFFFF)); } -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 -fma_should_calculate_sine(__m256i k, __m256i andop, __m256i cmp) -{ - return _mm256_cvtepi32_ps( - _mm256_cmpeq_epi32(_mm256_and_si256(k, andop), cmp)); -} - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 -fma_should_negate(__m256i k, __m256i andop, __m256i cmp) -{ - return fma_should_calculate_sine(k, andop, cmp); -} - static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 fma_get_exponent(__m256 x) { @@ -1215,18 +1183,6 @@ avx512_invert_mask_pd(__mmask8 ymask) return _mm512_knot(ymask); } -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16 -avx512_should_calculate_sine(__m512i k, __m512i andop, __m512i cmp) -{ - return _mm512_cmpeq_epi32_mask(_mm512_and_epi32(k, andop), cmp); -} - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16 -avx512_should_negate(__m512i k, __m512i andop, __m512i cmp) -{ - return avx512_should_calculate_sine(k, andop, cmp); -} - static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512 avx512_get_exponent(__m512 x) { @@ -1458,40 +1414,6 @@ static NPY_INLINE NPY_GCC_OPT_3 
NPY_GCC_TARGET_@ISA@ @mask@ return _mm@vsize@_@or@(m1,m2); } -/* - * Approximate cosine algorithm for x \in [-PI/4, PI/4] - * Maximum ULP across all 32-bit floats = 0.875 - */ - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@ -@isa@_cosine(@vtype@ x2, @vtype@ invf8, @vtype@ invf6, @vtype@ invf4, - @vtype@ invf2, @vtype@ invf0) -{ - @vtype@ cos = @fmadd@(invf8, x2, invf6); - cos = @fmadd@(cos, x2, invf4); - cos = @fmadd@(cos, x2, invf2); - cos = @fmadd@(cos, x2, invf0); - return cos; -} - -/* - * Approximate sine algorithm for x \in [-PI/4, PI/4] - * Maximum ULP across all 32-bit floats = 0.647 - */ - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@ -@isa@_sine(@vtype@ x, @vtype@ x2, @vtype@ invf9, @vtype@ invf7, - @vtype@ invf5, @vtype@ invf3, - @vtype@ zero) -{ - @vtype@ sin = @fmadd@(invf9, x2, invf7); - sin = @fmadd@(sin, x2, invf5); - sin = @fmadd@(sin, x2, invf3); - sin = @fmadd@(sin, x2, zero); - sin = @fmadd@(sin, x, x); - return sin; -} - static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@ @isa@_sqrt_ps(@vtype@ x) { @@ -2004,167 +1926,7 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void * #cvtps_epi32 = _mm256_cvtps_epi32, # * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS# */ - -/* - * Vectorized approximate sine/cosine algorithms: The following code is a - * vectorized version of the algorithm presented here: - * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751 - * (1) Load data in ZMM/YMM registers and generate mask for elements that are - * within range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f, - * 117435.992f] for sine. - * (2) For elements within range, perform range reduction using Cody-Waite's - * method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4]. - * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k = - * int(y). - * (4) For elements outside that range, Cody-Waite reduction performs poorly - * leading to catastrophic cancellation. We compute cosine by calling glibc in - * a scalar fashion. - * (5) Vectorized implementation has a max ULP of 1.49 and performs at least - * 5-7x faster than scalar implementations when magnitude of all elements in - * the array < 71476.0625f (117435.992f for sine). Worst case performance is - * when all the elements are large leading to about 1-2% reduction in - * performance. 
- */ - #if defined @CHK@ -static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void -@ISA@_sincos_FLOAT(npy_float * op, - npy_float * ip, - const npy_intp array_size, - const npy_intp steps, - NPY_TRIG_OP my_trig_op) -{ - const npy_intp stride = steps/(npy_intp)sizeof(npy_float); - const npy_int num_lanes = @NUM_LANES@; - npy_float large_number = 71476.0625f; - if (my_trig_op == npy_compute_sin) { - large_number = 117435.992f; - } - - /* Load up frequently used constants */ - @vtype@i zeros = _mm@vsize@_set1_epi32(0); - @vtype@i ones = _mm@vsize@_set1_epi32(1); - @vtype@i twos = _mm@vsize@_set1_epi32(2); - @vtype@ two_over_pi = _mm@vsize@_set1_ps(NPY_TWO_O_PIf); - @vtype@ codyw_c1 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_HIGHf); - @vtype@ codyw_c2 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_MEDf); - @vtype@ codyw_c3 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_LOWf); - @vtype@ cos_invf0 = _mm@vsize@_set1_ps(NPY_COEFF_INVF0_COSINEf); - @vtype@ cos_invf2 = _mm@vsize@_set1_ps(NPY_COEFF_INVF2_COSINEf); - @vtype@ cos_invf4 = _mm@vsize@_set1_ps(NPY_COEFF_INVF4_COSINEf); - @vtype@ cos_invf6 = _mm@vsize@_set1_ps(NPY_COEFF_INVF6_COSINEf); - @vtype@ cos_invf8 = _mm@vsize@_set1_ps(NPY_COEFF_INVF8_COSINEf); - @vtype@ sin_invf3 = _mm@vsize@_set1_ps(NPY_COEFF_INVF3_SINEf); - @vtype@ sin_invf5 = _mm@vsize@_set1_ps(NPY_COEFF_INVF5_SINEf); - @vtype@ sin_invf7 = _mm@vsize@_set1_ps(NPY_COEFF_INVF7_SINEf); - @vtype@ sin_invf9 = _mm@vsize@_set1_ps(NPY_COEFF_INVF9_SINEf); - @vtype@ cvt_magic = _mm@vsize@_set1_ps(NPY_RINT_CVT_MAGICf); - @vtype@ zero_f = _mm@vsize@_set1_ps(0.0f); - @vtype@ quadrant, reduced_x, reduced_x2, cos, sin; - @vtype@i iquadrant; - @mask@ nan_mask, glibc_mask, sine_mask, negate_mask; - @mask@ load_mask = @isa@_get_full_load_mask_ps(); - npy_intp num_remaining_elements = array_size; - - /* - * Note: while generally indices are npy_intp, we ensure that our maximum index - * will fit in an int32 as a precondition for this function via - * IS_OUTPUT_BLOCKABLE_UNARY - */ - npy_int32 indexarr[16]; - for (npy_int32 ii = 0; ii < 16; ii++) { - indexarr[ii] = ii*stride; - } - @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]); - - while (num_remaining_elements > 0) { - - if (num_remaining_elements < num_lanes) { - load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements, - num_lanes); - } - - @vtype@ x_in; - if (stride == 1) { - x_in = @isa@_masked_load_ps(load_mask, ip); - } - else { - x_in = @isa@_masked_gather_ps(zero_f, ip, vindex, load_mask); - } - - /* - * For elements outside of this range, Cody-Waite's range reduction - * becomes inaccurate and we will call glibc to compute cosine for - * these numbers - */ - - glibc_mask = @isa@_in_range_mask(x_in, large_number,-large_number); - glibc_mask = @and_masks@(load_mask, glibc_mask); - nan_mask = _mm@vsize@_cmp_ps@vsub@(x_in, x_in, _CMP_NEQ_UQ); - @vtype@ x = @isa@_set_masked_lanes_ps(x_in, zero_f, @or_masks@(nan_mask, glibc_mask)); - npy_int iglibc_mask = @mask_to_int@(glibc_mask); - - if (iglibc_mask != @full_mask@) { - quadrant = _mm@vsize@_mul_ps(x, two_over_pi); - - /* round to nearest */ - quadrant = _mm@vsize@_add_ps(quadrant, cvt_magic); - quadrant = _mm@vsize@_sub_ps(quadrant, cvt_magic); - - /* Cody-Waite's range reduction algorithm */ - reduced_x = @isa@_range_reduction(x, quadrant, - codyw_c1, codyw_c2, codyw_c3); - reduced_x2 = _mm@vsize@_mul_ps(reduced_x, reduced_x); - - /* compute cosine and sine */ - cos = @isa@_cosine(reduced_x2, cos_invf8, cos_invf6, cos_invf4, - cos_invf2, cos_invf0); - sin = 
@isa@_sine(reduced_x, reduced_x2, sin_invf9, sin_invf7, - sin_invf5, sin_invf3, zero_f); - - iquadrant = _mm@vsize@_cvtps_epi32(quadrant); - if (my_trig_op == npy_compute_cos) { - iquadrant = _mm@vsize@_add_epi32(iquadrant, ones); - } - - /* blend sin and cos based on the quadrant */ - sine_mask = @isa@_should_calculate_sine(iquadrant, ones, zeros); - cos = @isa@_blend(cos, sin, sine_mask); - - /* multiply by -1 for appropriate elements */ - negate_mask = @isa@_should_negate(iquadrant, twos, twos); - cos = @isa@_blend(cos, _mm@vsize@_sub_ps(zero_f, cos), negate_mask); - cos = @isa@_set_masked_lanes_ps(cos, _mm@vsize@_set1_ps(NPY_NANF), nan_mask); - - @masked_store@(op, @cvtps_epi32@(load_mask), cos); - } - - /* process elements using glibc for large elements */ - if (iglibc_mask != 0) { - float NPY_DECL_ALIGNED(@BYTES@) ip_fback[@NUM_LANES@]; - _mm@vsize@_store_ps(ip_fback, x_in); - - if (my_trig_op == npy_compute_cos) { - for (int ii = 0; ii < num_lanes; ++ii, iglibc_mask >>= 1) { - if (iglibc_mask & 0x01) { - op[ii] = npy_cosf(ip_fback[ii]); - } - } - } - else { - for (int ii = 0; ii < num_lanes; ++ii, iglibc_mask >>= 1) { - if (iglibc_mask & 0x01) { - op[ii] = npy_sinf(ip_fback[ii]); - } - } - } - } - ip += num_lanes*stride; - op += num_lanes; - num_remaining_elements -= num_lanes; - } -} - /* * Vectorized implementation of exp using AVX2 and AVX512: * 1) if x >= xmax; return INF (overflow) From 968288aa5d6db1ee972c4ba5869415ea47b4c27f Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 25 Dec 2020 00:46:55 +0000 Subject: [PATCH 0272/1270] MAINT: Suppress maybe-uninitialized warning in gcc on VSX --- numpy/core/src/common/simd/vsx/vsx.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/core/src/common/simd/vsx/vsx.h b/numpy/core/src/common/simd/vsx/vsx.h index 769f5a08f4cc..66b76208f042 100644 --- a/numpy/core/src/common/simd/vsx/vsx.h +++ b/numpy/core/src/common/simd/vsx/vsx.h @@ -2,6 +2,16 @@ #error "Not a standalone header" #endif +#if defined(__GNUC__) && __GNUC__ <= 7 + /** + * GCC <= 7 produces ambiguous warning caused by -Werror=maybe-uninitialized, + * when certain intrinsics involved. `vec_ld` is one of them but it seemed to work fine, + * and suppressing the warning wouldn't affect its functionality. + */ + #pragma GCC diagnostic ignored "-Wuninitialized" + #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + #define NPY_SIMD 128 #define NPY_SIMD_WIDTH 16 #define NPY_SIMD_F64 1 From 14706548928f78b31431bcd3eb6bf7d76c914335 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 26 Dec 2020 03:19:47 +0000 Subject: [PATCH 0273/1270] BENCH: Rename `bench_avx.py` to `bench_ufunc_strides.py` This test should not be exclusive to AVX. this patch also extends unary test to cover different sets of output strides. 
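Outside of the asv harness, the new (stride_in, stride_out) combinations amount to timing calls of the following form (a rough sketch; the sizes, repeat count and use of `timeit` are illustrative, not what the suite actually runs):

    import timeit
    import numpy as np

    N = 10000
    for stride_in in (1, 2, 4):
        for stride_out in (1, 2, 4):
            x = np.ones(stride_in * N, dtype='f')
            out = np.empty(stride_out * N, dtype='f')
            # Time np.sin reading a strided input view and writing a
            # strided output view.
            t = timeit.timeit(
                lambda: np.sin(x[::stride_in], out=out[::stride_out]),
                number=100)
            print(stride_in, stride_out, t)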
--- .../{bench_avx.py => bench_ufunc_strides.py} | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) rename benchmarks/benchmarks/{bench_avx.py => bench_ufunc_strides.py} (92%) diff --git a/benchmarks/benchmarks/bench_avx.py b/benchmarks/benchmarks/bench_ufunc_strides.py similarity index 92% rename from benchmarks/benchmarks/bench_avx.py rename to benchmarks/benchmarks/bench_ufunc_strides.py index 82866c17077f..58f325e76174 100644 --- a/benchmarks/benchmarks/bench_avx.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -2,7 +2,7 @@ import numpy as np -avx_ufuncs = ['sin', +unary_ufuncs = ['sin', 'cos', 'exp', 'log', @@ -20,14 +20,15 @@ 'isinf', 'signbit'] stride = [1, 2, 4] +stride_out = [1, 2, 4] dtype = ['f', 'd'] -class AVX_UFunc(Benchmark): - params = [avx_ufuncs, stride, dtype] - param_names = ['avx_based_ufunc', 'stride', 'dtype'] +class Unary(Benchmark): + params = [unary_ufuncs, stride, stride_out, dtype] + param_names = ['ufunc', 'stride_in', 'stride_out', 'dtype'] timeout = 10 - def setup(self, ufuncname, stride, dtype): + def setup(self, ufuncname, stride, stride_out, dtype): np.seterr(all='ignore') try: self.f = getattr(np, ufuncname) @@ -35,9 +36,10 @@ def setup(self, ufuncname, stride, dtype): raise NotImplementedError() N = 10000 self.arr = np.ones(stride*N, dtype) + self.arr_out = np.empty(stride_out*N, dtype) - def time_ufunc(self, ufuncname, stride, dtype): - self.f(self.arr[::stride]) + def time_ufunc(self, ufuncname, stride, stride_out, dtype): + self.f(self.arr[::stride], self.arr_out[::stride_out]) class AVX_UFunc_log(Benchmark): params = [stride, dtype] From 7161b771f5a0e0422f24257963ed6c19f7b707b9 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Sat, 26 Dec 2020 16:04:26 -0700 Subject: [PATCH 0274/1270] MAINT: PR 18072 revisions * `name_match` regular expression now starts by matching a letter only, based on reviewer feedback --- numpy/f2py/crackfortran.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 2177c97cd93c..733336032a60 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -2605,7 +2605,7 @@ def analyzevars(block): params = get_parameters(vars, get_useparameters(block)) dep_matches = {} - name_match = re.compile(r'\w[\w$]*').match + name_match = re.compile(r'[A-Za-z][\w$]*').match for v in list(vars.keys()): m = name_match(v) if m: From cb7b26d4e18583d7b010425264b9a9a379554606 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Sat, 26 Dec 2020 16:30:23 -0700 Subject: [PATCH 0275/1270] MAINT: exprtype regex simplify * `determineexprtype_re_1` was modified to remove extraneous character class markers around a single `,` * a similar change was applied for the word metacharacter in `determineexprtype_re_2` and `determineexprtype_re_3` * the third character class in `determineexprtype_re_3` was simplified to remove an escape sequence--by placing the `-` at the start of the character class its metacharacter status can be avoided --- numpy/f2py/crackfortran.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 2e95e45969e5..2d2960c1c523 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -2988,10 +2988,10 @@ def analyzeargs(block): block['vars'][block['result']] = {} return block -determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I) -determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P[\w]+)|)\Z', re.I) 
+determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P\w+)|)\Z', re.I) determineexprtype_re_3 = re.compile( - r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P[\w]+)|)\Z', re.I) + r'\A[+-]?[\d.]+[-\d+de.]*(_(?P\w+)|)\Z', re.I) determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z', re.I) From 9aa658ae3800a06fe61f304ca48a7d99a1ed3799 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 27 Dec 2020 17:37:33 +0000 Subject: [PATCH 0276/1270] Update doc/source/reference/arrays.scalars.rst --- doc/source/reference/arrays.scalars.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index ea1cae23e8ba..be7992d83584 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -176,8 +176,12 @@ Inexact types .. note:: - The printing behavior of inexact scalars is to use as few digits as possible - to display the result unambiguously. This means that equal values at + Inexact scalars are printed using the fewest decimal digits needed to + distinguish their value from other values of the same datatype, + by judicious rounding. See the ``unique`` parameter of + `format_float_positional` and `format_float_scientific`. + + This means that variables with equal binary values but whose datatypes are of different precisions may display differently:: >>> f16 = np.float16("0.1") From 7059f0680a97434248d9626a318040adf9531626 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 27 Dec 2020 17:38:33 +0000 Subject: [PATCH 0277/1270] Update doc/source/reference/arrays.scalars.rst --- doc/source/reference/arrays.scalars.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index be7992d83584..0e2f4d7b9edf 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -196,6 +196,17 @@ Inexact types ``f16`` prints as ``0.1`` because it is as close to that value as possible, whereas the other types do not as they have more precision and therefore have closer values. + + Conversely, the same value approximated at different precisions may compare + unequal despite printing identically: + + >>> f16 = np.float16("0.1") + >>> f32 = np.float32("0.1") + >>> f64 = np.float64("0.1") + >>> f16 == f32 == f64 + False + >>> f16, f32, f64 + (0.1, 0.1, 0.1) Floating-point types ++++++++++++++++++++ From 4832edc55ffa7ba66ced716bfebbec05d93f8097 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 27 Dec 2020 17:40:43 +0000 Subject: [PATCH 0278/1270] Update doc/source/reference/arrays.scalars.rst --- doc/source/reference/arrays.scalars.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 0e2f4d7b9edf..227d12d4d6a7 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -197,8 +197,8 @@ Inexact types whereas the other types do not as they have more precision and therefore have closer values. 
- Conversely, the same value approximated at different precisions may compare - unequal despite printing identically: + Conversely, floating-point scalars of different precisions which approximate + the same decimal value may compare unequal despite printing identically: >>> f16 = np.float16("0.1") >>> f32 = np.float32("0.1") From 9b3f65096ee6bee277552da987ce296985baa96d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 20 Dec 2020 16:35:37 -0700 Subject: [PATCH 0279/1270] MAINT: Add dist_info to valid setup.py commands. The follwing changes are made to 'setup.py'. - Add 'dist_info' to the valid 'setup.py' commands. The 'dist_info' command was added to setuptools in 2017 and we were following the unrecognized command path in setup.py which raised a warning. - Add 'version' to the recognized information commands, it came in with the move to versioneer. - Include the arguments passed to 'setup.py' when the unrecognized command warning is raised. This will aid in maintaining the command checks in the future. --- setup.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/setup.py b/setup.py index e54328e064af..7d76129ac654 100755 --- a/setup.py +++ b/setup.py @@ -240,7 +240,8 @@ def parse_setuppy_commands(): '--maintainer', '--maintainer-email', '--contact', '--contact-email', '--url', '--license', '--description', '--long-description', '--platforms', '--classifiers', - '--keywords', '--provides', '--requires', '--obsoletes'] + '--keywords', '--provides', '--requires', '--obsoletes', + 'version',] for command in info_commands: if command in args: @@ -251,8 +252,7 @@ def parse_setuppy_commands(): # below and not standalone. Hence they're not added to good_commands. good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py', 'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm', - 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src', - 'version') + 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src',) for command in good_commands: if command in args: @@ -340,17 +340,13 @@ def parse_setuppy_commands(): # Commands that do more than print info, but also don't need Cython and # template parsing. - other_commands = ['egg_info', 'install_egg_info', 'rotate'] + other_commands = ['egg_info', 'install_egg_info', 'rotate', 'dist_info'] for command in other_commands: if command in args: return False # If we got here, we didn't detect what setup.py command was given - import warnings - warnings.warn("Unrecognized setuptools command, proceeding with " - "generating Cython sources and expanding templates", - stacklevel=2) - return True + raise RuntimeError("Unrecognized setuptools command: {}".format(args)) def get_docs_url(): @@ -416,7 +412,7 @@ def setup_package(): # Raise errors for unsupported commands, improve help output, etc. 
run_build = parse_setuppy_commands() - if run_build and 'version' not in sys.argv: + if run_build: # patches distutils, even though we don't use it #from setuptools import setup from numpy.distutils.core import setup From acfc3f3ee997111042638debb1d49bfe9829b6df Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 27 Dec 2020 21:23:50 +0100 Subject: [PATCH 0280/1270] NEP: mark NEP 28 on website redesign as final [ci skip] --- doc/neps/nep-0028-website-redesign.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/neps/nep-0028-website-redesign.rst b/doc/neps/nep-0028-website-redesign.rst index e27da91c6c8b..592209a5ff89 100644 --- a/doc/neps/nep-0028-website-redesign.rst +++ b/doc/neps/nep-0028-website-redesign.rst @@ -7,7 +7,7 @@ NEP 28 — numpy.org website redesign :Author: Ralf Gommers :Author: Joe LaChance :Author: Shekhar Rajak -:Status: Accepted +:Status: Final :Type: Informational :Created: 2019-07-16 :Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-August/079889.html @@ -323,7 +323,9 @@ Alternatives we considered for the overall design of the website: Discussion ---------- -Mailing list thread discussing this NEP: TODO +- Pull request for this NEP (with a good amount of discussion): https://github.com/numpy/numpy/pull/14032 +- Email about NEP for review: https://mail.python.org/pipermail/numpy-discussion/2019-July/079856.html +- Proposal to accept this NEP: https://mail.python.org/pipermail/numpy-discussion/2019-August/079889.html References and Footnotes From e81922e74097774d12736531e391b7b79bad3b50 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 27 Dec 2020 23:26:53 +0100 Subject: [PATCH 0281/1270] DOC: fix build warnings in NEP 35 --- ...35-array-creation-dispatch-with-array-function.rst | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst index 5ec01081a9b0..e0ca59316547 100644 --- a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst +++ b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst @@ -327,7 +327,7 @@ The first version of this proposal suggested the implementation above as one viable solution for NumPy functions implemented in C. However, due to the downsides pointed out above we have decided to discard any changes on the Python side and resolve those issues with a pure-C implementation. Please refer to -[implementation]_ for details. +[7]_ for details. Reading the Reference Array Downstream ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -344,6 +344,7 @@ An example of such use is to create a new Dask array while preserving its backend type: .. code:: python + # Returns dask.array np.asarray([1, 2, 3], like=da.array(cp.array(()))) @@ -357,6 +358,7 @@ not use the ``like=`` argument via the ``self`` attribute from instead: .. code:: python + # Returns dask.array np.asarray([1, 2, 3], like=da.array(cp.array(()))) @@ -379,6 +381,7 @@ implemented in Dask. The current relevant part of ``__array_function__`` definition in Dask is seen below: .. code:: python + def __array_function__(self, func, types, args, kwargs): # Code not relevant for this example here @@ -388,6 +391,7 @@ definition in Dask is seen below: And this is how the updated code would look like: .. 
code:: python + def __array_function__(self, func, types, args, kwargs): # Code not relevant for this example here @@ -419,8 +423,7 @@ by downstream libraries is beyond the scope of the present proposal. Discussion ---------- -.. [implementation] `Implementation's pull request on GitHub `_ -.. [discussion] `Further discussion on implementation and the NEP's content `_ +- `Further discussion on implementation and the NEP's content `_ References ---------- @@ -437,6 +440,8 @@ References .. [6] `NEP 37 — A dispatch protocol for NumPy-like modules `_. +.. [7] `Implementation's pull request on GitHub `_ + Copyright --------- From aed54ec40da57745fd114577b65b61df1bdb0220 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 27 Dec 2020 23:40:46 +0100 Subject: [PATCH 0282/1270] DOC: fix invalid C code in NEP 10 --- doc/neps/nep-0010-new-iterator-ufunc.rst | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/doc/neps/nep-0010-new-iterator-ufunc.rst b/doc/neps/nep-0010-new-iterator-ufunc.rst index 358018c46417..4e7fdfdf5769 100644 --- a/doc/neps/nep-0010-new-iterator-ufunc.rst +++ b/doc/neps/nep-0010-new-iterator-ufunc.rst @@ -575,12 +575,16 @@ ndim, and niter will produce slightly different layouts. intp shape; /* The current coordinate along this axis */ intp coord; - /* The operand and index strides for this axis + /* The operand and index strides for this axis */ intp stride[niter]; - {intp indexstride;} #if (flags&FLAGS_HASINDEX); + #if (flags&FLAGS_HASINDEX) + intp indexstride; + #endif /* The operand pointers and index values for this axis */ char* ptr[niter]; - {intp index;} #if (flags&FLAGS_HASINDEX); + #if (flags&FLAGS_HASINDEX) + intp index; + #endif }[ndim]; }; From aa3d08d067affe9ecac73cba44a16f7017ad3081 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 27 Dec 2020 23:42:52 +0100 Subject: [PATCH 0283/1270] DOC: fix build warning for NEP 36 --- doc/neps/nep-0036-fair-play.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/neps/nep-0036-fair-play.rst b/doc/neps/nep-0036-fair-play.rst index 32e5e1e75c03..4c8195b71607 100644 --- a/doc/neps/nep-0036-fair-play.rst +++ b/doc/neps/nep-0036-fair-play.rst @@ -129,7 +129,7 @@ Fair play rules discussion on the mailing list before monkeypatching NumPy. Questions and answers -------------------- +--------------------- **Q:** We would like to distribute an optimized version of NumPy that utilizes special instructions for our company's CPU. You recommend From 11d1f13983c95d8f7436dc2d18b79353cdf9da4f Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 28 Dec 2020 07:08:17 +0000 Subject: [PATCH 0284/1270] MAINT: Bump sphinx from 3.3.1 to 3.4.1 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 3.3.1 to 3.4.1. 
- [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/3.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v3.3.1...v3.4.1) Signed-off-by: dependabot-preview[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 0e47198b3a8c..82da7875b7d3 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx==3.3.1 +sphinx==3.4.1 numpydoc==1.1.0 ipython scipy From fe2c1015122f66943ef41af09c111a701f0fce0c Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 28 Dec 2020 07:08:37 +0000 Subject: [PATCH 0285/1270] MAINT: Bump pytz from 2020.4 to 2020.5 Bumps [pytz](https://github.com/stub42/pytz) from 2020.4 to 2020.5. - [Release notes](https://github.com/stub42/pytz/releases) - [Commits](https://github.com/stub42/pytz/compare/release_2020.4...release_2020.5) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 2a4f64ca5a71..6c074a316d35 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -3,7 +3,7 @@ wheel<0.36.3 setuptools<49.2.0 hypothesis==5.43.3 pytest==6.2.1 -pytz==2020.4 +pytz==2020.5 pytest-cov==2.10.1 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' # for numpy.random.test.test_extending From ed4089a248ea7a3609c2d0e02802aa1fd8b2c533 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 28 Dec 2020 08:23:19 +0000 Subject: [PATCH 0286/1270] MAINT: Bump hypothesis from 5.43.3 to 5.43.4 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 5.43.3 to 5.43.4. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-5.43.3...hypothesis-python-5.43.4) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 6c074a316d35..496cb3a09e5c 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 -hypothesis==5.43.3 +hypothesis==5.43.4 pytest==6.2.1 pytz==2020.5 pytest-cov==2.10.1 From e63001a5701da3c3c1ac29c9e4bbe225f062a653 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Mon, 28 Dec 2020 11:00:03 -0700 Subject: [PATCH 0287/1270] MAINT: regex char class improve MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * replace superfluous single-character regex character classes with their literal string equivalents; this avoids the overhead associated with a character class when there's only a single character enclosed (so there's no benefit to the class overhead) * for more information see: Chapter 6 of: Friedl, Jeffrey. Mastering Regular Expressions. 3rd ed., O’Reilly Media, 2009. 
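The rewrite is behaviour-preserving: a character class containing a single character matches exactly the same strings as the escaped literal. A quick sanity check along these lines (illustrative only, not part of the patch), using one of the patterns rewritten below:

    import re

    old = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I)   # single-character class
    new = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I)    # escaped literal

    for name in ['foo.cpp', 'bar.CC', 'baz.c', 'qux.cxx', 'plain']:
        assert bool(old.match(name)) == bool(new.match(name))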
--- numpy/distutils/ccompiler_opt.py | 2 +- numpy/distutils/command/build_src.py | 8 ++++---- numpy/distutils/conv_template.py | 4 ++-- numpy/distutils/extension.py | 4 ++-- numpy/distutils/fcompiler/__init__.py | 8 ++++---- numpy/distutils/fcompiler/ibm.py | 2 +- numpy/distutils/from_template.py | 2 +- numpy/distutils/misc_util.py | 6 +++--- numpy/f2py/crackfortran.py | 10 +++++----- numpy/f2py/f2py2e.py | 4 ++-- numpy/lib/polynomial.py | 2 +- tools/commitstats.py | 2 +- 12 files changed, 27 insertions(+), 27 deletions(-) diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index f695ebb9267a..ecf5172cccad 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -1770,7 +1770,7 @@ def parse_targets(self, source): tokens = tokens[start_pos:end_pos] return self._parse_target_tokens(tokens) - _parse_regex_arg = re.compile(r'\s|[,]|([+-])') + _parse_regex_arg = re.compile(r'\s|,|([+-])') def _parse_arg_features(self, arg_name, req_features): if not isinstance(req_features, str): self.dist_fatal("expected a string in '%s'" % arg_name) diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py index 303d6197cbd3..5581011f6f22 100644 --- a/numpy/distutils/command/build_src.py +++ b/numpy/distutils/command/build_src.py @@ -715,14 +715,14 @@ def swig_sources(self, sources, extension): return new_sources + py_files -_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match -_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match +_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match +_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match #### SWIG related auxiliary functions #### _swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', re.I).match -_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search -_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search +_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search +_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search def get_swig_target(source): with open(source, 'r') as f: diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py index d08015fdf39f..e46db066302f 100644 --- a/numpy/distutils/conv_template.py +++ b/numpy/distutils/conv_template.py @@ -137,7 +137,7 @@ def paren_repl(obj): numrep = obj.group(2) return ','.join([torep]*int(numrep)) -parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)") +parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") plainrep = re.compile(r"([^*]+)\*(\d+)") def parse_values(astr): # replaces all occurrences of '(a,b,c)*4' in astr @@ -207,7 +207,7 @@ def parse_loop_header(loophead) : dlist.append(tmp) return dlist -replace_re = re.compile(r"@([\w]+)@") +replace_re = re.compile(r"@(\w+)@") def parse_string(astr, env, level, line) : lineno = "#line %d\n" % line diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py index 67114ef2e3f7..c90b5d725389 100644 --- a/numpy/distutils/extension.py +++ b/numpy/distutils/extension.py @@ -10,8 +10,8 @@ from distutils.extension import Extension as old_Extension -cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match -fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match +cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match +fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match class Extension(old_Extension): diff --git 
a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py index 76f00ee91b97..4730a5a0999a 100644 --- a/numpy/distutils/fcompiler/__init__.py +++ b/numpy/distutils/fcompiler/__init__.py @@ -962,10 +962,10 @@ def dummy_fortran_file(): return name[:-2] -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search +is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search _free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match def is_free_format(file): diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py index 4a83682e5165..eff24401a1c3 100644 --- a/numpy/distutils/fcompiler/ibm.py +++ b/numpy/distutils/fcompiler/ibm.py @@ -77,7 +77,7 @@ def get_flags_linker_so(self): fo, new_cfg = make_temp_file(suffix='_xlf.cfg') log.info('Creating '+new_cfg) with open(xlf_cfg, 'r') as fi: - crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P.*)/crt1.o').match + crt1_match = re.compile(r'\s*crt\s*=\s*(?P.*)/crt1.o').match for line in fi: m = crt1_match(line) if m: diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py index 070b7d8b8855..7add44c7679d 100644 --- a/numpy/distutils/from_template.py +++ b/numpy/distutils/from_template.py @@ -206,7 +206,7 @@ def process_str(allstr): return writestr -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+[.]src)['\"]", re.I) +include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) def resolve_includes(source): d = os.path.dirname(source) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index d3073ab2d742..5392663d65ae 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -428,9 +428,9 @@ def msvc_runtime_major(): ######################### #XXX need support for .C that is also C++ -cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match -fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match -f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match +cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match +fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match +f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match def _get_f90_modules(source): """Return a list of Fortran f90 module names that diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index ab15bc09b016..3d4dffde9b05 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -294,10 +294,10 @@ def getextension(name): return '' return name[i + 1:] -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search +is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search 
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match @@ -1482,7 +1482,7 @@ def cracktypespec0(typespec, ll): ll = ll[i + 2:] return typespec, selector, attr, ll ##### -namepattern = re.compile(r'\s*(?P\b[\w]+\b)\s*(?P.*)\s*\Z', re.I) +namepattern = re.compile(r'\s*(?P\b\w+\b)\s*(?P.*)\s*\Z', re.I) kindselector = re.compile( r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|[*]\s*(?P.*?))\s*\Z', re.I) charselector = re.compile( diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index b45d985aae52..1b922ea901f6 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -511,7 +511,7 @@ def run_compile(): remove_build_dir = 1 build_dir = tempfile.mkdtemp() - _reg1 = re.compile(r'[-][-]link[-]') + _reg1 = re.compile(r'--link-') sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] if sysinfo_flags: @@ -569,7 +569,7 @@ def run_compile(): del flib_flags[i] assert len(flib_flags) <= 2, repr(flib_flags) - _reg5 = re.compile(r'[-][-](verbose)') + _reg5 = re.compile(r'--(verbose)') setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in setup_flags] diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index ea966ffa343b..e9df783b4b76 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -1037,7 +1037,7 @@ def polydiv(u, v): return poly1d(q), poly1d(r) return q, r -_poly_mat = re.compile(r"[*][*]([0-9]*)") +_poly_mat = re.compile(r"\*\*([0-9]*)") def _raise_power(astr, wrap=70): n = 0 line1 = '' diff --git a/tools/commitstats.py b/tools/commitstats.py index 14c37d4d2b97..534f0a1b8416 100644 --- a/tools/commitstats.py +++ b/tools/commitstats.py @@ -4,7 +4,7 @@ import numpy as np import os -names = re.compile(r'r\d+\s[|]\s(.*)\s[|]\s200') +names = re.compile(r'r\d+\s\|\s(.*)\s\|\s200') def get_count(filename, repo): mystr = open(filename).read() From 31e00f035d217a7237b767c76ed7b8d903367839 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 25 Nov 2020 22:28:55 +0000 Subject: [PATCH 0288/1270] NEP: NumPy sponsorship guidelines (NEP 46) --- doc/neps/nep-0046-sponsorship-guidelines.rst | 241 +++++++++++++++++++ 1 file changed, 241 insertions(+) create mode 100644 doc/neps/nep-0046-sponsorship-guidelines.rst diff --git a/doc/neps/nep-0046-sponsorship-guidelines.rst b/doc/neps/nep-0046-sponsorship-guidelines.rst new file mode 100644 index 000000000000..a9c2d85a919b --- /dev/null +++ b/doc/neps/nep-0046-sponsorship-guidelines.rst @@ -0,0 +1,241 @@ +.. _NEP46: + +===================================== +NEP 46 — NumPy Sponsorship Guidelines +===================================== + +:Author: Ralf Gommers +:Status: Draft +:Type: Process +:Created: 2020-12-27 +:Resolution: (required for Accepted | Rejected | Withdrawn) + + +Abstract +-------- + +This NEP provides guidelines on how the NumPy project will acknowledge +financial and in-kind support. + + +Motivation and Scope +-------------------- + +In the past few years the NumPy project has gotten significant financial +support, as well as dedicated work time for maintainers to work on NumPy. There +is a need to acknowledge that support - funders and organizations expect or require +it, it's helpful when looking for new funding, and it's the right thing to do. +Furthermore, having a clear policy for how NumPy acknowledges support is +helpful when searching for new support. 
+ +This NEP is aimed at both the NumPy community - who can use it when looking for +support and acknowledging existing support - and at past, current and +prospective sponsors, who often want or need to know what they get in return +for their support (other than a healthier NumPy). + +The scope of this proposal includes: + +- direct financial support, employers providing paid time for NumPy maintainers + and regular contributors, and in-kind support such as free hardware resources or + services. +- where and how NumPy acknowledges support (e.g., logo placement on the website). +- the amount and duration of support which leads to acknowledgement. +- who in the NumPy project is responsible for sponsorship related topics, and + how to contact them. + + +How NumPy will acknowledge support +---------------------------------- + +There will be two different ways to acknowledge financial and in-kind support, +one to recognize significant active support, and another one to recognize +support received in the past and smaller amounts of support. + +Entities who fall under "significant active supporter" we'll call Sponsor. +The minimum level of support given to NumPy to be considered a Sponsor are: + +- $30,000/yr for unrestricted financial contributions +- $60,000/yr for financial contributions for a particular purpose +- $100,000/yr for in-kind contributions + +The rationale for the above levels is that unrestricted financial contributions +are typically the most valuable for the project, and the hardest to obtain. +The opposite is true for in-kind contributions. The dollar value of the levels +also reflect that NumPy's needs have grown to the point where we need at least +a few paid developers in order to effectively support our user base and +continue to move the project forward. Financial support at or above these +levels is needed to be able to make a significant difference. + +Sponsors will get acknowledged through: + +- a small logo displayed on the front page of the NumPy website +- prominent logo placement on https://numpy.org/about/ +- logos displayed in talks about NumPy by maintainers +- announcements of the sponsorship on the NumPy mailing list and the numpy-team + Twitter account + +In addition to Sponsors, we already have the concept of Institutional Partner +(defined in NumPy's +`governance document `__), +for entities who employ a NumPy maintainer and let them work on NumPy as part +of their official duties. The governance document doesn't currently define a +minimum amount of paid maintainer time needed to be considered for partnership. +Therefore we propose that level here, roughly in line with the sponsorship +levels: + +- 6 person-months/yr of paid work time for one or more NumPy maintainers or + regular contributors + +Institutional Partners get the same benefits as Sponsors, in addition to what +is specified in the NumPy governance document. + +Finally, a new page on the website (https://numpy.org/funding/, linked from the +About page) will be added to acknowledge all current and previous sponsors, +partners, and any other entities and individuals who provided $5,000 or more of +financial or in-kind support. This page will include relevant details of +support (dates, amounts, names and purpose); no logos will be used on this +page. 
The rationale for the $5,000 minimum level is to keep the amount of work +maintaining the page reasonable; the level is the equivalent of, e.g., one GSoC +or a person-week's worth of engineering time in a Western country, which seems +like a reasonable lower limit. + + +Implementation +-------------- + +The following content changes need to be made: + +- Add a section with small logos towards the bottom of the `numpy.org + `__ website. +- Create a full list of historical and current support and deploy it to + https://numpy.org/funding. +- Update the NumPy governance document for changes to Institutional Partner + eligibility requirements and benefits. +- Update https://numpy.org/about with details on how to get in touch with the + NumPy project about sponsorship related matters (see next section). + + +A NumPy Funding Team +~~~~~~~~~~~~~~~~~~~~ + +At the moment NumPy has only one official body, the Steering Council, and no +good way to get in touch with either that body or any person or group +responsible for funding and sponsorship related matters. The way this is +typically done now is to somehow find the personal email of a maintainer, and +email them in private. There is a need to organize this more transparently - a +potential sponsor isn't likely to inquire through the mailing list, nor is it +easy for a potential sponsor to know if they're reaching out to the right +person in private. + +https://numpy.org/about/ already says that NumPy has a "funding and grants" +team, however that is not the case. We propose to organize this team, name team +members on it, and add the names of those team members plus a dedicated email +address for the team to the About page. + + +Status before this proposal +--------------------------- + +Acknowledgement of support +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +At the time of writing (Dec 2020), the logos of the four largest financial +sponsors and two institutional partners are displayed on +https://numpy.org/about/. The `Nature paper about NumPy `__ +mentions some early funding. No comprehensive list of received funding and +in-kind support is published anywhere. + +Decisions on which logos to list on the website have been made mostly by the +website team. Decisions on which entities to recognize as Institutional Partner +have been made by the NumPy Steering Council. + + +NumPy governance, decision-making and financial oversight +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +*This section is meant as context for the reader, to help put the rest of this +NEP in perspective, and perhaps answer questions the reader has when reading +this as a potential sponsor.* + +NumPy has a formal governance structure defined in +`this governance document `__). +Decisions are made by consensus among all active participants in a discussion +(typically on the mailing list), and if consensus cannot be reached then the +Steering Council takes the decision (also by consensus). + +NumPy is a sponsored project of NumFOCUS, a US-based 501(c)3 nonprofit. +NumFOCUS administers NumPy funds, and ensures they are spent in accordance with +its mission and nonprofit status. In practice, NumPy has a NumFOCUS +subcommittee (with its members named in the NumPy governance document) who can +authorize financial transactions. Those transactions, for example paying a +contractor for a particular activity or deliverable, are decided on by the +NumPy Steering Council. 
+ + +Alternatives +------------ + +*Tiered sponsorship levels.* We considered using tiered sponsorship levels, and +rejected this alternative because it would be more complex, and not necessarily +communicate the right intent - the minimum levels are for us to determine how +to acknowledge support that we receive, not a commercial value proposition. +Entities typically will support NumPy because they rely on the project or want +to help advance it, and not to get brand awareness through logo placement. + +*Listing all donations*. Note that in the past we have received many smaller +donations, mostly from individuals through NumFOCUS. It would be great to list +all of those contributions, but given the way we receive information on those +donations right now, that would be quite labor-intensive. If we manage to move +to a more suitable platform, such as `Open Collective `__, +in the future, we should reconsider listing all individual donations. + + +Related Work +------------ + +Here we provide a few examples of how other project handle sponsorship +guidelines and acknowledgements. + +*Scikit-learn* has a narrow banner with logos at the bottom of +https://scikit-learn.org, and a list of present funding and past sponsors at +https://scikit-learn.org/stable/about.html#funding. Plus a separate section +"Infrastructure support" at the bottom of that same About page. + +*Jupyter* has logos of sponsors and institutional partners in two sections on +https://jupyter.org/about. Some subprojects have separate approaches, for +example sponsors are listed (by using the `all-contributors +`__ bot) in the README for +`jupyterlab-git `__. For a recent +discussion on that, see `here `_. + +*NumFOCUS* has a large banner with sponsor logos on its front page at +https://numfocus.org, and a full page with sponsors at different sponsorship +levels listed at https://numfocus.org/sponsors. They also have a +`Corporate Sponsorship Prospectus `__, +which includes a lot of detail on both sponsorship levels and benefits, as well +as how that helps NumFOCUS-affiliated projects (including NumPy). + + +Discussion +---------- + +Mailing list thread(s) discussing this NEP: TODO + + +References and Footnotes +------------------------ + +- `Inside NumPy: preparing for the next decade `__ presentation at SciPy'19 discussing impact of the first NumPy grant. +- `Issue `__ and + `email `__ + where IBM offered a $5,000 bounty for VSX SIMD support +- `JupyterLab Corporate Engagement and Contribution Guide `__ + + +.. _jupyterlab-git acknowledgements discussion: https://github.com/jupyterlab/jupyterlab-git/pull/530 + + +Copyright +--------- + +This document has been placed in the public domain. 
From 14385cdb4186c0636ad44532afa953473597dab8 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 29 Dec 2020 02:01:12 +0000 Subject: [PATCH 0289/1270] BUG, SIMD: Fix _simd module build for 64bit ARM/NEON clang --- numpy/core/src/_simd/_simd_easyintrin.inc | 4 ++-- numpy/core/src/common/simd/neon/operators.h | 12 ++++++------ numpy/core/tests/test_simd.py | 13 ++++++++++--- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/numpy/core/src/_simd/_simd_easyintrin.inc b/numpy/core/src/_simd/_simd_easyintrin.inc index f83d7a286b93..4521b2d87f07 100644 --- a/numpy/core/src/_simd/_simd_easyintrin.inc +++ b/numpy/core/src/_simd/_simd_easyintrin.inc @@ -87,10 +87,10 @@ simd_arg_converter, &arg1, \ simd_arg_converter, &arg2 \ )) return NULL; \ - simd_data data; \ + simd_data data = {.u64 = 0}; \ data.RET = NPY_CAT(SIMD__IMPL_COUNT_, CONST_RNG)( \ SIMD__REPEAT_2IMM, NAME, IN0 \ - ) npyv_##NAME(arg1.data.IN0, 0); \ + ) data.RET; \ simd_arg_free(&arg1); \ simd_arg ret = { \ .data = data, .dtype = simd_data_##RET \ diff --git a/numpy/core/src/common/simd/neon/operators.h b/numpy/core/src/common/simd/neon/operators.h index 280c5e0da42a..b43ba36537e9 100644 --- a/numpy/core/src/common/simd/neon/operators.h +++ b/numpy/core/src/common/simd/neon/operators.h @@ -34,12 +34,12 @@ #define npyv_shr_s64(A, C) vshlq_s64(A, npyv_setall_s64(-(C))) // right by an immediate constant -#define npyv_shri_u16(VEC, C) ((C) == 0 ? VEC : vshrq_n_u16(VEC, C)) -#define npyv_shri_s16(VEC, C) ((C) == 0 ? VEC : vshrq_n_s16(VEC, C)) -#define npyv_shri_u32(VEC, C) ((C) == 0 ? VEC : vshrq_n_u32(VEC, C)) -#define npyv_shri_s32(VEC, C) ((C) == 0 ? VEC : vshrq_n_s32(VEC, C)) -#define npyv_shri_u64(VEC, C) ((C) == 0 ? VEC : vshrq_n_u64(VEC, C)) -#define npyv_shri_s64(VEC, C) ((C) == 0 ? VEC : vshrq_n_s64(VEC, C)) +#define npyv_shri_u16 vshrq_n_u16 +#define npyv_shri_s16 vshrq_n_s16 +#define npyv_shri_u32 vshrq_n_u32 +#define npyv_shri_s32 vshrq_n_s32 +#define npyv_shri_u64 vshrq_n_u64 +#define npyv_shri_s64 vshrq_n_s64 /*************************** * Logical diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 71356f81258e..23a5bb6c3064 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -173,14 +173,21 @@ def test_operators_shift(self): # left shift shl = self.shl(vdata_a, count) assert shl == data_shl_a - # left shift by an immediate constant - shli = self.shli(vdata_a, count) - assert shli == data_shl_a # load to cast data_shr_a = self.load([a >> count for a in data_a]) # right shift shr = self.shr(vdata_a, count) assert shr == data_shr_a + + # shift by zero or max or out-range immediate constant is not applicable and illogical + for count in range(1, self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift by an immediate constant + shli = self.shli(vdata_a, count) + assert shli == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) # right shift by an immediate constant shri = self.shri(vdata_a, count) assert shri == data_shr_a From fcaaf276ee6dc7149a9ef442111748970fa46925 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Mon, 28 Dec 2020 22:15:34 -0800 Subject: [PATCH 0290/1270] DOC: Update reference to verbatim in a few location. Single backticks default role is reference, while here it seem to be for verbatim. Fix it in a couple of places. 
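For context: in Sphinx/numpydoc markup, single backticks invoke the default role and are treated as a cross-reference, while double backticks produce inline literal text. An illustrative docstring sketch of the convention being applied (not taken from the patch):

    def fit(x, y, w=None):
        """Toy example (not NumPy code) of the numpydoc markup convention.

        Parameters
        ----------
        w : array_like, optional
            Weights.  If not None, the contribution of each point
            ``(x[i], y[i])`` is weighted by ``w[i]``; writing `w[i]` with
            single backticks would instead be parsed as a cross-reference.
        """
        return x, y, w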
--- numpy/polynomial/_polybase.py | 2 +- numpy/polynomial/chebyshev.py | 2 +- numpy/polynomial/hermite.py | 2 +- numpy/polynomial/hermite_e.py | 2 +- numpy/polynomial/laguerre.py | 2 +- numpy/polynomial/legendre.py | 2 +- numpy/polynomial/polynomial.py | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 59c380f10e3a..4462f9d6b62c 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -940,7 +940,7 @@ class domain in NumPy 1.4 and ``None`` in later versions. also returned. w : array_like, shape (M,), optional Weights. If not None the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 6745c9371b57..1149cdffa22c 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -1586,7 +1586,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): information from the singular value decomposition is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index c679c529880f..eef5c25b225e 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1311,7 +1311,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): information from the singular value decomposition is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 1ce8ebe04ca2..05d1337b0b33 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1302,7 +1302,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): information from the singular value decomposition is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 9cff0b71ca68..69d55751087d 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1308,7 +1308,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): information from the singular value decomposition is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. 
Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 427f9f82f381..23ddd07cacbd 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -1328,7 +1328,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): information from the singular value decomposition is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 44784023bfd9..940eed5e38d5 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1253,7 +1253,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): to solve the fit's matrix equation) is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. From 74249e3f8593df5f5af0f8c0888cbfbc9c480658 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Tue, 29 Dec 2020 08:36:29 -0700 Subject: [PATCH 0291/1270] MAINT: multiline regex class simplify * follow up to gh-18083 covering multi-line uses of `re.compile(..` and some cases for `re.match(..` with single (meta)character classes --- numpy/core/code_generators/genapi.py | 2 +- numpy/f2py/capi_maps.py | 2 +- numpy/f2py/crackfortran.py | 12 ++++++------ numpy/f2py/f2py2e.py | 6 +++--- numpy/lib/_version.py | 2 +- numpy/linalg/lapack_lite/clapack_scrub.py | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index ca6a228284a4..9c3666103053 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -175,7 +175,7 @@ def split_arguments(argstr): def finish_arg(): if current_argument: argstr = ''.join(current_argument).strip() - m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr) + m = re.match(r'(.*(\s+|\*))(\w+)$', argstr) if m: typename = m.group(1).strip() name = m.group(3) diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index 472ddde43e66..fe0d4a52bd16 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -307,7 +307,7 @@ def getstrlength(var): len = a['*'] elif 'len' in a: len = a['len'] - if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len): + if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): if isintent_hide(var): errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( repr(var))) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 3d4dffde9b05..16a9a6fab94e 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -868,7 +868,7 @@ def appenddecl(decl, decl2, force=1): return decl selectpattern = re.compile( - r'\s*(?P(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) + r'\s*(?P(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) nameargspattern = re.compile( 
r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P.*)\s*@\)@))*\s*\Z', re.I) callnameargspattern = re.compile( @@ -1389,7 +1389,7 @@ def analyzeline(m, case, line): previous_context = ('common', bn, groupcounter) elif case == 'use': m1 = re.match( - r'\A\s*(?P\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) + r'\A\s*(?P\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) if m1: mm = m1.groupdict() if 'use' not in groupcache[groupcounter]: @@ -1406,7 +1406,7 @@ def analyzeline(m, case, line): for l in ll: if '=' in l: m2 = re.match( - r'\A\s*(?P\b[\w]+\b)\s*=\s*>\s*(?P\b[\w]+\b)\s*\Z', l, re.I) + r'\A\s*(?P\b\w+\b)\s*=\s*>\s*(?P\b\w+\b)\s*\Z', l, re.I) if m2: rl[m2.group('local').strip()] = m2.group( 'use').strip() @@ -1484,13 +1484,13 @@ def cracktypespec0(typespec, ll): ##### namepattern = re.compile(r'\s*(?P\b\w+\b)\s*(?P.*)\s*\Z', re.I) kindselector = re.compile( - r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|[*]\s*(?P.*?))\s*\Z', re.I) + r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|\*\s*(?P.*?))\s*\Z', re.I) charselector = re.compile( - r'\s*(\((?P.*)\)|[*]\s*(?P.*))\s*\Z', re.I) + r'\s*(\((?P.*)\)|\*\s*(?P.*))\s*\Z', re.I) lenkindpattern = re.compile( r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)|))\s*\Z', re.I) lenarraypattern = re.compile( - r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*[*]\s*(?P.*?)|([*]\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) + r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*\*\s*(?P.*?)|(\*\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) def removespaces(expr): diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 1b922ea901f6..a14f068f15dd 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -518,7 +518,7 @@ def run_compile(): sysinfo_flags = [f[7:] for f in sysinfo_flags] _reg2 = re.compile( - r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include') + r'--((no-|)(wrap-functions|lower)|debug-capi|quiet)|-include') f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] f2py_flags2 = [] @@ -536,11 +536,11 @@ def run_compile(): sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] _reg3 = re.compile( - r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)') + r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in flib_flags] _reg4 = re.compile( - r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))') + r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in fc_flags] diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index d4098acb5aea..0f26d6503c5d 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -54,7 +54,7 @@ class NumpyVersion(): def __init__(self, vstring): self.vstring = vstring - ver_main = re.match(r'\d[.]\d+[.]\d+', vstring) + ver_main = re.match(r'\d\.\d+\.\d+', vstring) if not ver_main: raise ValueError("Not a valid numpy version string") diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index f3e7d25d2eab..738fad7fe1ee 100644 --- 
a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -224,7 +224,7 @@ def OutOfHeader(line): def removeSubroutinePrototypes(source): expression = re.compile( - r'/[*] Subroutine [*]/^\s*(?:(?:inline|static)\s+){0,2}(?!else|typedef|return)\w+\s+\*?\s*(\w+)\s*\([^0]+\)\s*;?' + r'/\* Subroutine \*/^\s*(?:(?:inline|static)\s+){0,2}(?!else|typedef|return)\w+\s+\*?\s*(\w+)\s*\([^0]+\)\s*;?' ) lines = LineQueue() for line in StringIO(source): From f36e940a4726abb38c4929259e8eaf00d68c3d18 Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Tue, 29 Dec 2020 18:53:53 +0100 Subject: [PATCH 0292/1270] DOC: Avoid using "set of" when talking about an ordered list. ... or when the input isn't/cannot be a set. I left a few usages, e.g. in random sampling, where "set" is reasonable as informal description of an array as the order doesn't matter; however, for e.g. np.gradient the order of the returned list is clearly important, so "set" is wrong. Also some other minor doc edits noticed during the grepping: using `shape` instead of `form` in `cov` is consistent with most other places; the wording in `Polynomial.trim` now matches other methods on the same class. --- benchmarks/benchmarks/bench_app.py | 4 ++-- benchmarks/benchmarks/common.py | 2 +- numpy/core/fromnumeric.py | 2 +- numpy/core/src/multiarray/methods.c | 2 +- numpy/lib/function_base.py | 2 +- numpy/lib/histograms.py | 2 +- numpy/ma/core.py | 4 ++-- numpy/ma/extras.py | 2 +- numpy/polynomial/_polybase.py | 2 +- numpy/testing/_private/parameterized.py | 2 +- tools/npy_tempita/__init__.py | 2 +- tools/refguide_check.py | 4 ++-- 12 files changed, 15 insertions(+), 15 deletions(-) diff --git a/benchmarks/benchmarks/bench_app.py b/benchmarks/benchmarks/bench_app.py index bee95c201172..d22aa2e09604 100644 --- a/benchmarks/benchmarks/bench_app.py +++ b/benchmarks/benchmarks/bench_app.py @@ -70,8 +70,8 @@ def maxes_of_dots(self, arrays): Arrays must agree only on the first dimension. - For numpy it a join benchmark of dot products and max() - on a set of arrays. + Numpy uses this as a simultaneous benchmark of 1) dot products + and 2) max(, axis=). """ feature_scores = ([0] * len(arrays)) for (i, sd) in enumerate(arrays): diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index b65cc5fd212a..b95d09192e5e 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -14,7 +14,7 @@ # time-consuming functions (ufunc, linalg, etc) nxs, nys = 100, 100 -# a set of interesting types to test +# a list of interesting types to test TYPES1 = [ 'int16', 'float16', 'int32', 'float32', diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index efb052bc244d..52df1aad99f9 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -308,7 +308,7 @@ def _choose_dispatcher(a, choices, out=None, mode=None): @array_function_dispatch(_choose_dispatcher) def choose(a, choices, out=None, mode='raise'): """ - Construct an array from an index array and a set of arrays to choose from. + Construct an array from an index array and a list of arrays to choose from. 
First of all, if confused or uncertain, definitely look at the Examples - in its full generality, this function is less simple than it might diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 9c8bb4135005..8bcf591a27e9 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -1019,7 +1019,7 @@ array_getarray(PyArrayObject *self, PyObject *args) } /* - * Check whether any of a set of input and output args have a non-default + * Check whether any of the input and output args have a non-default * __array_ufunc__ method. Return 1 if so, 0 if not, and -1 on error. * * This function primarily exists to help ndarray.__array_ufunc__ determine diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 696fe617b4be..276ffa5d4f4b 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -846,7 +846,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Returns ------- gradient : ndarray or list of ndarray - A set of ndarrays (or a single ndarray if there is only one dimension) + A list of ndarrays (or a single ndarray if there is only one dimension) corresponding to the derivatives of f with respect to each dimension. Each derivative has the same shape as f. diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py index 1a9b41ceda6b..7af67a7ee69d 100644 --- a/numpy/lib/histograms.py +++ b/numpy/lib/histograms.py @@ -678,7 +678,7 @@ def _histogram_dispatcher( def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): r""" - Compute the histogram of a set of data. + Compute the histogram of a dataset. Parameters ---------- diff --git a/numpy/ma/core.py b/numpy/ma/core.py index d6af22337cc3..54cb12f17e64 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -7339,9 +7339,9 @@ def where(condition, x=_NoValue, y=_NoValue): def choose(indices, choices, out=None, mode='raise'): """ - Use an index array to construct a new array from a set of choices. + Use an index array to construct a new array from a list of choices. - Given an array of integers and a set of n choice arrays, this method + Given an array of integers and a list of n choice arrays, this method will create a new array that merges each of the choice arrays. Where a value in `a` is i, the new array will have the value that choices[i] contains in the same place. diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 1bf03e966326..96e64914a5df 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1322,7 +1322,7 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same - form as `x`. + shape as `x`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 4462f9d6b62c..ef3f9896da01 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -694,7 +694,7 @@ def trim(self, tol=0): Returns ------- new_series : series - Contains the new set of coefficients. + New instance of series with trimmed coefficients. 
""" coef = pu.trimcoef(self.coef, tol) diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py index ac7db6c4041f..55a204e3f887 100644 --- a/numpy/testing/_private/parameterized.py +++ b/numpy/testing/_private/parameterized.py @@ -339,7 +339,7 @@ def assert_not_in_testcase_subclass(self): "'@parameterized.expand' instead.") def _terrible_magic_get_defining_classes(self): - """ Returns the set of parent classes of the class currently being defined. + """ Returns the list of parent classes of the class currently being defined. Will likely only work if called from the ``parameterized`` decorator. This function is entirely @brandon_rhodes's fault, as he suggested the implementation: http://stackoverflow.com/a/8793684/71522 diff --git a/tools/npy_tempita/__init__.py b/tools/npy_tempita/__init__.py index 50a99510450d..fedcd91f45b8 100644 --- a/tools/npy_tempita/__init__.py +++ b/tools/npy_tempita/__init__.py @@ -705,7 +705,7 @@ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): def trim_lex(tokens): r""" - Takes a lexed set of tokens, and removes whitespace when there is + Takes a lexed list of tokens, and removes whitespace when there is a directive on a line by itself: >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) diff --git a/tools/refguide_check.py b/tools/refguide_check.py index f0f6461b77ea..ddcc1028d5d8 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -305,7 +305,7 @@ def compare(all_dict, others, names, module_name): List of non deprecated sub modules for module_name others : list List of sub modules for module_name - names : set + names : set Set of function names or special directives present in docstring of module_name module_name : ModuleType @@ -780,7 +780,7 @@ def _run_doctests(tests, full_name, verbose, doctest_warnings): Parameters ---------- - tests: list + tests : list full_name : str From c002440ff26d9f7f5ac3dfdff27637212ef512ac Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Dec 2020 21:48:52 +0100 Subject: [PATCH 0293/1270] NEP: update backwards compatibility NEP text, remove examples The removed examples were ones that one or more people objected to, because they were either not completely settled, or controversial. --- doc/neps/nep-0023-backwards-compatibility.rst | 110 ++++-------------- 1 file changed, 21 insertions(+), 89 deletions(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index c8bd7c180468..b5caecd914a2 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -43,11 +43,9 @@ General principles: - When assessing the costs, keep in mind that most users do not read the mailing list, do not look at deprecation warnings, and sometimes wait more than one or two years before upgrading from their old version. And that NumPy has - many hundreds of thousands or even a couple of million users, so "no one will - do or use this" is very likely incorrect. -- Benefits include improved functionality, usability and performance (in order - of importance), as well as lower maintenance cost and improved future - extensibility. + millions of users, so "no one will do or use this" is very likely incorrect. +- Benefits include improved functionality, usability and performance, + as well as lower maintenance cost and improved future extensibility. - Bug fixes are exempt from the backwards compatibility policy. 
However in case of serious impact on users (e.g. a downstream library doesn't build anymore), even bug fixes may have to be delayed for one or more releases. @@ -89,21 +87,6 @@ forces users to change their code more than once, which is almost never the right thing to do. Instead, a better approach here would have been to deprecate ``histogram`` and introduce a new function ``hist`` in its place. -**Returning a view rather than a copy** - -The ``ndarray.diag`` method used to return a copy. A view would be better for -both performance and design consistency. This change was warned about -(``FutureWarning``) in v.8.0, and in v1.9.0 ``diag`` was changed to return -a *read-only* view. The planned change to a writeable view in v1.10.0 was -postponed due to backwards compatibility concerns, and is still an open issue -(gh-7661). - -What should have happened instead: nothing. This change resulted in a lot of -discussions and wasted effort, did not achieve its final goal, and was not that -important in the first place. Finishing the change to a *writeable* view in -the future is not desired, because it will result in users silently getting -different results if they upgraded multiple versions or simply missed the -warnings. **Disallowing indexing with floats** @@ -120,75 +103,29 @@ scikit-learn. Overall the change was worth the cost, and introducing it in master first to allow testing, then removing it again before a release, is a useful strategy. -Similar recent deprecations also look like good examples of +Similar deprecations that also look like good examples of cleanups/improvements: -- removing deprecated boolean indexing (gh-8312) -- deprecating truth testing on empty arrays (gh-9718) -- deprecating ``np.sum(generator)`` (gh-10670, one issue with this one is that - its warning message is wrong - this should error in the future). +- removing deprecated boolean indexing (in 2016, see `gh-8312 `__) +- deprecating truth testing on empty arrays (in 2017, see `gh-9718 `__) + **Removing the financial functions** -The financial functions (e.g. ``np.pmt``) are badly named, are present in the -main NumPy namespace, and don't really fit well within NumPy's scope. -They were added in 2008 after +The financial functions (e.g. ``np.pmt``) had short non-descriptive names, were +present in the main NumPy namespace, and didn't really fit well within NumPy's +scope. They were added in 2008 after `a discussion `_ on the mailing list where opinion was divided (but a majority in favor). -At the moment these functions don't cause a lot of overhead, however there are -multiple issues and PRs a year for them which cost maintainer time to deal -with. And they clutter up the ``numpy`` namespace. Discussion in 2013 happened -on removing them again (gh-2880). - -This case is borderline, but given that they're clearly out of scope, -deprecation and removal out of at least the main ``numpy`` namespace can be -proposed. Alternatively, document clearly that new features for financial -functions are unwanted, to keep the maintenance costs to a minimum. - -**Examples of features not added because of backwards compatibility** - -TODO: do we have good examples here? Possibly subclassing related? - - -Removing complete submodules -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The financial functions didn't cause a lot of overhead, however there were +still multiple issues and PRs a year for them which cost maintainer time to +deal with. And they cluttered up the ``numpy`` namespace. 
Discussion on +removing them happened in 2013 (gh-2880, rejected) and then again in 2019 +(:ref:`NEP32`, accepted without significant complaints). -This year there have been suggestions to consider removing some or all of -``numpy.distutils``, ``numpy.f2py``, ``numpy.linalg``, and ``numpy.random``. -The motivation was that all these cost maintenance effort, and that they slow -down work on the core of NumPy (ndarrays, dtypes and ufuncs). - -The impact on downstream libraries and users would be very large, and -maintenance of these modules would still have to happen. Therefore this is -simply not a good idea; removing these submodules should not happen even for -a new major version of NumPy. - - -Subclassing of ndarray -^^^^^^^^^^^^^^^^^^^^^^ - -Subclassing of ``ndarray`` is a pain point. ``ndarray`` was not (or at least -not well) designed to be subclassed. Despite that, a lot of subclasses have -been created even within the NumPy code base itself, and some of those (e.g. -``MaskedArray``, ``astropy.units.Quantity``) are quite popular. The main -problems with subclasses are: - -- They make it hard to change ``ndarray`` in ways that would otherwise be - backwards compatible. -- Some of them change the behavior of ndarray methods, making it difficult to - write code that accepts array duck-types. - -Subclassing ``ndarray`` has been officially discouraged for a long time. Of -the most important subclasses, ``np.matrix`` will be deprecated (see gh-10142) -and ``MaskedArray`` will be kept in NumPy (`NEP 17 -`_). -``MaskedArray`` will ideally be rewritten in a way such that it uses only -public NumPy APIs. For subclasses outside of NumPy, more work is needed to -provide alternatives (e.g. mixins, see gh-9016 and gh-10446) or better support -for custom dtypes (see gh-2899). Until that is done, subclasses need to be -taken into account when making change to the NumPy code base. A future change -in NumPy to not support subclassing will certainly need a major version -increase. +Given that they were clearly outside of NumPy's scope, moving them to a +separate ``numpy-financial`` package and removing them from NumPy after a +deprecation period made sense. Policy @@ -270,21 +207,16 @@ confusing than helpful. gh-10156 contains more discussion on this alternative. Discussion ---------- -TODO - -This section may just be a bullet list including links to any discussions -regarding the NEP: - -- This includes links to mailing list threads or relevant GitHub issues. +- `Mailing list discussion on the first version of this NEP in 2018 `__ References and Footnotes ------------------------ -.. [1] TODO +None Copyright --------- -This document has been placed in the public domain. [1]_ +This document has been placed in the public domain. From c40ced602869c56a915eb7dc86d91a44e9843fe5 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Dec 2020 21:52:13 +0100 Subject: [PATCH 0294/1270] NEP: remove versioning related content from backwards compat NEP There was agreement in the previous discussion that this doesn't belong in this NEP. 
--- doc/neps/nep-0023-backwards-compatibility.rst | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index b5caecd914a2..0b58a5f4b429 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -161,13 +161,7 @@ Removal of deprecated functionality: impact of the removal is such that a major version number increase is warranted. - shall be listed in the release notes of the release where the removal happened. - -Versioning: - -- removal of deprecated code can be done in any minor (but not bugfix) release. -- for heavily used functionality (e.g. removal of ``np.matrix``, of a whole submodule, - or significant changes to behavior for subclasses) the major version number shall - be increased. +- can be done in any minor (but not bugfix) release. In concrete cases where this policy needs to be applied, decisions are made according to the `NumPy governance model @@ -194,15 +188,6 @@ ecosystem - being fairly conservative is required in order to not increase the extra maintenance for downstream libraries and end users to an unacceptable level. -**Semantic versioning.** - -This would change the versioning scheme for code removals; those could then -only be done when the major version number is increased. Rationale for -rejection: semantic versioning is relatively common in software engineering, -however it is not at all common in the Python world. Also, it would mean that -NumPy's version number simply starts to increase faster, which would be more -confusing than helpful. gh-10156 contains more discussion on this alternative. - Discussion ---------- @@ -213,7 +198,7 @@ Discussion References and Footnotes ------------------------ -None +- `Issue requesting semantic versioning `__ Copyright From ed5dd615732951a596767064cb9d86e7b2f86a38 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Dec 2020 23:03:03 +0100 Subject: [PATCH 0295/1270] NEP: rewrite the backwards compatibility NEP contents Also add examples of how deprecations and future warnings should be done, taken from the current code base. --- doc/neps/nep-0023-backwards-compatibility.rst | 250 +++++++++++++----- 1 file changed, 180 insertions(+), 70 deletions(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index 0b58a5f4b429..acfb379b7fd6 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -19,44 +19,195 @@ processes for individual cases where breaking backwards compatibility is considered. -Detailed description +Motivation and Scope -------------------- NumPy has a very large user base. Those users rely on NumPy being stable and the code they write that uses NumPy functionality to keep working. NumPy is also actively maintained and improved -- and sometimes improvements -require, or are made much easier, by breaking backwards compatibility. +require, or are made much easier by, breaking backwards compatibility. Finally, there are trade-offs in stability for existing users vs. avoiding errors or having a better user experience for new users. These competing -needs often give rise to heated debates and delays in accepting or rejecting +needs often give rise to long debates and to delays in accepting or rejecting contributions. 
This NEP tries to address that by providing a policy as well as examples and rationales for when it is or isn't a good idea to break backwards compatibility. -General principles: +In scope for this NEP are: -- Aim not to break users' code unnecessarily. -- Aim never to change code in ways that can result in users silently getting - incorrect results from their previously working code. -- Backwards incompatible changes can be made, provided the benefits outweigh - the costs. -- When assessing the costs, keep in mind that most users do not read the mailing - list, do not look at deprecation warnings, and sometimes wait more than one or - two years before upgrading from their old version. And that NumPy has - millions of users, so "no one will do or use this" is very likely incorrect. -- Benefits include improved functionality, usability and performance, - as well as lower maintenance cost and improved future extensibility. -- Bug fixes are exempt from the backwards compatibility policy. However in case - of serious impact on users (e.g. a downstream library doesn't build anymore), - even bug fixes may have to be delayed for one or more releases. -- The Python API and the C API will be treated in the same way. +- Principles of NumPy's approach to backwards compatibility. +- How to deprecate functionality, and when to remove already deprecated + functionality. +- Decision making process for deprecations and removals. +Out of scope are: -Examples -^^^^^^^^ +- Making concrete decisions about deprecations of particular functionality. +- NumPy's versioning scheme. -We now discuss a number of concrete examples to illustrate typical issues -and trade-offs. + +General principles +------------------ + +When considering proposed changes that are backwards incompatible, the +main principles the NumPy developers use when making a decision are: + +1. Changes need to benefit users more than they harm them. +2. NumPy is widely used so breaking changes should by default be assumed to be + fairly harmful. +3. Decisions should be based on data and actual effects on users and downstream + packages rather than, e.g., appealing to the docs or for stylistic reasons. +4. Silently getting a wrong answer is much worse than getting a loud error. + +When assessing the costs of proposed changes, keep in mind that most users do +not read the mailing list, do not look at deprecation warnings, and sometimes +wait more than one or two years before upgrading from their old version. And +that NumPy has millions of users, so "no one will do or use this" is very +likely incorrect. + +Benefits include improved functionality, usability and performance, as well as +lower maintenance cost and improved future extensibility. + +Fixes for clear bugs are exempt from this backwards compatibility policy. +However in case of serious impact on users (e.g. a downstream library doesn't +build anymore or would start giving incorrect results), even bug fixes may have +to be delayed for one or more releases. + + +Strategies related to deprecations +---------------------------------- + +Getting hard data on the impact of a deprecation of often difficult. Strategies +that can be used to assess such impact include: + +- Use a code search engine ([1]_) or static ([2]_) or dynamic ([3]_) code + analysis tools to determine where and how the functionality is used. +- Testing prominent downstream libraries against a development build of NumPy + containing the proposed change to get real-world data on its impact. 
+- Making a change in master and reverting it, if needed, before a release. We + do encourage other packages to test against NumPy's master branch, so this + often turns up issues quickly. + +If the impact is unclear or significant, it is often good to consider +alternatives to deprecations. For example discouraging use in documentation +only, or moving the documentation for the functionality to a less prominent +place or even removing it completely. Commenting on open issues related to it +that they are low-prio or labeling them as "wontfix" will also be a signal to +users, and reduce the maintenance effort needing to be spent. + + +Implementing deprecations and removals +-------------------------------------- + +Deprecation warnings are necessary in all cases where functionality +will eventually be removed. If there is no intent to remove functionality, +then it should not be deprecated either. A "please don't use this" in the +documentation or other type of warning should be used instead. + +Deprecations: + +- shall include the version number of the release in which the functionality + was deprecated. +- shall include information on alternatives to the deprecated functionality, or a + reason for the deprecation if no clear alternative is available. +- shall use ``VisibleDeprecationWarning`` rather than ``DeprecationWarning`` + for cases of relevance to end users. For cases only relevant to + downstream libraries, a regular ``DeprecationWarning`` is fine. + *Rationale: regular deprecation warnings are invisible by default; library + authors should be aware how deprecations work and test for them, but we can't + expect this from all users.* +- shall be listed in the release notes of the release where the deprecation is + first present. +- shall set a ``stacklevel``, so the warning appears to come from the correct + place. +- shall be mentioned in the documentation for the functionality. A + ``.. deprecated::`` directive can be used for this. + +Examples of good deprecation warnings: + +.. code-block:: python + + warnings.warn('np.asscalar(a) is deprecated since NumPy 1.16.0, use ' + 'a.item() instead', DeprecationWarning, stacklevel=3) + + warnings.warn("Importing from numpy.testing.utils is deprecated " + "since 1.15.0, import from numpy.testing instead.", + DeprecationWarning, stacklevel=2) + + # A change in NumPy 1.14.0 for Python 3 loadtxt/genfromtext, slightly + # tweaked in this NEP (original didn't have version number). + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated since NumPy 1.14.0. Set the encoding, " + "use None for the system default.", + np.VisibleDeprecationWarning, stacklevel=2) + +Removal of deprecated functionality: + +- shall be done after at least 2 releases (assuming the current 6-monthly + release cycle; if that changes, there shall be at least 1 year between + deprecation and removal). +- shall be listed in the release notes of the release where the removal happened. +- can be done in any minor (but not bugfix) release. + +For backwards incompatible changes that aren't "deprecate and remove" but for +which code will start behaving differently, a ``FutureWarning`` should be +used. Release notes, mentioning version number and using ``stacklevel`` should +be done in the same way as for deprecation warnings. A ``.. versionchanged::`` +directive can be used in the documentation to indicate when the behavior +changed: + +.. 
code-block:: python + + def argsort(self, axis=np._NoValue, ...): + """ + Parameters + ---------- + axis : int, optional + Axis along which to sort. If None, the default, the flattened array + is used. + + .. versionchanged:: 1.13.0 + Previously, the default was documented to be -1, but that was + in error. At some future date, the default will change to -1, as + originally intended. + Until then, the axis should be given explicitly when + ``arr.ndim > 1``, to avoid a FutureWarning. + """ + ... + warnings.warn( + "In the future the default for argsort will be axis=-1, not the " + "current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=3) + + +Decision making +~~~~~~~~~~~~~~~ + +In concrete cases where this policy needs to be applied, decisions are made according +to the `NumPy governance model +`_. + +All deprecations must be proposed on the mailing list, in order to give everyone +with an interest in NumPy development to be able to comment. Removal of +deprecated functionality does not need discussion on the mailing list. + + +Functionality with more strict deprecation policies +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- ``numpy.random`` has its own backwards compatibility policy, + see `NEP 19 `_. +- The file format for ``.npy`` and ``.npz`` files must not be changed in a backwards + incompatible way. + + +Example cases +------------- + +We now discuss a few concrete examples from NumPy's history to illustrate +typical issues and trade-offs. **Changing the behavior of a function** @@ -128,53 +279,6 @@ separate ``numpy-financial`` package and removing them from NumPy after a deprecation period made sense. -Policy ------- - -1. Code changes that have the potential to silently change the results of a users' - code must never be made (except in the case of clear bugs). -2. Code changes that break users' code (i.e. the user will see a clear exception) - can be made, *provided the benefit is worth the cost* and suitable deprecation - warnings have been raised first. -3. Deprecation warnings are in all cases warnings that functionality will be removed. - If there is no intent to remove functionality, then deprecation in documentation - only or other types of warnings shall be used. -4. Deprecations for stylistic reasons (e.g. consistency between functions) are - strongly discouraged. - -Deprecations: - -- shall include the version numbers of both when the functionality was deprecated - and when it will be removed (either two releases after the warning is - introduced, or in the next major version). -- shall include information on alternatives to the deprecated functionality, or a - reason for the deprecation if no clear alternative is available. -- shall use ``VisibleDeprecationWarning`` rather than ``DeprecationWarning`` - for cases of relevance to end users (as opposed to cases only relevant to - libraries building on top of NumPy). -- shall be listed in the release notes of the release where the deprecation happened. - -Removal of deprecated functionality: - -- shall be done after 2 releases (assuming a 6-monthly release cycle; if that changes, - there shall be at least 1 year between deprecation and removal), unless the - impact of the removal is such that a major version number increase is - warranted. -- shall be listed in the release notes of the release where the removal happened. -- can be done in any minor (but not bugfix) release. 
- -In concrete cases where this policy needs to be applied, decisions are made according -to the `NumPy governance model -`_. - -Functionality with more strict policies: - -- ``numpy.random`` has its own backwards compatibility policy, - see `NEP 19 `_. -- The file format for ``.npy`` and ``.npz`` files must not be changed in a backwards - incompatible way. - - Alternatives ------------ @@ -200,6 +304,12 @@ References and Footnotes - `Issue requesting semantic versioning `__ +.. [1] https://searchcode.com/ + +.. [2] https://github.com/Quansight-Labs/python-api-inspect + +.. [3] https://github.com/data-apis/python-record-api + Copyright --------- From c755c91d124ab41616832a26fa2da8205f74aaf5 Mon Sep 17 00:00:00 2001 From: Nilo Kruchelski <42773210+krnilo@users.noreply.github.com> Date: Wed, 30 Dec 2020 16:09:08 -0300 Subject: [PATCH 0296/1270] DOC: add missing details to linalg.lstsq docstring It turns out that lstsq also minimizes the 2-norm of x when a is rank-deficient. I found that by searching the documentation for the LAPACK library, which is the current implementation of lstsq (as of Dec 2020). Ref: https://www.netlib.org/lapack/lug/node27.html --- numpy/linalg/linalg.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 7775e4c32fcd..132a1a5d8947 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -2171,13 +2171,14 @@ def lstsq(a, b, rcond="warn"): r""" Return the least-squares solution to a linear matrix equation. - Computes the vector x that approximatively solves the equation + Computes the vector `x` that approximatively solves the equation ``a @ x = b``. The equation may be under-, well-, or over-determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). If `a` is square and of full rank, then `x` (but for round-off error) is the "exact" solution of the equation. Else, `x` minimizes the - Euclidean 2-norm :math:`|| b - a x ||`. + Euclidean 2-norm :math:`||b-ax||`. If there are multiple minimizing + solutions, the one with the smallest 2-norm :math:`||x||` is returned. Parameters ---------- From 860c8b82939d1535351f7b651c284a55efe21b10 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 30 Dec 2020 16:32:49 -0700 Subject: [PATCH 0297/1270] STY: Add spaces around '-'. [ci skip] --- numpy/linalg/linalg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 132a1a5d8947..46fb2502e5cc 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -2177,7 +2177,7 @@ def lstsq(a, b, rcond="warn"): equal to, or greater than its number of linearly independent columns). If `a` is square and of full rank, then `x` (but for round-off error) is the "exact" solution of the equation. Else, `x` minimizes the - Euclidean 2-norm :math:`||b-ax||`. If there are multiple minimizing + Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing solutions, the one with the smallest 2-norm :math:`||x||` is returned. Parameters From 2432ea3cbff7b467e9b948f893cca4111c8417c7 Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Fri, 1 Jan 2021 12:34:26 -0500 Subject: [PATCH 0298/1270] TST: Avoid changing odd tempfile names in tests' site.cfg CI once produced a tempfile name with the string 'mkl' embedded. The old code changed this as well as the section name. 
This should ensure only section names get changed, and the restriction on the number of replacements should catch any weird corner cases, since I think the sections came first. --- numpy/distutils/tests/test_system_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py index ec15126f7f7b..b722281ad531 100644 --- a/numpy/distutils/tests/test_system_info.py +++ b/numpy/distutils/tests/test_system_info.py @@ -269,7 +269,7 @@ def test_overrides(self): # But if we copy the values to a '[mkl]' section the value # is correct with open(cfg, 'r') as fid: - mkl = fid.read().replace('ALL', 'mkl') + mkl = fid.read().replace('[ALL]', '[mkl]', 1) with open(cfg, 'w') as fid: fid.write(mkl) info = mkl_info() @@ -277,7 +277,7 @@ def test_overrides(self): # Also, the values will be taken from a section named '[DEFAULT]' with open(cfg, 'r') as fid: - dflt = fid.read().replace('mkl', 'DEFAULT') + dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1) with open(cfg, 'w') as fid: fid.write(dflt) info = mkl_info() From 4799b904b759c035041d30e4cf2fe7340aac3955 Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Fri, 1 Jan 2021 13:07:27 -0500 Subject: [PATCH 0299/1270] TST: Turn some tests with loos into parametrized tests. I wanted to mark only some parts of the loops as xfail for another PR. That part of the PR probably won't make it into numpy, but I think parametrized tests give better information on failure than tests with loops do, so I'm submitting these here. --- numpy/core/tests/test_multiarray.py | 38 ++++++++++------- numpy/core/tests/test_scalarmath.py | 66 +++++++++++++++++------------ 2 files changed, 61 insertions(+), 43 deletions(-) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 048b1688f2a7..624e1aa2dfea 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -8576,23 +8576,29 @@ class MyAlwaysEqualNew(MyAlwaysEqual): assert_equal(array != my_always_equal, 'ne') -def test_npymath_complex(): +@pytest.mark.parametrize( + ["fun", "npfun", "x", "y", "test_dtype"], + [ + pytest.param( + fun, npfun, x, y, test_dtype + ) + for (fun, npfun), x, y, test_dtype in itertools.product( + [ + (_multiarray_tests.npy_cabs, np.absolute), + (_multiarray_tests.npy_carg, np.angle), + ], + [1, np.inf, -np.inf, np.nan], + [1, np.inf, -np.inf, np.nan], + [np.complex64, np.complex128, np.clongdouble], + ) + ], +) +def test_npymath_complex(fun, npfun, x, y, test_dtype): # Smoketest npymath functions - from numpy.core._multiarray_tests import ( - npy_cabs, npy_carg) - - funcs = {npy_cabs: np.absolute, - npy_carg: np.angle} - vals = (1, np.inf, -np.inf, np.nan) - types = (np.complex64, np.complex128, np.clongdouble) - - for fun, npfun in funcs.items(): - for x, y in itertools.product(vals, vals): - for t in types: - z = t(complex(x, y)) - got = fun(z) - expected = npfun(z) - assert_allclose(got, expected) + z = test_dtype(complex(x, y)) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) def test_npymath_real(): diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index d8529418e58b..5b07e36fae7d 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -653,33 +653,45 @@ def test_result(self): class TestAbs: - def _test_abs_func(self, absfunc): - for tp in floating_types + complex_floating_types: - x = tp(-1.5) - 
assert_equal(absfunc(x), 1.5) - x = tp(0.0) - res = absfunc(x) - # assert_equal() checks zero signedness - assert_equal(res, 0.0) - x = tp(-0.0) - res = absfunc(x) - assert_equal(res, 0.0) - - x = tp(np.finfo(tp).max) - assert_equal(absfunc(x), x.real) - - x = tp(np.finfo(tp).tiny) - assert_equal(absfunc(x), x.real) - - x = tp(np.finfo(tp).min) - assert_equal(absfunc(x), -x.real) - - def test_builtin_abs(self): - self._test_abs_func(abs) - - def test_numpy_abs(self): - self._test_abs_func(np.abs) - + def _test_abs_func(self, absfunc, test_dtype): + x = test_dtype(-1.5) + assert_equal(absfunc(x), 1.5) + x = test_dtype(0.0) + res = absfunc(x) + # assert_equal() checks zero signedness + assert_equal(res, 0.0) + x = test_dtype(-0.0) + res = absfunc(x) + assert_equal(res, 0.0) + + x = test_dtype(np.finfo(test_dtype).max) + assert_equal(absfunc(x), x.real) + + x = test_dtype(np.finfo(test_dtype).tiny) + assert_equal(absfunc(x), x.real) + + x = test_dtype(np.finfo(test_dtype).min) + assert_equal(absfunc(x), -x.real) + + @pytest.mark.parametrize( + "dtype", + [ + pytest.param(dtype) + for dtype in floating_types + complex_floating_types + ], + ) + def test_builtin_abs(self, dtype): + self._test_abs_func(abs, dtype) + + @pytest.mark.parametrize( + "dtype", + [ + pytest.param(dtype) + for dtype in floating_types + complex_floating_types + ], + ) + def test_numpy_abs(self, dtype): + self._test_abs_func(np.abs, dtype) class TestBitShifts: From 71fa9e43b0ee0edfa8c3d844268bbe07cd68761f Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 28 Dec 2020 17:50:38 +0100 Subject: [PATCH 0300/1270] DOC: roadmap update - updates the contents for changes in 2020 - reorders sections to reflect priorities from user survey - makes it easier to understand the concrete goals for "interoperability" --- doc/neps/roadmap.rst | 81 ++++++++++++++++++++++++-------------------- 1 file changed, 44 insertions(+), 37 deletions(-) diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index 40fcbd325222..7e5d1a03b0a8 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -17,6 +17,12 @@ facilitate interoperability with all such packages, and the code that uses them, may include (among other things) interoperability protocols, better duck typing support and ndarray subclass handling. +The key goal is: *make it easy for code written for NumPy to also work with +other NumPy-like projects.* This will enable GPU support via, e.g, CuPy or JAX, +distributed array support via Dask, and writing special-purpose arrays (either +from scratch, or as a ``numpy.ndarray`` subclass) that work well with SciPy, +scikit-learn and other such packages. + The ``__array_ufunc__`` and ``__array_function__`` protocols are stable, but do not cover the whole API. New protocols for overriding other functionality in NumPy are needed. Work in this area aims to bring to completion one or more @@ -33,56 +39,59 @@ of the API, as discussed in `this section of NEP 37 `__. -Extensibility -------------- - -We aim to make it much easier to extend NumPy. The primary topic here is to -improve the dtype system. - -- Easier custom dtypes: - - - Simplify and/or wrap the current C-API - - More consistent support for dtype metadata - - Support for writing a dtype in Python - -- New string dtype(s): - - - Encoded strings with fixed-width storage (utf8, latin1, ...) and/or - - Variable length strings (could share implementation with dtype=object, - but are explicitly type-checked) - - One of these should probably be the default for text data. 
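A rough, hypothetical sketch of what such protocol-based interoperability can look like in practice (``DiagonalArray`` below is an illustrative toy class, not an existing NumPy or downstream type)::

    import numpy as np

    class DiagonalArray:
        # Toy duck array: stores a single value plus the size, instead of a
        # full n x n matrix.
        def __init__(self, n, value):
            self._n = n
            self._value = value

        def __array__(self, dtype=None):
            # Densify on request so plain NumPy code can still consume it.
            return self._value * np.eye(self._n, dtype=dtype)

        def __array_function__(self, func, types, args, kwargs):
            # NumPy routes calls like np.sum(d) here; handle the functions we
            # know, and signal anything else as unsupported.
            if func is np.sum:
                return self._n * self._value
            return NotImplemented

    d = DiagonalArray(4, 2.0)
    print(np.sum(d))      # 8.0, dispatched through __array_function__
    print(np.asarray(d))  # dense 4x4 array, built via __array__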
The current - behavior on Python 3 is neither efficient nor user friendly. - - Performance ----------- -Improvements to NumPy's performance are important to many users. The primary -topic at the moment is better use of SIMD instructions, also on platforms other -than x86 - see :ref:`NEP38`. +Improvements to NumPy's performance are important to many users. We have +focused this effort on Universal SIMD (see :ref:`NEP38`) intrinsics which +provide nice improvements across various hardware platforms via an abstraction +layer. The infrastructure is in place, and we welcome follow-on PRs to add +SIMD support across all relevant NumPy functions. Other performance improvement ideas include: -- Reducing ufunc and ``__array_function__`` overhead. -- Optimizations in individual functions. - A better story around parallel execution. +- Optimizations in individual functions. +- Reducing ufunc and ``__array_function__`` overhead. Furthermore we would like to improve the benchmarking system, in terms of coverage, easy of use, and publication of the results (now `here `__) as part of the docs or website. -Website and documentation +Documentation and website ------------------------- The NumPy `documentation `__ is of varying quality. The API documentation is in good shape; tutorials and high-level documentation on many topics are missing or outdated. See :ref:`NEP44` for -planned improvements. +planned improvements. Adding more tutorials is underway in the +`numpy-tutorials repo `__. Our website (https://numpy.org) was completely redesigned recently. We aim to -further improve it by adding translations, better Hugo-Sphinx integration via a -new Sphinx theme, and more (see `this tracking issue `__). +further improve it by adding translations, more case studies and other +high-level content, and more (see `this tracking issue `__). + + +Extensibility +------------- + +We aim to make it much easier to extend NumPy. The primary topic here is to +improve the dtype system - see :ref:`NEP41` and related NEPs linked from it. +Concrete goals for the dtype system rewrite are: + +- Easier custom dtypes: + + - Simplify and/or wrap the current C-API + - More consistent support for dtype metadata + - Support for writing a dtype in Python + +- Allow adding (a) new string dtype(s). This could be encoded strings with + fixed-width storage (e.g., ``utf8`` or ``latin1``), and/or a variable length + string dtype. The latter could share an implementation with ``dtype=object``, + but be explicitly type-checked. + One of these should probably be the default for text data. The current + string dtype support is neither efficient nor user friendly. User experience @@ -90,18 +99,17 @@ User experience Type annotations ```````````````` -We aim to add type annotations for all NumPy functionality, so users can use +NumPy 1.20 adds type annotations for most NumPy functionality, so users can use tools like `mypy`_ to type check their code and IDEs can improve their support -for NumPy. The existing type stubs in the `numpy-stubs`_ repository are being -improved and will be moved into the NumPy code base. +for NumPy. Improving those type annotations, for example to support annotating +array shapes and dtypes, is ongoing. Platform support ```````````````` We aim to increase our support for different hardware architectures. 
This includes adding CI coverage when CI services are available, providing wheels on -PyPI for ARM64 (``aarch64``) and POWER8/9 (``ppc64le``), providing better -build and install documentation, and resolving build issues on other platforms -like AIX. +PyPI for POWER8/9 (``ppc64le``), providing better build and install +documentation, and resolving build issues on other platforms like AIX. Maintenance @@ -126,4 +134,3 @@ Maintenance .. _`mypy`: https://mypy.readthedocs.io -.. _`numpy-stubs`: https://github.com/numpy/numpy-stubs From f9e939b34dfd5196dc77cff2f0123a5c958ecc7f Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 2 Jan 2021 17:50:39 +0100 Subject: [PATCH 0301/1270] DOC: address review comments on NEP 23 --- doc/neps/nep-0023-backwards-compatibility.rst | 26 ++++++++++++++----- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index acfb379b7fd6..d4f77a7976e2 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -25,7 +25,7 @@ Motivation and Scope NumPy has a very large user base. Those users rely on NumPy being stable and the code they write that uses NumPy functionality to keep working. NumPy is also actively maintained and improved -- and sometimes improvements -require, or are made much easier by, breaking backwards compatibility. +require or are made easier by breaking backwards compatibility. Finally, there are trade-offs in stability for existing users vs. avoiding errors or having a better user experience for new users. These competing needs often give rise to long debates and to delays in accepting or rejecting @@ -89,7 +89,7 @@ that can be used to assess such impact include: often turns up issues quickly. If the impact is unclear or significant, it is often good to consider -alternatives to deprecations. For example discouraging use in documentation +alternatives to deprecations. For example, discouraging use in documentation only, or moving the documentation for the functionality to a less prominent place or even removing it completely. Commenting on open issues related to it that they are low-prio or labeling them as "wontfix" will also be a signal to @@ -118,30 +118,41 @@ Deprecations: expect this from all users.* - shall be listed in the release notes of the release where the deprecation is first present. +- shall not be introduced in micro (or bug fix) releases. - shall set a ``stacklevel``, so the warning appears to come from the correct place. - shall be mentioned in the documentation for the functionality. A ``.. deprecated::`` directive can be used for this. -Examples of good deprecation warnings: +Examples of good deprecation warnings (also note standard form of the comments +above the warning, helps when grepping): .. code-block:: python + # NumPy 1.15.0, 2018-09-02 warnings.warn('np.asscalar(a) is deprecated since NumPy 1.16.0, use ' 'a.item() instead', DeprecationWarning, stacklevel=3) + # NumPy 1.15.0, 2018-02-10 warnings.warn("Importing from numpy.testing.utils is deprecated " "since 1.15.0, import from numpy.testing instead.", DeprecationWarning, stacklevel=2) - # A change in NumPy 1.14.0 for Python 3 loadtxt/genfromtext, slightly - # tweaked in this NEP (original didn't have version number). + # NumPy 1.14.0, 2017-07-14 warnings.warn( "Reading unicode strings without specifying the encoding " "argument is deprecated since NumPy 1.14.0. 
Set the encoding, " "use None for the system default.", np.VisibleDeprecationWarning, stacklevel=2) +.. code-block:: C + + /* DEPRECATED 2020-05-13, NumPy 1.20 */ + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + matrix_deprecation_msg, ufunc->name, "first") < 0) { + return NULL; + } + Removal of deprecated functionality: - shall be done after at least 2 releases (assuming the current 6-monthly @@ -190,7 +201,7 @@ to the `NumPy governance model `_. All deprecations must be proposed on the mailing list, in order to give everyone -with an interest in NumPy development to be able to comment. Removal of +with an interest in NumPy development a chance to comment. Removal of deprecated functionality does not need discussion on the mailing list. @@ -304,13 +315,14 @@ References and Footnotes - `Issue requesting semantic versioning `__ +- `PEP 387 - Backwards Compatibility Policy `__ + .. [1] https://searchcode.com/ .. [2] https://github.com/Quansight-Labs/python-api-inspect .. [3] https://github.com/data-apis/python-record-api - Copyright --------- From caec7f21ce3ca2672e93781a379734295c00debe Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 30 Dec 2020 10:15:55 +0000 Subject: [PATCH 0302/1270] BUG, BLD: Generate the main dispatcher config header into the build dir The new path becomes `build/src.*/numpy/distutils/include/npy_cpu_dispatch_config.h` instead of `numpy/core/src/common/_cpu_dispatch.h`. The new path allows other projects to re-use the CPU dispatcher once we decide to expose the following headers: - `numpy/core/src/common/npy_cpu_dispatch.h` - `numpy/core/src/common/npy_cpu_features.h` --- .gitignore | 3 --- numpy/core/src/common/npy_cpu_dispatch.h | 4 +-- numpy/distutils/ccompiler_opt.py | 32 +++++++++++++----------- numpy/distutils/command/build_clib.py | 18 +++++++++---- numpy/distutils/command/build_ext.py | 22 +++++++++++----- 5 files changed, 48 insertions(+), 31 deletions(-) diff --git a/.gitignore b/.gitignore index 5a5e464cca8f..b1a8db5c3a31 100644 --- a/.gitignore +++ b/.gitignore @@ -204,9 +204,6 @@ tools/swig/test/Array.py # SIMD generated files # ################################### -# the main config header, contains all the definitions and -# headers of instruction-sets -numpy/core/src/common/_cpu_dispatch.h # config headers of dispatchable sources *.dispatch.h # wrapped sources of dispatched targets, e.g. *.dispatch.avx2.c diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h index a0f82fa3da05..f69fd2b2ee83 100644 --- a/numpy/core/src/common/npy_cpu_dispatch.h +++ b/numpy/core/src/common/npy_cpu_dispatch.h @@ -7,7 +7,7 @@ #include "npy_cpu_features.h" // NPY_CPU_HAVE #include "numpy/utils.h" // NPY_EXPAND, NPY_CAT /** - * Bringing the main configration header '_cpu_dispatch.h'. + * Bringing the main configration header 'npy_cpu_dispatch_config.h'. 
* * This header is generated by the distutils module 'ccompiler_opt', * and contains all the #definitions and headers of instruction-sets, @@ -33,7 +33,7 @@ #define NPY__DISPATCH_DEFBOOL typedef bool npy__dispatch_bkbool; #endif - #include "_cpu_dispatch.h" + #include "npy_cpu_dispatch_config.h" #ifdef NPY_HAVE_VSX #undef bool #undef vector diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index ecf5172cccad..39c08d36b2a0 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -2240,6 +2240,14 @@ def generate_dispatch_header(self, header_path): baseline_len = len(baseline_names) dispatch_len = len(dispatch_names) + header_dir = os.path.dirname(header_path) + if not os.path.exists(header_dir): + self.dist_log( + f"dispatch header dir {header_dir} isn't exist, creating it", + stderr=True + ) + os.makedirs(header_dir) + with open(header_path, 'w') as f: baseline_calls = ' \\\n'.join([ ( @@ -2504,30 +2512,24 @@ def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False )) return False -def new_ccompiler_opt(compiler, **kwargs): +def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs): """ Create a new instance of 'CCompilerOpt' and generate the dispatch header - inside NumPy source dir. + which containing all definitions and headers of instruction-sets for + the enabled CPU baseline and dispatch-able features. Parameters ---------- - 'compiler' : CCompiler instance - '**kwargs': passed as-is to `CCompilerOpt(...)` + compiler : CCompiler instance + dispatch_hpath : str + path of the dispatch header + **kwargs: passed as-is to `CCompilerOpt(...)` Returns ------- new instance of CCompilerOpt """ opt = CCompilerOpt(compiler, **kwargs) - npy_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) - header_dir = os.path.join(npy_path, *("core/src/common".split("/"))) - header_path = os.path.join(header_dir, "_cpu_dispatch.h") - if not os.path.exists(header_path) or not opt.is_cached(): - if not os.path.exists(header_dir): - opt.dist_log( - "dispatch header dir '%s' isn't exist, creating it" % header_dir, - stderr=True - ) - os.makedirs(header_dir) - opt.generate_dispatch_header(header_path) + if not os.path.exists(dispatch_hpath) or not opt.is_cached(): + opt.generate_dispatch_header(dispatch_hpath) return opt diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index a0db6f31f7e5..1b3004c2f099 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -118,12 +118,15 @@ def run(self): self.compiler.show_customization() if not self.disable_optimization: + dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") + dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py' - )) + os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py') + ) self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, cpu_baseline=self.cpu_baseline, - cpu_dispatch=self.cpu_dispatch, cache_path=opt_cache_path + compiler=self.compiler, dispatch_hpath=dispatch_hpath, + cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, + cache_path=opt_cache_path ) if not self.compiler_opt.is_cached(): log.info("Detected changes on compiler optimizations, force rebuilding") @@ -271,7 +274,12 @@ def build_a_library(self, build_info, lib_name, libraries): copt_baseline_flags = [] 
copt_macros = [] if not self.disable_optimization: - copt_build_src = None if self.inplace else self.get_finalized_command("build_src").build_src + bsrc_dir = self.get_finalized_command("build_src").build_src + dispatch_hpath = os.path.join("numpy", "distutils", "include") + dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) + include_dirs.append(dispatch_hpath) + + copt_build_src = None if self.inplace else bsrc_dir copt_c_sources = [ c_sources.pop(c_sources.index(src)) for src in c_sources[:] if src.endswith(".dispatch.c") diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index ca6f8bcd24d6..448f7941cd93 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -146,11 +146,16 @@ def run(self): self.compiler.show_customization() if not self.disable_optimization: - opt_cache_path = os.path.abspath(os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py')) - self.compiler_opt = new_ccompiler_opt(compiler=self.compiler, - cpu_baseline=self.cpu_baseline, - cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path) + dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") + dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) + opt_cache_path = os.path.abspath( + os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py') + ) + self.compiler_opt = new_ccompiler_opt( + compiler=self.compiler, dispatch_hpath=dispatch_hpath, + cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, + cache_path=opt_cache_path + ) if not self.compiler_opt.is_cached(): log.info("Detected changes on compiler optimizations, force rebuilding") self.force = True @@ -416,7 +421,12 @@ def build_extension(self, ext): copt_baseline_flags = [] copt_macros = [] if not self.disable_optimization: - copt_build_src = None if self.inplace else self.get_finalized_command("build_src").build_src + bsrc_dir = self.get_finalized_command("build_src").build_src + dispatch_hpath = os.path.join("numpy", "distutils", "include") + dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) + include_dirs.append(dispatch_hpath) + + copt_build_src = None if self.inplace else bsrc_dir copt_c_sources = [ c_sources.pop(c_sources.index(src)) for src in c_sources[:] if src.endswith(".dispatch.c") From ce6cd579d4accd81edfd96fd8b7923455d5a9e61 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 2 Jan 2021 08:30:46 +0200 Subject: [PATCH 0303/1270] MAINT, BLD: few tweaks in the comments and log message Co-authored-by: Matti Picus Co-authored-by: h-vetinari Co-authored-by: Derek Homeier --- numpy/core/src/common/npy_cpu_dispatch.h | 4 ++-- numpy/distutils/ccompiler_opt.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h index f69fd2b2ee83..c8411104a867 100644 --- a/numpy/core/src/common/npy_cpu_dispatch.h +++ b/numpy/core/src/common/npy_cpu_dispatch.h @@ -7,10 +7,10 @@ #include "npy_cpu_features.h" // NPY_CPU_HAVE #include "numpy/utils.h" // NPY_EXPAND, NPY_CAT /** - * Bringing the main configration header 'npy_cpu_dispatch_config.h'. + * Including the main configuration header 'npy_cpu_dispatch_config.h'. 
* * This header is generated by the distutils module 'ccompiler_opt', - * and contains all the #definitions and headers of instruction-sets, + * and contains all the #definitions and headers for platform-specific instruction-sets * that had been configured through command arguments '--cpu-baseline' and '--cpu-dispatch'. * * It also contains extra C #definitions and macros that are used for implementing diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index 39c08d36b2a0..5fa17b2ee90c 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -2225,8 +2225,8 @@ def try_dispatch(self, sources, src_dir=None, **kwargs): def generate_dispatch_header(self, header_path): """ - Generate the dispatch header which containing all definitions - and headers of instruction-sets for the enabled CPU baseline and + Generate the dispatch header which contains the #definitions and headers + for platform-specific instruction-sets for the enabled CPU baseline and dispatch-able features. Its highly recommended to take a look at the generated header @@ -2243,7 +2243,7 @@ def generate_dispatch_header(self, header_path): header_dir = os.path.dirname(header_path) if not os.path.exists(header_dir): self.dist_log( - f"dispatch header dir {header_dir} isn't exist, creating it", + f"dispatch header dir {header_dir} does not exist, creating it", stderr=True ) os.makedirs(header_dir) @@ -2515,7 +2515,7 @@ def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs): """ Create a new instance of 'CCompilerOpt' and generate the dispatch header - which containing all definitions and headers of instruction-sets for + which contains the #definitions and headers of platform-specific instruction-sets for the enabled CPU baseline and dispatch-able features. Parameters From 0386777c34a8a503504ee71dac084359cd26d575 Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Sun, 3 Jan 2021 12:47:03 -0500 Subject: [PATCH 0304/1270] TST: Change big parametrized loop to nested small parametrized loops. Suggested by charris for clarity. Since I'm not marking some tests xfail based on the values of all parameters, this looks nicer. 
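A standalone sketch of the pattern (the test names and values below are made up for illustration, not the tests touched by this patch): stacking several small ``pytest.mark.parametrize`` decorators lets pytest build the cross product itself instead of spelling it out with ``itertools.product``:

    import itertools
    import pytest

    # Single big parametrization over a product of values -- works, but each
    # case is harder to read and to mark up individually:
    @pytest.mark.parametrize(
        "x, y",
        [(x, y) for x, y in itertools.product([1, -1], [0.0, 2.5])],
    )
    def test_product_style(x, y):
        assert x * y == y * x

    # Equivalent nested/stacked parametrization -- pytest generates the same
    # cross product from the stacked decorators:
    @pytest.mark.parametrize("x", [1, -1])
    @pytest.mark.parametrize("y", [0.0, 2.5])
    def test_stacked_style(x, y):
        assert x * y == y * x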
--- numpy/core/tests/test_multiarray.py | 21 +++++++-------------- numpy/core/tests/test_scalarmath.py | 16 ++-------------- 2 files changed, 9 insertions(+), 28 deletions(-) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 624e1aa2dfea..b30fcb812aa5 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -8577,22 +8577,15 @@ class MyAlwaysEqualNew(MyAlwaysEqual): @pytest.mark.parametrize( - ["fun", "npfun", "x", "y", "test_dtype"], + ["fun", "npfun"], [ - pytest.param( - fun, npfun, x, y, test_dtype - ) - for (fun, npfun), x, y, test_dtype in itertools.product( - [ - (_multiarray_tests.npy_cabs, np.absolute), - (_multiarray_tests.npy_carg, np.angle), - ], - [1, np.inf, -np.inf, np.nan], - [1, np.inf, -np.inf, np.nan], - [np.complex64, np.complex128, np.clongdouble], - ) - ], + (_multiarray_tests.npy_cabs, np.absolute), + (_multiarray_tests.npy_carg, np.angle) + ] ) +@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__()) def test_npymath_complex(fun, npfun, x, y, test_dtype): # Smoketest npymath functions z = test_dtype(complex(x, y)) diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 5b07e36fae7d..0b615edfadcd 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -673,23 +673,11 @@ def _test_abs_func(self, absfunc, test_dtype): x = test_dtype(np.finfo(test_dtype).min) assert_equal(absfunc(x), -x.real) - @pytest.mark.parametrize( - "dtype", - [ - pytest.param(dtype) - for dtype in floating_types + complex_floating_types - ], - ) + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) def test_builtin_abs(self, dtype): self._test_abs_func(abs, dtype) - @pytest.mark.parametrize( - "dtype", - [ - pytest.param(dtype) - for dtype in floating_types + complex_floating_types - ], - ) + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) def test_numpy_abs(self, dtype): self._test_abs_func(np.abs, dtype) From 86cd3582c0e9101ac40434ae4488abdab1c1c910 Mon Sep 17 00:00:00 2001 From: PhanatosZou Date: Sun, 3 Jan 2021 18:50:48 -0800 Subject: [PATCH 0305/1270] fix exception cause in mingw32ccompiler.py --- numpy/distutils/mingw32ccompiler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index 3358695a8254..4681d403b4e3 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -566,7 +566,7 @@ def msvc_manifest_xml(maj, min): fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] except KeyError: raise ValueError("Version %d,%d of MSVCRT not supported yet" % - (maj, min)) + (maj, min)) from None # Don't be fooled, it looks like an XML, but it is not. In particular, it # should not have any space before starting, and its size should be # divisible by 4, most likely for alignment constraints when the xml is From 6b4787c85e393090039993495591d85d1ba66118 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 4 Jan 2021 07:05:16 +0000 Subject: [PATCH 0306/1270] MAINT: Bump hypothesis from 5.43.4 to 5.46.0 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 5.43.4 to 5.46.0. 
- [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-5.43.4...hypothesis-python-5.46.0) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 496cb3a09e5c..53dd25f76d74 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 -hypothesis==5.43.4 +hypothesis==5.46.0 pytest==6.2.1 pytz==2020.5 pytest-cov==2.10.1 From 9961807f24b5e53b2248cffd1274567a3a6139ba Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 4 Jan 2021 22:48:30 -0600 Subject: [PATCH 0307/1270] BUG: Fix promotion of half and string I somehow managed to miss that half breaks the order of dtypes and has a higher number than the strings. Could be backported, but it doesn't really matter, since it only makes a difference if the compile time flag is used and even then is pretty fringe. --- numpy/core/src/multiarray/dtypemeta.c | 10 ++++++---- numpy/core/tests/test_half.py | 15 +++++++++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index 4c11723e7dd1..b1cd074a08d8 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -375,7 +375,10 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) { assert(cls->type_num < NPY_NTYPES); if (!other->legacy || other->type_num > cls->type_num) { - /* Let the more generic (larger type number) DType handle this */ + /* + * Let the more generic (larger type number) DType handle this + * (note that half is after all others, which works out here.) 
+ */ Py_INCREF(Py_NotImplemented); return (PyArray_DTypeMeta *)Py_NotImplemented; } @@ -398,9 +401,8 @@ static PyArray_DTypeMeta * string_unicode_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) { assert(cls->type_num < NPY_NTYPES); - if (!other->legacy || other->type_num > cls->type_num || - other->type_num == NPY_OBJECT) { - /* Let the more generic (larger type number) DType handle this */ + if (!other->legacy || (!PyTypeNum_ISNUMBER(other->type_num) && + (cls->type_num == NPY_STRING && other->type_num == NPY_UNICODE))) { Py_INCREF(Py_NotImplemented); return (PyArray_DTypeMeta *)Py_NotImplemented; } diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py index ae9827bc7e78..1b6fd21e14bb 100644 --- a/numpy/core/tests/test_half.py +++ b/numpy/core/tests/test_half.py @@ -67,6 +67,21 @@ def test_half_conversions(self): j = np.array(i_f16, dtype=int) assert_equal(i_int, j) + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_to_string(self, string_dt): + # Currently uses S/U32 (which is sufficient for float32) + expected_dt = np.dtype(f"{string_dt}32") + assert np.promote_types(np.float16, string_dt) == expected_dt + assert np.promote_types(string_dt, np.float16) == expected_dt + + arr = np.ones(3, dtype=np.float16).astype(string_dt) + assert arr.dtype == expected_dt + + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_from_string(self, string_dt): + string = np.array("3.1416", dtype=string_dt) + assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16) + @pytest.mark.parametrize("offset", [None, "up", "down"]) @pytest.mark.parametrize("shift", [None, "up", "down"]) @pytest.mark.parametrize("float_t", [np.float32, np.float64]) From 172311808c0857465cd09e9f7d295bfcd0179b1e Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 5 Jan 2021 06:45:05 +0000 Subject: [PATCH 0308/1270] BLD: Add extra check for AVX512DQ mask conversions --- numpy/distutils/ccompiler_opt.py | 2 +- numpy/distutils/checks/extra_avx512dq_mask.c | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 numpy/distutils/checks/extra_avx512dq_mask.c diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index 5fa17b2ee90c..e6c720399143 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -259,7 +259,7 @@ class _Config: AVX512_SKX = dict( interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", detect="AVX512_SKX", implies_detect=False, - extra_checks="AVX512BW_MASK" + extra_checks="AVX512BW_MASK AVX512DQ_MASK" ), AVX512_CLX = dict( interest=43, implies="AVX512_SKX", group="AVX512VNNI", diff --git a/numpy/distutils/checks/extra_avx512dq_mask.c b/numpy/distutils/checks/extra_avx512dq_mask.c new file mode 100644 index 000000000000..f0dc88bdd372 --- /dev/null +++ b/numpy/distutils/checks/extra_avx512dq_mask.c @@ -0,0 +1,16 @@ +#include +/** + * Test DQ mask operations due to: + * - MSVC has supported it since vs2019 see, + * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html + * - Clang >= v8.0 + * - GCC >= v7.1 + */ +int main(void) +{ + __mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1)); + m8 = _kor_mask8(m8, m8); + m8 = _kxor_mask8(m8, m8); + m8 = _cvtu32_mask8(_cvtmask8_u32(m8)); + return (int)_cvtmask8_u32(m8); +} From 998ca7c883c3c3bcedfae4f21f1a7a9f32b19301 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 5 Jan 2021 06:46:02 +0000 
Subject: [PATCH 0309/1270] BUG, MAINT: improve avx512 mask logical operations It also fixes conversion warning between `__mmask16` and `__mmask8` on msvc2019 when logical intrinsics of AVX512DQ are available. --- .../core/src/common/simd/avx512/conversion.h | 8 +- numpy/core/src/common/simd/avx512/operators.h | 94 ++++++++++--------- 2 files changed, 55 insertions(+), 47 deletions(-) diff --git a/numpy/core/src/common/simd/avx512/conversion.h b/numpy/core/src/common/simd/avx512/conversion.h index 7f4ae484dc03..0bd44179b332 100644 --- a/numpy/core/src/common/simd/avx512/conversion.h +++ b/numpy/core/src/common/simd/avx512/conversion.h @@ -119,7 +119,13 @@ NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) { return (npy_uint16)a; } NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) -{ return (npy_uint8)a; } +{ +#ifdef NPY_HAVE_AVX512DQ_MASK + return _cvtmask8_u32(a); +#else + return (npy_uint8)a; +#endif +} // round to nearest integer (assuming even) #define npyv_round_s32_f32 _mm512_cvtps_epi32 diff --git a/numpy/core/src/common/simd/avx512/operators.h b/numpy/core/src/common/simd/avx512/operators.h index 5f1771770c5b..d53932fa8726 100644 --- a/numpy/core/src/common/simd/avx512/operators.h +++ b/numpy/core/src/common/simd/avx512/operators.h @@ -90,21 +90,6 @@ NPYV_IMPL_AVX512_FROM_SI512_PS_2ARG(npyv_and_f32, _mm512_and_si512) NPYV_IMPL_AVX512_FROM_SI512_PD_2ARG(npyv_and_f64, _mm512_and_si512) #endif -#ifdef NPY_HAVE_AVX512BW_MASK - #define npyv_and_b8 _kand_mask64 - #define npyv_and_b16 _kand_mask32 -#elif defined(NPY_HAVE_AVX512BW) - NPY_FINLINE npyv_b8 npyv_and_b8(npyv_b8 a, npyv_b8 b) - { return a & b; } - NPY_FINLINE npyv_b16 npyv_and_b16(npyv_b16 a, npyv_b16 b) - { return a & b; } -#else - #define npyv_and_b8 _mm512_and_si512 - #define npyv_and_b16 _mm512_and_si512 -#endif -#define npyv_and_b32 _mm512_kand -#define npyv_and_b64 _mm512_kand - // OR #define npyv_or_u8 _mm512_or_si512 #define npyv_or_s8 _mm512_or_si512 @@ -121,20 +106,6 @@ NPYV_IMPL_AVX512_FROM_SI512_PS_2ARG(npyv_or_f32, _mm512_or_si512) NPYV_IMPL_AVX512_FROM_SI512_PD_2ARG(npyv_or_f64, _mm512_or_si512) #endif -#ifdef NPY_HAVE_AVX512BW_MASK - #define npyv_or_b8 _kor_mask64 - #define npyv_or_b16 _kor_mask32 -#elif defined(NPY_HAVE_AVX512BW) - NPY_FINLINE npyv_b8 npyv_or_b8(npyv_b8 a, npyv_b8 b) - { return a | b; } - NPY_FINLINE npyv_b16 npyv_or_b16(npyv_b16 a, npyv_b16 b) - { return a | b; } -#else - #define npyv_or_b8 _mm512_or_si512 - #define npyv_or_b16 _mm512_or_si512 -#endif -#define npyv_or_b32 _mm512_kor -#define npyv_or_b64 _mm512_kor // XOR #define npyv_xor_u8 _mm512_xor_si512 @@ -152,21 +123,6 @@ NPYV_IMPL_AVX512_FROM_SI512_PS_2ARG(npyv_xor_f32, _mm512_xor_si512) NPYV_IMPL_AVX512_FROM_SI512_PD_2ARG(npyv_xor_f64, _mm512_xor_si512) #endif -#ifdef NPY_HAVE_AVX512BW_MASK - #define npyv_xor_b8 _kxor_mask64 - #define npyv_xor_b16 _kxor_mask32 -#elif defined(NPY_HAVE_AVX512BW) - NPY_FINLINE npyv_b8 npyv_xor_b8(npyv_b8 a, npyv_b8 b) - { return a ^ b; } - NPY_FINLINE npyv_b16 npyv_xor_b16(npyv_b16 a, npyv_b16 b) - { return a ^ b; } -#else - #define npyv_xor_b8 _mm512_xor_si512 - #define npyv_xor_b16 _mm512_xor_si512 -#endif -#define npyv_xor_b32 _mm512_kxor -#define npyv_xor_b64 _mm512_kxor - // NOT #define npyv_not_u8(A) _mm512_xor_si512(A, _mm512_set1_epi32(-1)) #define npyv_not_s8 npyv_not_u8 @@ -183,21 +139,67 @@ #define npyv_not_f32(A) _mm512_castsi512_ps(npyv_not_u32(_mm512_castps_si512(A))) #define npyv_not_f64(A) 
_mm512_castsi512_pd(npyv_not_u64(_mm512_castpd_si512(A))) #endif + +/*************************** + * Logical (boolean) + ***************************/ #ifdef NPY_HAVE_AVX512BW_MASK + #define npyv_and_b8 _kand_mask64 + #define npyv_and_b16 _kand_mask32 + #define npyv_or_b8 _kor_mask64 + #define npyv_or_b16 _kor_mask32 + #define npyv_xor_b8 _kxor_mask64 + #define npyv_xor_b16 _kxor_mask32 #define npyv_not_b8 _knot_mask64 #define npyv_not_b16 _knot_mask32 #elif defined(NPY_HAVE_AVX512BW) - NPY_FINLINE npyv_b8 npyv_not_b8(npyv_b8 a) + NPY_FINLINE npyv_b8 npyv_and_b8(npyv_b8 a, npyv_b8 b) + { return a & b; } + NPY_FINLINE npyv_b16 npyv_and_b16(npyv_b16 a, npyv_b16 b) + { return a & b; } + NPY_FINLINE npyv_b8 npyv_or_b8(npyv_b8 a, npyv_b8 b) + { return a | b; } + NPY_FINLINE npyv_b16 npyv_or_b16(npyv_b16 a, npyv_b16 b) + { return a | b; } + NPY_FINLINE npyv_b8 npyv_xor_b8(npyv_b8 a, npyv_b8 b) + { return a ^ b; } + NPY_FINLINE npyv_b16 npyv_xor_b16(npyv_b16 a, npyv_b16 b) + { return a ^ b; } + NPY_FINLINE npyv_b8 npyv_not_b8(npyv_b8 a) { return ~a; } NPY_FINLINE npyv_b16 npyv_not_b16(npyv_b16 a) { return ~a; } #else + #define npyv_and_b8 _mm512_and_si512 + #define npyv_and_b16 _mm512_and_si512 + #define npyv_or_b8 _mm512_or_si512 + #define npyv_or_b16 _mm512_or_si512 + #define npyv_xor_b8 _mm512_xor_si512 + #define npyv_xor_b16 _mm512_xor_si512 #define npyv_not_b8 npyv_not_u8 #define npyv_not_b16 npyv_not_u8 #endif + +#define npyv_and_b32 _mm512_kand +#define npyv_or_b32 _mm512_kor +#define npyv_xor_b32 _mm512_kxor #define npyv_not_b32 _mm512_knot -#define npyv_not_b64 _mm512_knot +#ifdef NPY_HAVE_AVX512DQ_MASK + #define npyv_and_b64 _kand_mask8 + #define npyv_or_b64 _kor_mask8 + #define npyv_xor_b64 _kxor_mask8 + #define npyv_not_b64 _knot_mask8 +#else + NPY_FINLINE npyv_b64 npyv_and_b64(npyv_b64 a, npyv_b64 b) + { return (npyv_b64)_mm512_kand((npyv_b32)a, (npyv_b32)b); } + NPY_FINLINE npyv_b64 npyv_or_b64(npyv_b64 a, npyv_b64 b) + { return (npyv_b64)_mm512_kor((npyv_b32)a, (npyv_b32)b); } + NPY_FINLINE npyv_b64 npyv_xor_b64(npyv_b64 a, npyv_b64 b) + { return (npyv_b64)_mm512_kxor((npyv_b32)a, (npyv_b32)b); } + NPY_FINLINE npyv_b64 npyv_not_b64(npyv_b64 a) + { return (npyv_b64)_mm512_knot((npyv_b32)a); } +#endif /*************************** * Comparison From 5458c67afe90cee9ff7d4b96edf1666febb39ecb Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 5 Jan 2021 11:09:01 -0700 Subject: [PATCH 0310/1270] REL: Update master after 1.19.5 release. --- doc/changelog/1.19.5-changelog.rst | 32 ++++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/1.19.5-notes.rst | 42 +++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+) create mode 100644 doc/changelog/1.19.5-changelog.rst create mode 100644 doc/source/release/1.19.5-notes.rst diff --git a/doc/changelog/1.19.5-changelog.rst b/doc/changelog/1.19.5-changelog.rst new file mode 100644 index 000000000000..f7cbd5377190 --- /dev/null +++ b/doc/changelog/1.19.5-changelog.rst @@ -0,0 +1,32 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Christoph Gohlke +* Matti Picus +* Raghuveer Devulapalli +* Sebastian Berg +* Simon Graham + +* Veniamin Petrenko + +* Bernie Gray + + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. + +* `#17756 `__: BUG: Fix segfault due to out of bound pointer in floatstatus... 
+* `#17774 `__: BUG: fix np.timedelta64('nat').__format__ throwing an exception +* `#17775 `__: BUG: Fixed file handle leak in array_tofile. +* `#17786 `__: BUG: Raise recursion error during dimension discovery +* `#17917 `__: BUG: Fix subarray dtype used with too large count in fromfile +* `#17918 `__: BUG: 'bool' object has no attribute 'ndim' +* `#17919 `__: BUG: ensure _UFuncNoLoopError can be pickled +* `#17924 `__: BLD: use BUFFERSIZE=20 in OpenBLAS +* `#18026 `__: BLD: update to OpenBLAS 0.3.13 +* `#18036 `__: BUG: make a variable volatile to work around clang compiler bug +* `#18114 `__: REL: Prepare for the NumPy 1.19.5 release. diff --git a/doc/source/release.rst b/doc/source/release.rst index 29199fb83140..e4b3f6af64ab 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -7,6 +7,7 @@ Release Notes 1.21.0 1.20.0 + 1.19.5 1.19.4 1.19.3 1.19.2 diff --git a/doc/source/release/1.19.5-notes.rst b/doc/source/release/1.19.5-notes.rst new file mode 100644 index 000000000000..048f2718cddf --- /dev/null +++ b/doc/source/release/1.19.5-notes.rst @@ -0,0 +1,42 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.19.5 Release Notes +========================== + +NumPy 1.19.5 is a short bugfix release. Apart from fixing several bugs, the +main improvement is the update to OpenBLAS 0.3.13 that works around the windows +2004 bug while not breaking execution on other platforms. This release supports +Python 3.6-3.9 and is planned to be the last release in the 1.19.x cycle. + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Christoph Gohlke +* Matti Picus +* Raghuveer Devulapalli +* Sebastian Berg +* Simon Graham + +* Veniamin Petrenko + +* Bernie Gray + + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. + +* `#17756 `__: BUG: Fix segfault due to out of bound pointer in floatstatus... +* `#17774 `__: BUG: fix np.timedelta64('nat').__format__ throwing an exception +* `#17775 `__: BUG: Fixed file handle leak in array_tofile. +* `#17786 `__: BUG: Raise recursion error during dimension discovery +* `#17917 `__: BUG: Fix subarray dtype used with too large count in fromfile +* `#17918 `__: BUG: 'bool' object has no attribute 'ndim' +* `#17919 `__: BUG: ensure _UFuncNoLoopError can be pickled +* `#17924 `__: BLD: use BUFFERSIZE=20 in OpenBLAS +* `#18026 `__: BLD: update to OpenBLAS 0.3.13 +* `#18036 `__: BUG: make a variable volatile to work around clang compiler bug +* `#18114 `__: REL: Prepare for the NumPy 1.19.5 release. From da887a666ad975ece7fb7465005aa99c0ddef8d2 Mon Sep 17 00:00:00 2001 From: Chunlin <834352945@qq.com> Date: Wed, 6 Jan 2021 03:20:10 +0800 Subject: [PATCH 0311/1270] MAINT: CPUs that support unaligned access. (#18065) * add CPUs that support unaligned access. * add comments demonstrate the common scenoirs of unaligned access. 
--- numpy/core/include/numpy/npy_common.h | 8 -------- numpy/core/include/numpy/npy_cpu.h | 14 ++++++++++---- numpy/core/src/multiarray/common.h | 2 +- numpy/core/src/multiarray/compiled_base.c | 2 +- numpy/core/src/multiarray/item_selection.c | 2 +- .../src/multiarray/lowlevel_strided_loops.c.src | 2 +- 6 files changed, 14 insertions(+), 16 deletions(-) diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h index d5a586c56a39..c8495db8e58f 100644 --- a/numpy/core/include/numpy/npy_common.h +++ b/numpy/core/include/numpy/npy_common.h @@ -10,14 +10,6 @@ #include #endif -// int*, int64* should be propertly aligned on ARMv7 to avoid bus error -#if !defined(NPY_STRONG_ALIGNMENT) && defined(__arm__) && !(defined(__aarch64__) || defined(_M_ARM64)) -#define NPY_STRONG_ALIGNMENT 1 -#endif -#if !defined(NPY_STRONG_ALIGNMENT) -#define NPY_STRONG_ALIGNMENT 0 -#endif - // compile time environment variables #ifndef NPY_RELAXED_STRIDES_CHECKING #define NPY_RELAXED_STRIDES_CHECKING 0 diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h index 4dbf9d84e384..065176ac5fb6 100644 --- a/numpy/core/include/numpy/npy_cpu.h +++ b/numpy/core/include/numpy/npy_cpu.h @@ -110,10 +110,16 @@ information about your platform (OS, CPU and compiler) #endif -#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)) -#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1 -#else -#define NPY_CPU_HAVE_UNALIGNED_ACCESS 0 +/* + * Except for the following architectures, memory access is limited to the natural + * alignment of data types otherwise it may lead to bus error or performance regression. + * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. +*/ +#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) + #define NPY_ALIGNMENT_REQUIRED 0 +#endif +#ifndef NPY_ALIGNMENT_REQUIRED + #define NPY_ALIGNMENT_REQUIRED 1 #endif #endif diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index ef9bc79da325..2f2e7e25bea2 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -267,7 +267,7 @@ npy_memchr(char * haystack, char needle, } else { /* usually find elements to skip path */ - if (NPY_CPU_HAVE_UNALIGNED_ACCESS && needle == 0 && stride == 1) { + if (!NPY_ALIGNMENT_REQUIRED && needle == 0 && stride == 1) { /* iterate until last multiple of 4 */ char * block_end = haystack + size - (size % sizeof(unsigned int)); while (p < block_end) { diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 6ae4dda6bba9..fa5d7db75e88 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1521,7 +1521,7 @@ pack_inner(const char *inptr, bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); if(out_stride == 1 && - (!NPY_STRONG_ALIGNMENT || isAligned)) { + (!NPY_ALIGNMENT_REQUIRED || isAligned)) { npy_uint64 *ptr64 = (npy_uint64*)outptr; #if NPY_SIMD_WIDTH == 16 npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 77fff5eb47c3..8e4b2ebe120e 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2245,7 +2245,7 @@ count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const count += 
count_nonzero_bytes((const npy_uint8 *)d, stride); d += stride; #else - if (NPY_CPU_HAVE_UNALIGNED_ACCESS || + if (!NPY_ALIGNMENT_REQUIRED || npy_is_aligned(d, sizeof(npy_uint64))) { npy_uintp stride = 6 * sizeof(npy_uint64); for (; d < e - (shape[0] % stride); d += stride) { diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index 0590558be2f5..b8ebee6ed96b 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -29,7 +29,7 @@ * instructions (16 byte). * So this flag can only be enabled if autovectorization is disabled. */ -#if NPY_CPU_HAVE_UNALIGNED_ACCESS +#if NPY_ALIGNMENT_REQUIRED # define NPY_USE_UNALIGNED_ACCESS 0 #else # define NPY_USE_UNALIGNED_ACCESS 0 From 5363cfa7d07641e84fbb5f21a217018c9b55b2fa Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 6 Jan 2021 11:33:05 -0600 Subject: [PATCH 0312/1270] DEP: Deprecate `data_type.dtype` if attribute is not already a dtype (#13578) * DEP: Deprecate `data_type.dtype` if attribute is not already a dtype After the deprecation, a recursive lookup for `.dtype` will not be possible, since `.dtype` has to be a dtype instance. --- changelog/13578.deprecation.rst | 7 ++ numpy/core/src/multiarray/descriptor.c | 14 ++++ numpy/core/tests/test_deprecations.py | 17 +++++ numpy/core/tests/test_dtype.py | 12 ++-- numpy/f2py/tests/test_array_from_pyobj.py | 84 +++++++++++------------ numpy/typing/tests/data/pass/dtype.py | 2 +- 6 files changed, 87 insertions(+), 49 deletions(-) create mode 100644 changelog/13578.deprecation.rst diff --git a/changelog/13578.deprecation.rst b/changelog/13578.deprecation.rst new file mode 100644 index 000000000000..58ec7e58967f --- /dev/null +++ b/changelog/13578.deprecation.rst @@ -0,0 +1,7 @@ +The ``.dtype`` attribute must return a ``dtype`` +------------------------------------------------ + +A ``DeprecationWarning`` is now given if the ``.dtype`` attribute +of an object passed into ``np.dtype`` or as a ``dtype=obj`` argument +is not a dtype. NumPy will stop attempting to recursively coerce the +result of ``.dtype``. diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index a8d575248bb7..f0dfac55dee8 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -108,6 +108,11 @@ _try_convert_from_dtype_attr(PyObject *obj) goto fail; } + if (PyArray_DescrCheck(dtypedescr)) { + /* The dtype attribute is already a valid descriptor */ + return (PyArray_Descr *)dtypedescr; + } + if (Py_EnterRecursiveCall( " while trying to convert the given data type from its " "`.dtype` attribute.") != 0) { @@ -122,6 +127,15 @@ _try_convert_from_dtype_attr(PyObject *obj) goto fail; } + /* Deprecated 2021-01-05, NumPy 1.21 */ + if (DEPRECATE("in the future the `.dtype` attribute of a given data" + "type object must be a valid dtype instance. " + "`data_type.dtype` may need to be coerced using " + "`np.dtype(data_type.dtype)`. 
(Deprecated NumPy 1.20)") < 0) { + Py_DECREF(newdescr); + return NULL; + } + return newdescr; fail: diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 7760281ffb91..5498e1cf996f 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -329,6 +329,23 @@ def test_all_dtypes(self): args=(dt,)) +class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase): + # Deprecated 2021-01-05, NumPy 1.21 + message = r".*`.dtype` attribute" + + def test_deprecation_dtype_attribute_is_dtype(self): + class dt: + dtype = "f8" + + class vdt(np.void): + dtype = "f,f" + + self.assert_deprecated(lambda: np.dtype(dt)) + self.assert_deprecated(lambda: np.dtype(dt())) + self.assert_deprecated(lambda: np.dtype(vdt)) + self.assert_deprecated(lambda: np.dtype(vdt(1))) + + class TestTestDeprecated: def test_assert_deprecated(self): test_case_instance = _DeprecationTestCase() diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 0ebcc72dac78..03e0e172abbd 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -3,6 +3,7 @@ import pytest import ctypes import gc +import warnings import numpy as np from numpy.core._rational_tests import rational @@ -1106,7 +1107,7 @@ def test_keyword_argument(): class TestFromDTypeAttribute: def test_simple(self): class dt: - dtype = "f8" + dtype = np.dtype("f8") assert np.dtype(dt) == np.float64 assert np.dtype(dt()) == np.float64 @@ -1130,22 +1131,21 @@ class dt(np.void): # what this should be useful for. Note that if np.void is used # numpy will think we are deallocating a base type [1.17, 2019-02]. dtype = np.dtype("f,f") - pass np.dtype(dt) np.dtype(dt(1)) def test_void_subtype_recursion(self): - class dt(np.void): + class vdt(np.void): pass - dt.dtype = dt + vdt.dtype = vdt with pytest.raises(RecursionError): - np.dtype(dt) + np.dtype(vdt) with pytest.raises(RecursionError): - np.dtype(dt(1)) + np.dtype(vdt(1)) class TestDTypeClasses: diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index b719f2495ed4..0524da342864 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -3,9 +3,8 @@ import copy import pytest -from numpy import ( - array, alltrue, ndarray, zeros, dtype, intp, clongdouble - ) +import numpy as np + from numpy.testing import assert_, assert_equal from numpy.core.multiarray import typeinfo from . 
import util @@ -119,7 +118,7 @@ def is_intent_exact(self, *names): # 16 byte long double types this means the inout intent cannot be satisfied # and several tests fail as the alignment flag can be randomly true or fals # when numpy gains an aligned allocator the tests could be enabled again -if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and +if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) and sys.platform != 'win32'): _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ @@ -133,7 +132,7 @@ class Type: _type_cache = {} def __new__(cls, name): - if isinstance(name, dtype): + if isinstance(name, np.dtype): dtype0 = name name = None for n, i in typeinfo.items(): @@ -153,7 +152,8 @@ def _init(self, name): info = typeinfo[self.NAME] self.type_num = getattr(wrap, 'NPY_' + self.NAME) assert_equal(self.type_num, info.num) - self.dtype = info.type + self.dtype = np.dtype(info.type) + self.type = info.type self.elsize = info.bits / 8 self.dtypechar = info.char @@ -202,7 +202,7 @@ def __init__(self, typ, dims, intent, obj): # arr.dtypechar may be different from typ.dtypechar self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) - assert_(isinstance(self.arr, ndarray), repr(type(self.arr))) + assert_(isinstance(self.arr, np.ndarray), repr(type(self.arr))) self.arr_attr = wrap.array_attrs(self.arr) @@ -225,11 +225,12 @@ def __init__(self, typ, dims, intent, obj): return if intent.is_intent('cache'): - assert_(isinstance(obj, ndarray), repr(type(obj))) - self.pyarr = array(obj).reshape(*dims).copy() + assert_(isinstance(obj, np.ndarray), repr(type(obj))) + self.pyarr = np.array(obj).reshape(*dims).copy() else: - self.pyarr = array(array(obj, dtype=typ.dtypechar).reshape(*dims), - order=self.intent.is_intent('c') and 'C' or 'F') + self.pyarr = np.array( + np.array(obj, dtype=typ.dtypechar).reshape(*dims), + order=self.intent.is_intent('c') and 'C' or 'F') assert_(self.pyarr.dtype == typ, repr((self.pyarr.dtype, typ))) assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) @@ -266,7 +267,7 @@ def __init__(self, typ, dims, intent, obj): repr((self.arr_attr[5][3], self.type.elsize))) assert_(self.arr_equal(self.pyarr, self.arr)) - if isinstance(self.obj, ndarray): + if isinstance(self.obj, np.ndarray): if typ.elsize == Type(obj.dtype).elsize: if not intent.is_intent('copy') and self.arr_attr[1] <= 1: assert_(self.has_shared_memory()) @@ -274,8 +275,7 @@ def __init__(self, typ, dims, intent, obj): def arr_equal(self, arr1, arr2): if arr1.shape != arr2.shape: return False - s = arr1 == arr2 - return alltrue(s.flatten()) + return (arr1 == arr2).all() def __str__(self): return str(self.arr) @@ -285,7 +285,7 @@ def has_shared_memory(self): """ if self.obj is self.arr: return True - if not isinstance(self.obj, ndarray): + if not isinstance(self.obj, np.ndarray): return False obj_attr = wrap.array_attrs(self.obj) return obj_attr[0] == self.arr_attr[0] @@ -318,7 +318,7 @@ def test_in_from_2seq(self): def test_in_from_2casttype(self): for t in self.type.cast_types(): - obj = array(self.num2seq, dtype=t.dtype) + obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_, obj) if t.elsize == self.type.elsize: assert_( @@ -327,7 +327,7 @@ def test_in_from_2casttype(self): assert_(not a.has_shared_memory(), repr(t.dtype)) def test_inout_2seq(self): - obj = array(self.num2seq, dtype=self.type.dtype) + obj = np.array(self.num2seq, dtype=self.type.dtype) a = 
self.array([len(self.num2seq)], intent.inout, obj) assert_(a.has_shared_memory()) @@ -341,12 +341,12 @@ def test_inout_2seq(self): raise SystemError('intent(inout) should have failed on sequence') def test_f_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype, order='F') + obj = np.array(self.num23seq, dtype=self.type.dtype, order='F') shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.inout, obj) assert_(a.has_shared_memory()) - obj = array(self.num23seq, dtype=self.type.dtype, order='C') + obj = np.array(self.num23seq, dtype=self.type.dtype, order='C') shape = (len(self.num23seq), len(self.num23seq[0])) try: a = self.array(shape, intent.in_.inout, obj) @@ -359,14 +359,14 @@ def test_f_inout_23seq(self): 'intent(inout) should have failed on improper array') def test_c_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype) + obj = np.array(self.num23seq, dtype=self.type.dtype) shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.c.inout, obj) assert_(a.has_shared_memory()) def test_in_copy_from_2casttype(self): for t in self.type.cast_types(): - obj = array(self.num2seq, dtype=t.dtype) + obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_.copy, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) @@ -377,14 +377,14 @@ def test_c_in_from_23seq(self): def test_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) + obj = np.array(self.num23seq, dtype=t.dtype) a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) def test_f_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') + obj = np.array(self.num23seq, dtype=t.dtype, order='F') a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) if t.elsize == self.type.elsize: @@ -394,7 +394,7 @@ def test_f_in_from_23casttype(self): def test_c_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) + obj = np.array(self.num23seq, dtype=t.dtype) a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) if t.elsize == self.type.elsize: @@ -404,14 +404,14 @@ def test_c_in_from_23casttype(self): def test_f_copy_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') + obj = np.array(self.num23seq, dtype=t.dtype, order='F') a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) def test_c_copy_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) + obj = np.array(self.num23seq, dtype=t.dtype) a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) @@ -420,7 +420,7 @@ def test_in_cache_from_2casttype(self): for t in self.type.all_types(): if t.elsize != self.type.elsize: continue - obj = array(self.num2seq, dtype=t.dtype) + obj = np.array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq),) a = self.array(shape, intent.in_.c.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) @@ -428,7 +428,7 @@ def test_in_cache_from_2casttype(self): a = self.array(shape, intent.in_.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) - obj = array(self.num2seq, dtype=t.dtype, order='F') + obj 
= np.array(self.num2seq, dtype=t.dtype, order='F') a = self.array(shape, intent.in_.c.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) @@ -449,7 +449,7 @@ def test_in_cache_from_2casttype_failure(self): for t in self.type.all_types(): if t.elsize >= self.type.elsize: continue - obj = array(self.num2seq, dtype=t.dtype) + obj = np.array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq),) try: self.array(shape, intent.in_.cache, obj) # Should succeed @@ -485,18 +485,18 @@ def test_hidden(self): shape = (2,) a = self.array(shape, intent.hide, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) shape = (2, 3) a = self.array(shape, intent.hide, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) shape = (2, 3) a = self.array(shape, intent.c.hide, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) shape = (-1, 3) @@ -514,18 +514,18 @@ def test_optional_none(self): shape = (2,) a = self.array(shape, intent.optional, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) shape = (2, 3) a = self.array(shape, intent.optional, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) shape = (2, 3) a = self.array(shape, intent.c.optional, None) assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) def test_optional_from_2seq(self): @@ -547,14 +547,14 @@ def test_optional_from_23seq(self): assert_(not a.has_shared_memory()) def test_inplace(self): - obj = array(self.num23seq, dtype=self.type.dtype) + obj = np.array(self.num23seq, dtype=self.type.dtype) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape a = self.array(shape, intent.inplace, obj) assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) a.arr[1][2] = 54 assert_(obj[1][2] == a.arr[1][2] == - array(54, dtype=self.type.dtype), repr((obj, a.arr))) + np.array(54, dtype=self.type.dtype), repr((obj, a.arr))) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! 
assert_(not obj.flags['CONTIGUOUS']) @@ -563,17 +563,17 @@ def test_inplace_from_casttype(self): for t in self.type.cast_types(): if t is self.type: continue - obj = array(self.num23seq, dtype=t.dtype) - assert_(obj.dtype.type == t.dtype) - assert_(obj.dtype.type is not self.type.dtype) + obj = np.array(self.num23seq, dtype=t.dtype) + assert_(obj.dtype.type == t.type) + assert_(obj.dtype.type is not self.type.type) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape a = self.array(shape, intent.inplace, obj) assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) a.arr[1][2] = 54 assert_(obj[1][2] == a.arr[1][2] == - array(54, dtype=self.type.dtype), repr((obj, a.arr))) + np.array(54, dtype=self.type.dtype), repr((obj, a.arr))) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes changed inplace! assert_(not obj.flags['CONTIGUOUS']) - assert_(obj.dtype.type is self.type.dtype) # obj changed inplace! + assert_(obj.dtype.type is self.type.type) # obj changed inplace! diff --git a/numpy/typing/tests/data/pass/dtype.py b/numpy/typing/tests/data/pass/dtype.py index a97edc302582..4f93426f3a90 100644 --- a/numpy/typing/tests/data/pass/dtype.py +++ b/numpy/typing/tests/data/pass/dtype.py @@ -31,7 +31,7 @@ class Test: - dtype = float + dtype = np.dtype(float) np.dtype(Test()) From 73fe877ff967f279d470b81ad447b9f3056c1335 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 7 Jan 2021 11:22:42 +0200 Subject: [PATCH 0313/1270] ENH, SIMD: Ditching the old CPU dispatcher(Exp & Log) (#18101) The second patch in a series of pull-requests aims to facilitate the migration process to our new SIMD interface(NPYV). It is basically a process that focuses on getting rid of the main umath SIMD source `simd.inc`, which contains almost all SIMD kernels, by splitting it into several dispatch-able sources without changing the base code, which facilitates the review process during the move to NPYV(universal intrinsics). 
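(Editor's aside, not part of this commit message: the baseline/dispatch split described above can be inspected from Python at runtime. The module attributes used below are those exposed by NumPy builds that include the new dispatcher, i.e. NumPy 1.20 and later; the printed values are machine- and build-dependent.)

    from numpy.core._multiarray_umath import (
        __cpu_baseline__, __cpu_dispatch__, __cpu_features__,
    )

    # Features compiled unconditionally into every loop (--cpu-baseline).
    print("baseline:", __cpu_baseline__)
    # Features built as optional, runtime-selected dispatch targets (--cpu-dispatch).
    print("dispatch:", __cpu_dispatch__)
    # Which of the known features the running CPU actually reports.
    print("runtime :", sorted(f for f, ok in __cpu_features__.items() if ok))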
In this patch, we have moved the the following raw SIMD loops to the new dispatcher: - FLOAT_exp, DOUBLE_exp - FLOAT_log, DOUBLE_log - FLOAT_frexp, DOUBLE_frexp - FLOAT_ldexp, DOUBLE_ldexp --- .gitignore | 1 + numpy/core/code_generators/generate_umath.py | 14 +- numpy/core/setup.py | 3 +- numpy/core/src/umath/fast_loop_macros.h | 15 + numpy/core/src/umath/loops.c.src | 138 +- numpy/core/src/umath/loops.h.src | 35 +- .../umath/loops_exponent_log.dispatch.c.src | 1293 +++++++++++++++++ numpy/core/src/umath/npy_simd_data.h | 8 +- numpy/core/src/umath/simd.inc.src | 1088 -------------- 9 files changed, 1348 insertions(+), 1247 deletions(-) create mode 100644 numpy/core/src/umath/loops_exponent_log.dispatch.c.src diff --git a/.gitignore b/.gitignore index b1a8db5c3a31..05df19335be2 100644 --- a/.gitignore +++ b/.gitignore @@ -216,3 +216,4 @@ numpy/core/src/_simd/_simd_inc.h numpy/core/src/umath/loops_unary_fp.dispatch.c numpy/core/src/umath/loops_arithm_fp.dispatch.c numpy/core/src/umath/loops_trigonometric.dispatch.c +numpy/core/src/umath/loops_exponent_log.dispatch.c diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 6ee8031cb98d..b5305fbfce98 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -722,8 +722,7 @@ def english_upper(s): docstrings.get('numpy.core.umath.exp'), None, TD('e', f='exp', astype={'e':'f'}), - TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]), - TD('d', simd=[('avx512f', 'd')]), + TD('fd', dispatch=[('loops_exponent_log', 'fd')]), TD('fdg' + cmplx, f='exp'), TD(P, f='exp'), ), @@ -746,8 +745,7 @@ def english_upper(s): docstrings.get('numpy.core.umath.log'), None, TD('e', f='log', astype={'e':'f'}), - TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]), - TD('d', simd=[('avx512f', 'd')]), + TD('fd', dispatch=[('loops_exponent_log', 'fd')]), TD('fdg' + cmplx, f='log'), TD(P, f='log'), ), @@ -920,10 +918,10 @@ def english_upper(s): docstrings.get('numpy.core.umath.ldexp'), None, [TypeDescription('e', None, 'ei', 'e'), - TypeDescription('f', None, 'fi', 'f', simd=['avx512_skx']), + TypeDescription('f', None, 'fi', 'f', dispatch='loops_exponent_log'), TypeDescription('e', FuncNameSuffix('long'), 'el', 'e'), TypeDescription('f', FuncNameSuffix('long'), 'fl', 'f'), - TypeDescription('d', None, 'di', 'd', simd=['avx512_skx']), + TypeDescription('d', None, 'di', 'd', dispatch='loops_exponent_log'), TypeDescription('d', FuncNameSuffix('long'), 'dl', 'd'), TypeDescription('g', None, 'gi', 'g'), TypeDescription('g', FuncNameSuffix('long'), 'gl', 'g'), @@ -934,8 +932,8 @@ def english_upper(s): docstrings.get('numpy.core.umath.frexp'), None, [TypeDescription('e', None, 'e', 'ei'), - TypeDescription('f', None, 'f', 'fi', simd=['avx512_skx']), - TypeDescription('d', None, 'd', 'di', simd=['avx512_skx']), + TypeDescription('f', None, 'f', 'fi', dispatch='loops_exponent_log'), + TypeDescription('d', None, 'd', 'di', dispatch='loops_exponent_log'), TypeDescription('g', None, 'g', 'gi'), ], ), diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 1042a1c4520d..dfb26c9c1108 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -920,6 +920,8 @@ def generate_umath_c(ext, build_dir): join('src', 'umath', 'loops.c.src'), join('src', 'umath', 'loops_unary_fp.dispatch.c.src'), join('src', 'umath', 'loops_arithm_fp.dispatch.c.src'), + join('src', 'umath', 'loops_trigonometric.dispatch.c.src'), + join('src', 'umath', 'loops_exponent_log.dispatch.c.src'), join('src', 'umath', 
'matmul.h.src'), join('src', 'umath', 'matmul.c.src'), join('src', 'umath', 'clip.h.src'), @@ -929,7 +931,6 @@ def generate_umath_c(ext, build_dir): join('src', 'umath', 'scalarmath.c.src'), join('src', 'umath', 'ufunc_type_resolution.c'), join('src', 'umath', 'override.c'), - join('src', 'umath', 'loops_trigonometric.dispatch.c.src'), ] umath_deps = [ diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h index dbcff8793506..b81795b9621f 100644 --- a/numpy/core/src/umath/fast_loop_macros.h +++ b/numpy/core/src/umath/fast_loop_macros.h @@ -10,6 +10,21 @@ #ifndef _NPY_UMATH_FAST_LOOP_MACROS_H_ #define _NPY_UMATH_FAST_LOOP_MACROS_H_ +/* + * MAX_STEP_SIZE is used to determine if we need to use SIMD version of the ufunc. + * Very large step size can be as slow as processing it using scalar. The + * value of 2097152 ( = 2MB) was chosen using 2 considerations: + * 1) Typical linux kernel page size is 4Kb, but sometimes it could also be 2MB + * which is == 2097152 Bytes. For a step size as large as this, surely all + * the loads/stores of gather/scatter instructions falls on 16 different pages + * which one would think would slow down gather/scatter instructions. + * 2) It additionally satisfies MAX_STEP_SIZE*16/esize < NPY_MAX_INT32 which + * allows us to use i32 version of gather/scatter (as opposed to the i64 version) + * without problems (step larger than NPY_MAX_INT32*esize/16 would require use of + * i64gather/scatter). esize = element size = 4/8 bytes for float/double. + */ +#define MAX_STEP_SIZE 2097152 + static NPY_INLINE npy_uintp abs_ptrdiff(char *a, char *b) { diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index ba538d2ab81a..570b3ec04bc7 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1657,39 +1657,6 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void /**end repeat1**/ /**end repeat**/ -/**begin repeat - * #func = exp, log# - * #scalarf = npy_expf, npy_logf# - */ - -NPY_NO_EXPORT NPY_GCC_OPT_3 void -FLOAT_@func@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const npy_float in1 = *(npy_float *)ip1; - *(npy_float *)op1 = @scalarf@(in1); - } -} - -/**end repeat**/ - -NPY_NO_EXPORT NPY_GCC_OPT_3 void -DOUBLE_exp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const npy_double in1 = *(npy_double *)ip1; - *(npy_double *)op1 = npy_exp(in1); - } -} -NPY_NO_EXPORT NPY_GCC_OPT_3 void -DOUBLE_log(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const npy_double in1 = *(npy_double *)ip1; - *(npy_double *)op1 = npy_log(in1); - } -} - /**begin repeat * #isa = avx512f, fma# * #ISA = AVX512F, FMA# @@ -1720,59 +1687,8 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void /**end repeat2**/ /**end repeat1**/ - -/**begin repeat1 - * #func = exp, log# - * #scalarf = npy_expf, npy_logf# - */ - -NPY_NO_EXPORT NPY_GCC_OPT_3 void -FLOAT_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - if (!run_unary_@isa@_@func@_FLOAT(args, dimensions, steps)) { - UNARY_LOOP { - /* - * We use the AVX function to compute exp/log for scalar elements as well. - * This is needed to ensure the output of strided and non-strided - * cases match. SIMD code handles strided input cases, but not - * strided output. 
- */ -#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS - @ISA@_@func@_FLOAT((npy_float *)op1, (npy_float *)ip1, 1, steps[0]); -#else - const npy_float in1 = *(npy_float *)ip1; - *(npy_float *)op1 = @scalarf@(in1); -#endif - } - } -} - -/**end repeat1**/ - /**end repeat**/ -NPY_NO_EXPORT NPY_GCC_OPT_3 void -DOUBLE_exp_avx512f(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - if (!run_unary_avx512f_exp_DOUBLE(args, dimensions, steps)) { - UNARY_LOOP { - const npy_double in1 = *(npy_double *)ip1; - *(npy_double *)op1 = npy_exp(in1); - } - } -} - -NPY_NO_EXPORT NPY_GCC_OPT_3 void -DOUBLE_log_avx512f(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - if (!run_unary_avx512f_log_DOUBLE(args, dimensions, steps)) { - UNARY_LOOP { - const npy_double in1 = *(npy_double *)ip1; - *(npy_double *)op1 = npy_log(in1); - } - } -} - /**begin repeat * Float types * #type = npy_float, npy_double, npy_longdouble# @@ -2044,41 +1960,6 @@ NPY_NO_EXPORT void } } -NPY_NO_EXPORT void -@TYPE@_frexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP_TWO_OUT { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = npy_frexp@c@(in1, (int *)op2); - } -} - -NPY_NO_EXPORT void -@TYPE@_frexp_avx512_skx(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func) -{ - if (!run_unary_two_out_avx512_skx_frexp_@TYPE@(args, dimensions, steps)) { - @TYPE@_frexp(args, dimensions, steps, func); - } -} - -NPY_NO_EXPORT void -@TYPE@_ldexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const int in2 = *(int *)ip2; - *((@type@ *)op1) = npy_ldexp@c@(in1, in2); - } -} - -NPY_NO_EXPORT void -@TYPE@_ldexp_avx512_skx(char **args, const npy_intp *dimensions, const npy_intp *steps, void *func) -{ - if (!run_binary_avx512_skx_ldexp_@TYPE@(args, dimensions, steps)) { - @TYPE@_ldexp(args, dimensions, steps, func); - } -} - NPY_NO_EXPORT void @TYPE@_ldexp_long(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { @@ -2179,6 +2060,25 @@ LONGDOUBLE_square(char **args, npy_intp const *dimensions, npy_intp const *steps } } +NPY_NO_EXPORT void +LONGDOUBLE_frexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP_TWO_OUT { + const npy_longdouble in1 = *(npy_longdouble *)ip1; + *((npy_longdouble *)op1) = npy_frexpl(in1, (int *)op2); + } +} + +NPY_NO_EXPORT void +LONGDOUBLE_ldexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const npy_longdouble in1 = *(npy_longdouble *)ip1; + const int in2 = *(int *)ip2; + *((npy_longdouble *)op1) = npy_ldexpl(in1, in2); + } +} + /* ***************************************************************************** ** HALF-FLOAT LOOPS ** diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index d73c9fa7f4fc..b3a19be12d62 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -213,18 +213,6 @@ NPY_NO_EXPORT void /**end repeat1**/ /**end repeat**/ -NPY_NO_EXPORT void -DOUBLE_exp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DOUBLE_exp_avx512f(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DOUBLE_log(char **args, npy_intp const 
*dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DOUBLE_log_avx512f(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - #ifndef NPY_DISABLE_OPTIMIZATION #include "loops_trigonometric.dispatch.h" #endif @@ -236,19 +224,18 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void FLOAT_@func@, ( )) /**end repeat**/ +#ifndef NPY_DISABLE_OPTIMIZATION + #include "loops_exponent_log.dispatch.h" +#endif /**begin repeat - * #func = exp, log# + * #TYPE = FLOAT, DOUBLE# */ -NPY_NO_EXPORT void -FLOAT_@func@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - /**begin repeat1 - * #isa = avx512f, fma# + * # kind = exp, log, frexp, ldexp# */ - -NPY_NO_EXPORT void -FLOAT_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - +NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, ( + char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func) +)) /**end repeat1**/ /**end repeat**/ @@ -372,15 +359,9 @@ NPY_NO_EXPORT void NPY_NO_EXPORT void @TYPE@_frexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); -NPY_NO_EXPORT void -@TYPE@_frexp_avx512_skx(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - NPY_NO_EXPORT void @TYPE@_ldexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); -NPY_NO_EXPORT void -@TYPE@_ldexp_avx512_skx(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - NPY_NO_EXPORT void @TYPE@_ldexp_long(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); /**end repeat**/ diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src new file mode 100644 index 000000000000..1dc24b226666 --- /dev/null +++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src @@ -0,0 +1,1293 @@ +/*@targets + ** $maxopt baseline + ** (avx2 fma3) avx512f avx512_skx + **/ + +#define _UMATHMODULE +#define _MULTIARRAYMODULE +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#include + +#include "numpy/npy_math.h" +#include "simd/simd.h" +#include "loops_utils.h" +#include "loops.h" +#include "lowlevel_strided_loops.h" +// Provides the various *_LOOP macros +#include "fast_loop_macros.h" +#include "npy_simd_data.h" + +// TODO: tweak & replace raw SIMD with NPYV + +/******************************************************************************** + ** bunch of helper functions used in ISA_exp/log_FLOAT + ********************************************************************************/ +#if !defined(_MSC_VER) && defined(NPY_HAVE_AVX512F) + /** + * For somehow MSVC commit aggressive optimization lead + * to raises 'RuntimeWarning: RuntimeWarning: overflow encountered in exp' + * + * the issue mainly caused by '_mm512_maskz_loadu_ps', we need to + * investigate about it while moving to NPYV. 
+ */ + #define SIMD_AVX512F +#elif defined(NPY_HAVE_AVX2) && defined(NPY_HAVE_FMA3) + #define SIMD_AVX2_FMA3 +#endif +#if !defined(_MSC_VER) && defined(NPY_HAVE_AVX512_SKX) + #define SIMD_AVX512_SKX +#endif +#if defined(SIMD_AVX512F) && !(defined(__clang__) && (__clang_major__ < 10 || \ + (__clang_major__ == 10 && __clang_minor__ < 1))) + #define SIMD_AVX512F_NOCLANG_BUG +#endif + +#ifdef SIMD_AVX2_FMA3 + +static NPY_INLINE __m256 +fma_get_full_load_mask_ps(void) +{ + return _mm256_set1_ps(-1.0); +} + +static NPY_INLINE __m256i +fma_get_full_load_mask_pd(void) +{ + return _mm256_castpd_si256(_mm256_set1_pd(-1.0)); +} + +static NPY_INLINE __m256 +fma_get_partial_load_mask_ps(const npy_int num_elem, const npy_int num_lanes) +{ + float maskint[16] = {-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0, + 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; + float* addr = maskint + num_lanes - num_elem; + return _mm256_loadu_ps(addr); +} + +static NPY_INLINE __m256i +fma_get_partial_load_mask_pd(const npy_int num_elem, const npy_int num_lanes) +{ + npy_int maskint[16] = {-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1}; + npy_int* addr = maskint + 2*num_lanes - 2*num_elem; + return _mm256_loadu_si256((__m256i*) addr); +} + +static NPY_INLINE __m256 +fma_masked_gather_ps(__m256 src, + npy_float* addr, + __m256i vindex, + __m256 mask) +{ + return _mm256_mask_i32gather_ps(src, addr, vindex, mask, 4); +} + +static NPY_INLINE __m256d +fma_masked_gather_pd(__m256d src, + npy_double* addr, + __m128i vindex, + __m256d mask) +{ + return _mm256_mask_i32gather_pd(src, addr, vindex, mask, 8); +} + +static NPY_INLINE __m256 +fma_masked_load_ps(__m256 mask, npy_float* addr) +{ + return _mm256_maskload_ps(addr, _mm256_cvtps_epi32(mask)); +} + +static NPY_INLINE __m256d +fma_masked_load_pd(__m256i mask, npy_double* addr) +{ + return _mm256_maskload_pd(addr, mask); +} + +static NPY_INLINE __m256 +fma_set_masked_lanes_ps(__m256 x, __m256 val, __m256 mask) +{ + return _mm256_blendv_ps(x, val, mask); +} + +static NPY_INLINE __m256d +fma_set_masked_lanes_pd(__m256d x, __m256d val, __m256d mask) +{ + return _mm256_blendv_pd(x, val, mask); +} + +static NPY_INLINE __m256 +fma_blend(__m256 x, __m256 y, __m256 ymask) +{ + return _mm256_blendv_ps(x, y, ymask); +} + +static NPY_INLINE __m256 +fma_invert_mask_ps(__m256 ymask) +{ + return _mm256_andnot_ps(ymask, _mm256_set1_ps(-1.0)); +} + +static NPY_INLINE __m256i +fma_invert_mask_pd(__m256i ymask) +{ + return _mm256_andnot_si256(ymask, _mm256_set1_epi32(0xFFFFFFFF)); +} + +static NPY_INLINE __m256 +fma_get_exponent(__m256 x) +{ + /* + * Special handling of denormals: + * 1) Multiply denormal elements with 2**100 (0x71800000) + * 2) Get the 8 bits of unbiased exponent + * 3) Subtract 100 from exponent of denormals + */ + + __m256 two_power_100 = _mm256_castsi256_ps(_mm256_set1_epi32(0x71800000)); + __m256 denormal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_LT_OQ); + __m256 normal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_GE_OQ); + + /* + * It is necessary for temp1 to be volatile, a bug in clang optimizes it out which leads + * to an overflow warning in some cases. 
See https://github.com/numpy/numpy/issues/18005 + */ + volatile __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask); + __m256 temp = _mm256_mul_ps(temp1, two_power_100); + x = _mm256_blendv_ps(x, temp, denormal_mask); + + __m256 exp = _mm256_cvtepi32_ps( + _mm256_sub_epi32( + _mm256_srli_epi32( + _mm256_castps_si256(x), 23),_mm256_set1_epi32(0x7E))); + + __m256 denorm_exp = _mm256_sub_ps(exp, _mm256_set1_ps(100.0f)); + return _mm256_blendv_ps(exp, denorm_exp, denormal_mask); +} + +static NPY_INLINE __m256 +fma_get_mantissa(__m256 x) +{ + /* + * Special handling of denormals: + * 1) Multiply denormal elements with 2**100 (0x71800000) + * 2) Get the 23 bits of mantissa + * 3) Mantissa for denormals is not affected by the multiplication + */ + + __m256 two_power_100 = _mm256_castsi256_ps(_mm256_set1_epi32(0x71800000)); + __m256 denormal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_LT_OQ); + __m256 normal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_GE_OQ); + + /* + * It is necessary for temp1 to be volatile, a bug in clang optimizes it out which leads + * to an overflow warning in some cases. See https://github.com/numpy/numpy/issues/18005 + */ + volatile __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask); + __m256 temp = _mm256_mul_ps(temp1, two_power_100); + x = _mm256_blendv_ps(x, temp, denormal_mask); + + __m256i mantissa_bits = _mm256_set1_epi32(0x7fffff); + __m256i exp_126_bits = _mm256_set1_epi32(126 << 23); + return _mm256_castsi256_ps( + _mm256_or_si256( + _mm256_and_si256( + _mm256_castps_si256(x), mantissa_bits), exp_126_bits)); +} + +static NPY_INLINE __m256 +fma_scalef_ps(__m256 poly, __m256 quadrant) +{ + /* + * Handle denormals (which occur when quadrant <= -125): + * 1) This function computes poly*(2^quad) by adding the exponent of + poly to quad + * 2) When quad <= -125, the output is a denormal and the above logic + breaks down + * 3) To handle such cases, we split quadrant: -125 + (quadrant + 125) + * 4) poly*(2^-125) is computed the usual way + * 5) 2^(quad-125) can be computed by: 2 << abs(quad-125) + * 6) The final div operation generates the denormal + */ + __m256 minquadrant = _mm256_set1_ps(-125.0f); + __m256 denormal_mask = _mm256_cmp_ps(quadrant, minquadrant, _CMP_LE_OQ); + if (_mm256_movemask_ps(denormal_mask) != 0x0000) { + __m256 quad_diff = _mm256_sub_ps(quadrant, minquadrant); + quad_diff = _mm256_sub_ps(_mm256_setzero_ps(), quad_diff); + quad_diff = _mm256_blendv_ps(_mm256_setzero_ps(), quad_diff, denormal_mask); + __m256i two_power_diff = _mm256_sllv_epi32( + _mm256_set1_epi32(1), _mm256_cvtps_epi32(quad_diff)); + quadrant = _mm256_max_ps(quadrant, minquadrant); //keep quadrant >= -126 + __m256i exponent = _mm256_slli_epi32(_mm256_cvtps_epi32(quadrant), 23); + poly = _mm256_castsi256_ps( + _mm256_add_epi32( + _mm256_castps_si256(poly), exponent)); + __m256 denorm_poly = _mm256_div_ps(poly, _mm256_cvtepi32_ps(two_power_diff)); + return _mm256_blendv_ps(poly, denorm_poly, denormal_mask); + } + else { + __m256i exponent = _mm256_slli_epi32(_mm256_cvtps_epi32(quadrant), 23); + poly = _mm256_castsi256_ps( + _mm256_add_epi32( + _mm256_castps_si256(poly), exponent)); + return poly; + } +} + +#endif // SIMD_AVX2_FMA3 + +#ifdef SIMD_AVX512F + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16 +avx512_get_full_load_mask_ps(void) +{ + return 0xFFFF; +} + +static NPY_INLINE __mmask8 +avx512_get_full_load_mask_pd(void) +{ + return 0xFF; +} + +static NPY_INLINE __mmask16 
+avx512_get_partial_load_mask_ps(const npy_int num_elem, const npy_int total_elem) +{ + return (0x0001 << num_elem) - 0x0001; +} + +static NPY_INLINE __mmask8 +avx512_get_partial_load_mask_pd(const npy_int num_elem, const npy_int total_elem) +{ + return (0x01 << num_elem) - 0x01; +} + +static NPY_INLINE __m512 +avx512_masked_gather_ps(__m512 src, + npy_float* addr, + __m512i vindex, + __mmask16 kmask) +{ + return _mm512_mask_i32gather_ps(src, kmask, vindex, addr, 4); +} + +static NPY_INLINE __m512d +avx512_masked_gather_pd(__m512d src, + npy_double* addr, + __m256i vindex, + __mmask8 kmask) +{ + return _mm512_mask_i32gather_pd(src, kmask, vindex, addr, 8); +} + +static NPY_INLINE __m512 +avx512_masked_load_ps(__mmask16 mask, npy_float* addr) +{ + return _mm512_maskz_loadu_ps(mask, (__m512 *)addr); +} + +static NPY_INLINE __m512d +avx512_masked_load_pd(__mmask8 mask, npy_double* addr) +{ + return _mm512_maskz_loadu_pd(mask, (__m512d *)addr); +} + +static NPY_INLINE __m512 +avx512_set_masked_lanes_ps(__m512 x, __m512 val, __mmask16 mask) +{ + return _mm512_mask_blend_ps(mask, x, val); +} + +static NPY_INLINE __m512d +avx512_set_masked_lanes_pd(__m512d x, __m512d val, __mmask8 mask) +{ + return _mm512_mask_blend_pd(mask, x, val); +} + +static NPY_INLINE __m512 +avx512_blend(__m512 x, __m512 y, __mmask16 ymask) +{ + return _mm512_mask_mov_ps(x, ymask, y); +} + +static NPY_INLINE __mmask16 +avx512_invert_mask_ps(__mmask16 ymask) +{ + return _mm512_knot(ymask); +} + +static NPY_INLINE __mmask8 +avx512_invert_mask_pd(__mmask8 ymask) +{ + return _mm512_knot(ymask); +} + +static NPY_INLINE __m512 +avx512_get_exponent(__m512 x) +{ + return _mm512_add_ps(_mm512_getexp_ps(x), _mm512_set1_ps(1.0f)); +} + +static NPY_INLINE __m512 +avx512_get_mantissa(__m512 x) +{ + return _mm512_getmant_ps(x, _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src); +} + +static NPY_INLINE __m512 +avx512_scalef_ps(__m512 poly, __m512 quadrant) +{ + return _mm512_scalef_ps(poly, quadrant); +} + +static NPY_INLINE __m512d +avx512_permute_x4var_pd(__m512d t0, + __m512d t1, + __m512d t2, + __m512d t3, + __m512i index) +{ + __mmask8 lut_mask = _mm512_cmp_epi64_mask( + _mm512_and_epi64(_mm512_set1_epi64(0x10ULL), index), + _mm512_set1_epi64(0), _MM_CMPINT_GT); + __m512d res1 = _mm512_permutex2var_pd(t0, index, t1); + __m512d res2 = _mm512_permutex2var_pd(t2, index, t3); + return _mm512_mask_blend_pd(lut_mask, res1, res2); +} + +static NPY_INLINE __m512d +avx512_permute_x8var_pd(__m512d t0, __m512d t1, __m512d t2, __m512d t3, + __m512d t4, __m512d t5, __m512d t6, __m512d t7, + __m512i index) +{ + __mmask8 lut_mask = _mm512_cmp_epi64_mask( + _mm512_and_epi64(_mm512_set1_epi64(0x20ULL), index), + _mm512_set1_epi64(0), _MM_CMPINT_GT); + __m512d res1 = avx512_permute_x4var_pd(t0, t1, t2, t3, index); + __m512d res2 = avx512_permute_x4var_pd(t4, t5, t6, t7, index); + return _mm512_mask_blend_pd(lut_mask, res1, res2); +} + +#endif // SIMD_AVX512F + +/******************************************************************************** + ** Defining the SIMD kernels + ********************************************************************************/ +/**begin repeat + * #ISA = FMA, AVX512F# + * #isa = fma, avx512# + * #vtype = __m256, __m512# + * #vsize = 256, 512# + * #BYTES = 32, 64# + * #NUM_LANES = 8, 16# + * #mask = __m256, __mmask16# + * #vsub = , _mask# + * #or_masks =_mm256_or_ps, _mm512_kor# + * #and_masks =_mm256_and_ps, _mm512_kand# + * #xor_masks =_mm256_xor_ps, _mm512_kxor# + * #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps# + * #mask_to_int = 
_mm256_movemask_ps, # + * #full_mask= 0xFF, 0xFFFF# + * #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps# + * #cvtps_epi32 = _mm256_cvtps_epi32, # + * #CHK = SIMD_AVX2_FMA3, SIMD_AVX512F# + */ +#ifdef @CHK@ +/* + * Vectorized Cody-Waite range reduction technique + * Performs the reduction step x* = x - y*C in three steps: + * 1) x* = x - y*c1 + * 2) x* = x - y*c2 + * 3) x* = x - y*c3 + * c1, c2 are exact floating points, c3 = C - c1 - c2 simulates higher precision + */ +static NPY_INLINE @vtype@ +simd_range_reduction(@vtype@ x, @vtype@ y, @vtype@ c1, @vtype@ c2, @vtype@ c3) +{ + @vtype@ reduced_x = @fmadd@(y, c1, x); + reduced_x = @fmadd@(y, c2, reduced_x); + reduced_x = @fmadd@(y, c3, reduced_x); + return reduced_x; +} +/* + * Vectorized implementation of exp using AVX2 and AVX512: + * 1) if x >= xmax; return INF (overflow) + * 2) if x <= xmin; return 0.0f (underflow) + * 3) Range reduction (using Coyd-Waite): + * a) y = x - k*ln(2); k = rint(x/ln(2)); y \in [0, ln(2)] + * 4) Compute exp(y) = P/Q, ratio of 2 polynomials P and Q + * b) P = 5th order and Q = 2nd order polynomials obtained from Remez's + * algorithm (mini-max polynomial approximation) + * 5) Compute exp(x) = exp(y) * 2^k + * 6) Max ULP error measured across all 32-bit FP's = 2.52 (x = 0xc2781e37) + * 7) Max relative error measured across all 32-bit FP's= 2.1264E-07 (for the + * same x = 0xc2781e37) + */ +static void +simd_exp_FLOAT(npy_float * op, + npy_float * ip, + const npy_intp array_size, + const npy_intp steps) +{ + const npy_intp stride = steps/(npy_intp)sizeof(npy_float); + const npy_int num_lanes = @BYTES@/(npy_intp)sizeof(npy_float); + npy_float xmax = 88.72283935546875f; + npy_float xmin = -103.97208404541015625f; + + /* + * Note: while generally indices are npy_intp, we ensure that our maximum index + * will fit in an int32 as a precondition for this function via + * IS_OUTPUT_BLOCKABLE_UNARY + */ + npy_int32 indexarr[16]; + for (npy_int32 ii = 0; ii < 16; ii++) { + indexarr[ii] = ii*stride; + } + + /* Load up frequently used constants */ + @vtype@ codyw_c1 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_LOGE_2_HIGHf); + @vtype@ codyw_c2 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_LOGE_2_LOWf); + @vtype@ exp_p0 = _mm@vsize@_set1_ps(NPY_COEFF_P0_EXPf); + @vtype@ exp_p1 = _mm@vsize@_set1_ps(NPY_COEFF_P1_EXPf); + @vtype@ exp_p2 = _mm@vsize@_set1_ps(NPY_COEFF_P2_EXPf); + @vtype@ exp_p3 = _mm@vsize@_set1_ps(NPY_COEFF_P3_EXPf); + @vtype@ exp_p4 = _mm@vsize@_set1_ps(NPY_COEFF_P4_EXPf); + @vtype@ exp_p5 = _mm@vsize@_set1_ps(NPY_COEFF_P5_EXPf); + @vtype@ exp_q0 = _mm@vsize@_set1_ps(NPY_COEFF_Q0_EXPf); + @vtype@ exp_q1 = _mm@vsize@_set1_ps(NPY_COEFF_Q1_EXPf); + @vtype@ exp_q2 = _mm@vsize@_set1_ps(NPY_COEFF_Q2_EXPf); + @vtype@ cvt_magic = _mm@vsize@_set1_ps(NPY_RINT_CVT_MAGICf); + @vtype@ log2e = _mm@vsize@_set1_ps(NPY_LOG2Ef); + @vtype@ inf = _mm@vsize@_set1_ps(NPY_INFINITYF); + @vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f); + @vtype@ poly, num_poly, denom_poly, quadrant; + @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]); + + @mask@ xmax_mask, xmin_mask, nan_mask, inf_mask; + @mask@ overflow_mask = @isa@_get_partial_load_mask_ps(0, num_lanes); + @mask@ load_mask = @isa@_get_full_load_mask_ps(); + npy_intp num_remaining_elements = array_size; + + while (num_remaining_elements > 0) { + + if (num_remaining_elements < num_lanes) { + load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements, + num_lanes); + } + + @vtype@ x; + if (stride == 1) { + x = @isa@_masked_load_ps(load_mask, ip); + } + else { + x = 
@isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask); + } + + nan_mask = _mm@vsize@_cmp_ps@vsub@(x, x, _CMP_NEQ_UQ); + x = @isa@_set_masked_lanes_ps(x, zeros_f, nan_mask); + + xmax_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmax), _CMP_GE_OQ); + xmin_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmin), _CMP_LE_OQ); + inf_mask = _mm@vsize@_cmp_ps@vsub@(x, inf, _CMP_EQ_OQ); + overflow_mask = @or_masks@(overflow_mask, + @xor_masks@(xmax_mask, inf_mask)); + + x = @isa@_set_masked_lanes_ps(x, zeros_f, @or_masks@( + @or_masks@(nan_mask, xmin_mask), xmax_mask)); + + quadrant = _mm@vsize@_mul_ps(x, log2e); + + /* round to nearest */ + quadrant = _mm@vsize@_add_ps(quadrant, cvt_magic); + quadrant = _mm@vsize@_sub_ps(quadrant, cvt_magic); + + /* Cody-Waite's range reduction algorithm */ + x = simd_range_reduction(x, quadrant, codyw_c1, codyw_c2, zeros_f); + + num_poly = @fmadd@(exp_p5, x, exp_p4); + num_poly = @fmadd@(num_poly, x, exp_p3); + num_poly = @fmadd@(num_poly, x, exp_p2); + num_poly = @fmadd@(num_poly, x, exp_p1); + num_poly = @fmadd@(num_poly, x, exp_p0); + denom_poly = @fmadd@(exp_q2, x, exp_q1); + denom_poly = @fmadd@(denom_poly, x, exp_q0); + poly = _mm@vsize@_div_ps(num_poly, denom_poly); + + /* + * compute val = poly * 2^quadrant; which is same as adding the + * exponent of quadrant to the exponent of poly. quadrant is an int, + * so extracting exponent is simply extracting 8 bits. + */ + poly = @isa@_scalef_ps(poly, quadrant); + + /* + * elem > xmax; return inf + * elem < xmin; return 0.0f + * elem = +/- nan, return nan + */ + poly = @isa@_set_masked_lanes_ps(poly, _mm@vsize@_set1_ps(NPY_NANF), nan_mask); + poly = @isa@_set_masked_lanes_ps(poly, inf, xmax_mask); + poly = @isa@_set_masked_lanes_ps(poly, zeros_f, xmin_mask); + + @masked_store@(op, @cvtps_epi32@(load_mask), poly); + + ip += num_lanes*stride; + op += num_lanes; + num_remaining_elements -= num_lanes; + } + + if (@mask_to_int@(overflow_mask)) { + npy_set_floatstatus_overflow(); + } +} + +/* + * Vectorized implementation of log using AVX2 and AVX512 + * 1) if x < 0.0f; return -NAN (invalid input) + * 2) Range reduction: y = x/2^k; + * a) y = normalized mantissa, k is the exponent (0.5 <= y < 1) + * 3) Compute log(y) = P/Q, ratio of 2 polynomials P and Q + * b) P = 5th order and Q = 5th order polynomials obtained from Remez's + * algorithm (mini-max polynomial approximation) + * 5) Compute log(x) = log(y) + k*ln(2) + * 6) Max ULP error measured across all 32-bit FP's = 3.83 (x = 0x3f486945) + * 7) Max relative error measured across all 32-bit FP's = 2.359E-07 (for same + * x = 0x3f486945) + */ + +static void +simd_log_FLOAT(npy_float * op, + npy_float * ip, + const npy_intp array_size, + const npy_intp steps) +{ + const npy_intp stride = steps/(npy_intp)sizeof(npy_float); + const npy_int num_lanes = @BYTES@/(npy_intp)sizeof(npy_float); + + /* + * Note: while generally indices are npy_intp, we ensure that our maximum index + * will fit in an int32 as a precondition for this function via + * IS_OUTPUT_BLOCKABLE_UNARY + */ + npy_int32 indexarr[16]; + for (npy_int32 ii = 0; ii < 16; ii++) { + indexarr[ii] = ii*stride; + } + + /* Load up frequently used constants */ + @vtype@ log_p0 = _mm@vsize@_set1_ps(NPY_COEFF_P0_LOGf); + @vtype@ log_p1 = _mm@vsize@_set1_ps(NPY_COEFF_P1_LOGf); + @vtype@ log_p2 = _mm@vsize@_set1_ps(NPY_COEFF_P2_LOGf); + @vtype@ log_p3 = _mm@vsize@_set1_ps(NPY_COEFF_P3_LOGf); + @vtype@ log_p4 = _mm@vsize@_set1_ps(NPY_COEFF_P4_LOGf); + @vtype@ log_p5 = _mm@vsize@_set1_ps(NPY_COEFF_P5_LOGf); + 
@vtype@ log_q0 = _mm@vsize@_set1_ps(NPY_COEFF_Q0_LOGf); + @vtype@ log_q1 = _mm@vsize@_set1_ps(NPY_COEFF_Q1_LOGf); + @vtype@ log_q2 = _mm@vsize@_set1_ps(NPY_COEFF_Q2_LOGf); + @vtype@ log_q3 = _mm@vsize@_set1_ps(NPY_COEFF_Q3_LOGf); + @vtype@ log_q4 = _mm@vsize@_set1_ps(NPY_COEFF_Q4_LOGf); + @vtype@ log_q5 = _mm@vsize@_set1_ps(NPY_COEFF_Q5_LOGf); + @vtype@ loge2 = _mm@vsize@_set1_ps(NPY_LOGE2f); + @vtype@ nan = _mm@vsize@_set1_ps(NPY_NANF); + @vtype@ neg_nan = _mm@vsize@_set1_ps(-NPY_NANF); + @vtype@ neg_inf = _mm@vsize@_set1_ps(-NPY_INFINITYF); + @vtype@ inf = _mm@vsize@_set1_ps(NPY_INFINITYF); + @vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f); + @vtype@ ones_f = _mm@vsize@_set1_ps(1.0f); + @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)indexarr); + @vtype@ poly, num_poly, denom_poly, exponent; + + @mask@ inf_mask, nan_mask, sqrt2_mask, zero_mask, negx_mask; + @mask@ invalid_mask = @isa@_get_partial_load_mask_ps(0, num_lanes); + @mask@ divide_by_zero_mask = invalid_mask; + @mask@ load_mask = @isa@_get_full_load_mask_ps(); + npy_intp num_remaining_elements = array_size; + + while (num_remaining_elements > 0) { + + if (num_remaining_elements < num_lanes) { + load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements, + num_lanes); + } + + @vtype@ x_in; + if (stride == 1) { + x_in = @isa@_masked_load_ps(load_mask, ip); + } + else { + x_in = @isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask); + } + + negx_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_LT_OQ); + zero_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_EQ_OQ); + inf_mask = _mm@vsize@_cmp_ps@vsub@(x_in, inf, _CMP_EQ_OQ); + nan_mask = _mm@vsize@_cmp_ps@vsub@(x_in, x_in, _CMP_NEQ_UQ); + divide_by_zero_mask = @or_masks@(divide_by_zero_mask, + @and_masks@(zero_mask, load_mask)); + invalid_mask = @or_masks@(invalid_mask, negx_mask); + + @vtype@ x = @isa@_set_masked_lanes_ps(x_in, zeros_f, negx_mask); + + /* set x = normalized mantissa */ + exponent = @isa@_get_exponent(x); + x = @isa@_get_mantissa(x); + + /* if x < sqrt(2) {exp = exp-1; x = 2*x} */ + sqrt2_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(NPY_SQRT1_2f), _CMP_LE_OQ); + x = @isa@_blend(x, _mm@vsize@_add_ps(x,x), sqrt2_mask); + exponent = @isa@_blend(exponent, + _mm@vsize@_sub_ps(exponent,ones_f), sqrt2_mask); + + /* x = x - 1 */ + x = _mm@vsize@_sub_ps(x, ones_f); + + /* Polynomial approximation for log(1+x) */ + num_poly = @fmadd@(log_p5, x, log_p4); + num_poly = @fmadd@(num_poly, x, log_p3); + num_poly = @fmadd@(num_poly, x, log_p2); + num_poly = @fmadd@(num_poly, x, log_p1); + num_poly = @fmadd@(num_poly, x, log_p0); + denom_poly = @fmadd@(log_q5, x, log_q4); + denom_poly = @fmadd@(denom_poly, x, log_q3); + denom_poly = @fmadd@(denom_poly, x, log_q2); + denom_poly = @fmadd@(denom_poly, x, log_q1); + denom_poly = @fmadd@(denom_poly, x, log_q0); + poly = _mm@vsize@_div_ps(num_poly, denom_poly); + poly = @fmadd@(exponent, loge2, poly); + + /* + * x < 0.0f; return -NAN + * x = +/- NAN; return NAN + * x = 0.0f; return -INF + */ + poly = @isa@_set_masked_lanes_ps(poly, nan, nan_mask); + poly = @isa@_set_masked_lanes_ps(poly, neg_nan, negx_mask); + poly = @isa@_set_masked_lanes_ps(poly, neg_inf, zero_mask); + poly = @isa@_set_masked_lanes_ps(poly, inf, inf_mask); + + @masked_store@(op, @cvtps_epi32@(load_mask), poly); + + ip += num_lanes*stride; + op += num_lanes; + num_remaining_elements -= num_lanes; + } + + if (@mask_to_int@(invalid_mask)) { + npy_set_floatstatus_invalid(); + } + if (@mask_to_int@(divide_by_zero_mask)) { + 
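+        /*
+         * Raising the divide-by-zero FP status here mirrors what scalar
+         * npy_logf()/C99 log() do for an input of 0.0f, so np.errstate and
+         * the usual RuntimeWarnings behave the same on the SIMD path.
+         */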
npy_set_floatstatus_divbyzero(); + } +} +#endif // @CHK@ +/**end repeat**/ + +#ifdef SIMD_AVX512F_NOCLANG_BUG +/* + * Vectorized implementation of exp double using AVX512 + * Reference: Tang, P.T.P., "Table-driven implementation of the + * exponential function in IEEE floating-point + * arithmetic," ACM Transactions on Mathematical + * Software, vol. 15, pp. 144-157, 1989. + * 1) if x > mTH_max or x is INF; return INF (overflow) + * 2) if x < mTH_min; return 0.0f (underflow) + * 3) if abs(x) < mTH_nearzero; return 1.0f + x + * 4) if x is Nan; return Nan + * 5) Range reduction: + * x = (32m + j)ln2 / 32 + r; r in [-ln2/64, ln2/64] + * 6) exp(r) - 1 is approximated by a polynomial function p(r) + * exp(x) = 2^m(2^(j/32) + 2^(j/32)p(r)); + */ +static void +AVX512F_exp_DOUBLE(npy_double * op, + npy_double * ip, + const npy_intp array_size, + const npy_intp steps) +{ + npy_intp num_remaining_elements = array_size; + const npy_intp stride = steps / (npy_intp)sizeof(npy_double); + const npy_int num_lanes = 64 / (npy_intp)sizeof(npy_double); + npy_int32 indexarr[8]; + for (npy_int32 ii = 0; ii < 8; ii++) { + indexarr[ii] = ii*stride; + } + + __m512d InvLn2N = _mm512_set1_pd(NPY_INV_LN2_MUL_32); + __m512d mShift = _mm512_set1_pd(NPY_RINT_CVT_MAGIC); + __m512d mNegL1 = _mm512_set1_pd(NPY_TANG_NEG_L1); + __m512d mNegL2 = _mm512_set1_pd(NPY_TANG_NEG_L2); + __m512i mMod = _mm512_set1_epi64(0x1f); + __m512d mA1 = _mm512_set1_pd(NPY_TANG_A1); + __m512d mA2 = _mm512_set1_pd(NPY_TANG_A2); + __m512d mA3 = _mm512_set1_pd(NPY_TANG_A3); + __m512d mA4 = _mm512_set1_pd(NPY_TANG_A4); + __m512d mA5 = _mm512_set1_pd(NPY_TANG_A5); + __m512d mTH_nearzero = _mm512_set1_pd(0x1p-54); + __m512d mTH_max = _mm512_set1_pd(0x1.62e42fefa39efp+9); + __m512d mTH_min = _mm512_set1_pd(-0x1.74910d52d3053p+9); + __m512d mTH_inf = _mm512_set1_pd(NPY_INFINITY); + __m512d zeros_d = _mm512_set1_pd(0.0f); + __m512d ones_d = _mm512_set1_pd(1.0f); + __m256i vindex = _mm256_loadu_si256((__m256i*)&indexarr[0]); + + __m512d mTable_top_0 = _mm512_loadu_pd(&(EXP_Table_top[8*0])); + __m512d mTable_top_1 = _mm512_loadu_pd(&(EXP_Table_top[8*1])); + __m512d mTable_top_2 = _mm512_loadu_pd(&(EXP_Table_top[8*2])); + __m512d mTable_top_3 = _mm512_loadu_pd(&(EXP_Table_top[8*3])); + __m512d mTable_tail_0 = _mm512_loadu_pd(&(EXP_Table_tail[8*0])); + __m512d mTable_tail_1 = _mm512_loadu_pd(&(EXP_Table_tail[8*1])); + __m512d mTable_tail_2 = _mm512_loadu_pd(&(EXP_Table_tail[8*2])); + __m512d mTable_tail_3 = _mm512_loadu_pd(&(EXP_Table_tail[8*3])); + + __mmask8 overflow_mask = avx512_get_partial_load_mask_pd(0, num_lanes); + __mmask8 load_mask = avx512_get_full_load_mask_pd(); + __mmask8 xmin_mask, xmax_mask, inf_mask, nan_mask, nearzero_mask; + + while (num_remaining_elements > 0) { + if (num_remaining_elements < num_lanes) { + load_mask = avx512_get_partial_load_mask_pd(num_remaining_elements, + num_lanes); + } + + __m512d x; + if (1 == stride) { + x = avx512_masked_load_pd(load_mask, ip); + } + else { + x = avx512_masked_gather_pd(zeros_d, ip, vindex, load_mask); + } + + nan_mask = _mm512_cmp_pd_mask(x, x, _CMP_NEQ_UQ); + x = avx512_set_masked_lanes_pd(x, zeros_d, nan_mask); + xmax_mask = _mm512_cmp_pd_mask(x, mTH_max, _CMP_GT_OQ); + xmin_mask = _mm512_cmp_pd_mask(x, mTH_min, _CMP_LT_OQ); + inf_mask = _mm512_cmp_pd_mask(x, mTH_inf, _CMP_EQ_OQ); + __m512i x_abs = _mm512_and_epi64(_mm512_castpd_si512(x), + _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF)); + nearzero_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(x_abs), + mTH_nearzero, _CMP_LT_OQ); + nearzero_mask = 
_mm512_kxor(nearzero_mask, nan_mask); + overflow_mask = _mm512_kor(overflow_mask, + _mm512_kxor(xmax_mask, inf_mask)); + x = avx512_set_masked_lanes_pd(x, zeros_d, + _mm512_kor(_mm512_kor(nan_mask, xmin_mask), + _mm512_kor(xmax_mask, nearzero_mask))); + + /* z = x * 32/ln2 */ + __m512d z = _mm512_mul_pd(x, InvLn2N); + + /* round to nearest */ + __m512d kd = _mm512_add_pd(z, mShift); + __m512i ki = _mm512_castpd_si512(kd); + kd = _mm512_sub_pd(kd, mShift); + + /* r = (x + kd*mNegL1) + kd*mNegL2 */ + __m512d r1 = _mm512_fmadd_pd(kd, mNegL1, x); + __m512d r2 = _mm512_mul_pd(kd, mNegL2); + __m512d r = _mm512_add_pd(r1,r2); + + /* Polynomial approximation for exp(r) - 1 */ + __m512d q = _mm512_fmadd_pd(mA5, r, mA4); + q = _mm512_fmadd_pd(q, r, mA3); + q = _mm512_fmadd_pd(q, r, mA2); + q = _mm512_fmadd_pd(q, r, mA1); + q = _mm512_mul_pd(q, r); + __m512d p = _mm512_fmadd_pd(r, q, r2);; + p = _mm512_add_pd(r1, p); + + /* Get 2^(j/32) from lookup table */ + __m512i j = _mm512_and_epi64(ki, mMod); + __m512d top = avx512_permute_x4var_pd(mTable_top_0, mTable_top_1, + mTable_top_2, mTable_top_3, j); + __m512d tail = avx512_permute_x4var_pd(mTable_tail_0, mTable_tail_1, + mTable_tail_2, mTable_tail_3, j); + + /* + * s = top + tail; + * exp(x) = 2^m * (top + (tail + s * p)); + */ + __m512d s = _mm512_add_pd(top, tail); + __m512d res = _mm512_fmadd_pd(s, p, tail); + res = _mm512_add_pd(res, top); + res= _mm512_scalef_pd(res, _mm512_div_pd(kd, _mm512_set1_pd(32))); + + /* return special cases */ + res = avx512_set_masked_lanes_pd(res, _mm512_add_pd(x, ones_d), + nearzero_mask); + res = avx512_set_masked_lanes_pd(res, _mm512_set1_pd(NPY_NAN), + nan_mask); + res = avx512_set_masked_lanes_pd(res, mTH_inf, xmax_mask); + res = avx512_set_masked_lanes_pd(res, zeros_d, xmin_mask); + + _mm512_mask_storeu_pd(op, load_mask, res); + + ip += num_lanes * stride; + op += num_lanes; + num_remaining_elements -= num_lanes; + } + if (overflow_mask) { + npy_set_floatstatus_overflow(); + } +} +/* + * Vectorized implementation of log double using AVX512 + * Reference: + * [1] Tang, Ping Tak Peter. Table-lookup algorithms for elementary functions + * and their error analysis. No. CONF-9106103-1. Argonne National Lab., + * IL (USA), 1991. + * [2] Tang, Ping-Tak Peter. "Table-driven implementation of the logarithm + * function in IEEE floating-point arithmetic." ACM Transactions on + * Mathematical Software (TOMS) 16.4 (1990): 378-400. + * [3] Muller, Jean-Michel. "Elementary functions: algorithms and + * implementation." (2016). + * 1) if x = 0; return -INF + * 2) if x < 0; return NAN + * 3) if x is INF; return INF + * 4) if x is NAN; return NAN + * 5) if x on (1.0 - 0x1p-4, 1.0 + 0x1.09p-4), calling npy_log() + * 6) Range reduction: + * log(x) = log(2^m * z) + * = mln2 + log(z) + * 7) log(z) = log(z / c_k) + log(c_k); + * where c_k = 1 + k/64, k = 0,1,...,64 + * s.t. |x - c_k| <= 1/128 when x on[1,2]. + * 8) r = 2(x - c_k)/(x + c_k) + * log(x/c_k) = log((1 + r/2) / (1 - r/2)) + * = p(r) + * = 2((r/2) + 1/3*(r/2)^3 + 1/5*(r/2)^5 + ...) 
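+ *      (this is the series log((1 + t)/(1 - t)) = 2*(t + t^3/3 + t^5/5 + ...)
+ *       evaluated at t = r/2)
+ * 9) Reconstruction (summary of the code below):
+ *      log(x) = m*ln2 + log(c_k) + p(r)
+ *    where m*ln2 and log(c_k) are each kept as a high part and a low part
+ *    (mLN2HI/mLN2LO and LOG_TABLE_TOP/LOG_TABLE_TAIL) so that the final
+ *    additions lose as little precision as possible.
+ *    Worked example: x = 3 = 2^1 * 1.5 gives m = 1 and c_k = 1 + 32/64 = 1.5,
+ *    hence r = 0, p(r) = 0 and log(3) = ln2 + log(1.5).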
+ */ +static void +AVX512F_log_DOUBLE(npy_double * op, + npy_double * ip, + const npy_intp array_size, + const npy_intp steps) +{ + npy_intp num_remaining_elements = array_size; + const npy_intp stride = steps / (npy_intp)sizeof(npy_double); + const npy_int num_lanes = 64 / (npy_intp)sizeof(npy_double); + npy_int32 indexarr[8]; + for (npy_int32 ii = 0; ii < 8; ii++) { + indexarr[ii] = ii*stride; + } + + __m512d zeros_d = _mm512_set1_pd(0.0f); + __m512d ones_d = _mm512_set1_pd(1.0f); + __m512d mInf = _mm512_set1_pd(NPY_INFINITY); + __m512d mInv64 = _mm512_castsi512_pd(_mm512_set1_epi64(0x3f90000000000000)); + __m512d mNeg_nan = _mm512_set1_pd(-NPY_NAN); + __m512d mNan = _mm512_set1_pd(NPY_NAN); + __m512d mNeg_inf = _mm512_set1_pd(-NPY_INFINITY); + __m512d mA1 = _mm512_set1_pd(NPY_TANG_LOG_A1); + __m512d mA2 = _mm512_set1_pd(NPY_TANG_LOG_A2); + __m512d mA3 = _mm512_set1_pd(NPY_TANG_LOG_A3); + __m512d mA4 = _mm512_set1_pd(NPY_TANG_LOG_A4); + __m512d mLN2HI = _mm512_set1_pd(NPY_TANG_LOG_LN2HI); + __m512d mLN2LO = _mm512_set1_pd(NPY_TANG_LOG_LN2LO); + + __m512d mTo_glibc_min = _mm512_set1_pd(1.0 - 0x1p-4); + __m512d mTo_glibc_max = _mm512_set1_pd(1.0 + 0x1.09p-4); + __m256i vindex = _mm256_loadu_si256((__m256i*)&indexarr[0]); + + /* Load lookup table data */ + /**begin repeat + * #i = 0, 1, 2, 3, 4, 5, 6, 7# + */ + + __m512d mLUT_TOP_@i@ = _mm512_loadu_pd(&(LOG_TABLE_TOP[8*@i@])); + __m512d mLUT_TAIL_@i@ = _mm512_loadu_pd(&(LOG_TABLE_TAIL[8*@i@])); + + /**end repeat**/ + + __mmask8 load_mask = avx512_get_full_load_mask_pd(); + __mmask8 invalid_mask = avx512_get_partial_load_mask_pd(0, num_lanes); + __mmask8 divide_by_zero_mask = invalid_mask; + + __mmask8 inf_mask, nan_mask, zero_mask, negx_mask, denormal_mask, + glibc_mask; + + __m512d x_in; + while (num_remaining_elements > 0) { + if (num_remaining_elements < num_lanes) { + load_mask = avx512_get_partial_load_mask_pd(num_remaining_elements, + num_lanes); + } + + if (1 == stride) { + x_in = avx512_masked_load_pd(load_mask, ip); + } + else { + x_in = avx512_masked_gather_pd(zeros_d, ip, vindex, load_mask); + } + + /* call glibc when x on [1.0 - 0x1p-4, 1.0 + 0x1.09p-4] */ + __mmask8 m1 = _mm512_cmp_pd_mask(x_in, mTo_glibc_max, _CMP_LT_OQ); + __mmask8 m2 = _mm512_cmp_pd_mask(x_in, mTo_glibc_min, _CMP_GT_OQ); + glibc_mask = m1 & m2; + + if (glibc_mask != 0xFF) { + zero_mask = _mm512_cmp_pd_mask(x_in, zeros_d, _CMP_EQ_OQ); + inf_mask = _mm512_cmp_pd_mask(x_in, mInf, _CMP_EQ_OQ); + negx_mask = _mm512_cmp_pd_mask(x_in, zeros_d, _CMP_LT_OQ); + nan_mask = _mm512_cmp_pd_mask(x_in, x_in, _CMP_NEQ_UQ); + + divide_by_zero_mask = divide_by_zero_mask | (zero_mask & load_mask); + invalid_mask = invalid_mask | negx_mask; + + __m512d x = avx512_set_masked_lanes_pd(x_in, zeros_d, negx_mask); + __m512i ix = _mm512_castpd_si512(x); + + /* Normalize x when it is denormal */ + __m512i top12 = _mm512_and_epi64(ix, + _mm512_set1_epi64(0xfff0000000000000)); + denormal_mask = _mm512_cmp_epi64_mask(top12, _mm512_set1_epi64(0), + _CMP_EQ_OQ); + denormal_mask = (~zero_mask) & denormal_mask; + ix = _mm512_castpd_si512(_mm512_mask_mul_pd(x, denormal_mask, + x, _mm512_set1_pd(0x1p52))); + ix = _mm512_mask_sub_epi64(ix, denormal_mask, + ix, _mm512_set1_epi64(52ULL << 52)); + + /* + * x = 2^k * z; where z in range [1,2] + */ + __m512i tmp = _mm512_sub_epi64(ix, + _mm512_set1_epi64(0x3ff0000000000000)); + __m512i i = _mm512_and_epi64(_mm512_srai_epi64(tmp, 52 - 6), + _mm512_set1_epi64(0x3fULL)); + __m512i ik = _mm512_srai_epi64(tmp, 52); + __m512d z = 
_mm512_castsi512_pd(_mm512_sub_epi64(ix, _mm512_and_epi64(tmp, + _mm512_set1_epi64(0xfff0000000000000)))); + /* c = i/64 + 1 */ + __m256i i_32 = _mm512_cvtepi64_epi32(i); + __m512d c = _mm512_fmadd_pd(_mm512_cvtepi32_pd(i_32), mInv64, ones_d); + + /* u = 2 * (z - c) / (z + c) */ + __m512d u = _mm512_div_pd(_mm512_sub_pd(z, c), _mm512_add_pd(z, c)); + u = _mm512_mul_pd(_mm512_set1_pd(2.0), u); + + /* v = u * u */ + __m512d v = _mm512_mul_pd(u,u); + + /* log(z/c) = u + u*v*(A1 + v*(A2 + v*(A3 + v*A4))) */ + __m512d res = _mm512_fmadd_pd(v, mA4, mA3); + res = _mm512_fmadd_pd(v, res, mA2); + res = _mm512_fmadd_pd(v, res, mA1); + res = _mm512_mul_pd(v, res); + res = _mm512_fmadd_pd(u, res, u); + + /* Load lookup table data */ + __m512d c_hi = avx512_permute_x8var_pd(mLUT_TOP_0, mLUT_TOP_1, + mLUT_TOP_2, mLUT_TOP_3, mLUT_TOP_4, mLUT_TOP_5, + mLUT_TOP_6, mLUT_TOP_7, i); + __m512d c_lo = avx512_permute_x8var_pd(mLUT_TAIL_0, mLUT_TAIL_1, + mLUT_TAIL_2, mLUT_TAIL_3, mLUT_TAIL_4, mLUT_TAIL_5, + mLUT_TAIL_6, mLUT_TAIL_7, i); + + /* + * log(x) = k * ln2_hi + c_hi + + * k * ln2_lo + c_lo + + * log(z/c) + */ + __m256i ik_32 = _mm512_cvtepi64_epi32(ik); + __m512d k = _mm512_cvtepi32_pd(ik_32); + __m512d tt = _mm512_fmadd_pd(k, mLN2HI, c_hi); + __m512d tt2 = _mm512_fmadd_pd(k, mLN2LO, c_lo); + tt = _mm512_add_pd(tt, tt2); + res = _mm512_add_pd(tt, res); + + /* return special cases */ + res = avx512_set_masked_lanes_pd(res, mNan, nan_mask); + res = avx512_set_masked_lanes_pd(res, mNeg_nan, negx_mask); + res = avx512_set_masked_lanes_pd(res, mNeg_inf, zero_mask); + res = avx512_set_masked_lanes_pd(res, mInf, inf_mask); + + _mm512_mask_storeu_pd(op, load_mask, res); + } + + /* call glibc's log func when x around 1.0f */ + if (glibc_mask != 0) { + double NPY_DECL_ALIGNED(64) ip_fback[8]; + _mm512_store_pd(ip_fback, x_in); + + for (int ii = 0; ii < 8; ++ii, glibc_mask >>= 1) { + if (glibc_mask & 0x01) { + op[ii] = npy_log(ip_fback[ii]); + } + } + } + ip += num_lanes * stride; + op += num_lanes; + num_remaining_elements -= num_lanes; + } + + if (invalid_mask) { + npy_set_floatstatus_invalid(); + } + if (divide_by_zero_mask) { + npy_set_floatstatus_divbyzero(); + } +} +#endif // AVX512F_NOCLANG_BUG + +#ifdef SIMD_AVX512_SKX +/**begin repeat + * #type = npy_float, npy_double# + * #TYPE = FLOAT, DOUBLE# + * #num_lanes = 16, 8# + * #vsuffix = ps, pd# + * #mask = __mmask16, __mmask8# + * #vtype1 = __m512, __m512d# + * #vtype2 = __m512i, __m256i# + * #scale = 4, 8# + * #vindextype = __m512i, __m256i# + * #vindexsize = 512, 256# + * #vindexload = _mm512_loadu_si512, _mm256_loadu_si256# + * #vtype2_load = _mm512_maskz_loadu_epi32, _mm256_maskz_loadu_epi32# + * #vtype2_gather = _mm512_mask_i32gather_epi32, _mm256_mmask_i32gather_epi32# + * #vtype2_store = _mm512_mask_storeu_epi32, _mm256_mask_storeu_epi32# + * #vtype2_scatter = _mm512_mask_i32scatter_epi32, _mm256_mask_i32scatter_epi32# + * #setzero = _mm512_setzero_epi32, _mm256_setzero_si256# + */ +static NPY_INLINE void +AVX512_SKX_ldexp_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) +{ + const npy_intp stride_ip1 = steps[0]/(npy_intp)sizeof(@type@); + const npy_intp stride_ip2 = steps[1]/(npy_intp)sizeof(int); + const npy_intp stride_op = steps[2]/(npy_intp)sizeof(@type@); + const npy_intp array_size = dimensions[0]; + npy_intp num_remaining_elements = array_size; + @type@* ip1 = (@type@*) args[0]; + int* ip2 = (int*) args[1]; + @type@* op = (@type@*) args[2]; + + @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@(); + + /* + * Note: while 
generally indices are npy_intp, we ensure that our maximum index + * will fit in an int32 as a precondition for this function via + * IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP + */ + + npy_int32 index_ip1[@num_lanes@], index_ip2[@num_lanes@], index_op[@num_lanes@]; + for (npy_int32 ii = 0; ii < @num_lanes@; ii++) { + index_ip1[ii] = ii*stride_ip1; + index_ip2[ii] = ii*stride_ip2; + index_op[ii] = ii*stride_op; + } + @vindextype@ vindex_ip1 = @vindexload@((@vindextype@*)&index_ip1[0]); + @vindextype@ vindex_ip2 = @vindexload@((@vindextype@*)&index_ip2[0]); + @vindextype@ vindex_op = @vindexload@((@vindextype@*)&index_op[0]); + @vtype1@ zeros_f = _mm512_setzero_@vsuffix@(); + @vtype2@ zeros = @setzero@(); + + while (num_remaining_elements > 0) { + if (num_remaining_elements < @num_lanes@) { + load_mask = avx512_get_partial_load_mask_@vsuffix@( + num_remaining_elements, @num_lanes@); + } + @vtype1@ x1; + @vtype2@ x2; + if (stride_ip1 == 1) { + x1 = avx512_masked_load_@vsuffix@(load_mask, ip1); + } + else { + x1 = avx512_masked_gather_@vsuffix@(zeros_f, ip1, vindex_ip1, load_mask); + } + if (stride_ip2 == 1) { + x2 = @vtype2_load@(load_mask, ip2); + } + else { + x2 = @vtype2_gather@(zeros, load_mask, vindex_ip2, ip2, 4); + } + + @vtype1@ out = _mm512_scalef_@vsuffix@(x1, _mm512_cvtepi32_@vsuffix@(x2)); + + if (stride_op == 1) { + _mm512_mask_storeu_@vsuffix@(op, load_mask, out); + } + else { + /* scatter! */ + _mm512_mask_i32scatter_@vsuffix@(op, load_mask, vindex_op, out, @scale@); + } + + ip1 += @num_lanes@*stride_ip1; + ip2 += @num_lanes@*stride_ip2; + op += @num_lanes@*stride_op; + num_remaining_elements -= @num_lanes@; + } +} + +static NPY_INLINE void +AVX512_SKX_frexp_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) +{ + const npy_intp stride_ip1 = steps[0]/(npy_intp)sizeof(@type@); + const npy_intp stride_op1 = steps[1]/(npy_intp)sizeof(@type@); + const npy_intp stride_op2 = steps[2]/(npy_intp)sizeof(int); + const npy_intp array_size = dimensions[0]; + npy_intp num_remaining_elements = array_size; + @type@* ip1 = (@type@*) args[0]; + @type@* op1 = (@type@*) args[1]; + int* op2 = (int*) args[2]; + + @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@(); + + /* + * Note: while generally indices are npy_intp, we ensure that our maximum index + * will fit in an int32 as a precondition for this function via + * IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP + */ + + npy_int32 index_ip1[@num_lanes@], index_op1[@num_lanes@], index_op2[@num_lanes@]; + for (npy_int32 ii = 0; ii < @num_lanes@; ii++) { + index_ip1[ii] = ii*stride_ip1; + index_op1[ii] = ii*stride_op1; + index_op2[ii] = ii*stride_op2; + } + @vindextype@ vindex_ip1 = @vindexload@((@vindextype@*)&index_ip1[0]); + @vindextype@ vindex_op1 = @vindexload@((@vindextype@*)&index_op1[0]); + @vindextype@ vindex_op2 = @vindexload@((@vindextype@*)&index_op2[0]); + @vtype1@ zeros_f = _mm512_setzero_@vsuffix@(); + + while (num_remaining_elements > 0) { + if (num_remaining_elements < @num_lanes@) { + load_mask = avx512_get_partial_load_mask_@vsuffix@( + num_remaining_elements, @num_lanes@); + } + @vtype1@ x1; + if (stride_ip1 == 1) { + x1 = avx512_masked_load_@vsuffix@(load_mask, ip1); + } + else { + x1 = avx512_masked_gather_@vsuffix@(zeros_f, ip1, vindex_ip1, load_mask); + } + + /* + * The x86 instructions vpgetmant and vpgetexp do not conform + * with NumPy's output for special floating points: NAN, +/-INF, +/-0.0 + * We mask these values with spmask to avoid invalid exceptions. 
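+     * The 0b10011111 immediate encodes exactly the special-value categories
+     * listed above (quiet and signalling NaN, +/-0.0, +/-INF), so spmask
+     * (its complement) marks the lanes that are safe to feed to
+     * getmant/getexp.  On those lanes frexp is assembled the usual way:
+     * mantissa = getmant(x) in [0.5, 1) and exponent = getexp(x) + 1, i.e.
+     * x == mantissa * 2^exponent, matching the C library convention.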
+ */ + @mask@ spmask =_mm512_knot(_mm512_fpclass_@vsuffix@_mask( + x1, 0b10011111)); + @vtype1@ out1 = _mm512_maskz_getmant_@vsuffix@( + spmask, x1, _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src); + out1 = _mm512_mask_mov_@vsuffix@(x1, spmask, out1); + @vtype2@ out2 = _mm512_cvt@vsuffix@_epi32( + _mm512_maskz_add_@vsuffix@(spmask, _mm512_set1_@vsuffix@(1.0), + _mm512_maskz_getexp_@vsuffix@(spmask, x1))); + if (stride_op1 == 1) { + _mm512_mask_storeu_@vsuffix@(op1, load_mask, out1); + } + else { + _mm512_mask_i32scatter_@vsuffix@(op1, load_mask, vindex_op1, out1, @scale@); + } + if (stride_op2 == 1) { + @vtype2_store@(op2, load_mask, out2); + } + else { + @vtype2_scatter@(op2, load_mask, vindex_op2, out2, 4); + } + + ip1 += @num_lanes@*stride_ip1; + op1 += @num_lanes@*stride_op1; + op2 += @num_lanes@*stride_op2; + num_remaining_elements -= @num_lanes@; + } +} +/**end repeat**/ +#endif // SIMD_AVX512_SKX + + +/******************************************************************************** + ** Defining ufunc inner functions + ********************************************************************************/ +/**begin repeat + * #func = exp, log# + * #scalarf = npy_expf, npy_logf# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_@func@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if defined(SIMD_AVX2_FMA3) || defined(SIMD_AVX512F) + // third arg in `IS_OUTPUT_BLOCKABLE_UNARY` is dummy + // TODO: get ride of this macro during the move to NPYV + if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), sizeof(npy_float), 64)) { + simd_@func@_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0]); + } + else { + UNARY_LOOP { + /* + * We use the AVX function to compute exp/log for scalar elements as well. + * This is needed to ensure the output of strided and non-strided + * cases match. SIMD code handles strided input cases, but not + * strided output. 
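+                 * Calling the kernel with an array_size of 1 below sends every
+                 * strided output element through the same polynomial evaluation
+                 * as the contiguous case, which is what keeps the two outputs
+                 * identical.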
+ */ + simd_@func@_FLOAT((npy_float *)op1, (npy_float *)ip1, 1, steps[0]); + } + } +#else + UNARY_LOOP { + const npy_float in1 = *(npy_float *)ip1; + *(npy_float *)op1 = @scalarf@(in1); + } +#endif +} +/**end repeat**/ + +/**begin repeat + * #func = exp, log# + * #scalar = npy_exp, npy_log# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#ifdef SIMD_AVX512F_NOCLANG_BUG + if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_double), sizeof(npy_double), 64)) { + AVX512F_@func@_DOUBLE((npy_double*)args[1], (npy_double*)args[0], dimensions[0], steps[0]); + return; + } +#endif + UNARY_LOOP { + const npy_double in1 = *(npy_double *)ip1; + *(npy_double *)op1 = @scalar@(in1); + } +} +/**end repeat**/ + +/**begin repeat + * Float types + * #type = npy_float, npy_double# + * #TYPE = FLOAT, DOUBLE# + * #c = f, # + * #C = F, # + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_frexp) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ +#ifdef SIMD_AVX512_SKX + if (IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP) { + AVX512_SKX_frexp_@TYPE@(args, dimensions, steps); + return; + } +#endif + UNARY_LOOP_TWO_OUT { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = npy_frexp@c@(in1, (int *)op2); + } +} + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_ldexp) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ +#ifdef SIMD_AVX512_SKX + if (IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP) { + AVX512_SKX_ldexp_@TYPE@(args, dimensions, steps); + return; + } +#endif + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const int in2 = *(int *)ip2; + *((@type@ *)op1) = npy_ldexp@c@(in1, in2); + } +} +/**end repeat**/ diff --git a/numpy/core/src/umath/npy_simd_data.h b/numpy/core/src/umath/npy_simd_data.h index be9288affc7e..62438d7a3fa8 100644 --- a/numpy/core/src/umath/npy_simd_data.h +++ b/numpy/core/src/umath/npy_simd_data.h @@ -1,6 +1,6 @@ #ifndef __NPY_SIMD_DATA_H_ #define __NPY_SIMD_DATA_H_ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS +#if defined NPY_HAVE_AVX512F #if !(defined(__clang__) && (__clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 1))) /* * Constants used in vector implementation of float64 exp(x) @@ -122,11 +122,11 @@ static npy_uint64 EXP_Table_tail[32] = { /* * Lookup table of log(c_k) - * Reference form: Tang, Ping-Tak Peter. "Table-driven implementation of the - * logarithm function in IEEE floating-point arithmetic." ACM Transactions + * Reference form: Tang, Ping-Tak Peter. "Table-driven implementation of the + * logarithm function in IEEE floating-point arithmetic." ACM Transactions * on Mathematical Software (TOMS) 16.4 (1990): 378-400. */ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS +#if defined NPY_HAVE_AVX512F #if !(defined(__clang__) && (__clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 1))) static npy_uint64 LOG_TABLE_TOP[64] = { 0x0000000000000000, diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index e667639860f5..1a345b1fbaec 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -36,21 +36,6 @@ #define VECTOR_SIZE_BYTES 16 -/* - * MAX_STEP_SIZE is used to determine if we need to use SIMD version of the ufunc. - * Very large step size can be as slow as processing it using scalar. 
The - * value of 2097152 ( = 2MB) was chosen using 2 considerations: - * 1) Typical linux kernel page size is 4Kb, but sometimes it could also be 2MB - * which is == 2097152 Bytes. For a step size as large as this, surely all - * the loads/stores of gather/scatter instructions falls on 16 different pages - * which one would think would slow down gather/scatter instructions. - * 2) It additionally satisfies MAX_STEP_SIZE*16/esize < NPY_MAX_INT32 which - * allows us to use i32 version of gather/scatter (as opposed to the i64 version) - * without problems (step larger than NPY_MAX_INT32*esize/16 would require use of - * i64gather/scatter). esize = element size = 4/8 bytes for float/double. - */ -#define MAX_STEP_SIZE 2097152 - /* * Dispatcher functions * decide whether the operation can be vectorized and run it @@ -134,42 +119,6 @@ run_binary_avx512f_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_in /**end repeat1**/ - -#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS && @EXISTS@ -static NPY_INLINE NPY_GCC_TARGET_AVX512_SKX void -AVX512_SKX_ldexp_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps); - -static NPY_INLINE NPY_GCC_TARGET_AVX512_SKX void -AVX512_SKX_frexp_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps); -#endif - -static NPY_INLINE int -run_binary_avx512_skx_ldexp_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS && @EXISTS@ - if (IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP) { - AVX512_SKX_ldexp_@TYPE@(args, dimensions, steps); - return 1; - } - else - return 0; -#endif - return 0; -} - -static NPY_INLINE int -run_unary_two_out_avx512_skx_frexp_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS && @EXISTS@ - if (IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP) { - AVX512_SKX_frexp_@TYPE@(args, dimensions, steps); - return 1; - } - else - return 0; -#endif - return 0; -} /**end repeat**/ /**begin repeat @@ -245,74 +194,8 @@ run_unary_@isa@_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp /**end repeat2**/ /**end repeat1**/ - -/**begin repeat1 - * #func = exp, log# - */ - -#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS -static NPY_INLINE void -@ISA@_@func@_FLOAT(npy_float *, npy_float *, const npy_intp n, const npy_intp stride); -#endif - -static NPY_INLINE int -run_unary_@isa@_@func@_FLOAT(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ -#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS - if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), sizeof(npy_float), @REGISTER_SIZE@)) { - @ISA@_@func@_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0]); - return 1; - } - else - return 0; -#endif - return 0; -} - -/**end repeat1**/ - /**end repeat**/ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS -static NPY_INLINE void -AVX512F_exp_DOUBLE(npy_double *, npy_double *, const npy_intp n, const npy_intp stride); -#endif -static NPY_INLINE int -run_unary_avx512f_exp_DOUBLE(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS -#if !(defined(__clang__) && (__clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 1))) - if 
(IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_double), sizeof(npy_double), 64)) { - AVX512F_exp_DOUBLE((npy_double*)args[1], (npy_double*)args[0], dimensions[0], steps[0]); - return 1; - } - else - return 0; -#endif -#endif - return 0; -} - -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS -static NPY_INLINE void -AVX512F_log_DOUBLE(npy_double *, npy_double *, const npy_intp n, const npy_intp stride); -#endif -static NPY_INLINE int -run_unary_avx512f_log_DOUBLE(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS -#if !(defined(__clang__) && (__clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 1))) - if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_double), sizeof(npy_double), 64)) { - AVX512F_log_DOUBLE((npy_double*)args[1], (npy_double*)args[0], dimensions[0], steps[0]); - return 1; - } - else - return 0; -#endif -#endif - return 0; -} - /**begin repeat * Float types * #type = npy_float, npy_double, npy_longdouble# @@ -956,106 +839,6 @@ fma_invert_mask_pd(__m256i ymask) return _mm256_andnot_si256(ymask, _mm256_set1_epi32(0xFFFFFFFF)); } -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 -fma_get_exponent(__m256 x) -{ - /* - * Special handling of denormals: - * 1) Multiply denormal elements with 2**100 (0x71800000) - * 2) Get the 8 bits of unbiased exponent - * 3) Subtract 100 from exponent of denormals - */ - - __m256 two_power_100 = _mm256_castsi256_ps(_mm256_set1_epi32(0x71800000)); - __m256 denormal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_LT_OQ); - __m256 normal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_GE_OQ); - - /* - * It is necessary for temp1 to be volatile, a bug in clang optimizes it out which leads - * to an overflow warning in some cases. See https://github.com/numpy/numpy/issues/18005 - */ - volatile __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask); - __m256 temp = _mm256_mul_ps(temp1, two_power_100); - x = _mm256_blendv_ps(x, temp, denormal_mask); - - __m256 exp = _mm256_cvtepi32_ps( - _mm256_sub_epi32( - _mm256_srli_epi32( - _mm256_castps_si256(x), 23),_mm256_set1_epi32(0x7E))); - - __m256 denorm_exp = _mm256_sub_ps(exp, _mm256_set1_ps(100.0f)); - return _mm256_blendv_ps(exp, denorm_exp, denormal_mask); -} - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 -fma_get_mantissa(__m256 x) -{ - /* - * Special handling of denormals: - * 1) Multiply denormal elements with 2**100 (0x71800000) - * 2) Get the 23 bits of mantissa - * 3) Mantissa for denormals is not affected by the multiplication - */ - - __m256 two_power_100 = _mm256_castsi256_ps(_mm256_set1_epi32(0x71800000)); - __m256 denormal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_LT_OQ); - __m256 normal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_GE_OQ); - - /* - * It is necessary for temp1 to be volatile, a bug in clang optimizes it out which leads - * to an overflow warning in some cases. 
See https://github.com/numpy/numpy/issues/18005 - */ - volatile __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask); - __m256 temp = _mm256_mul_ps(temp1, two_power_100); - x = _mm256_blendv_ps(x, temp, denormal_mask); - - __m256i mantissa_bits = _mm256_set1_epi32(0x7fffff); - __m256i exp_126_bits = _mm256_set1_epi32(126 << 23); - return _mm256_castsi256_ps( - _mm256_or_si256( - _mm256_and_si256( - _mm256_castps_si256(x), mantissa_bits), exp_126_bits)); -} - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256 -fma_scalef_ps(__m256 poly, __m256 quadrant) -{ - /* - * Handle denormals (which occur when quadrant <= -125): - * 1) This function computes poly*(2^quad) by adding the exponent of - poly to quad - * 2) When quad <= -125, the output is a denormal and the above logic - breaks down - * 3) To handle such cases, we split quadrant: -125 + (quadrant + 125) - * 4) poly*(2^-125) is computed the usual way - * 5) 2^(quad-125) can be computed by: 2 << abs(quad-125) - * 6) The final div operation generates the denormal - */ - __m256 minquadrant = _mm256_set1_ps(-125.0f); - __m256 denormal_mask = _mm256_cmp_ps(quadrant, minquadrant, _CMP_LE_OQ); - if (_mm256_movemask_ps(denormal_mask) != 0x0000) { - __m256 quad_diff = _mm256_sub_ps(quadrant, minquadrant); - quad_diff = _mm256_sub_ps(_mm256_setzero_ps(), quad_diff); - quad_diff = _mm256_blendv_ps(_mm256_setzero_ps(), quad_diff, denormal_mask); - __m256i two_power_diff = _mm256_sllv_epi32( - _mm256_set1_epi32(1), _mm256_cvtps_epi32(quad_diff)); - quadrant = _mm256_max_ps(quadrant, minquadrant); //keep quadrant >= -126 - __m256i exponent = _mm256_slli_epi32(_mm256_cvtps_epi32(quadrant), 23); - poly = _mm256_castsi256_ps( - _mm256_add_epi32( - _mm256_castps_si256(poly), exponent)); - __m256 denorm_poly = _mm256_div_ps(poly, _mm256_cvtepi32_ps(two_power_diff)); - return _mm256_blendv_ps(poly, denorm_poly, denormal_mask); - } - else { - __m256i exponent = _mm256_slli_epi32(_mm256_cvtps_epi32(quadrant), 23); - poly = _mm256_castsi256_ps( - _mm256_add_epi32( - _mm256_castps_si256(poly), exponent)); - return poly; - } -} - /**begin repeat * #vsub = ps, pd# * #vtype = __m256, __m256d# @@ -1183,52 +966,6 @@ avx512_invert_mask_pd(__mmask8 ymask) return _mm512_knot(ymask); } -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512 -avx512_get_exponent(__m512 x) -{ - return _mm512_add_ps(_mm512_getexp_ps(x), _mm512_set1_ps(1.0f)); -} - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512 -avx512_get_mantissa(__m512 x) -{ - return _mm512_getmant_ps(x, _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src); -} - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512 -avx512_scalef_ps(__m512 poly, __m512 quadrant) -{ - return _mm512_scalef_ps(poly, quadrant); -} - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d -avx512_permute_x4var_pd(__m512d t0, - __m512d t1, - __m512d t2, - __m512d t3, - __m512i index) -{ - __mmask8 lut_mask = _mm512_cmp_epi64_mask( - _mm512_and_epi64(_mm512_set1_epi64(0x10ULL), index), - _mm512_set1_epi64(0), _MM_CMPINT_GT); - __m512d res1 = _mm512_permutex2var_pd(t0, index, t1); - __m512d res2 = _mm512_permutex2var_pd(t2, index, t3); - return _mm512_mask_blend_pd(lut_mask, res1, res2); -} - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d -avx512_permute_x8var_pd(__m512d t0, __m512d t1, __m512d t2, __m512d t3, - __m512d t4, __m512d t5, __m512d t6, __m512d t7, - __m512i index) -{ - __mmask8 lut_mask = _mm512_cmp_epi64_mask( - _mm512_and_epi64(_mm512_set1_epi64(0x20ULL), index), - 
_mm512_set1_epi64(0), _MM_CMPINT_GT); - __m512d res1 = avx512_permute_x4var_pd(t0, t1, t2, t3, index); - __m512d res2 = avx512_permute_x4var_pd(t4, t5, t6, t7, index); - return _mm512_mask_blend_pd(lut_mask, res1, res2); -} - /**begin repeat * #vsub = ps, pd# * #type= npy_float, npy_double# @@ -1386,34 +1123,6 @@ avx512_csquare_@vsub@(@vtype@ x) #if defined @CHK@ -/* - * Vectorized Cody-Waite range reduction technique - * Performs the reduction step x* = x - y*C in three steps: - * 1) x* = x - y*c1 - * 2) x* = x - y*c2 - * 3) x* = x - y*c3 - * c1, c2 are exact floating points, c3 = C - c1 - c2 simulates higher precision - */ - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@ -@isa@_range_reduction(@vtype@ x, @vtype@ y, @vtype@ c1, @vtype@ c2, @vtype@ c3) -{ - @vtype@ reduced_x = @fmadd@(y, c1, x); - reduced_x = @fmadd@(y, c2, reduced_x); - reduced_x = @fmadd@(y, c3, reduced_x); - return reduced_x; -} - -static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @mask@ -@isa@_in_range_mask(@vtype@ x, npy_float fmax, npy_float fmin) -{ - @mask@ m1 = _mm@vsize@_cmp_ps@vsub@( - x, _mm@vsize@_set1_ps(fmax), _CMP_GT_OQ); - @mask@ m2 = _mm@vsize@_cmp_ps@vsub@( - x, _mm@vsize@_set1_ps(fmin), _CMP_LT_OQ); - return _mm@vsize@_@or@(m1,m2); -} - static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@ @isa@_sqrt_ps(@vtype@ x) { @@ -1537,155 +1246,6 @@ AVX512_SKX_@func@_@TYPE@(npy_bool* op, @type@* ip, const npy_intp array_size, co * #vtype2_scatter = _mm512_mask_i32scatter_epi32, _mm256_mask_i32scatter_epi32# * #setzero = _mm512_setzero_epi32, _mm256_setzero_si256# */ - -#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS -static NPY_INLINE NPY_GCC_TARGET_AVX512_SKX void -AVX512_SKX_ldexp_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ - const npy_intp stride_ip1 = steps[0]/(npy_intp)sizeof(@type@); - const npy_intp stride_ip2 = steps[1]/(npy_intp)sizeof(int); - const npy_intp stride_op = steps[2]/(npy_intp)sizeof(@type@); - const npy_intp array_size = dimensions[0]; - npy_intp num_remaining_elements = array_size; - @type@* ip1 = (@type@*) args[0]; - int* ip2 = (int*) args[1]; - @type@* op = (@type@*) args[2]; - - @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@(); - - /* - * Note: while generally indices are npy_intp, we ensure that our maximum index - * will fit in an int32 as a precondition for this function via - * IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP - */ - - npy_int32 index_ip1[@num_lanes@], index_ip2[@num_lanes@], index_op[@num_lanes@]; - for (npy_int32 ii = 0; ii < @num_lanes@; ii++) { - index_ip1[ii] = ii*stride_ip1; - index_ip2[ii] = ii*stride_ip2; - index_op[ii] = ii*stride_op; - } - @vindextype@ vindex_ip1 = @vindexload@((@vindextype@*)&index_ip1[0]); - @vindextype@ vindex_ip2 = @vindexload@((@vindextype@*)&index_ip2[0]); - @vindextype@ vindex_op = @vindexload@((@vindextype@*)&index_op[0]); - @vtype1@ zeros_f = _mm512_setzero_@vsuffix@(); - @vtype2@ zeros = @setzero@(); - - while (num_remaining_elements > 0) { - if (num_remaining_elements < @num_lanes@) { - load_mask = avx512_get_partial_load_mask_@vsuffix@( - num_remaining_elements, @num_lanes@); - } - @vtype1@ x1; - @vtype2@ x2; - if (stride_ip1 == 1) { - x1 = avx512_masked_load_@vsuffix@(load_mask, ip1); - } - else { - x1 = avx512_masked_gather_@vsuffix@(zeros_f, ip1, vindex_ip1, load_mask); - } - if (stride_ip2 == 1) { - x2 = @vtype2_load@(load_mask, ip2); - } - else { - x2 = @vtype2_gather@(zeros, load_mask, vindex_ip2, ip2, 4); - } - - @vtype1@ 
out = _mm512_scalef_@vsuffix@(x1, _mm512_cvtepi32_@vsuffix@(x2)); - - if (stride_op == 1) { - _mm512_mask_storeu_@vsuffix@(op, load_mask, out); - } - else { - /* scatter! */ - _mm512_mask_i32scatter_@vsuffix@(op, load_mask, vindex_op, out, @scale@); - } - - ip1 += @num_lanes@*stride_ip1; - ip2 += @num_lanes@*stride_ip2; - op += @num_lanes@*stride_op; - num_remaining_elements -= @num_lanes@; - } -} - -static NPY_INLINE NPY_GCC_TARGET_AVX512_SKX void -AVX512_SKX_frexp_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ - const npy_intp stride_ip1 = steps[0]/(npy_intp)sizeof(@type@); - const npy_intp stride_op1 = steps[1]/(npy_intp)sizeof(@type@); - const npy_intp stride_op2 = steps[2]/(npy_intp)sizeof(int); - const npy_intp array_size = dimensions[0]; - npy_intp num_remaining_elements = array_size; - @type@* ip1 = (@type@*) args[0]; - @type@* op1 = (@type@*) args[1]; - int* op2 = (int*) args[2]; - - @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@(); - - /* - * Note: while generally indices are npy_intp, we ensure that our maximum index - * will fit in an int32 as a precondition for this function via - * IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP - */ - - npy_int32 index_ip1[@num_lanes@], index_op1[@num_lanes@], index_op2[@num_lanes@]; - for (npy_int32 ii = 0; ii < @num_lanes@; ii++) { - index_ip1[ii] = ii*stride_ip1; - index_op1[ii] = ii*stride_op1; - index_op2[ii] = ii*stride_op2; - } - @vindextype@ vindex_ip1 = @vindexload@((@vindextype@*)&index_ip1[0]); - @vindextype@ vindex_op1 = @vindexload@((@vindextype@*)&index_op1[0]); - @vindextype@ vindex_op2 = @vindexload@((@vindextype@*)&index_op2[0]); - @vtype1@ zeros_f = _mm512_setzero_@vsuffix@(); - - while (num_remaining_elements > 0) { - if (num_remaining_elements < @num_lanes@) { - load_mask = avx512_get_partial_load_mask_@vsuffix@( - num_remaining_elements, @num_lanes@); - } - @vtype1@ x1; - if (stride_ip1 == 1) { - x1 = avx512_masked_load_@vsuffix@(load_mask, ip1); - } - else { - x1 = avx512_masked_gather_@vsuffix@(zeros_f, ip1, vindex_ip1, load_mask); - } - - /* - * The x86 instructions vpgetmant and vpgetexp do not conform - * with NumPy's output for special floating points: NAN, +/-INF, +/-0.0 - * We mask these values with spmask to avoid invalid exceptions. 
- */ - @mask@ spmask =_mm512_knot(_mm512_fpclass_@vsuffix@_mask( - x1, 0b10011111)); - @vtype1@ out1 = _mm512_maskz_getmant_@vsuffix@( - spmask, x1, _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src); - out1 = _mm512_mask_mov_@vsuffix@(x1, spmask, out1); - @vtype2@ out2 = _mm512_cvt@vsuffix@_epi32( - _mm512_maskz_add_@vsuffix@(spmask, _mm512_set1_@vsuffix@(1.0), - _mm512_maskz_getexp_@vsuffix@(spmask, x1))); - if (stride_op1 == 1) { - _mm512_mask_storeu_@vsuffix@(op1, load_mask, out1); - } - else { - _mm512_mask_i32scatter_@vsuffix@(op1, load_mask, vindex_op1, out1, @scale@); - } - if (stride_op2 == 1) { - @vtype2_store@(op2, load_mask, out2); - } - else { - @vtype2_scatter@(op2, load_mask, vindex_op2, out2, 4); - } - - ip1 += @num_lanes@*stride_ip1; - op1 += @num_lanes@*stride_op1; - op2 += @num_lanes@*stride_op2; - num_remaining_elements -= @num_lanes@; - } -} -#endif - /**begin repeat1 * #func = maximum, minimum# * #vectorf = max, min# @@ -1907,654 +1467,6 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void /**end repeat1**/ /**end repeat**/ -/**begin repeat - * #ISA = FMA, AVX512F# - * #isa = fma, avx512# - * #vtype = __m256, __m512# - * #vsize = 256, 512# - * #BYTES = 32, 64# - * #NUM_LANES = 8, 16# - * #mask = __m256, __mmask16# - * #vsub = , _mask# - * #or_masks =_mm256_or_ps, _mm512_kor# - * #and_masks =_mm256_and_ps, _mm512_kand# - * #xor_masks =_mm256_xor_ps, _mm512_kxor# - * #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps# - * #mask_to_int = _mm256_movemask_ps, # - * #full_mask= 0xFF, 0xFFFF# - * #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps# - * #cvtps_epi32 = _mm256_cvtps_epi32, # - * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS# - */ -#if defined @CHK@ -/* - * Vectorized implementation of exp using AVX2 and AVX512: - * 1) if x >= xmax; return INF (overflow) - * 2) if x <= xmin; return 0.0f (underflow) - * 3) Range reduction (using Coyd-Waite): - * a) y = x - k*ln(2); k = rint(x/ln(2)); y \in [0, ln(2)] - * 4) Compute exp(y) = P/Q, ratio of 2 polynomials P and Q - * b) P = 5th order and Q = 2nd order polynomials obtained from Remez's - * algorithm (mini-max polynomial approximation) - * 5) Compute exp(x) = exp(y) * 2^k - * 6) Max ULP error measured across all 32-bit FP's = 2.52 (x = 0xc2781e37) - * 7) Max relative error measured across all 32-bit FP's= 2.1264E-07 (for the - * same x = 0xc2781e37) - */ - -static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void -@ISA@_exp_FLOAT(npy_float * op, - npy_float * ip, - const npy_intp array_size, - const npy_intp steps) -{ - const npy_intp stride = steps/(npy_intp)sizeof(npy_float); - const npy_int num_lanes = @BYTES@/(npy_intp)sizeof(npy_float); - npy_float xmax = 88.72283935546875f; - npy_float xmin = -103.97208404541015625f; - - /* - * Note: while generally indices are npy_intp, we ensure that our maximum index - * will fit in an int32 as a precondition for this function via - * IS_OUTPUT_BLOCKABLE_UNARY - */ - npy_int32 indexarr[16]; - for (npy_int32 ii = 0; ii < 16; ii++) { - indexarr[ii] = ii*stride; - } - - /* Load up frequently used constants */ - @vtype@ codyw_c1 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_LOGE_2_HIGHf); - @vtype@ codyw_c2 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_LOGE_2_LOWf); - @vtype@ exp_p0 = _mm@vsize@_set1_ps(NPY_COEFF_P0_EXPf); - @vtype@ exp_p1 = _mm@vsize@_set1_ps(NPY_COEFF_P1_EXPf); - @vtype@ exp_p2 = _mm@vsize@_set1_ps(NPY_COEFF_P2_EXPf); - @vtype@ exp_p3 = _mm@vsize@_set1_ps(NPY_COEFF_P3_EXPf); - @vtype@ exp_p4 = _mm@vsize@_set1_ps(NPY_COEFF_P4_EXPf); - @vtype@ 
exp_p5 = _mm@vsize@_set1_ps(NPY_COEFF_P5_EXPf); - @vtype@ exp_q0 = _mm@vsize@_set1_ps(NPY_COEFF_Q0_EXPf); - @vtype@ exp_q1 = _mm@vsize@_set1_ps(NPY_COEFF_Q1_EXPf); - @vtype@ exp_q2 = _mm@vsize@_set1_ps(NPY_COEFF_Q2_EXPf); - @vtype@ cvt_magic = _mm@vsize@_set1_ps(NPY_RINT_CVT_MAGICf); - @vtype@ log2e = _mm@vsize@_set1_ps(NPY_LOG2Ef); - @vtype@ inf = _mm@vsize@_set1_ps(NPY_INFINITYF); - @vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f); - @vtype@ poly, num_poly, denom_poly, quadrant; - @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]); - - @mask@ xmax_mask, xmin_mask, nan_mask, inf_mask; - @mask@ overflow_mask = @isa@_get_partial_load_mask_ps(0, num_lanes); - @mask@ load_mask = @isa@_get_full_load_mask_ps(); - npy_intp num_remaining_elements = array_size; - - while (num_remaining_elements > 0) { - - if (num_remaining_elements < num_lanes) { - load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements, - num_lanes); - } - - @vtype@ x; - if (stride == 1) { - x = @isa@_masked_load_ps(load_mask, ip); - } - else { - x = @isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask); - } - - nan_mask = _mm@vsize@_cmp_ps@vsub@(x, x, _CMP_NEQ_UQ); - x = @isa@_set_masked_lanes_ps(x, zeros_f, nan_mask); - - xmax_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmax), _CMP_GE_OQ); - xmin_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmin), _CMP_LE_OQ); - inf_mask = _mm@vsize@_cmp_ps@vsub@(x, inf, _CMP_EQ_OQ); - overflow_mask = @or_masks@(overflow_mask, - @xor_masks@(xmax_mask, inf_mask)); - - x = @isa@_set_masked_lanes_ps(x, zeros_f, @or_masks@( - @or_masks@(nan_mask, xmin_mask), xmax_mask)); - - quadrant = _mm@vsize@_mul_ps(x, log2e); - - /* round to nearest */ - quadrant = _mm@vsize@_add_ps(quadrant, cvt_magic); - quadrant = _mm@vsize@_sub_ps(quadrant, cvt_magic); - - /* Cody-Waite's range reduction algorithm */ - x = @isa@_range_reduction(x, quadrant, codyw_c1, codyw_c2, zeros_f); - - num_poly = @fmadd@(exp_p5, x, exp_p4); - num_poly = @fmadd@(num_poly, x, exp_p3); - num_poly = @fmadd@(num_poly, x, exp_p2); - num_poly = @fmadd@(num_poly, x, exp_p1); - num_poly = @fmadd@(num_poly, x, exp_p0); - denom_poly = @fmadd@(exp_q2, x, exp_q1); - denom_poly = @fmadd@(denom_poly, x, exp_q0); - poly = _mm@vsize@_div_ps(num_poly, denom_poly); - - /* - * compute val = poly * 2^quadrant; which is same as adding the - * exponent of quadrant to the exponent of poly. quadrant is an int, - * so extracting exponent is simply extracting 8 bits. 
- */ - poly = @isa@_scalef_ps(poly, quadrant); - - /* - * elem > xmax; return inf - * elem < xmin; return 0.0f - * elem = +/- nan, return nan - */ - poly = @isa@_set_masked_lanes_ps(poly, _mm@vsize@_set1_ps(NPY_NANF), nan_mask); - poly = @isa@_set_masked_lanes_ps(poly, inf, xmax_mask); - poly = @isa@_set_masked_lanes_ps(poly, zeros_f, xmin_mask); - - @masked_store@(op, @cvtps_epi32@(load_mask), poly); - - ip += num_lanes*stride; - op += num_lanes; - num_remaining_elements -= num_lanes; - } - - if (@mask_to_int@(overflow_mask)) { - npy_set_floatstatus_overflow(); - } -} - -/* - * Vectorized implementation of log using AVX2 and AVX512 - * 1) if x < 0.0f; return -NAN (invalid input) - * 2) Range reduction: y = x/2^k; - * a) y = normalized mantissa, k is the exponent (0.5 <= y < 1) - * 3) Compute log(y) = P/Q, ratio of 2 polynomials P and Q - * b) P = 5th order and Q = 5th order polynomials obtained from Remez's - * algorithm (mini-max polynomial approximation) - * 5) Compute log(x) = log(y) + k*ln(2) - * 6) Max ULP error measured across all 32-bit FP's = 3.83 (x = 0x3f486945) - * 7) Max relative error measured across all 32-bit FP's = 2.359E-07 (for same - * x = 0x3f486945) - */ - -static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void -@ISA@_log_FLOAT(npy_float * op, - npy_float * ip, - const npy_intp array_size, - const npy_intp steps) -{ - const npy_intp stride = steps/(npy_intp)sizeof(npy_float); - const npy_int num_lanes = @BYTES@/(npy_intp)sizeof(npy_float); - - /* - * Note: while generally indices are npy_intp, we ensure that our maximum index - * will fit in an int32 as a precondition for this function via - * IS_OUTPUT_BLOCKABLE_UNARY - */ - npy_int32 indexarr[16]; - for (npy_int32 ii = 0; ii < 16; ii++) { - indexarr[ii] = ii*stride; - } - - /* Load up frequently used constants */ - @vtype@ log_p0 = _mm@vsize@_set1_ps(NPY_COEFF_P0_LOGf); - @vtype@ log_p1 = _mm@vsize@_set1_ps(NPY_COEFF_P1_LOGf); - @vtype@ log_p2 = _mm@vsize@_set1_ps(NPY_COEFF_P2_LOGf); - @vtype@ log_p3 = _mm@vsize@_set1_ps(NPY_COEFF_P3_LOGf); - @vtype@ log_p4 = _mm@vsize@_set1_ps(NPY_COEFF_P4_LOGf); - @vtype@ log_p5 = _mm@vsize@_set1_ps(NPY_COEFF_P5_LOGf); - @vtype@ log_q0 = _mm@vsize@_set1_ps(NPY_COEFF_Q0_LOGf); - @vtype@ log_q1 = _mm@vsize@_set1_ps(NPY_COEFF_Q1_LOGf); - @vtype@ log_q2 = _mm@vsize@_set1_ps(NPY_COEFF_Q2_LOGf); - @vtype@ log_q3 = _mm@vsize@_set1_ps(NPY_COEFF_Q3_LOGf); - @vtype@ log_q4 = _mm@vsize@_set1_ps(NPY_COEFF_Q4_LOGf); - @vtype@ log_q5 = _mm@vsize@_set1_ps(NPY_COEFF_Q5_LOGf); - @vtype@ loge2 = _mm@vsize@_set1_ps(NPY_LOGE2f); - @vtype@ nan = _mm@vsize@_set1_ps(NPY_NANF); - @vtype@ neg_nan = _mm@vsize@_set1_ps(-NPY_NANF); - @vtype@ neg_inf = _mm@vsize@_set1_ps(-NPY_INFINITYF); - @vtype@ inf = _mm@vsize@_set1_ps(NPY_INFINITYF); - @vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f); - @vtype@ ones_f = _mm@vsize@_set1_ps(1.0f); - @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)indexarr); - @vtype@ poly, num_poly, denom_poly, exponent; - - @mask@ inf_mask, nan_mask, sqrt2_mask, zero_mask, negx_mask; - @mask@ invalid_mask = @isa@_get_partial_load_mask_ps(0, num_lanes); - @mask@ divide_by_zero_mask = invalid_mask; - @mask@ load_mask = @isa@_get_full_load_mask_ps(); - npy_intp num_remaining_elements = array_size; - - while (num_remaining_elements > 0) { - - if (num_remaining_elements < num_lanes) { - load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements, - num_lanes); - } - - @vtype@ x_in; - if (stride == 1) { - x_in = @isa@_masked_load_ps(load_mask, ip); - } - else { - x_in = 
@isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask); - } - - negx_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_LT_OQ); - zero_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_EQ_OQ); - inf_mask = _mm@vsize@_cmp_ps@vsub@(x_in, inf, _CMP_EQ_OQ); - nan_mask = _mm@vsize@_cmp_ps@vsub@(x_in, x_in, _CMP_NEQ_UQ); - divide_by_zero_mask = @or_masks@(divide_by_zero_mask, - @and_masks@(zero_mask, load_mask)); - invalid_mask = @or_masks@(invalid_mask, negx_mask); - - @vtype@ x = @isa@_set_masked_lanes_ps(x_in, zeros_f, negx_mask); - - /* set x = normalized mantissa */ - exponent = @isa@_get_exponent(x); - x = @isa@_get_mantissa(x); - - /* if x < sqrt(2) {exp = exp-1; x = 2*x} */ - sqrt2_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(NPY_SQRT1_2f), _CMP_LE_OQ); - x = @isa@_blend(x, _mm@vsize@_add_ps(x,x), sqrt2_mask); - exponent = @isa@_blend(exponent, - _mm@vsize@_sub_ps(exponent,ones_f), sqrt2_mask); - - /* x = x - 1 */ - x = _mm@vsize@_sub_ps(x, ones_f); - - /* Polynomial approximation for log(1+x) */ - num_poly = @fmadd@(log_p5, x, log_p4); - num_poly = @fmadd@(num_poly, x, log_p3); - num_poly = @fmadd@(num_poly, x, log_p2); - num_poly = @fmadd@(num_poly, x, log_p1); - num_poly = @fmadd@(num_poly, x, log_p0); - denom_poly = @fmadd@(log_q5, x, log_q4); - denom_poly = @fmadd@(denom_poly, x, log_q3); - denom_poly = @fmadd@(denom_poly, x, log_q2); - denom_poly = @fmadd@(denom_poly, x, log_q1); - denom_poly = @fmadd@(denom_poly, x, log_q0); - poly = _mm@vsize@_div_ps(num_poly, denom_poly); - poly = @fmadd@(exponent, loge2, poly); - - /* - * x < 0.0f; return -NAN - * x = +/- NAN; return NAN - * x = 0.0f; return -INF - */ - poly = @isa@_set_masked_lanes_ps(poly, nan, nan_mask); - poly = @isa@_set_masked_lanes_ps(poly, neg_nan, negx_mask); - poly = @isa@_set_masked_lanes_ps(poly, neg_inf, zero_mask); - poly = @isa@_set_masked_lanes_ps(poly, inf, inf_mask); - - @masked_store@(op, @cvtps_epi32@(load_mask), poly); - - ip += num_lanes*stride; - op += num_lanes; - num_remaining_elements -= num_lanes; - } - - if (@mask_to_int@(invalid_mask)) { - npy_set_floatstatus_invalid(); - } - if (@mask_to_int@(divide_by_zero_mask)) { - npy_set_floatstatus_divbyzero(); - } -} -#endif -/**end repeat**/ - -/* - * Vectorized implementation of exp double using AVX512 - * Reference: Tang, P.T.P., "Table-driven implementation of the - * exponential function in IEEE floating-point - * arithmetic," ACM Transactions on Mathematical - * Software, vol. 15, pp. 144-157, 1989. 
- * 1) if x > mTH_max or x is INF; return INF (overflow) - * 2) if x < mTH_min; return 0.0f (underflow) - * 3) if abs(x) < mTH_nearzero; return 1.0f + x - * 4) if x is Nan; return Nan - * 5) Range reduction: - * x = (32m + j)ln2 / 32 + r; r in [-ln2/64, ln2/64] - * 6) exp(r) - 1 is approximated by a polynomial function p(r) - * exp(x) = 2^m(2^(j/32) + 2^(j/32)p(r)); - */ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS -#if !(defined(__clang__) && (__clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 1))) -static NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F void -AVX512F_exp_DOUBLE(npy_double * op, - npy_double * ip, - const npy_intp array_size, - const npy_intp steps) -{ - npy_intp num_remaining_elements = array_size; - const npy_intp stride = steps / (npy_intp)sizeof(npy_double); - const npy_int num_lanes = 64 / (npy_intp)sizeof(npy_double); - npy_int32 indexarr[8]; - for (npy_int32 ii = 0; ii < 8; ii++) { - indexarr[ii] = ii*stride; - } - - __m512d InvLn2N = _mm512_set1_pd(NPY_INV_LN2_MUL_32); - __m512d mShift = _mm512_set1_pd(NPY_RINT_CVT_MAGIC); - __m512d mNegL1 = _mm512_set1_pd(NPY_TANG_NEG_L1); - __m512d mNegL2 = _mm512_set1_pd(NPY_TANG_NEG_L2); - __m512i mMod = _mm512_set1_epi64(0x1f); - __m512d mA1 = _mm512_set1_pd(NPY_TANG_A1); - __m512d mA2 = _mm512_set1_pd(NPY_TANG_A2); - __m512d mA3 = _mm512_set1_pd(NPY_TANG_A3); - __m512d mA4 = _mm512_set1_pd(NPY_TANG_A4); - __m512d mA5 = _mm512_set1_pd(NPY_TANG_A5); - __m512d mTH_nearzero = _mm512_set1_pd(0x1p-54); - __m512d mTH_max = _mm512_set1_pd(0x1.62e42fefa39efp+9); - __m512d mTH_min = _mm512_set1_pd(-0x1.74910d52d3053p+9); - __m512d mTH_inf = _mm512_set1_pd(NPY_INFINITY); - __m512d zeros_d = _mm512_set1_pd(0.0f); - __m512d ones_d = _mm512_set1_pd(1.0f); - __m256i vindex = _mm256_loadu_si256((__m256i*)&indexarr[0]); - - __m512d mTable_top_0 = _mm512_loadu_pd(&(EXP_Table_top[8*0])); - __m512d mTable_top_1 = _mm512_loadu_pd(&(EXP_Table_top[8*1])); - __m512d mTable_top_2 = _mm512_loadu_pd(&(EXP_Table_top[8*2])); - __m512d mTable_top_3 = _mm512_loadu_pd(&(EXP_Table_top[8*3])); - __m512d mTable_tail_0 = _mm512_loadu_pd(&(EXP_Table_tail[8*0])); - __m512d mTable_tail_1 = _mm512_loadu_pd(&(EXP_Table_tail[8*1])); - __m512d mTable_tail_2 = _mm512_loadu_pd(&(EXP_Table_tail[8*2])); - __m512d mTable_tail_3 = _mm512_loadu_pd(&(EXP_Table_tail[8*3])); - - __mmask8 overflow_mask = avx512_get_partial_load_mask_pd(0, num_lanes); - __mmask8 load_mask = avx512_get_full_load_mask_pd(); - __mmask8 xmin_mask, xmax_mask, inf_mask, nan_mask, nearzero_mask; - - while (num_remaining_elements > 0) { - if (num_remaining_elements < num_lanes) { - load_mask = avx512_get_partial_load_mask_pd(num_remaining_elements, - num_lanes); - } - - __m512d x; - if (1 == stride) { - x = avx512_masked_load_pd(load_mask, ip); - } - else { - x = avx512_masked_gather_pd(zeros_d, ip, vindex, load_mask); - } - - nan_mask = _mm512_cmp_pd_mask(x, x, _CMP_NEQ_UQ); - x = avx512_set_masked_lanes_pd(x, zeros_d, nan_mask); - xmax_mask = _mm512_cmp_pd_mask(x, mTH_max, _CMP_GT_OQ); - xmin_mask = _mm512_cmp_pd_mask(x, mTH_min, _CMP_LT_OQ); - inf_mask = _mm512_cmp_pd_mask(x, mTH_inf, _CMP_EQ_OQ); - __m512i x_abs = _mm512_and_epi64(_mm512_castpd_si512(x), - _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF)); - nearzero_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(x_abs), - mTH_nearzero, _CMP_LT_OQ); - nearzero_mask = _mm512_kxor(nearzero_mask, nan_mask); - overflow_mask = _mm512_kor(overflow_mask, - _mm512_kxor(xmax_mask, inf_mask)); - x = avx512_set_masked_lanes_pd(x, zeros_d, - 
_mm512_kor(_mm512_kor(nan_mask, xmin_mask), - _mm512_kor(xmax_mask, nearzero_mask))); - - /* z = x * 32/ln2 */ - __m512d z = _mm512_mul_pd(x, InvLn2N); - - /* round to nearest */ - __m512d kd = _mm512_add_pd(z, mShift); - __m512i ki = _mm512_castpd_si512(kd); - kd = _mm512_sub_pd(kd, mShift); - - /* r = (x + kd*mNegL1) + kd*mNegL2 */ - __m512d r1 = _mm512_fmadd_pd(kd, mNegL1, x); - __m512d r2 = _mm512_mul_pd(kd, mNegL2); - __m512d r = _mm512_add_pd(r1,r2); - - /* Polynomial approximation for exp(r) - 1 */ - __m512d q = _mm512_fmadd_pd(mA5, r, mA4); - q = _mm512_fmadd_pd(q, r, mA3); - q = _mm512_fmadd_pd(q, r, mA2); - q = _mm512_fmadd_pd(q, r, mA1); - q = _mm512_mul_pd(q, r); - __m512d p = _mm512_fmadd_pd(r, q, r2);; - p = _mm512_add_pd(r1, p); - - /* Get 2^(j/32) from lookup table */ - __m512i j = _mm512_and_epi64(ki, mMod); - __m512d top = avx512_permute_x4var_pd(mTable_top_0, mTable_top_1, - mTable_top_2, mTable_top_3, j); - __m512d tail = avx512_permute_x4var_pd(mTable_tail_0, mTable_tail_1, - mTable_tail_2, mTable_tail_3, j); - - /* - * s = top + tail; - * exp(x) = 2^m * (top + (tail + s * p)); - */ - __m512d s = _mm512_add_pd(top, tail); - __m512d res = _mm512_fmadd_pd(s, p, tail); - res = _mm512_add_pd(res, top); - res= _mm512_scalef_pd(res, _mm512_div_pd(kd, _mm512_set1_pd(32))); - - /* return special cases */ - res = avx512_set_masked_lanes_pd(res, _mm512_add_pd(x, ones_d), - nearzero_mask); - res = avx512_set_masked_lanes_pd(res, _mm512_set1_pd(NPY_NAN), - nan_mask); - res = avx512_set_masked_lanes_pd(res, mTH_inf, xmax_mask); - res = avx512_set_masked_lanes_pd(res, zeros_d, xmin_mask); - - _mm512_mask_storeu_pd(op, load_mask, res); - - ip += num_lanes * stride; - op += num_lanes; - num_remaining_elements -= num_lanes; - } - if (overflow_mask) { - npy_set_floatstatus_overflow(); - } -} -#endif -#endif - -/* - * Vectorized implementation of log double using AVX512 - * Reference: - * [1] Tang, Ping Tak Peter. Table-lookup algorithms for elementary functions - * and their error analysis. No. CONF-9106103-1. Argonne National Lab., - * IL (USA), 1991. - * [2] Tang, Ping-Tak Peter. "Table-driven implementation of the logarithm - * function in IEEE floating-point arithmetic." ACM Transactions on - * Mathematical Software (TOMS) 16.4 (1990): 378-400. - * [3] Muller, Jean-Michel. "Elementary functions: algorithms and - * implementation." (2016). - * 1) if x = 0; return -INF - * 2) if x < 0; return NAN - * 3) if x is INF; return INF - * 4) if x is NAN; return NAN - * 5) if x on (1.0 - 0x1p-4, 1.0 + 0x1.09p-4), calling npy_log() - * 6) Range reduction: - * log(x) = log(2^m * z) - * = mln2 + log(z) - * 7) log(z) = log(z / c_k) + log(c_k); - * where c_k = 1 + k/64, k = 0,1,...,64 - * s.t. |x - c_k| <= 1/128 when x on[1,2]. - * 8) r = 2(x - c_k)/(x + c_k) - * log(x/c_k) = log((1 + r/2) / (1 - r/2)) - * = p(r) - * = 2((r/2) + 1/3*(r/2)^3 + 1/5*(r/2)^5 + ...) 
- */ -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS -#if !(defined(__clang__) && (__clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 1))) -static NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F void -AVX512F_log_DOUBLE(npy_double * op, - npy_double * ip, - const npy_intp array_size, - const npy_intp steps) -{ - npy_intp num_remaining_elements = array_size; - const npy_intp stride = steps / (npy_intp)sizeof(npy_double); - const npy_int num_lanes = 64 / (npy_intp)sizeof(npy_double); - npy_int32 indexarr[8]; - for (npy_int32 ii = 0; ii < 8; ii++) { - indexarr[ii] = ii*stride; - } - - __m512d zeros_d = _mm512_set1_pd(0.0f); - __m512d ones_d = _mm512_set1_pd(1.0f); - __m512d mInf = _mm512_set1_pd(NPY_INFINITY); - __m512d mInv64 = (__m512d)(_mm512_set1_epi64(0x3f90000000000000)); - __m512d mNeg_nan = _mm512_set1_pd(-NPY_NAN); - __m512d mNan = _mm512_set1_pd(NPY_NAN); - __m512d mNeg_inf = _mm512_set1_pd(-NPY_INFINITY); - __m512d mA1 = _mm512_set1_pd(NPY_TANG_LOG_A1); - __m512d mA2 = _mm512_set1_pd(NPY_TANG_LOG_A2); - __m512d mA3 = _mm512_set1_pd(NPY_TANG_LOG_A3); - __m512d mA4 = _mm512_set1_pd(NPY_TANG_LOG_A4); - __m512d mLN2HI = _mm512_set1_pd(NPY_TANG_LOG_LN2HI); - __m512d mLN2LO = _mm512_set1_pd(NPY_TANG_LOG_LN2LO); - - __m512d mTo_glibc_min = _mm512_set1_pd(1.0 - 0x1p-4); - __m512d mTo_glibc_max = _mm512_set1_pd(1.0 + 0x1.09p-4); - __m256i vindex = _mm256_loadu_si256((__m256i*)&indexarr[0]); - - /* Load lookup table data */ - /**begin repeat - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - - __m512d mLUT_TOP_@i@ = _mm512_loadu_pd(&(LOG_TABLE_TOP[8*@i@])); - __m512d mLUT_TAIL_@i@ = _mm512_loadu_pd(&(LOG_TABLE_TAIL[8*@i@])); - - /**end repeat**/ - - __mmask8 load_mask = avx512_get_full_load_mask_pd(); - __mmask8 invalid_mask = avx512_get_partial_load_mask_pd(0, num_lanes); - __mmask8 divide_by_zero_mask = invalid_mask; - - __mmask8 inf_mask, nan_mask, zero_mask, negx_mask, denormal_mask, - glibc_mask; - - __m512d x_in; - while (num_remaining_elements > 0) { - if (num_remaining_elements < num_lanes) { - load_mask = avx512_get_partial_load_mask_pd(num_remaining_elements, - num_lanes); - } - - if (1 == stride) { - x_in = avx512_masked_load_pd(load_mask, ip); - } - else { - x_in = avx512_masked_gather_pd(zeros_d, ip, vindex, load_mask); - } - - /* call glibc when x on [1.0 - 0x1p-4, 1.0 + 0x1.09p-4] */ - __mmask8 m1 = _mm512_cmp_pd_mask(x_in, mTo_glibc_max, _CMP_LT_OQ); - __mmask8 m2 = _mm512_cmp_pd_mask(x_in, mTo_glibc_min, _CMP_GT_OQ); - glibc_mask = m1 & m2; - - if (glibc_mask != 0xFF) { - zero_mask = _mm512_cmp_pd_mask(x_in, zeros_d, _CMP_EQ_OQ); - inf_mask = _mm512_cmp_pd_mask(x_in, mInf, _CMP_EQ_OQ); - negx_mask = _mm512_cmp_pd_mask(x_in, zeros_d, _CMP_LT_OQ); - nan_mask = _mm512_cmp_pd_mask(x_in, x_in, _CMP_NEQ_UQ); - - divide_by_zero_mask = divide_by_zero_mask | (zero_mask & load_mask); - invalid_mask = invalid_mask | negx_mask; - - __m512d x = avx512_set_masked_lanes_pd(x_in, zeros_d, negx_mask); - __m512i ix = (__m512i)x; - - /* Normalize x when it is denormal */ - __m512i top12 = _mm512_and_epi64(ix, - _mm512_set1_epi64(0xfff0000000000000)); - denormal_mask = _mm512_cmp_epi64_mask(top12, _mm512_set1_epi64(0), - _CMP_EQ_OQ); - denormal_mask = (~zero_mask) & denormal_mask; - ix = (__m512i)_mm512_mask_mul_pd(x, denormal_mask, - x, _mm512_set1_pd(0x1p52)); - ix = _mm512_mask_sub_epi64(ix, denormal_mask, - ix, _mm512_set1_epi64(52ULL << 52)); - - /* - * x = 2^k * z; where z in range [1,2] - */ - __m512i tmp = _mm512_sub_epi64(ix, - _mm512_set1_epi64(0x3ff0000000000000)); - __m512i i 
= _mm512_and_epi64(_mm512_srai_epi64(tmp, 52 - 6), - _mm512_set1_epi64(0x3fULL)); - __m512i ik = _mm512_srai_epi64(tmp, 52); - __m512d z = (__m512d)(_mm512_sub_epi64(ix, _mm512_and_epi64(tmp, - _mm512_set1_epi64(0xfff0000000000000)))); - /* c = i/64 + 1 */ - __m256i i_32 = _mm512_cvtepi64_epi32(i); - __m512d c = _mm512_fmadd_pd(_mm512_cvtepi32_pd(i_32), mInv64, ones_d); - - /* u = 2 * (z - c) / (z + c) */ - __m512d u = _mm512_div_pd(_mm512_sub_pd(z, c), _mm512_add_pd(z, c)); - u = _mm512_mul_pd(_mm512_set1_pd(2.0), u); - - /* v = u * u */ - __m512d v = _mm512_mul_pd(u,u); - - /* log(z/c) = u + u*v*(A1 + v*(A2 + v*(A3 + v*A4))) */ - __m512d res = _mm512_fmadd_pd(v, mA4, mA3); - res = _mm512_fmadd_pd(v, res, mA2); - res = _mm512_fmadd_pd(v, res, mA1); - res = _mm512_mul_pd(v, res); - res = _mm512_fmadd_pd(u, res, u); - - /* Load lookup table data */ - __m512d c_hi = avx512_permute_x8var_pd(mLUT_TOP_0, mLUT_TOP_1, - mLUT_TOP_2, mLUT_TOP_3, mLUT_TOP_4, mLUT_TOP_5, - mLUT_TOP_6, mLUT_TOP_7, i); - __m512d c_lo = avx512_permute_x8var_pd(mLUT_TAIL_0, mLUT_TAIL_1, - mLUT_TAIL_2, mLUT_TAIL_3, mLUT_TAIL_4, mLUT_TAIL_5, - mLUT_TAIL_6, mLUT_TAIL_7, i); - - /* - * log(x) = k * ln2_hi + c_hi + - * k * ln2_lo + c_lo + - * log(z/c) - */ - __m256i ik_32 = _mm512_cvtepi64_epi32(ik); - __m512d k = _mm512_cvtepi32_pd(ik_32); - __m512d tt = _mm512_fmadd_pd(k, mLN2HI, c_hi); - __m512d tt2 = _mm512_fmadd_pd(k, mLN2LO, c_lo); - tt = _mm512_add_pd(tt, tt2); - res = _mm512_add_pd(tt, res); - - /* return special cases */ - res = avx512_set_masked_lanes_pd(res, mNan, nan_mask); - res = avx512_set_masked_lanes_pd(res, mNeg_nan, negx_mask); - res = avx512_set_masked_lanes_pd(res, mNeg_inf, zero_mask); - res = avx512_set_masked_lanes_pd(res, mInf, inf_mask); - - _mm512_mask_storeu_pd(op, load_mask, res); - } - - /* call glibc's log func when x around 1.0f */ - if (glibc_mask != 0) { - double NPY_DECL_ALIGNED(64) ip_fback[8]; - _mm512_store_pd(ip_fback, x_in); - - for (int ii = 0; ii < 8; ++ii, glibc_mask >>= 1) { - if (glibc_mask & 0x01) { - op[ii] = npy_log(ip_fback[ii]); - } - } - } - ip += num_lanes * stride; - op += num_lanes; - num_remaining_elements -= num_lanes; - } - - if (invalid_mask) { - npy_set_floatstatus_invalid(); - } - if (divide_by_zero_mask) { - npy_set_floatstatus_divbyzero(); - } -} -#endif -#endif - /**begin repeat * #TYPE = CFLOAT, CDOUBLE# * #type = npy_float, npy_double# From aa8984364ebf19cbcce5d64e354fd02d9dc531cd Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 7 Dec 2020 16:23:25 -0600 Subject: [PATCH 0314/1270] MAINT: Move modref definition into setup.py It seems the only reason this did not cause problems before was that the test checking for C warnings only ran on a run where these were always 0 and thus undefined (and so were not redefined later). The `#ifndef` would have to happen at a later time apparently, so just avoid it. 
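As a rough illustration of the pattern this change adopts (a hypothetical sketch, not the actual NumPy build code; only the `moredefs` name and the flag name are taken from the setup.py diff below), emitting an explicit 0/1 value from setup.py means the C headers never need an `#ifndef` fallback:

    # Hypothetical sketch: always emit an explicit value for each build-time flag,
    # mirroring the moredefs pattern in numpy/core/setup.py shown below.
    NPY_RELAXED_STRIDES_CHECKING = True   # would normally come from the build environment

    moredefs = []
    if NPY_RELAXED_STRIDES_CHECKING:
        moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
    else:
        moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 0))

    # Written into a generated config header; because the macro now always has a
    # value, npy_common.h no longer needs an #ifndef default for it.
    with open('config_sketch.h', 'w') as f:
        for name, value in moredefs:
            f.write('#define %s %d\n' % (name, value))

With the macro guaranteed to be defined at compile time, a plain `#if NPY_RELAXED_STRIDES_CHECKING` in C code can no longer silently fall back to a stale default.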
--- numpy/core/include/numpy/npy_common.h | 10 ---------- numpy/core/setup.py | 6 ++++++ 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h index c8495db8e58f..d5f329b66754 100644 --- a/numpy/core/include/numpy/npy_common.h +++ b/numpy/core/include/numpy/npy_common.h @@ -10,16 +10,6 @@ #include #endif -// compile time environment variables -#ifndef NPY_RELAXED_STRIDES_CHECKING - #define NPY_RELAXED_STRIDES_CHECKING 0 -#endif -#ifndef NPY_RELAXED_STRIDES_DEBUG - #define NPY_RELAXED_STRIDES_DEBUG 0 -#endif -#ifndef NPY_USE_NEW_CASTINGIMPL - #define NPY_USE_NEW_CASTINGIMPL 0 -#endif /* * using static inline modifiers when defining npy_math functions * allows the compiler to make optimizations when possible diff --git a/numpy/core/setup.py b/numpy/core/setup.py index dfb26c9c1108..b73e55eb60f2 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -468,14 +468,20 @@ def generate_config_h(ext, build_dir): # Use relaxed stride checking if NPY_RELAXED_STRIDES_CHECKING: moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) + else: + moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 0)) # Use bogus stride debug aid when relaxed strides are enabled if NPY_RELAXED_STRIDES_DEBUG: moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) + else: + moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 0)) # Use the new experimental casting implementation in NumPy 1.20: if NPY_USE_NEW_CASTINGIMPL: moredefs.append(('NPY_USE_NEW_CASTINGIMPL', 1)) + else: + moredefs.append(('NPY_USE_NEW_CASTINGIMPL', 0)) # Get long double representation rep = check_long_double_representation(config_cmd) From 732b3a50fa60b9fe063bbcf43d89636799349ddc Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 8 Jan 2021 15:51:50 +0100 Subject: [PATCH 0315/1270] DOC: address review comments on sponsorship NEP --- doc/neps/nep-0046-sponsorship-guidelines.rst | 29 ++++++++++++++------ 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/doc/neps/nep-0046-sponsorship-guidelines.rst b/doc/neps/nep-0046-sponsorship-guidelines.rst index a9c2d85a919b..88f40536ba85 100644 --- a/doc/neps/nep-0046-sponsorship-guidelines.rst +++ b/doc/neps/nep-0046-sponsorship-guidelines.rst @@ -31,7 +31,7 @@ helpful when searching for new support. This NEP is aimed at both the NumPy community - who can use it when looking for support and acknowledging existing support - and at past, current and prospective sponsors, who often want or need to know what they get in return -for their support (other than a healthier NumPy). +for their support other than a healthier NumPy. The scope of this proposal includes: @@ -58,13 +58,26 @@ The minimum level of support given to NumPy to be considered a Sponsor are: - $60,000/yr for financial contributions for a particular purpose - $100,000/yr for in-kind contributions -The rationale for the above levels is that unrestricted financial contributions -are typically the most valuable for the project, and the hardest to obtain. -The opposite is true for in-kind contributions. The dollar value of the levels -also reflect that NumPy's needs have grown to the point where we need at least -a few paid developers in order to effectively support our user base and -continue to move the project forward. Financial support at or above these -levels is needed to be able to make a significant difference. 
+We define support being active as: + +- for a one-off donation: it was received within the previous 12 months +- for a grant or other support with a defined start and end date: the duration +  of a grant period. + +After support moves from "active" to "inactive" status, the acknowledgement +will be left in its place for at least another 6 months. If appropriate, the +funding team can discuss opportunities for renewal with the sponsor. After +those 6 months, acknowledgement may be moved to the historical overview. The +exact timing of this move is at the discretion of the funding team, because +there may be reasons to keep it in the more prominent place for longer. + +The rationale for the above funding levels is that unrestricted financial +contributions are typically the most valuable for the project, and the hardest +to obtain. The opposite is true for in-kind contributions. The dollar value of +the levels also reflect that NumPy's needs have grown to the point where we +need at least a few paid developers in order to effectively support our user +base and continue to move the project forward. Financial support at or above +these levels is needed to be able to make a significant difference. Sponsors will get acknowledged through: From 90f4b0838b44fabd89bcdd128d5707e7526d9d94 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 4 Nov 2020 18:54:41 -0600 Subject: [PATCH 0316/1270] MAINT: Implement new casting loops based on NEP 42 and 43 switch to using new casting as default to run full CI Initialize casting to silence compile warnings only... BUG: As my comment even said, out_needs_api is caller-initialized so I shouldn't init it to 0, it might disable api again. Simplify tests again, it seems unnecessary to go this complex Simplify `#if` as the variable should always be defined TST: Add more tests (and some related fixes) This fixes a few small things (i.e. the decref loop can be NULL and checking for writeable was incorrect in the private API). Mostly adds new tests for some internal API as well as for the copy of the auxiliary data.
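As a rough illustration of that internal API (a hypothetical sketch, not part of the patch itself: it relies on the private `_get_castingimpl` helper together with the `_resolve_descriptors` and `_simple_strided_call` methods added or exercised here, and these names, argument conventions, and return values are unstable internals that can differ between NumPy versions):

    import numpy as np
    from numpy.core._multiarray_umath import _get_castingimpl  # private, unstable helper

    # Bound cast implementation for float64 -> float32; the helper is assumed to
    # take the DType *classes* rather than dtype instances.
    cast = _get_castingimpl(type(np.dtype("f8")), type(np.dtype("f4")))

    # Resolve the concrete descriptors; the result carries the casting safety
    # level together with the resolved dtype instances.
    resolved = cast._resolve_descriptors((np.dtype("f8"), np.dtype("f4")))

    # Run the cast loop directly on 1-D arrays of equal length: inputs first,
    # then a pre-allocated, writeable output.
    src = np.arange(5, dtype="f8")
    dst = np.empty(5, dtype="f4")
    cast._simple_strided_call((src, dst))
    # dst now holds [0., 1., 2., 3., 4.] as float32

This is essentially what the new test files listed in the diff below exercise across many dtype combinations, including the multistep casts that go through intermediate buffers.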
The nditer test however also is a heavyweight tests for the general multistep casting --- numpy/core/setup.py | 1 + .../core/src/common/lowlevel_strided_loops.h | 82 ++ numpy/core/src/multiarray/array_method.c | 202 ++++- numpy/core/src/multiarray/array_method.h | 15 + numpy/core/src/multiarray/convert_datatype.c | 356 +++++++- numpy/core/src/multiarray/convert_datatype.h | 10 + numpy/core/src/multiarray/datetime.c | 123 ++- numpy/core/src/multiarray/dtype_transfer.c | 816 +++++++++++++++++- numpy/core/src/multiarray/dtype_transfer.h | 27 + .../multiarray/lowlevel_strided_loops.c.src | 2 + numpy/core/src/multiarray/usertypes.c | 4 +- numpy/core/tests/test_arraymethod.py | 58 ++ numpy/core/tests/test_casting_unittests.py | 342 +++++++- numpy/core/tests/test_nditer.py | 68 ++ 14 files changed, 2046 insertions(+), 60 deletions(-) create mode 100644 numpy/core/src/multiarray/dtype_transfer.h create mode 100644 numpy/core/tests/test_arraymethod.py diff --git a/numpy/core/setup.py b/numpy/core/setup.py index b73e55eb60f2..822f9f580d44 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -794,6 +794,7 @@ def get_mathlib_info(*args): join('src', 'multiarray', 'ctors.h'), join('src', 'multiarray', 'descriptor.h'), join('src', 'multiarray', 'dtypemeta.h'), + join('src', 'multiarray', 'dtype_transfer.h'), join('src', 'multiarray', 'dragon4.h'), join('src', 'multiarray', 'einsum_debug.h'), join('src', 'multiarray', 'einsum_sumprod.h'), diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h index 12aa61822a14..bda9bb5e4ed0 100644 --- a/numpy/core/src/common/lowlevel_strided_loops.h +++ b/numpy/core/src/common/lowlevel_strided_loops.h @@ -196,6 +196,88 @@ PyArray_GetDTypeTransferFunction(int aligned, NpyAuxData **out_transferdata, int *out_needs_api); + +/* Same as above, but only wraps copyswapn or legacy cast functions */ +NPY_NO_EXPORT int +PyArray_GetLegacyDTypeTransferFunction(int aligned, + npy_intp src_stride, npy_intp dst_stride, + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + int move_references, + PyArray_StridedUnaryOp **out_stransfer, + NpyAuxData **out_transferdata, + int *out_needs_api, int wrap_if_unaligned); + +/* Specialized dtype transfer functions */ +NPY_NO_EXPORT int +get_nbo_cast_datetime_transfer_function(int aligned, + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + PyArray_StridedUnaryOp **out_stransfer, + NpyAuxData **out_transferdata); + +NPY_NO_EXPORT int +get_nbo_datetime_to_string_transfer_function( + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + PyArray_StridedUnaryOp **out_stransfer, NpyAuxData **out_transferdata); + +NPY_NO_EXPORT int +get_datetime_to_unicode_transfer_function(int aligned, + npy_intp src_stride, npy_intp dst_stride, + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + PyArray_StridedUnaryOp **out_stransfer, + NpyAuxData **out_transferdata, + int *out_needs_api); + +NPY_NO_EXPORT int +get_nbo_string_to_datetime_transfer_function( + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + PyArray_StridedUnaryOp **out_stransfer, NpyAuxData **out_transferdata); + +NPY_NO_EXPORT int +get_unicode_to_datetime_transfer_function(int aligned, + npy_intp src_stride, npy_intp dst_stride, + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + PyArray_StridedUnaryOp **out_stransfer, + NpyAuxData **out_transferdata, + int *out_needs_api); + +NPY_NO_EXPORT int +get_fields_transfer_function(int aligned, + npy_intp src_stride, npy_intp dst_stride, + PyArray_Descr *src_dtype, 
PyArray_Descr *dst_dtype, + int move_references, + PyArray_StridedUnaryOp **out_stransfer, + NpyAuxData **out_transferdata, + int *out_needs_api); + +NPY_NO_EXPORT int +get_subarray_transfer_function(int aligned, + npy_intp src_stride, npy_intp dst_stride, + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + int move_references, + PyArray_StridedUnaryOp **out_stransfer, + NpyAuxData **out_transferdata, + int *out_needs_api); + +NPY_NO_EXPORT int +_strided_to_strided_move_references(char *dst, npy_intp dst_stride, + char *src, npy_intp src_stride, + npy_intp N, npy_intp src_itemsize, + NpyAuxData *data); + +NPY_NO_EXPORT int +_strided_to_strided_copy_references(char *dst, npy_intp dst_stride, + char *src, npy_intp src_stride, + npy_intp N, npy_intp src_itemsize, + NpyAuxData *data); + +NPY_NO_EXPORT int +wrap_aligned_contig_transfer_function_with_copyswapn( + int aligned, npy_intp src_stride, npy_intp dst_stride, + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + PyArray_StridedUnaryOp **out_stransfer, NpyAuxData **out_transferdata, + int *out_needs_api, + PyArray_StridedUnaryOp *caststransfer, NpyAuxData *castdata); + /* * This is identical to PyArray_GetDTypeTransferFunction, but returns a * transfer function which also takes a mask as a parameter. The mask is used diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c index cae452454811..38284fac20cb 100644 --- a/numpy/core/src/multiarray/array_method.c +++ b/numpy/core/src/multiarray/array_method.c @@ -121,6 +121,19 @@ default_resolve_descriptors( } +NPY_INLINE static int +is_contiguous( + npy_intp const *strides, PyArray_Descr *const *descriptors, int nargs) +{ + for (int i = 0; i < nargs; i++) { + if (strides[i] != descriptors[i]->elsize) { + return 0; + } + } + return 1; +} + + /** * The default method to fetch the correct loop for a cast or ufunc * (at the time of writing only casts). @@ -138,18 +151,36 @@ default_resolve_descriptors( * @param flags * @return 0 on success -1 on failure. 
*/ -static int -default_get_strided_loop( - PyArrayMethod_Context *NPY_UNUSED(context), - int NPY_UNUSED(aligned), int NPY_UNUSED(move_references), - npy_intp *NPY_UNUSED(strides), - PyArray_StridedUnaryOp **NPY_UNUSED(out_loop), - NpyAuxData **NPY_UNUSED(out_transferdata), - NPY_ARRAYMETHOD_FLAGS *NPY_UNUSED(flags)) +NPY_NO_EXPORT int +npy_default_get_strided_loop( + PyArrayMethod_Context *context, + int aligned, int NPY_UNUSED(move_references), npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) { - PyErr_SetString(PyExc_NotImplementedError, - "default loop getter is not implemented"); - return -1; + PyArray_Descr **descrs = context->descriptors; + PyArrayMethodObject *meth = context->method; + *flags = meth->flags & NPY_METH_RUNTIME_FLAGS; + *out_transferdata = NULL; + + int nargs = meth->nin + meth->nout; + if (aligned) { + if (meth->contiguous_loop == NULL || + !is_contiguous(strides, descrs, nargs)) { + *out_loop = meth->strided_loop; + return 0; + } + *out_loop = meth->contiguous_loop; + } + else { + if (meth->unaligned_contiguous_loop == NULL || + !is_contiguous(strides, descrs, nargs)) { + *out_loop = meth->unaligned_strided_loop; + return 0; + } + *out_loop = meth->unaligned_contiguous_loop; + } + return 0; } @@ -225,7 +256,7 @@ fill_arraymethod_from_slots( PyArrayMethodObject *meth = res->method; /* Set the defaults */ - meth->get_strided_loop = &default_get_strided_loop; + meth->get_strided_loop = &npy_default_get_strided_loop; meth->resolve_descriptors = &default_resolve_descriptors; /* Fill in the slots passed by the user */ @@ -295,7 +326,7 @@ fill_arraymethod_from_slots( } } } - if (meth->get_strided_loop != &default_get_strided_loop) { + if (meth->get_strided_loop != &npy_default_get_strided_loop) { /* Do not check the actual loop fields. */ return 0; } @@ -468,6 +499,9 @@ boundarraymethod_dealloc(PyObject *self) * May raise an error, but usually should not. * The function validates the casting attribute compared to the returned * casting level. + * + * TODO: This function is not public API, and certain code paths will need + * changes and especially testing if they were to be made public. */ static PyObject * boundarraymethod__resolve_descripors( @@ -481,7 +515,7 @@ boundarraymethod__resolve_descripors( if (!PyTuple_CheckExact(descr_tuple) || PyTuple_Size(descr_tuple) != nin + nout) { - PyErr_Format(PyExc_ValueError, + PyErr_Format(PyExc_TypeError, "_resolve_descriptors() takes exactly one tuple with as many " "elements as the method takes arguments (%d+%d).", nin, nout); return NULL; @@ -494,7 +528,7 @@ boundarraymethod__resolve_descripors( } else if (tmp == Py_None) { if (i < nin) { - PyErr_SetString(PyExc_ValueError, + PyErr_SetString(PyExc_TypeError, "only output dtypes may be omitted (set to None)."); return NULL; } @@ -502,7 +536,7 @@ boundarraymethod__resolve_descripors( } else if (PyArray_DescrCheck(tmp)) { if (Py_TYPE(tmp) != (PyTypeObject *)self->dtypes[i]) { - PyErr_Format(PyExc_ValueError, + PyErr_Format(PyExc_TypeError, "input dtype %S was not an exact instance of the bound " "DType class %S.", tmp, self->dtypes[i]); return NULL; @@ -580,9 +614,145 @@ boundarraymethod__resolve_descripors( } +/* + * TODO: This function is not public API, and certain code paths will need + * changes and especially testing if they were to be made public. 
+ */ +static PyObject * +boundarraymethod__simple_strided_call( + PyBoundArrayMethodObject *self, PyObject *arr_tuple) +{ + PyArrayObject *arrays[NPY_MAXARGS]; + PyArray_Descr *descrs[NPY_MAXARGS]; + PyArray_Descr *out_descrs[NPY_MAXARGS]; + ssize_t length = -1; + int aligned = 1; + npy_intp strides[NPY_MAXARGS]; + int nin = self->method->nin; + int nout = self->method->nout; + + if (!PyTuple_CheckExact(arr_tuple) || + PyTuple_Size(arr_tuple) != nin + nout) { + PyErr_Format(PyExc_TypeError, + "_simple_strided_call() takes exactly one tuple with as many " + "arrays as the method takes arguments (%d+%d).", nin, nout); + return NULL; + } + + for (int i = 0; i < nin + nout; i++) { + PyObject *tmp = PyTuple_GetItem(arr_tuple, i); + if (tmp == NULL) { + return NULL; + } + else if (!PyArray_CheckExact(tmp)) { + PyErr_SetString(PyExc_TypeError, + "All inputs must be NumPy arrays."); + return NULL; + } + arrays[i] = (PyArrayObject *)tmp; + descrs[i] = PyArray_DESCR(arrays[i]); + + /* Check that the input is compatible with a simple method call. */ + if (Py_TYPE(descrs[i]) != (PyTypeObject *)self->dtypes[i]) { + PyErr_Format(PyExc_TypeError, + "input dtype %S was not an exact instance of the bound " + "DType class %S.", descrs[i], self->dtypes[i]); + return NULL; + } + if (PyArray_NDIM(arrays[i]) != 1) { + PyErr_SetString(PyExc_ValueError, + "All arrays must be one dimensional."); + return NULL; + } + if (i == 0) { + length = PyArray_SIZE(arrays[i]); + } + else if (PyArray_SIZE(arrays[i]) != length) { + PyErr_SetString(PyExc_ValueError, + "All arrays must have the same length."); + return NULL; + } + if (i >= nout) { + if (PyArray_FailUnlessWriteable( + arrays[i], "_simple_strided_call() output") < 0) { + return NULL; + } + } + + strides[i] = PyArray_STRIDES(arrays[i])[0]; + /* TODO: We may need to distinguish aligned and itemsize-aligned */ + aligned &= PyArray_ISALIGNED(arrays[i]); + } + if (!aligned && !(self->method->flags & NPY_METH_SUPPORTS_UNALIGNED)) { + PyErr_SetString(PyExc_ValueError, + "method does not support unaligned input."); + return NULL; + } + + NPY_CASTING casting = self->method->resolve_descriptors( + self->method, self->dtypes, descrs, out_descrs); + + if (casting < 0) { + PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; + PyErr_Fetch(&err_type, &err_value, &err_traceback); + PyErr_SetString(PyExc_TypeError, + "cannot perform method call with the given dtypes."); + npy_PyErr_ChainExceptions(err_type, err_value, err_traceback); + return NULL; + } + + int dtypes_were_adapted = 0; + for (int i = 0; i < nin + nout; i++) { + /* NOTE: This check is probably much stricter than necessary... */ + dtypes_were_adapted |= descrs[i] != out_descrs[i]; + Py_DECREF(out_descrs[i]); + } + if (dtypes_were_adapted) { + PyErr_SetString(PyExc_TypeError, + "_simple_strided_call(): requires dtypes to not require a cast " + "(must match exactly with `_resolve_descriptors()`)."); + return NULL; + } + + PyArrayMethod_Context context = { + .caller = NULL, + .method = self->method, + .descriptors = descrs, + }; + PyArray_StridedUnaryOp *strided_loop = NULL; + NpyAuxData *loop_data = NULL; + NPY_ARRAYMETHOD_FLAGS flags = 0; + + if (self->method->get_strided_loop( + &context, aligned, 0, strides, + &strided_loop, &loop_data, &flags) < 0) { + return NULL; + } + + /* + * TODO: Add floating point error checks if requested and + * possibly release GIL if allowed by the flags. + */ + /* TODO: strided_loop is currently a cast loop, this will change. 
*/ + int res = strided_loop( + PyArray_BYTES(arrays[1]), strides[1], + PyArray_BYTES(arrays[0]), strides[0], + length, descrs[0]->elsize, loop_data); + if (loop_data != NULL) { + loop_data->free(loop_data); + } + if (res < 0) { + return NULL; + } + Py_RETURN_NONE; +} + + PyMethodDef boundarraymethod_methods[] = { {"_resolve_descriptors", (PyCFunction)boundarraymethod__resolve_descripors, METH_O, "Resolve the given dtypes."}, + {"_simple_strided_call", (PyCFunction)boundarraymethod__simple_strided_call, + METH_O, "call on 1-d inputs and pre-allocated outputs (single call)."}, {NULL, 0, 0, NULL}, }; diff --git a/numpy/core/src/multiarray/array_method.h b/numpy/core/src/multiarray/array_method.h index 15ea948ce5fa..1fa8a9ba0290 100644 --- a/numpy/core/src/multiarray/array_method.h +++ b/numpy/core/src/multiarray/array_method.h @@ -144,6 +144,21 @@ extern NPY_NO_EXPORT PyTypeObject PyBoundArrayMethod_Type; #define NPY_METH_unaligned_contiguous_loop 6 +/* + * Used internally (initially) for real to complex loops only + */ +NPY_NO_EXPORT int +npy_default_get_strided_loop( + PyArrayMethod_Context *context, + int aligned, int NPY_UNUSED(move_references), npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags); + + +/* + * TODO: This function is the internal version, and its error paths may + * need better tests when a public version is exposed. + */ NPY_NO_EXPORT PyBoundArrayMethodObject * PyArrayMethod_FromSpec_int(PyArrayMethod_Spec *spec, int private); diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 5d5b69bd5c5b..5ee5f0c1686e 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -25,6 +25,7 @@ #include "datetime_strings.h" #include "array_method.h" #include "usertypes.h" +#include "dtype_transfer.h" /* @@ -61,7 +62,7 @@ PyArray_GetObjectToGenericCastingImpl(void); * @returns A castingimpl (PyArrayDTypeMethod *), None or NULL with an * error set. */ -static PyObject * +NPY_NO_EXPORT PyObject * PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) { PyObject *res = PyDict_GetItem(from->castingimpls, (PyObject *)to); @@ -1973,6 +1974,30 @@ legacy_same_dtype_resolve_descriptors( } +NPY_NO_EXPORT int +legacy_cast_get_strided_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArray_Descr **descrs = context->descriptors; + int out_needs_api = 0; + + *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; + + if (PyArray_GetLegacyDTypeTransferFunction( + aligned, strides[0], strides[1], descrs[0], descrs[1], + move_references, out_loop, out_transferdata, &out_needs_api, 0) < 0) { + return -1; + } + if (!out_needs_api) { + *flags &= ~NPY_METH_REQUIRES_PYAPI; + } + return 0; +} + + /* * Simple dtype resolver for casting between two different (non-parametric) * (legacy) dtypes. 
@@ -2012,10 +2037,77 @@ simple_cast_resolve_descriptors( } +NPY_NO_EXPORT int +get_byteswap_loop( + PyArrayMethod_Context *context, + int aligned, int NPY_UNUSED(move_references), npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArray_Descr **descrs = context->descriptors; + assert(descrs[0]->kind == descrs[1]->kind); + assert(descrs[0]->elsize == descrs[1]->elsize); + int itemsize = descrs[0]->elsize; + *flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + *out_transferdata = NULL; + if (descrs[0]->kind == 'c') { + /* + * TODO: we have an issue with complex, since the below loops + * use the itemsize, the complex alignment would be too small. + * Using aligned = 0, might cause slow downs in some cases. + */ + aligned = 0; + } + + if (PyDataType_ISNOTSWAPPED(descrs[0]) == + PyDataType_ISNOTSWAPPED(descrs[1])) { + *out_loop = PyArray_GetStridedCopyFn( + aligned, strides[0], strides[1], itemsize); + } + else if (!PyTypeNum_ISCOMPLEX(descrs[0]->type_num)) { + *out_loop = PyArray_GetStridedCopySwapFn( + aligned, strides[0], strides[1], itemsize); + } + else { + *out_loop = PyArray_GetStridedCopySwapPairFn( + aligned, strides[0], strides[1], itemsize); + } + if (*out_loop == NULL) { + return -1; + } + return 0; +} + + +NPY_NO_EXPORT int +complex_to_noncomplex_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + static PyObject *cls = NULL; + int ret; + npy_cache_import("numpy.core", "ComplexWarning", &cls); + if (cls == NULL) { + return -1; + } + ret = PyErr_WarnEx(cls, + "Casting complex values to real discards " + "the imaginary part", 1); + if (ret < 0) { + return -1; + } + return npy_default_get_strided_loop( + context, aligned, move_references, strides, + out_loop, out_transferdata, flags); +} + + static int add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) { - PyType_Slot slots[6]; + PyType_Slot slots[7]; PyArray_DTypeMeta *dtypes[2] = {from, to}; PyArrayMethod_Spec spec = { .name = "numeric_cast", @@ -2026,8 +2118,8 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) .dtypes = dtypes, }; - npy_intp from_itemsize = dtypes[0]->singleton->elsize; - npy_intp to_itemsize = dtypes[1]->singleton->elsize; + npy_intp from_itemsize = from->singleton->elsize; + npy_intp to_itemsize = to->singleton->elsize; slots[0].slot = NPY_METH_resolve_descriptors; slots[0].pfunc = &simple_cast_resolve_descriptors; @@ -2044,8 +2136,24 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) slots[4].slot = NPY_METH_unaligned_contiguous_loop; slots[4].pfunc = PyArray_GetStridedNumericCastFn( 0, from_itemsize, to_itemsize, from->type_num, to->type_num); - slots[5].slot = 0; - slots[5].pfunc = NULL; + if (PyTypeNum_ISCOMPLEX(from->type_num) && + !PyTypeNum_ISCOMPLEX(to->type_num) && + !PyTypeNum_ISBOOL(to->type_num)) { + /* + * The get_loop function must also give a ComplexWarning. We could + * consider moving this warning into the inner-loop at some point + * for simplicity (this requires ensuring it is only emitted once). + */ + slots[5].slot = NPY_METH_get_loop; + slots[5].pfunc = &complex_to_noncomplex_get_loop; + slots[6].slot = 0; + slots[6].pfunc = NULL; + } + else { + /* Use the default get loop function. 
*/ + slots[5].slot = 0; + slots[5].pfunc = NULL; + } assert(slots[1].pfunc && slots[2].pfunc && slots[3].pfunc && slots[4].pfunc); @@ -2057,7 +2165,7 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) slots[0].slot = NPY_METH_resolve_descriptors; slots[0].pfunc = &legacy_same_dtype_resolve_descriptors; slots[1].slot = NPY_METH_get_loop; - slots[1].pfunc = NULL; + slots[1].pfunc = &get_byteswap_loop; slots[2].slot = 0; slots[2].pfunc = NULL; @@ -2240,9 +2348,9 @@ add_other_to_and_from_string_cast( */ PyArray_DTypeMeta *dtypes[2] = {other, string}; PyType_Slot slots[] = { - {NPY_METH_get_loop, NULL}, + {NPY_METH_get_loop, &legacy_cast_get_strided_loop}, {NPY_METH_resolve_descriptors, &cast_to_string_resolve_descriptors}, - {0, NULL}}; + {0, NULL}}; PyArrayMethod_Spec spec = { .name = "legacy_cast_to_string", .nin = 1, @@ -2300,6 +2408,35 @@ string_to_string_resolve_descriptors( } +NPY_NO_EXPORT int +string_to_string_get_loop( + PyArrayMethod_Context *context, + int aligned, int NPY_UNUSED(move_references), npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + int unicode_swap = 0; + PyArray_Descr **descrs = context->descriptors; + + assert(NPY_DTYPE(descrs[0]) == NPY_DTYPE(descrs[1])); + *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; + if (descrs[0]->type_num == NPY_UNICODE) { + if (PyDataType_ISNOTSWAPPED(descrs[0]) != + PyDataType_ISNOTSWAPPED(descrs[1])) { + unicode_swap = 1; + } + } + + if (PyArray_GetStridedZeroPadCopyFn( + aligned, unicode_swap, strides[0], strides[1], + descrs[0]->elsize, descrs[1]->elsize, + out_loop, out_transferdata) == NPY_FAIL) { + return -1; + } + return 0; +} + + /* * Add string casts. Right now all string casts are just legacy-wrapped ones * (except string<->string and unicode<->unicode), but they do require @@ -2339,9 +2476,9 @@ PyArray_InitializeStringCasts(void) /* string<->string and unicode<->unicode have their own specialized casts */ PyArray_DTypeMeta *dtypes[2]; PyType_Slot slots[] = { - {NPY_METH_get_loop, NULL}, + {NPY_METH_get_loop, &string_to_string_get_loop}, {NPY_METH_resolve_descriptors, &string_to_string_resolve_descriptors}, - {0, NULL}}; + {0, NULL}}; PyArrayMethod_Spec spec = { .name = "string_to_string_cast", .casting = NPY_NO_CASTING, @@ -2489,6 +2626,61 @@ int give_bad_field_error(PyObject *key) } +static int +nonstructured_to_structured_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + if (context->descriptors[1]->names != NULL) { + int needs_api = 0; + if (get_fields_transfer_function( + aligned, strides[0], strides[1], + context->descriptors[0], context->descriptors[1], + move_references, out_loop, out_transferdata, + &needs_api) == NPY_FAIL) { + return -1; + } + *flags = needs_api ? NPY_METH_REQUIRES_PYAPI : 0; + } + else if (context->descriptors[1]->subarray != NULL) { + int needs_api = 0; + if (get_subarray_transfer_function( + aligned, strides[0], strides[1], + context->descriptors[0], context->descriptors[1], + move_references, out_loop, out_transferdata, + &needs_api) == NPY_FAIL) { + return -1; + } + *flags = needs_api ? NPY_METH_REQUIRES_PYAPI : 0; + } + else { + /* + * TODO: This could be a simple zero padded cast, adding a decref + * in case of `move_references`. But for now use legacy casts + * (which is the behaviour at least up to 1.20). 
+ */ + int needs_api = 0; + if (!aligned) { + /* We need to wrap if aligned is 0. Use a recursive call */ + + } + if (PyArray_GetLegacyDTypeTransferFunction( + 1, strides[0], strides[1], + context->descriptors[0], context->descriptors[1], + move_references, out_loop, out_transferdata, + &needs_api, 1) < 0) { + return -1; + } + *flags = needs_api ? NPY_METH_REQUIRES_PYAPI : 0; + } + return 0; +} + + static PyObject * PyArray_GetGenericToVoidCastingImpl(void) { @@ -2508,7 +2700,7 @@ PyArray_GetGenericToVoidCastingImpl(void) method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; method->casting = NPY_SAFE_CASTING; method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; - method->get_strided_loop = NULL; + method->get_strided_loop = &nonstructured_to_structured_get_loop; return (PyObject *)method; } @@ -2578,6 +2770,56 @@ structured_to_nonstructured_resolve_descriptors( } +static int +structured_to_nonstructured_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + if (context->descriptors[0]->names != NULL) { + int needs_api = 0; + if (get_fields_transfer_function( + aligned, strides[0], strides[1], + context->descriptors[0], context->descriptors[1], + move_references, out_loop, out_transferdata, + &needs_api) == NPY_FAIL) { + return -1; + } + *flags = needs_api ? NPY_METH_REQUIRES_PYAPI : 0; + } + else if (context->descriptors[0]->subarray != NULL) { + int needs_api = 0; + if (get_subarray_transfer_function( + aligned, strides[0], strides[1], + context->descriptors[0], context->descriptors[1], + move_references, out_loop, out_transferdata, + &needs_api) == NPY_FAIL) { + return -1; + } + *flags = needs_api ? NPY_METH_REQUIRES_PYAPI : 0; + } + else { + /* + * In general this is currently defined through legacy behaviour via + * scalars, and should likely just not be allowed. + */ + int needs_api = 0; + if (PyArray_GetLegacyDTypeTransferFunction( + aligned, strides[0], strides[1], + context->descriptors[0], context->descriptors[1], + move_references, out_loop, out_transferdata, + &needs_api, 1) < 0) { + return -1; + } + *flags = needs_api ? NPY_METH_REQUIRES_PYAPI : 0; + } + return 0; +} + + static PyObject * PyArray_GetVoidToGenericCastingImpl(void) { @@ -2597,7 +2839,7 @@ PyArray_GetVoidToGenericCastingImpl(void) method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; method->casting = NPY_UNSAFE_CASTING; method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; - method->get_strided_loop = NULL; + method->get_strided_loop = &structured_to_nonstructured_get_loop; return (PyObject *)method; } @@ -2761,6 +3003,56 @@ void_to_void_resolve_descriptors( } +NPY_NO_EXPORT int +void_to_void_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + if (context->descriptors[0]->names != NULL || + context->descriptors[1]->names != NULL) { + int needs_api = 0; + if (get_fields_transfer_function( + aligned, strides[0], strides[1], + context->descriptors[0], context->descriptors[1], + move_references, out_loop, out_transferdata, + &needs_api) == NPY_FAIL) { + return -1; + } + *flags = needs_api ? 
NPY_METH_REQUIRES_PYAPI : 0; + } + else if (context->descriptors[0]->subarray != NULL || + context->descriptors[1]->subarray != NULL) { + int needs_api = 0; + if (get_subarray_transfer_function( + aligned, strides[0], strides[1], + context->descriptors[0], context->descriptors[1], + move_references, out_loop, out_transferdata, + &needs_api) == NPY_FAIL) { + return -1; + } + *flags = needs_api ? NPY_METH_REQUIRES_PYAPI : 0; + } + else { + /* + * This is a string-like copy of the two bytes (zero padding if + * necessary) + */ + if (PyArray_GetStridedZeroPadCopyFn( + 0, 0, strides[0], strides[1], + context->descriptors[0]->elsize, context->descriptors[1]->elsize, + out_loop, out_transferdata) == NPY_FAIL) { + return -1; + } + *flags = 0; + } + return 0; +} + + /* * This initializes the void to void cast. Voids include structured dtypes, * which means that they can cast from and to any other dtype and, in that @@ -2772,9 +3064,9 @@ PyArray_InitializeVoidToVoidCast(void) PyArray_DTypeMeta *Void = PyArray_DTypeFromTypeNum(NPY_VOID); PyArray_DTypeMeta *dtypes[2] = {Void, Void}; PyType_Slot slots[] = { - {NPY_METH_get_loop, NULL}, + {NPY_METH_get_loop, &void_to_void_get_loop}, {NPY_METH_resolve_descriptors, &void_to_void_resolve_descriptors}, - {0, NULL}}; + {0, NULL}}; PyArrayMethod_Spec spec = { .name = "void_to_void_cast", .casting = NPY_NO_CASTING, @@ -2859,7 +3151,7 @@ PyArray_GetObjectToGenericCastingImpl(void) method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; method->casting = NPY_UNSAFE_CASTING; method->resolve_descriptors = &object_to_any_resolve_descriptors; - method->get_strided_loop = NULL; + method->get_strided_loop = &object_to_any_get_loop; return (PyObject *)method; } @@ -2915,12 +3207,38 @@ PyArray_GetGenericToObjectCastingImpl(void) method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; method->casting = NPY_SAFE_CASTING; method->resolve_descriptors = &any_to_object_resolve_descriptors; - method->get_strided_loop = NULL; + method->get_strided_loop = &any_to_object_get_loop; return (PyObject *)method; } +/* + * Casts within the object dtype is always just a plain copy/view. + * For that reason, this function might remain unimplemented. 
+ */ +static int +object_to_object_get_loop( + PyArrayMethod_Context *NPY_UNUSED(context), + int NPY_UNUSED(aligned), int move_references, + npy_intp *NPY_UNUSED(strides), + PyArray_StridedUnaryOp **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + *flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_NO_FLOATINGPOINT_ERRORS; + if (move_references) { + *out_loop = &_strided_to_strided_move_references; + *out_transferdata = NULL; + } + else { + *out_loop = &_strided_to_strided_copy_references; + *out_transferdata = NULL; + } + return 0; +} + + static int PyArray_InitializeObjectToObjectCast(void) { @@ -2931,8 +3249,8 @@ PyArray_InitializeObjectToObjectCast(void) PyArray_DTypeMeta *Object = PyArray_DTypeFromTypeNum(NPY_OBJECT); PyArray_DTypeMeta *dtypes[2] = {Object, Object}; PyType_Slot slots[] = { - {NPY_METH_get_loop, NULL}, - {0, NULL}}; + {NPY_METH_get_loop, &object_to_object_get_loop}, + {0, NULL}}; PyArrayMethod_Spec spec = { .name = "object_to_object_cast", .casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW, diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h index 97006b952543..a147dec3c780 100644 --- a/numpy/core/src/multiarray/convert_datatype.h +++ b/numpy/core/src/multiarray/convert_datatype.h @@ -5,6 +5,9 @@ extern NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[]; +NPY_NO_EXPORT PyObject * +PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to); + NPY_NO_EXPORT PyObject * _get_castingimpl(PyObject *NPY_UNUSED(module), PyObject *args); @@ -73,6 +76,13 @@ legacy_same_dtype_resolve_descriptors( PyArray_Descr **given_descrs, PyArray_Descr **loop_descrs); +NPY_NO_EXPORT int +legacy_cast_get_strided_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags); + NPY_NO_EXPORT NPY_CASTING simple_cast_resolve_descriptors( PyArrayMethodObject *self, diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index 9c1b606bb564..6962159447cb 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -3803,6 +3803,47 @@ time_to_time_resolve_descriptors( } +static int +time_to_time_get_loop( + PyArrayMethod_Context *context, + int aligned, int NPY_UNUSED(move_references), npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + int requires_wrap = 0; + int inner_aligned = aligned; + PyArray_Descr **descrs = context->descriptors; + *flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if (!PyDataType_ISNOTSWAPPED(descrs[0]) || + !PyDataType_ISNOTSWAPPED(descrs[1])) { + inner_aligned = 1; + requires_wrap = 1; + } + if (get_nbo_cast_datetime_transfer_function( + inner_aligned, descrs[0], descrs[1], + out_loop, out_transferdata) == NPY_FAIL) { + return -1; + } + + if (!requires_wrap) { + return 0; + } + + int needs_api = 0; + NpyAuxData *castdata = *out_transferdata; + if (wrap_aligned_contig_transfer_function_with_copyswapn( + aligned, strides[0], strides[1], descrs[0], descrs[1], + out_loop, out_transferdata, &needs_api, + *out_loop, castdata) == NPY_FAIL) { + NPY_AUXDATA_FREE(castdata); + return -1; + } + assert(needs_api == 0); + return 0; +} + + /* Handles datetime<->timedelta type resolution (both directions) */ static NPY_CASTING datetime_to_timedelta_resolve_descriptors( @@ -3844,9 +3885,7 @@ time_to_string_resolve_descriptors( PyArray_Descr 
**given_descrs, PyArray_Descr **loop_descrs) { - Py_INCREF(given_descrs[0]); - loop_descrs[0] = given_descrs[0]; - if (given_descrs[1] != NULL) { + if (given_descrs[1] != NULL && dtypes[0]->type_num == NPY_DATETIME) { /* * At the time of writing, NumPy does not check the length here, * but will error if filling fails. @@ -3863,6 +3902,10 @@ time_to_string_resolve_descriptors( size = get_datetime_iso_8601_strlen(0, meta->base); } else { + /* + * This is arguably missing space for the unit, e.g. for: + * `np.timedelta64(1231234342124, 'ms')` + */ size = 21; } if (dtypes[1]->type_num == NPY_UNICODE) { @@ -3870,15 +3913,47 @@ time_to_string_resolve_descriptors( } loop_descrs[1] = PyArray_DescrNewFromType(dtypes[1]->type_num); if (loop_descrs[1] == NULL) { - Py_DECREF(loop_descrs[0]); return -1; } loop_descrs[1]->elsize = size; } + + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + assert(self->casting == NPY_UNSAFE_CASTING); return NPY_UNSAFE_CASTING; } +static int +time_to_string_get_loop( + PyArrayMethod_Context *context, + int aligned, int NPY_UNUSED(move_references), npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArray_Descr **descrs = context->descriptors; + *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; + + if (descrs[1]->type_num == NPY_STRING) { + if (get_nbo_datetime_to_string_transfer_function( + descrs[0], descrs[1], + out_loop, out_transferdata) == NPY_FAIL) { + return -1; + } + } + else { + assert(descrs[1]->type_num == NPY_UNICODE); + int out_needs_api; + if (get_datetime_to_unicode_transfer_function( + aligned, strides[0], strides[1], descrs[0], descrs[1], + out_loop, out_transferdata, &out_needs_api) == NPY_FAIL) { + return -1; + } + } + return 0; +} + static NPY_CASTING string_to_datetime_cast_resolve_descriptors( @@ -3908,6 +3983,36 @@ string_to_datetime_cast_resolve_descriptors( } +static int +string_to_datetime_cast_get_loop( + PyArrayMethod_Context *context, + int aligned, int NPY_UNUSED(move_references), npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArray_Descr **descrs = context->descriptors; + *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; + + if (descrs[0]->type_num == NPY_STRING) { + if (get_nbo_string_to_datetime_transfer_function( + descrs[0], descrs[1], out_loop, out_transferdata) == NPY_FAIL) { + return -1; + } + } + else { + assert(descrs[0]->type_num == NPY_UNICODE); + int out_needs_api; + if (get_unicode_to_datetime_transfer_function( + aligned, strides[0], strides[1], descrs[0], descrs[1], + out_loop, out_transferdata, &out_needs_api) == NPY_FAIL) { + return -1; + } + } + return 0; +} + + + /* * This registers the castingimpl for all datetime related casts. */ @@ -3930,7 +4035,7 @@ PyArray_InitializeDatetimeCasts() slots[0].slot = NPY_METH_resolve_descriptors; slots[0].pfunc = &time_to_time_resolve_descriptors; slots[1].slot = NPY_METH_get_loop; - slots[1].pfunc = NULL; + slots[1].pfunc = &time_to_time_get_loop; slots[2].slot = 0; slots[2].pfunc = NULL; @@ -3955,10 +4060,12 @@ PyArray_InitializeDatetimeCasts() * Casting between timedelta and datetime uses legacy casting loops, but * custom dtype resolution (to handle copying of the time unit). 
*/ + spec.flags = NPY_METH_REQUIRES_PYAPI; + slots[0].slot = NPY_METH_resolve_descriptors; slots[0].pfunc = &datetime_to_timedelta_resolve_descriptors; slots[1].slot = NPY_METH_get_loop; - slots[1].pfunc = NULL; + slots[1].pfunc = &legacy_cast_get_strided_loop; slots[2].slot = 0; slots[2].pfunc = NULL; @@ -4030,7 +4137,7 @@ PyArray_InitializeDatetimeCasts() slots[0].slot = NPY_METH_resolve_descriptors; slots[0].pfunc = &time_to_string_resolve_descriptors; slots[1].slot = NPY_METH_get_loop; - slots[1].pfunc = NULL; + slots[1].pfunc = &time_to_string_get_loop; slots[2].slot = 0; slots[2].pfunc = NULL; @@ -4070,7 +4177,7 @@ PyArray_InitializeDatetimeCasts() slots[0].slot = NPY_METH_resolve_descriptors; slots[0].pfunc = &string_to_datetime_cast_resolve_descriptors; slots[1].slot = NPY_METH_get_loop; - slots[1].pfunc = NULL; + slots[1].pfunc = &string_to_datetime_cast_get_loop; slots[2].slot = 0; slots[2].pfunc = NULL; diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index 630bd76f3955..9b8e5f32f816 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -28,8 +28,11 @@ #include "array_assign.h" #include "shape.h" -#include "lowlevel_strided_loops.h" +#include "dtype_transfer.h" #include "alloc.h" +#include "dtypemeta.h" +#include "array_method.h" +#include "array_coercion.h" #define NPY_LOWLEVEL_BUFFER_BLOCKSIZE 128 @@ -105,7 +108,7 @@ get_bool_setdstone_transfer_function(npy_intp dst_stride, /*************************** COPY REFERENCES *******************************/ /* Moves references from src to dst */ -static int +NPY_NO_EXPORT int _strided_to_strided_move_references(char *dst, npy_intp dst_stride, char *src, npy_intp src_stride, npy_intp N, npy_intp src_itemsize, @@ -134,7 +137,7 @@ _strided_to_strided_move_references(char *dst, npy_intp dst_stride, } /* Copies references from src to dst */ -static int +NPY_NO_EXPORT int _strided_to_strided_copy_references(char *dst, npy_intp dst_stride, char *src, npy_intp src_stride, npy_intp N, npy_intp src_itemsize, @@ -161,6 +164,217 @@ _strided_to_strided_copy_references(char *dst, npy_intp dst_stride, return 0; } +/************************** ANY TO OBJECT *********************************/ + +typedef struct { + NpyAuxData base; + PyArray_GetItemFunc *getitem; + PyArrayObject_fields arr_fields; + PyArray_StridedUnaryOp *decref_func; + NpyAuxData *decref_data; +} _any_to_object_auxdata; + + +static void +_any_to_object_auxdata_free(NpyAuxData *auxdata) +{ + _any_to_object_auxdata *data = (_any_to_object_auxdata *)auxdata; + + Py_DECREF(data->arr_fields.descr); + NPY_AUXDATA_FREE(data->decref_data); + PyMem_Free(data); +} + + +static NpyAuxData * +_any_to_object_auxdata_clone(NpyAuxData *auxdata) +{ + _any_to_object_auxdata *data = (_any_to_object_auxdata *)auxdata; + + _any_to_object_auxdata *res = PyMem_Malloc(sizeof(_any_to_object_auxdata)); + + memcpy(res, data, sizeof(*data)); + Py_INCREF(res->arr_fields.descr); + if (res->decref_data != NULL) { + res->decref_data = NPY_AUXDATA_CLONE(data->decref_data); + if (res->decref_data == NULL) { + NPY_AUXDATA_FREE((NpyAuxData *) res); + return NULL; + } + } + return (NpyAuxData *)res; +} + + +static int +_strided_to_strided_any_to_object(char *dst, npy_intp dst_stride, + char *src, npy_intp src_stride, + npy_intp N, npy_intp src_itemsize, + NpyAuxData *auxdata) +{ + _any_to_object_auxdata *data = (_any_to_object_auxdata *)auxdata; + + PyObject *dst_ref = NULL; + char *orig_src = src; + while (N > 0) { 
+        memcpy(&dst_ref, dst, sizeof(dst_ref));
+        Py_XDECREF(dst_ref);
+        dst_ref = data->getitem(src, &data->arr_fields);
+        memcpy(dst, &dst_ref, sizeof(PyObject *));
+
+        if (dst_ref == NULL) {
+            return -1;
+        }
+        src += src_stride;
+        dst += dst_stride;
+        --N;
+    }
+    if (data->decref_func != NULL) {
+        /* If necessary, clear the input buffer (`move_references`) */
+        if (data->decref_func(NULL, 0, orig_src, src_stride, N,
+                src_itemsize, data->decref_data) < 0) {
+            return -1;
+        }
+    }
+    return 0;
+}
+
+
+NPY_NO_EXPORT int
+any_to_object_get_loop(
+        PyArrayMethod_Context *context,
+        int aligned, int move_references,
+        npy_intp *strides,
+        PyArray_StridedUnaryOp **out_loop,
+        NpyAuxData **out_transferdata,
+        NPY_ARRAYMETHOD_FLAGS *flags)
+{
+
+    *flags = NPY_METH_REQUIRES_PYAPI;  /* No need for floating point errors */
+
+    *out_loop = _strided_to_strided_any_to_object;
+    *out_transferdata = PyMem_Malloc(sizeof(_any_to_object_auxdata));
+    if (*out_transferdata == NULL) {
+        return -1;
+    }
+    _any_to_object_auxdata *data = (_any_to_object_auxdata *)*out_transferdata;
+    data->base.free = &_any_to_object_auxdata_free;
+    data->base.clone = &_any_to_object_auxdata_clone;
+    data->arr_fields.base = NULL;
+    data->arr_fields.descr = context->descriptors[0];
+    Py_INCREF(data->arr_fields.descr);
+    data->arr_fields.flags = aligned ? NPY_ARRAY_ALIGNED : 0;
+    data->arr_fields.nd = 0;
+
+    data->getitem = context->descriptors[0]->f->getitem;
+    data->decref_func = NULL;
+    data->decref_data = NULL;
+
+    if (move_references && PyDataType_REFCHK(context->descriptors[0])) {
+        int needs_api;
+        if (get_decsrcref_transfer_function(
+                aligned, strides[0], context->descriptors[0],
+                &data->decref_func, &data->decref_data,
+                &needs_api) == NPY_FAIL) {
+            NPY_AUXDATA_FREE(*out_transferdata);
+            *out_transferdata = NULL;
+            return -1;
+        }
+    }
+    return 0;
+}
+
+
+/************************** OBJECT TO ANY *********************************/
+
+typedef struct {
+    NpyAuxData base;
+    PyArray_Descr *descr;
+    int move_references;
+} _object_to_any_auxdata;
+
+
+static void
+_object_to_any_auxdata_free(NpyAuxData *auxdata)
+{
+    _object_to_any_auxdata *data = (_object_to_any_auxdata *)auxdata;
+    Py_DECREF(data->descr);
+    PyMem_Free(data);
+}
+
+static NpyAuxData *
+_object_to_any_auxdata_clone(NpyAuxData *data)
+{
+    _object_to_any_auxdata *res = PyMem_Malloc(sizeof(*res));
+    if (res == NULL) {
+        return NULL;
+    }
+    memcpy(res, data, sizeof(*res));
+    Py_INCREF(res->descr);
+    return (NpyAuxData *)res;
+}
+
+
+static int
+strided_to_strided_object_to_any(
+        char *dst, npy_intp dst_stride,
+        char *src, npy_intp src_stride,
+        npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
+        NpyAuxData *auxdata)
+{
+    _object_to_any_auxdata *data = (_object_to_any_auxdata *)auxdata;
+
+    PyObject *src_ref;
+
+    while (N > 0) {
+        memcpy(&src_ref, src, sizeof(src_ref));
+        if (PyArray_Pack(data->descr, dst, src_ref) < 0) {
+            return -1;
+        }
+
+        if (data->move_references) {
+            Py_DECREF(src_ref);
+            memset(src, 0, sizeof(src_ref));
+        }
+
+        N--;
+        dst += dst_stride;
+        src += src_stride;
+    }
+    return 0;
+}
+
+
+NPY_NO_EXPORT int
+object_to_any_get_loop(
+        PyArrayMethod_Context *context,
+        int NPY_UNUSED(aligned), int move_references,
+        npy_intp *NPY_UNUSED(strides),
+        PyArray_StridedUnaryOp **out_loop,
+        NpyAuxData **out_transferdata,
+        NPY_ARRAYMETHOD_FLAGS *flags)
+{
+    *flags = NPY_METH_REQUIRES_PYAPI;
+
+    /*
+     * TODO: After passing `context`, auxdata can be statically allocated
+     *       since `descriptor` is always passed.
+ */ + _object_to_any_auxdata *data = PyMem_Malloc(sizeof(*data)); + if (data == NULL) { + return -1; + } + data->base.free = &_object_to_any_auxdata_free; + data->base.clone = &_object_to_any_auxdata_clone; + + Py_INCREF(context->descriptors[1]); + data->descr = context->descriptors[1]; + data->move_references = move_references; + *out_transferdata = (NpyAuxData *)data; + *out_loop = &strided_to_strided_object_to_any; + return 0; +} + /************************** ZERO-PADDED COPY ******************************/ @@ -725,6 +939,7 @@ _aligned_contig_to_contig_cast(char *dst, npy_intp NPY_UNUSED(dst_stride), return 0; } +#if !NPY_USE_NEW_CASTINGIMPL static int get_nbo_cast_numeric_transfer_function(int aligned, npy_intp src_stride, npy_intp dst_stride, @@ -762,6 +977,7 @@ get_nbo_cast_numeric_transfer_function(int aligned, return NPY_SUCCEED; } +#endif /* * Does a datetime->datetime, timedelta->timedelta, @@ -1464,6 +1680,7 @@ get_legacy_dtype_cast_function( } +#if !NPY_USE_NEW_CASTINGIMPL static int get_nbo_cast_transfer_function(int aligned, npy_intp src_stride, npy_intp dst_stride, @@ -1548,6 +1765,7 @@ get_nbo_cast_transfer_function(int aligned, move_references, out_stransfer, out_transferdata, out_needs_api, out_needs_wrap); } +#endif NPY_NO_EXPORT int @@ -1619,6 +1837,7 @@ wrap_aligned_contig_transfer_function_with_copyswapn( } +#if !NPY_USE_NEW_CASTINGIMPL static int get_cast_transfer_function(int aligned, npy_intp src_stride, npy_intp dst_stride, @@ -1661,6 +1880,7 @@ get_cast_transfer_function(int aligned, caststransfer, castdata); } } +#endif /**************************** COPY 1 TO N CONTIGUOUS ************************/ @@ -2411,7 +2631,7 @@ get_subarray_broadcast_transfer_function(int aligned, * Handles subarray transfer. To call this, at least one of the dtype's * subarrays must be non-NULL */ -static int +NPY_NO_EXPORT int get_subarray_transfer_function(int aligned, npy_intp src_stride, npy_intp dst_stride, PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, @@ -2621,7 +2841,7 @@ _strided_to_strided_field_transfer(char *dst, npy_intp dst_stride, * Handles fields transfer. To call this, at least one of the dtypes * must have fields. Does not take care of object<->structure conversion */ -static int +NPY_NO_EXPORT int get_fields_transfer_function(int aligned, npy_intp src_stride, npy_intp dst_stride, PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, @@ -3511,8 +3731,9 @@ PyArray_GetDTypeCopySwapFn(int aligned, /********************* MAIN DTYPE TRANSFER FUNCTION ***********************/ -NPY_NO_EXPORT int -PyArray_GetDTypeTransferFunction(int aligned, +#if !NPY_USE_NEW_CASTINGIMPL +static int +PyArray_LegacyGetDTypeTransferFunction(int aligned, npy_intp src_stride, npy_intp dst_stride, PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, int move_references, @@ -3756,6 +3977,544 @@ PyArray_GetDTypeTransferFunction(int aligned, out_stransfer, out_transferdata, out_needs_api); } +#endif + +/* + * ********************* Generalized Multistep Cast ************************ + * + * New general purpose multiple step cast function when resolve descriptors + * implies that multiple cast steps are necessary. + */ +#if NPY_USE_NEW_CASTINGIMPL + +/* + * The full context passed in is never the correct context for each + * individual cast, so we have to store each of these casts information. + * Certain fields may be undefined (currently, the `caller`). 
+ */ +typedef struct { + PyArray_StridedUnaryOp *stransfer; + NpyAuxData *auxdata; + PyArrayMethod_Context context; + PyArray_Descr *descriptors[2]; +} _cast_info; + +typedef struct { + NpyAuxData base; + /* Information for main cast */ + _cast_info main; + /* Information for input preparation cast */ + _cast_info from; + /* Information for output finalization cast */ + _cast_info to; + char *from_buffer; + char *to_buffer; +} _multistep_castdata; + + +static NPY_INLINE void +_cast_info_free(_cast_info *cast_info) +{ + NPY_AUXDATA_FREE(cast_info->auxdata); + Py_DECREF(cast_info->descriptors[0]); + Py_DECREF(cast_info->descriptors[1]); + Py_DECREF(cast_info->context.method); +} + + +/* zero-padded data copy function */ +static void +_multistep_cast_auxdata_free(NpyAuxData *auxdata) +{ + _multistep_castdata *data = (_multistep_castdata *)auxdata; + _cast_info_free(&data->main); + if (data->from.stransfer != NULL) { + _cast_info_free(&data->from); + } + if (data->to.stransfer != NULL) { + _cast_info_free(&data->to); + } + PyMem_Free(data); +} + + +static NpyAuxData * +_multistep_cast_auxdata_clone(NpyAuxData *auxdata_old); + +static NpyAuxData * +_multistep_cast_auxdata_clone_int(NpyAuxData *auxdata_old, int move_auxdata) +{ + _multistep_castdata *castdata = (_multistep_castdata *)auxdata_old; + + /* Round up the structure size to 16-byte boundary for the buffers */ + ssize_t datasize = (sizeof(_multistep_castdata) + 15) & ~0xf; + + ssize_t from_buffer_offset = datasize; + if (castdata->from.stransfer != NULL) { + ssize_t src_itemsize = castdata->main.context.descriptors[0]->elsize; + datasize += NPY_LOWLEVEL_BUFFER_BLOCKSIZE * src_itemsize; + datasize = (datasize + 15) & ~0xf; + } + ssize_t to_buffer_offset = datasize; + if (castdata->to.stransfer != NULL) { + ssize_t dst_itemsize = castdata->main.context.descriptors[1]->elsize; + datasize += NPY_LOWLEVEL_BUFFER_BLOCKSIZE * dst_itemsize; + } + + char *char_data = PyMem_Malloc(datasize); + if (char_data == NULL) { + return NULL; + } + + _multistep_castdata *auxdata = (_multistep_castdata *)char_data; + + /* Copy the prepared old and fix it up internal pointers */ + memcpy(char_data, castdata, sizeof(*castdata)); + + auxdata->from_buffer = char_data + from_buffer_offset; + auxdata->to_buffer = char_data + to_buffer_offset; + + auxdata->main.context.descriptors = auxdata->main.descriptors; + auxdata->from.context.descriptors = auxdata->from.descriptors; + auxdata->to.context.descriptors = auxdata->to.descriptors; + + auxdata->base.free = &_multistep_cast_auxdata_free; + auxdata->base.clone = &_multistep_cast_auxdata_clone; + + /* Hold on to references and initialize buffers if necessary. */ + Py_INCREF(auxdata->main.descriptors[0]); + Py_INCREF(auxdata->main.descriptors[1]); + Py_INCREF(auxdata->main.context.method); + + if (!move_auxdata) { + /* Ensure we don't free twice on error: */ + auxdata->from.auxdata = NULL; + auxdata->to.auxdata = NULL; + + if (castdata->main.auxdata != NULL) { + auxdata->main.auxdata = NPY_AUXDATA_CLONE(castdata->main.auxdata); + if (auxdata->main.auxdata == NULL) { + NPY_AUXDATA_FREE((NpyAuxData *)auxdata); + return NULL; + } + } + } + else { + /* Clear the original, to avoid double free. 
*/ + castdata->main.auxdata = NULL; + castdata->from.auxdata = NULL; + castdata->to.auxdata = NULL; + } + + if (castdata->from.stransfer != NULL) { + Py_INCREF(auxdata->from.descriptors[0]); + Py_INCREF(auxdata->from.descriptors[1]); + Py_INCREF(auxdata->from.context.method); + if (PyDataType_FLAGCHK(auxdata->main.descriptors[0], NPY_NEEDS_INIT)) { + memset(auxdata->from_buffer, 0, to_buffer_offset - from_buffer_offset); + } + if (!move_auxdata && castdata->from.auxdata != NULL) { + auxdata->from.auxdata = NPY_AUXDATA_CLONE(castdata->from.auxdata); + if (auxdata->from.auxdata == NULL) { + NPY_AUXDATA_FREE((NpyAuxData *)auxdata); + return NULL; + } + } + } + if (castdata->to.stransfer != NULL) { + Py_INCREF(auxdata->to.descriptors[0]); + Py_INCREF(auxdata->to.descriptors[1]); + Py_INCREF(auxdata->to.context.method); + if (PyDataType_FLAGCHK(auxdata->main.descriptors[1], NPY_NEEDS_INIT)) { + memset(auxdata->to_buffer, 0, datasize - to_buffer_offset); + } + if (!move_auxdata && castdata->to.auxdata != NULL) { + auxdata->to.auxdata = NPY_AUXDATA_CLONE(castdata->to.auxdata); + if (auxdata->to.auxdata == NULL) { + NPY_AUXDATA_FREE((NpyAuxData *)auxdata); + return NULL; + } + } + } + + return (NpyAuxData *)auxdata; +} + +static NpyAuxData * +_multistep_cast_auxdata_clone(NpyAuxData *auxdata_old) +{ + return _multistep_cast_auxdata_clone_int(auxdata_old, 0); +} + + +static int +_strided_to_strided_multistep_cast( + char *dst, npy_intp dst_stride, + char *src, npy_intp src_stride, + npy_intp N, npy_intp src_itemsize, + NpyAuxData *data) +{ + _multistep_castdata *castdata = (_multistep_castdata *)data; + + char *main_src, *main_dst; + npy_intp main_src_stride, main_dst_stride, main_src_itemsize; + + npy_intp block_size = NPY_LOWLEVEL_BUFFER_BLOCKSIZE; + while (N > 0) { + if (block_size > N) { + block_size = N; + } + + if (castdata->from.stransfer != NULL) { + npy_intp out_stride = castdata->from.descriptors[1]->elsize; + if (castdata->from.stransfer( + castdata->from_buffer, out_stride, src, src_stride, + block_size, src_itemsize, castdata->from.auxdata)) { + /* TODO: Internal buffer may require cleanup on error. */ + return -1; + } + main_src = castdata->from_buffer; + main_src_stride = out_stride; + main_src_itemsize = out_stride; + } + else { + main_src = src; + main_src_stride = src_stride; + main_src_itemsize = src_itemsize; + } + + if (castdata->to.stransfer != NULL) { + main_dst = castdata->to_buffer; + main_dst_stride = castdata->main.descriptors[1]->elsize; + } + else { + main_dst = dst; + main_dst_stride = dst_stride; + } + + if (castdata->main.stransfer( + main_dst, main_dst_stride, main_src, main_src_stride, + block_size, main_src_itemsize, castdata->main.auxdata)) { + /* TODO: Internal buffer may require cleanup on error. */ + return -1; + } + + if (castdata->to.stransfer != NULL) { + if (castdata->to.stransfer( + dst, dst_stride, main_dst, main_dst_stride, + block_size, main_dst_stride, castdata->to.auxdata)) { + return -1; + } + } + + N -= block_size; + src += block_size * src_stride; + dst += block_size * dst_stride; + } + return 0; +} + + +/* + * Initialize most of a cast-info structure, this step does not fetch the + * transferfunction and transferdata. 
+ */ +static NPY_INLINE int +init_cast_info(_cast_info *cast_info, NPY_CASTING *casting, + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, int main_step) +{ + PyObject *meth = PyArray_GetCastingImpl( + NPY_DTYPE(src_dtype), NPY_DTYPE(dst_dtype)); + if (meth == NULL) { + return -1; + } + if (meth == Py_None) { + Py_DECREF(Py_None); + PyErr_Format(PyExc_TypeError, + "Cannot cast data from %S to %S.", src_dtype, dst_dtype); + return -1; + } + /* Initialize the context and related data */ + cast_info->context.caller = NULL; + cast_info->stransfer = NULL; + cast_info->auxdata = NULL; + + cast_info->context.method = (PyArrayMethodObject *)meth; + cast_info->context.descriptors = cast_info->descriptors; + + PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(src_dtype), NPY_DTYPE(dst_dtype)}; + PyArray_Descr *in_descr[2] = {src_dtype, dst_dtype}; + + *casting = cast_info->context.method->resolve_descriptors( + cast_info->context.method, dtypes, in_descr, cast_info->descriptors); + if (NPY_UNLIKELY(*casting < 0)) { + if (!PyErr_Occurred()) { + PyErr_Format(PyExc_TypeError, + "Cannot cast data from %S to %S.", src_dtype, dst_dtype); + Py_DECREF(meth); + return -1; + } + } + + if (!main_step && NPY_UNLIKELY(src_dtype != cast_info->descriptors[0] || + dst_dtype != cast_info->descriptors[1])) { + /* + * We currently do not resolve recursively, but require a non + * main cast (within the same DType) to be done in a single step. + * This could be expanded at some point if the need arises. + */ + PyErr_Format(PyExc_RuntimeError, + "Required internal cast from %R to %R was not done in a single " + "step (a secondary cast must currently be between instances of " + "the same DType class and such a cast must currently return " + "the input descriptors unmodified).", + src_dtype, dst_dtype); + _cast_info_free(cast_info); + return -1; + } + + return 0; +} + + +/* + * Helper for PyArray_GetDTypeTransferFunction, which fetches a single + * transfer function from the each casting implementation (ArrayMethod). + * May set the transfer function to NULL when the cast can be achieved using + * a view. + * The `out_needs_api` flag must be initialized. + * + * NOTE: In theory casting errors here could be slightly misleading in case + * of a multi-step casting scenario. It should be possible to improve + * this in the future. + * + * Note about `move_references`: Move references means stealing of + * references. It is useful to clear buffers immediately. No matter the + * input all copies from a buffer must use `move_references`. Move references + * is thus used: + * * For the added initial "from" cast if it was passed in + * * Always in the main step if a "from" cast is made (it casts from a buffer) + * * Always for the "to" cast, as it always cast from a buffer to the output. 
+ * + * Returns -1 on failure, 0 on success + */ +static int +get_transferfunction_for_descrs( + int aligned, + npy_intp src_stride, npy_intp dst_stride, + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + int move_references, + PyArray_StridedUnaryOp **out_stransfer, + NpyAuxData **out_transferdata, + int *out_needs_api) +{ + *out_transferdata = NULL; /* ensure NULL on error */ + /* Storage for all cast info in case multi-step casting is necessary */ + _multistep_castdata castdata; + /* Initialize secondary `stransfer` to indicate whether they are used: */ + castdata.to.stransfer = NULL; + castdata.from.stransfer = NULL; + NPY_CASTING casting = -1; + int res = -1; + + if (init_cast_info(&castdata.main, &casting, src_dtype, dst_dtype, 1) < 0) { + return -1; + } + + /* + * Both input and output must be wrapped in case they may be unaligned + * and the method does not support unaligned data. + * NOTE: It is probable that most/all legacy loops actually do support + * unaligned output, we could move the wrapping there if we wanted + * to. It probably isn't speed relevant though and they should be + * deleted in any case. + */ + int must_wrap = (!aligned && + (castdata.main.context.method->flags & NPY_METH_SUPPORTS_UNALIGNED) == 0); + + /* + * Wrap the input with an additional cast if necessary. + */ + if (NPY_UNLIKELY(src_dtype != castdata.main.descriptors[0] || must_wrap)) { + NPY_CASTING from_casting = -1; + /* Cast function may not support the input, wrap if necessary */ + if (init_cast_info( + &castdata.from, &from_casting, + src_dtype, castdata.main.descriptors[0], 0) < 0) { + goto fail; + } + casting = PyArray_MinCastSafety(casting, from_casting); + + /* Prepare the actual cast (if necessary): */ + if (from_casting & _NPY_CAST_IS_VIEW && !must_wrap) { + /* This step is not necessary and can be skipped. */ + _cast_info_free(&castdata.from); + } + else { + /* Fetch the cast function and set up */ + PyArrayMethod_Context *context = &castdata.from.context; + npy_intp strides[2] = {src_stride, castdata.main.descriptors[0]->elsize}; + NPY_ARRAYMETHOD_FLAGS flags; + if (context->method->get_strided_loop( + context, aligned, move_references, strides, + &castdata.from.stransfer, &castdata.from.auxdata, &flags) < 0) { + assert(castdata.from.auxdata != NULL); + _cast_info_free(&castdata.from); + castdata.from.stransfer = NULL; /* ensure we cleanup once */ + goto fail; + } + assert(castdata.from.stransfer != NULL); + + *out_needs_api |= (flags & NPY_METH_REQUIRES_PYAPI) != 0; + /* The main cast now uses a buffered input: */ + src_stride = strides[1]; + move_references = 1; /* main cast has to clear the buffer */ + } + } + /* + * Wrap the output with an additional cast if necessary. + */ + if (NPY_UNLIKELY(dst_dtype != castdata.main.descriptors[1] || must_wrap)) { + NPY_CASTING to_casting = -1; + /* Cast function may not support the output, wrap if necessary */ + if (init_cast_info( + &castdata.to, &to_casting, + castdata.main.descriptors[1], dst_dtype, 0) < 0) { + goto fail; + } + casting = PyArray_MinCastSafety(casting, to_casting); + + /* Prepare the actual cast (if necessary): */ + if (to_casting & _NPY_CAST_IS_VIEW && !must_wrap) { + /* This step is not necessary and can be skipped. 
*/ + _cast_info_free(&castdata.to); + } + else { + /* Fetch the cast function and set up */ + PyArrayMethod_Context *context = &castdata.to.context; + npy_intp strides[2] = {castdata.main.descriptors[1]->elsize, dst_stride}; + NPY_ARRAYMETHOD_FLAGS flags; + if (context->method->get_strided_loop( + context, aligned, 1 /* clear buffer */, strides, + &castdata.to.stransfer, &castdata.to.auxdata, &flags) < 0) { + assert(castdata.to.auxdata != NULL); + _cast_info_free(&castdata.to); + castdata.to.stransfer = NULL; /* ensure we cleanup once */ + goto fail; + } + assert(castdata.to.stransfer != NULL); + + *out_needs_api |= (flags & NPY_METH_REQUIRES_PYAPI) != 0; + /* The main cast now uses a buffered input: */ + dst_stride = strides[0]; + if (castdata.from.stransfer != NULL) { + /* Both input and output are wrapped, now always aligned */ + aligned = 1; + } + } + } + + /* Fetch the main cast function (with updated values) */ + PyArrayMethod_Context *context = &castdata.main.context; + npy_intp strides[2] = {src_stride, dst_stride}; + NPY_ARRAYMETHOD_FLAGS flags; + if (context->method->get_strided_loop( + context, aligned, move_references, strides, + &castdata.main.stransfer, &castdata.main.auxdata, &flags) < 0) { + goto fail; + } + + *out_needs_api |= (flags & NPY_METH_REQUIRES_PYAPI) != 0; + + if (castdata.from.stransfer == NULL && castdata.to.stransfer == NULL) { + /* The main step is sufficient to do the cast */ + *out_stransfer = castdata.main.stransfer; + *out_transferdata = castdata.main.auxdata; + castdata.main.auxdata = NULL; /* do not free the auxdata */ + _cast_info_free(&castdata.main); + return 0; + } + + /* Clone the castdata as it is currently not persistently stored. */ + *out_transferdata = _multistep_cast_auxdata_clone_int( + (NpyAuxData *)&castdata, 1); + if (*out_transferdata == NULL) { + PyErr_NoMemory(); + goto fail; + } + *out_stransfer = &_strided_to_strided_multistep_cast; + + res = 0; /* success */ + + fail: + _cast_info_free(&castdata.main); + if (castdata.from.stransfer != NULL) { + _cast_info_free(&castdata.from); + } + if (castdata.to.stransfer != NULL) { + _cast_info_free(&castdata.to); + } + return res; +} +#endif + + +NPY_NO_EXPORT int +PyArray_GetDTypeTransferFunction(int aligned, + npy_intp src_stride, npy_intp dst_stride, + PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, + int move_references, + PyArray_StridedUnaryOp **out_stransfer, + NpyAuxData **out_transferdata, + int *out_needs_api) +{ +#if NPY_USE_NEW_CASTINGIMPL + /* + * If one of the dtypes is NULL, we give back either a src decref + * function or a dst setzero function + * + * TODO: Eventually, we may wish to support user dtype with references + * (including and beyond bare `PyObject *` this may require extending + * the ArrayMethod API and those paths should likely be split out + * from this function.) 
+ */ + if (dst_dtype == NULL) { + if (move_references) { + return get_decsrcref_transfer_function(aligned, + src_dtype->elsize, + src_dtype, + out_stransfer, out_transferdata, + out_needs_api); + } + else { + *out_stransfer = &_dec_src_ref_nop; + *out_transferdata = NULL; + return NPY_SUCCEED; + } + } + else if (src_dtype == NULL) { + return get_setdstzero_transfer_function(aligned, + dst_dtype->elsize, + dst_dtype, + out_stransfer, out_transferdata, + out_needs_api); + } + + if (get_transferfunction_for_descrs(aligned, + src_stride, dst_stride, + src_dtype, dst_dtype, move_references, + out_stransfer, out_transferdata, out_needs_api) < 0) { + return NPY_FAIL; + } + + return NPY_SUCCEED; + +#else + return PyArray_LegacyGetDTypeTransferFunction( + aligned, src_stride, dst_stride, src_dtype, dst_dtype, + move_references, out_stransfer, out_transferdata, out_needs_api); +#endif +} /* @@ -3763,7 +4522,9 @@ PyArray_GetDTypeTransferFunction(int aligned, * support. * It supports only wrapping the copyswapn functions and the legacy * cast functions registered with `PyArray_RegisterCastFunc`. - * This function takes the easy way out: It does not wrap + * This function takes the easy way out: It does not wrap, so if wrapping + * might be necessary due to unaligned data, the user has to ensure that + * this is done and aligned is passed in as True (this is asserted only). */ NPY_NO_EXPORT int PyArray_GetLegacyDTypeTransferFunction(int aligned, @@ -3772,7 +4533,7 @@ PyArray_GetLegacyDTypeTransferFunction(int aligned, int move_references, PyArray_StridedUnaryOp **out_stransfer, NpyAuxData **out_transferdata, - int *out_needs_api) + int *out_needs_api, int wrap_if_unaligned) { /* Note: We ignore `needs_wrap`; needs-wrap is handled by another cast */ int needs_wrap = 0; @@ -3790,7 +4551,8 @@ PyArray_GetLegacyDTypeTransferFunction(int aligned, out_stransfer, out_transferdata); } - if (get_legacy_dtype_cast_function(aligned, + if (get_legacy_dtype_cast_function( + aligned, src_stride, dst_stride, src_dtype, dst_dtype, move_references, @@ -3798,9 +4560,39 @@ PyArray_GetLegacyDTypeTransferFunction(int aligned, out_transferdata, out_needs_api, &needs_wrap) != NPY_SUCCEED) { - return NPY_FAIL; + return -1; } - return NPY_SUCCEED; + if (!needs_wrap) { + return 0; + } + if (NPY_UNLIKELY(!wrap_if_unaligned)) { + /* + * Legacy casts do not support unaligned which requires wrapping. + * However, normally we ensure that wrapping happens before calling + * this function, so this path should never happen. + */ + PyErr_Format(PyExc_RuntimeError, + "Internal NumPy error, casting %S to %S required wrapping, " + "probably it incorrectly flagged support for unaligned data. " + "(aligned passed to discovery is %d)", + src_dtype, dst_dtype, aligned); + return -1; + } + + /* + * If we are here, use the legacy code to wrap the above cast (which + * does not support unaligned data) into copyswapn. 
+ */ + NpyAuxData *castdata = *out_transferdata; + *out_transferdata = NULL; + if (wrap_aligned_contig_transfer_function_with_copyswapn( + aligned, src_stride, dst_stride, src_dtype, dst_dtype, + out_stransfer, out_transferdata, out_needs_api, + *out_stransfer, castdata) == NPY_FAIL) { + NPY_AUXDATA_FREE(castdata); + return -1; + } + return 0; } diff --git a/numpy/core/src/multiarray/dtype_transfer.h b/numpy/core/src/multiarray/dtype_transfer.h new file mode 100644 index 000000000000..c61119bfa294 --- /dev/null +++ b/numpy/core/src/multiarray/dtype_transfer.h @@ -0,0 +1,27 @@ +#ifndef _NPY_DTYPE_TRANSFER_H +#define _NPY_DTYPE_TRANSFER_H + +#include "lowlevel_strided_loops.h" +#include "array_method.h" + + +NPY_NO_EXPORT int +any_to_object_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + npy_intp *strides, + PyArray_StridedUnaryOp **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags); + +NPY_NO_EXPORT int +object_to_any_get_loop( + PyArrayMethod_Context *context, + int NPY_UNUSED(aligned), int move_references, + npy_intp *NPY_UNUSED(strides), + PyArray_StridedUnaryOp **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags); + + +#endif /* _NPY_DTYPE_TRANSFER_H */ diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index b8ebee6ed96b..04682d1ed7e3 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -19,6 +19,8 @@ #include "lowlevel_strided_loops.h" #include "array_assign.h" +#include "array_method.h" +#include "usertypes.h" /* diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c index 3eaf99196875..a1ed46f13a60 100644 --- a/numpy/core/src/multiarray/usertypes.c +++ b/numpy/core/src/multiarray/usertypes.c @@ -538,7 +538,7 @@ PyArray_AddLegacyWrapping_CastingImpl( if (from == to) { spec.flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_SUPPORTS_UNALIGNED; PyType_Slot slots[] = { - {NPY_METH_get_loop, NULL}, + {NPY_METH_get_loop, &legacy_cast_get_strided_loop}, {NPY_METH_resolve_descriptors, &legacy_same_dtype_resolve_descriptors}, {0, NULL}}; spec.slots = slots; @@ -547,7 +547,7 @@ PyArray_AddLegacyWrapping_CastingImpl( else { spec.flags = NPY_METH_REQUIRES_PYAPI; PyType_Slot slots[] = { - {NPY_METH_get_loop, NULL}, + {NPY_METH_get_loop, &legacy_cast_get_strided_loop}, {NPY_METH_resolve_descriptors, &simple_cast_resolve_descriptors}, {0, NULL}}; spec.slots = slots; diff --git a/numpy/core/tests/test_arraymethod.py b/numpy/core/tests/test_arraymethod.py new file mode 100644 index 000000000000..b1bc79b80a63 --- /dev/null +++ b/numpy/core/tests/test_arraymethod.py @@ -0,0 +1,58 @@ +""" +This file tests the generic aspects of ArrayMethod. At the time of writing +this is private API, but when added, public API may be added here. +""" + +import pytest + +import numpy as np +from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl + + +class TestResolveDescriptors: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize("args", [ + (True,), # Not a tuple. + ((None,)), # Too few elements + ((None, None, None),), # Too many + ((None, None),), # Input dtype is None, which is invalid. 
+ ((np.dtype("d"), True),), # Output dtype is not a dtype + ((np.dtype("f"), None),), # Input dtype does not match method + ]) + def test_invalid_arguments(self, args): + with pytest.raises(TypeError): + self.method._resolve_descriptors(*args) + + +class TestSimpleStridedCall: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize(["args", "error"], [ + ((True,), TypeError), # Not a tuple + (((None,),), TypeError), # Too few elements + ((None, None), TypeError), # Inputs are not arrays. + (((None, None, None),), TypeError), # Too many + (((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes + (((np.ones(3, dtype=">d"), np.ones(3, dtype="= dtype2.itemsize: + length = self.size // dtype1.itemsize + else: + length = self.size // dtype2.itemsize + + # Assume that the base array is well enough aligned for all inputs. + arr1 = np.empty(length, dtype=dtype1) + assert arr1.flags.c_contiguous + assert arr1.flags.aligned + + values = [random.randrange(-128, 128) for _ in range(length)] + + for i, value in enumerate(values): + # Use item assignment to ensure this is not using casting: + arr1[i] = value + + if dtype2 is None: + if dtype1.char == "?": + values = [bool(v) for v in values] + return arr1, values + + if dtype2.char == "?": + values = [bool(v) for v in values] + + arr2 = np.empty(length, dtype=dtype2) + assert arr2.flags.c_contiguous + assert arr2.flags.aligned + + for i, value in enumerate(values): + # Use item assignment to ensure this is not using casting: + arr2[i] = value + + return arr1, arr2, values + + def get_data_variation(self, arr1, arr2, aligned=True, contig=True): + """ + Returns a copy of arr1 that may be non-contiguous or unaligned, and a + matching array for arr2 (although not a copy). + """ + if contig: + stride1 = arr1.dtype.itemsize + stride2 = arr2.dtype.itemsize + elif aligned: + stride1 = 2 * arr1.dtype.itemsize + stride2 = 2 * arr2.dtype.itemsize + else: + stride1 = arr1.dtype.itemsize + 1 + stride2 = arr2.dtype.itemsize + 1 + + max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1 + max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1 + from_bytes = np.zeros(max_size1, dtype=np.uint8) + to_bytes = np.zeros(max_size2, dtype=np.uint8) + + # Sanity check that the above is large enough: + assert stride1 * len(arr1) <= from_bytes.nbytes + assert stride2 * len(arr2) <= to_bytes.nbytes + + if aligned: + new1 = as_strided(from_bytes[:-1].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[:-1].view(arr2.dtype), + arr2.shape, (stride2,)) + else: + new1 = as_strided(from_bytes[1:].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[1:].view(arr2.dtype), + arr2.shape, (stride2,)) + + new1[...] 
= arr1
+
+        if not contig:
+            # Ensure we did not overwrite bytes that should not be written:
+            offset = arr1.dtype.itemsize if aligned else 0
+            buf = from_bytes[offset::stride1].tobytes()
+            assert buf.count(b"\0") == len(buf)
+
+        if contig:
+            assert new1.flags.c_contiguous
+            assert new2.flags.c_contiguous
+        else:
+            assert not new1.flags.c_contiguous
+            assert not new2.flags.c_contiguous
+
+        if aligned:
+            assert new1.flags.aligned
+            assert new2.flags.aligned
+        else:
+            assert not new1.flags.aligned or new1.dtype.alignment == 1
+            assert not new2.flags.aligned or new2.dtype.alignment == 1
+
+        return new1, new2
+
     @pytest.mark.parametrize("from_Dt", simple_dtypes)
     def test_simple_cancast(self, from_Dt):
         for to_Dt in simple_dtypes:
@@ -193,6 +291,183 @@ def test_simple_cancast(self, from_Dt):
 
             assert(to_dt is to_res)
 
+    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+    @pytest.mark.parametrize("from_dt", simple_dtype_instances())
+    def test_simple_direct_casts(self, from_dt):
+        """
+        This test checks numeric direct casts for dtypes supported also by the
+        struct module (plus complex). It tries to test a wide range of
+        inputs, but skips over possibly undefined behaviour (e.g. int rollover).
+        Longdouble and CLongdouble are tested, but only using double precision.
+
+        If this test creates issues, it should possibly just be simplified
+        or even removed (checking whether unaligned/non-contiguous casts give
+        the same results is useful, though).
+        """
+        for to_dt in simple_dtype_instances():
+            to_dt = to_dt.values[0]
+            cast = get_castingimpl(type(from_dt), type(to_dt))
+
+            casting, (from_res, to_res) = cast._resolve_descriptors(
+                (from_dt, to_dt))
+
+            if from_res is not from_dt or to_res is not to_dt:
+                # Do not test this case, it is handled in multiple steps,
+                # each of which should be tested individually.
+                return
+
+            safe = (casting & ~Casting.cast_is_view) <= Casting.safe
+            del from_res, to_res, casting
+
+            arr1, arr2, values = self.get_data(from_dt, to_dt)
+
+            cast._simple_strided_call((arr1, arr2))
+
+            # Check via python list
+            assert arr2.tolist() == values
+
+            # Check that the same results are achieved for strided loops
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            # Check if alignment makes a difference, but only if supported
+            # and only if the alignment can be wrong
+            if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
+                    not cast._supports_unaligned):
+                return
+
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            del arr1_o, arr2_o, cast
+
+    @pytest.mark.parametrize("from_Dt", simple_dtypes)
+    def test_numeric_to_times(self, from_Dt):
+        # We currently only implement contiguous loops, so only need to
+        # test those.
+ from_dt = from_Dt() + + time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"), + np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")] + for time_dt in time_dtypes: + cast = get_castingimpl(type(from_dt), type(time_dt)) + + casting, (from_res, to_res) = cast._resolve_descriptors( + (from_dt, time_dt)) + + assert from_res is from_dt + assert to_res is time_dt + del from_res, to_res + + assert(casting & CAST_TABLE[from_Dt][type(time_dt)]) + + int64_dt = np.dtype(np.int64) + arr1, arr2, values = self.get_data(from_dt, int64_dt) + arr2 = arr2.view(time_dt) + arr2[...] = np.datetime64("NaT") + + if time_dt == np.dtype("M8"): + # This is a bit of a strange path, and could probably be removed + arr1[-1] = 0 # ensure at least one value is not NaT + + # The cast currently succeeds, but the values are invalid: + cast._simple_strided_call((arr1, arr2)) + with pytest.raises(ValueError): + str(arr2[-1]) # e.g. conversion to string fails + return + + cast._simple_strided_call((arr1, arr2)) + + assert [int(v) for v in arr2.tolist()] == values + + # Check that the same results are achieved for strided loops + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + @pytest.mark.parametrize( + ["from_dt", "to_dt", "expected_casting", "nom", "denom"], + [("M8[ns]", None, + Casting.no | Casting.cast_is_view, 1, 1), + (str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1), + ("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1), + ("M8[ms]", "M8", Casting.unsafe, 1, 1), # should be invalid cast + ("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1), + ("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6), + ("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1), + ("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7), + ("M8[4D]", "M8[1M]", Casting.same_kind, None, + # give full values based on NumPy 1.19.x + [-2**63, 0, -1, 1314, -1315, 564442610]), + ("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1), + (str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1), + ("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1), + ("m8[ms]", "m8", Casting.unsafe, 1, 1), # should be invalid cast + ("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1), + ("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6), + ("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1), + ("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7), + ("m8[4D]", "m8[1M]", Casting.unsafe, None, + # give full values based on NumPy 1.19.x + [-2**63, 0, 0, 1314, -1315, 564442610])]) + def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom): + from_dt = np.dtype(from_dt) + if to_dt is not None: + to_dt = np.dtype(to_dt) + + # Test a few values for casting (results generated with NumPy 1.19) + values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32]) + values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder)) + assert values.dtype.byteorder == from_dt.byteorder + assert np.isnat(values.view(from_dt)[0]) + + DType = type(from_dt) + cast = get_castingimpl(DType, DType) + casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt)) + assert from_res is from_dt + assert to_res is to_dt or to_dt is None + assert casting == expected_casting + + if nom is not None: + expected_out = (values * nom // denom).view(to_res) + expected_out[0] = "NaT" + else: + expected_out = np.empty_like(values) + expected_out[...] 
= denom + expected_out = expected_out.view(to_dt) + + orig_arr = values.view(from_dt) + orig_out = np.empty_like(expected_out) + + if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"): + # Casting from non-generic to generic units is an error and should + # probably be reported as an invalid cast earlier. + with pytest.raises(ValueError): + cast._simple_strided_call((orig_arr, orig_out)) + return + + for aligned in [True, True]: + for contig in [True, True]: + arr, out = self.get_data_variation( + orig_arr, orig_out, aligned, contig) + out[...] = 0 + cast._simple_strided_call((arr, out)) + assert_array_equal(out.view("int64"), expected_out.view("int64")) + def string_with_modified_length(self, dtype, change_length): fact = 1 if dtype.char == "S" else 4 length = dtype.itemsize // fact + change_length @@ -239,6 +514,67 @@ def test_string_cancast(self, other_DT, string_char): assert safety == Casting.unsafe assert other_dt is res_dt # returns the singleton for simple dtypes + @pytest.mark.parametrize("string_char", ["S", "U"]) + @pytest.mark.parametrize("other_dt", simple_dtype_instances()) + def test_simple_string_casts_roundtrip(self, other_dt, string_char): + """ + Tests casts from and to string by checking the roundtripping property. + + The test also covers some string to string casts (but not all). + + If this test creates issues, it should possibly just be simplified + or even removed (checking whether unaligned/non-contiguous casts give + the same results is useful, though). + """ + string_DT = type(np.dtype(string_char)) + + cast = get_castingimpl(type(other_dt), string_DT) + cast_back = get_castingimpl(string_DT, type(other_dt)) + _, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None)) + + if res_other_dt is not other_dt: + # do not support non-native byteorder, skip test in that case + assert other_dt.byteorder != res_other_dt.byteorder + return + + orig_arr, values = self.get_data(other_dt, None) + str_arr = np.zeros(len(orig_arr), dtype=string_dt) + string_dt_short = self.string_with_modified_length(string_dt, -1) + str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short) + string_dt_long = self.string_with_modified_length(string_dt, 1) + str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long) + + assert not cast._supports_unaligned # if support is added, should test + assert not cast_back._supports_unaligned + + for contig in [True, False]: + other_arr, str_arr = self.get_data_variation( + orig_arr, str_arr, True, contig) + _, str_arr_short = self.get_data_variation( + orig_arr, str_arr_short.copy(), True, contig) + _, str_arr_long = self.get_data_variation( + orig_arr, str_arr_long, True, contig) + + cast._simple_strided_call((other_arr, str_arr)) + + cast._simple_strided_call((other_arr, str_arr_short)) + assert_array_equal(str_arr.astype(string_dt_short), str_arr_short) + + cast._simple_strided_call((other_arr, str_arr_long)) + assert_array_equal(str_arr, str_arr_long) + + if other_dt.kind == "b": + # Booleans do not roundtrip + continue + + other_arr[...] = 0 + cast_back._simple_strided_call((str_arr, other_arr)) + assert_array_equal(orig_arr, other_arr) + + other_arr[...] 
= 0 + cast_back._simple_strided_call((str_arr_long, other_arr)) + assert_array_equal(orig_arr, other_arr) + @pytest.mark.parametrize("other_dt", ["S8", "U8"]) @pytest.mark.parametrize("string_char", ["S", "U"]) def test_string_to_string_cancast(self, other_dt, string_char): diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index e10c7ad92db3..5e6472ae5536 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -1362,6 +1362,74 @@ def test_iter_copy(): j = i.copy() assert_equal([x[()] for x in j], a.ravel(order='F')) + +@pytest.mark.parametrize("dtype", np.typecodes["All"]) +@pytest.mark.parametrize("loop_dtype", np.typecodes["All"]) +def test_iter_copy_casts(dtype, loop_dtype): + # Ensure the dtype is never flexible: + if loop_dtype.lower() == "m": + loop_dtype = loop_dtype + "[ms]" + elif np.dtype(loop_dtype).itemsize == 0: + loop_dtype = loop_dtype + "50" + + # Make things a bit more interesting by requiring a byte-swap as well: + arr = np.ones(1000, dtype=np.dtype(dtype).newbyteorder()) + try: + expected = arr.astype(loop_dtype) + except Exception: + # Some casts are not possible, do not worry about them + return + + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[loop_dtype], casting="unsafe") + + if np.issubdtype(np.dtype(loop_dtype), np.number): + # Casting to strings may be strange, but for simple dtypes do not rely + # on the cast being correct: + assert_array_equal(expected, np.ones(1000, dtype=loop_dtype)) + + it_copy = it.copy() + res = next(it) + del it + res_copy = next(it_copy) + del it_copy + + assert_array_equal(res, expected) + assert_array_equal(res_copy, expected) + + +def test_iter_copy_casts_structured(): + # Test a complicated structured dtype for casting, as it requires + # both multiple steps and a more complex casting setup. 
+ # Includes a structured -> unstructured (any to object), and many other + # casts, which cause this to require all steps in the casting machinery + # one level down as well as the iterator copy (which uses NpyAuxData clone) + in_dtype = np.dtype([("a", np.dtype("i,")), + ("b", np.dtype(">i,d,S17,>d,(3)f,O,i1"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype(">i,>i,S17,>d,>U3,(3)d,i1,O"))]) + arr = np.ones(1000, dtype=in_dtype) + + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + expected = arr["a"].astype(out_dtype["a"]) + assert_array_equal(res1["a"], expected) + assert_array_equal(res2["a"], expected) + + for field in in_dtype["b"].names: + # Note that the .base avoids the subarray field + expected = arr["b"][field].astype(out_dtype["b"][field].base) + assert_array_equal(res1["b"][field], expected) + assert_array_equal(res2["b"][field], expected) + + def test_iter_allocate_output_simple(): # Check that the iterator will properly allocate outputs From aa6f5b76e60e7ba71b278e9c0125c35e5f8a92e7 Mon Sep 17 00:00:00 2001 From: iamsoto Date: Fri, 8 Jan 2021 18:04:11 -0800 Subject: [PATCH 0317/1270] ENH: keyboard interrupt for large array creation --- numpy/core/src/multiarray/array_coercion.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index 1eac401bc2f3..0fd958282f89 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -1098,6 +1098,11 @@ PyArray_DiscoverDTypeAndShape_Recursive( return curr_dims + 1; } + /* Allow keyboard interrupts. See gh issue 18117. */ + if (PyErr_CheckSignals() < 0) { + return -1; + } + /* Recursive call for each sequence item */ for (Py_ssize_t i = 0; i < size; i++) { max_dims = PyArray_DiscoverDTypeAndShape_Recursive( From ad71a168dd8b18954da3a2fb6591324487af8765 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 11 Jan 2021 07:21:05 +0000 Subject: [PATCH 0318/1270] MAINT: Bump sphinx from 3.4.1 to 3.4.3 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 3.4.1 to 3.4.3. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/3.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v3.4.1...v3.4.3) Signed-off-by: dependabot-preview[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 82da7875b7d3..64ab9a41fe7c 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx==3.4.1 +sphinx==3.4.3 numpydoc==1.1.0 ipython scipy From d8f4974b81942018edbf41b30f774d4ebd6fae2b Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 11 Jan 2021 07:22:46 +0000 Subject: [PATCH 0319/1270] MAINT: Bump hypothesis from 5.46.0 to 6.0.0 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 5.46.0 to 6.0.0. 
- [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-5.46.0...hypothesis-python-6.0.0) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 53dd25f76d74..b6aaa89951f2 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 -hypothesis==5.46.0 +hypothesis==6.0.0 pytest==6.2.1 pytz==2020.5 pytest-cov==2.10.1 From 70a65fbee91d12cb4469e97bf5662821bb96f9e0 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 9 Jan 2021 21:40:56 +0100 Subject: [PATCH 0320/1270] BLD: add found Cython version to check in cythonize.py Ref gh-18138, which has a confusing situation where the correct version seems installed but this check still raises. --- tools/cythonize.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/cythonize.py b/tools/cythonize.py index 6cebf0f729ea..911b812438a0 100755 --- a/tools/cythonize.py +++ b/tools/cythonize.py @@ -56,6 +56,7 @@ def process_pyx(fromfile, tofile): try: # try the cython in the installed python first (somewhat related to scipy/scipy#2397) + import Cython from Cython.Compiler.Version import version as cython_version except ImportError: # The `cython` command need not point to the version installed in the @@ -73,7 +74,9 @@ def process_pyx(fromfile, tofile): required_version = LooseVersion('0.29.21') if LooseVersion(cython_version) < required_version: - raise RuntimeError(f'Building {VENDOR} requires Cython >= {required_version}') + cython_path = Cython.__file__ + raise RuntimeError(f'Building {VENDOR} requires Cython >= {required_version}' + f', found {cython_version} at {cython_path}') subprocess.check_call( [sys.executable, '-m', 'cython'] + flags + ["-o", tofile, fromfile]) From 995d5a6865c2dc1d4dd7ab5d5b2f5f10bbd1a598 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 9 Jan 2021 20:01:22 +0100 Subject: [PATCH 0321/1270] MAINT: address review comments on sponsorship NEP [ci skip] --- doc/neps/nep-0046-sponsorship-guidelines.rst | 65 ++++++++++---------- 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/doc/neps/nep-0046-sponsorship-guidelines.rst b/doc/neps/nep-0046-sponsorship-guidelines.rst index 88f40536ba85..cc273ce2c654 100644 --- a/doc/neps/nep-0046-sponsorship-guidelines.rst +++ b/doc/neps/nep-0046-sponsorship-guidelines.rst @@ -21,25 +21,27 @@ financial and in-kind support. Motivation and Scope -------------------- -In the past few years the NumPy project has gotten significant financial +In the past few years, the NumPy project has gotten significant financial support, as well as dedicated work time for maintainers to work on NumPy. There -is a need to acknowledge that support - funders and organizations expect or require -it, it's helpful when looking for new funding, and it's the right thing to do. -Furthermore, having a clear policy for how NumPy acknowledges support is -helpful when searching for new support. - -This NEP is aimed at both the NumPy community - who can use it when looking for -support and acknowledging existing support - and at past, current and -prospective sponsors, who often want or need to know what they get in return -for their support other than a healthier NumPy. 
+is a need to acknowledge that support - it's the right thing to do, it's +helpful when looking for new funding, and funders and organizations expect or +require it, Furthermore, having a clear policy for how NumPy acknowledges +support is helpful when searching for new support. Finally, this policy may +help set reasonable expectations for potential funders. + +This NEP is aimed at both the NumPy community - who can use it as a guideline +when looking for support on behalf of the project and when acknowledging +existing support - and at past, current and prospective sponsors, who often +want or need to know what they get in return for their support other than a +healthier NumPy. The scope of this proposal includes: - direct financial support, employers providing paid time for NumPy maintainers and regular contributors, and in-kind support such as free hardware resources or - services. -- where and how NumPy acknowledges support (e.g., logo placement on the website). -- the amount and duration of support which leads to acknowledgement. + services, +- where and how NumPy acknowledges support (e.g., logo placement on the website), +- the amount and duration of support which leads to acknowledgement, and - who in the NumPy project is responsible for sponsorship related topics, and how to contact them. @@ -47,22 +49,21 @@ The scope of this proposal includes: How NumPy will acknowledge support ---------------------------------- -There will be two different ways to acknowledge financial and in-kind support, +There will be two different ways to acknowledge financial and in-kind support: one to recognize significant active support, and another one to recognize support received in the past and smaller amounts of support. Entities who fall under "significant active supporter" we'll call Sponsor. The minimum level of support given to NumPy to be considered a Sponsor are: -- $30,000/yr for unrestricted financial contributions -- $60,000/yr for financial contributions for a particular purpose -- $100,000/yr for in-kind contributions +- $30,000/yr for unrestricted financial contributions (e.g., donations) +- $60,000/yr for financial contributions for a particular purpose (e.g., grants) +- $100,000/yr for in-kind contributions (e.g., time for employees to contribute) We define support being active as: -- for a one-off donation: it was received within the previous 12 months -- for a grant or other support with a defined start and end date: the duration - of a grant period. +- for a one-off donation: it was received within the previous 12 months, +- for recurring or financial or in-kind contributions: they should be ongoing. After support moves from "active" to "inactive" status, the acknowledgement will be left in its place for at least another 6 months. If appropriate, the @@ -75,9 +76,9 @@ The rationale for the above funding levels is that unrestricted financial contributions are typically the most valuable for the project, and the hardest to obtain. The opposite is true for in-kind contributions. The dollar value of the levels also reflect that NumPy's needs have grown to the point where we -need at least a few paid developers in order to effectively support our user -base and continue to move the project forward. Financial support at or above -these levels is needed to be able to make a significant difference. +need multiple paid developers in order to effectively support our user base and +continue to move the project forward. 
Financial support at or above these +levels is needed to be able to make a significant difference. Sponsors will get acknowledged through: @@ -97,7 +98,7 @@ Therefore we propose that level here, roughly in line with the sponsorship levels: - 6 person-months/yr of paid work time for one or more NumPy maintainers or - regular contributors + regular contributors to any NumPy team or activity Institutional Partners get the same benefits as Sponsors, in addition to what is specified in the NumPy governance document. @@ -106,7 +107,7 @@ Finally, a new page on the website (https://numpy.org/funding/, linked from the About page) will be added to acknowledge all current and previous sponsors, partners, and any other entities and individuals who provided $5,000 or more of financial or in-kind support. This page will include relevant details of -support (dates, amounts, names and purpose); no logos will be used on this +support (dates, amounts, names, and purpose); no logos will be used on this page. The rationale for the $5,000 minimum level is to keep the amount of work maintaining the page reasonable; the level is the equivalent of, e.g., one GSoC or a person-week's worth of engineering time in a Western country, which seems @@ -128,8 +129,8 @@ The following content changes need to be made: NumPy project about sponsorship related matters (see next section). -A NumPy Funding Team -~~~~~~~~~~~~~~~~~~~~ +NumPy Funding Team +~~~~~~~~~~~~~~~~~~ At the moment NumPy has only one official body, the Steering Council, and no good way to get in touch with either that body or any person or group @@ -141,7 +142,7 @@ easy for a potential sponsor to know if they're reaching out to the right person in private. https://numpy.org/about/ already says that NumPy has a "funding and grants" -team, however that is not the case. We propose to organize this team, name team +team. However that is not the case. We propose to organize this team, name team members on it, and add the names of those team members plus a dedicated email address for the team to the About page. @@ -163,8 +164,8 @@ website team. Decisions on which entities to recognize as Institutional Partner have been made by the NumPy Steering Council. -NumPy governance, decision-making and financial oversight -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +NumPy governance, decision-making, and financial oversight +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ *This section is meant as context for the reader, to help put the rest of this NEP in perspective, and perhaps answer questions the reader has when reading @@ -206,7 +207,7 @@ in the future, we should reconsider listing all individual donations. Related Work ------------ -Here we provide a few examples of how other project handle sponsorship +Here we provide a few examples of how other projects handle sponsorship guidelines and acknowledgements. *Scikit-learn* has a narrow banner with logos at the bottom of @@ -238,7 +239,7 @@ Mailing list thread(s) discussing this NEP: TODO References and Footnotes ------------------------ -- `Inside NumPy: preparing for the next decade `__ presentation at SciPy'19 discussing impact of the first NumPy grant. +- `Inside NumPy: preparing for the next decade `__ presentation at SciPy'19 discussing the impact of the first NumPy grant. 
- `Issue `__ and `email `__ where IBM offered a $5,000 bounty for VSX SIMD support

From c9e8bc8ac5d804d0acba1f6e81a4bb9c32984d84 Mon Sep 17 00:00:00 2001
From: Sebastian Berg 
Date: Mon, 11 Jan 2021 12:51:45 -0600
Subject: [PATCH 0322/1270] BUG: Promotion between strings and objects was asymmetric

After an unrelated fix, the logic for string and object promotion was
briefly incorrect; this fixes it to be correct (symmetric).

Before, string and unicode would return that `string.__common_dtype__(object)`
is in fact `string`, which is of course incorrect. (Since
`object.__common_dtype__(other)` always returns `object`, this depended on
the order, and the NumPy tests apparently only tested the opposite
direction, or nothing at all.)
---
 numpy/core/src/multiarray/dtypemeta.c |  5 +-
 numpy/core/tests/test_numeric.py      | 68 ++++++++++++++-------------
 2 files changed, 38 insertions(+), 35 deletions(-)

diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index b1cd074a08d8..2931977c204e 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -400,9 +400,10 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
 static PyArray_DTypeMeta *
 string_unicode_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
 {
-    assert(cls->type_num < NPY_NTYPES);
+    assert(cls->type_num < NPY_NTYPES && cls != other);
     if (!other->legacy || (!PyTypeNum_ISNUMBER(other->type_num) &&
-            (cls->type_num == NPY_STRING && other->type_num == NPY_UNICODE))) {
+            /* Not numeric so defer unless cls is unicode and other is string */
+            !(cls->type_num == NPY_UNICODE && other->type_num == NPY_STRING))) {
         Py_INCREF(Py_NotImplemented);
         return (PyArray_DTypeMeta *)Py_NotImplemented;
     }
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 866a96e31c1c..280874d21695 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -888,39 +888,41 @@ def test_can_cast_and_promote_usertypes(self):
         assert np.can_cast(rational_dt, double_dt)
         assert np.promote_types(double_dt, rational_dt) is double_dt

-    def test_promote_types_strings(self):
-        assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
-        assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
-        assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
-        assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
-        assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
-        assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
-        assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
-        assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
-        assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
-        assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
-        assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
-        assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
-        assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
-        assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
-        assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
-        assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
-        assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
-        assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
-        assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
-        assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
-        assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
-        assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
-        assert_equal(np.promote_types('b', 
'S1'), np.dtype('S4')) - assert_equal(np.promote_types('b', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3')) - assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5')) - assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10')) - assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20')) - assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30')) + @pytest.mark.parametrize("swap", ["", "swap"]) + @pytest.mark.parametrize("string_dtype", ["U", "S"]) + def test_promote_types_strings(self, swap, string_dtype): + if swap == "swap": + promote_types = lambda a, b: np.promote_types(b, a) + else: + promote_types = np.promote_types + + S = string_dtype + # Promote numeric with unsized string: + assert_equal(promote_types('bool', S), np.dtype(S+'5')) + assert_equal(promote_types('b', S), np.dtype(S+'4')) + assert_equal(promote_types('u1', S), np.dtype(S+'3')) + assert_equal(promote_types('u2', S), np.dtype(S+'5')) + assert_equal(promote_types('u4', S), np.dtype(S+'10')) + assert_equal(promote_types('u8', S), np.dtype(S+'20')) + assert_equal(promote_types('i1', S), np.dtype(S+'4')) + assert_equal(promote_types('i2', S), np.dtype(S+'6')) + assert_equal(promote_types('i4', S), np.dtype(S+'11')) + assert_equal(promote_types('i8', S), np.dtype(S+'21')) + # Promote numeric with sized string: + assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5')) + assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('b', S+'1'), np.dtype(S+'4')) + assert_equal(promote_types('b', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3')) + assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5')) + assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10')) + assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20')) + assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30')) + # Promote with object: + assert_equal(promote_types('O', S+'30'), np.dtype('O')) @pytest.mark.parametrize(["dtype1", "dtype2"], [[np.dtype("V6"), np.dtype("V10")], From cce2142a648f0c19eb9851c0cb51521d617d1acb Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 11 Jan 2021 12:02:37 -0600 Subject: [PATCH 0323/1270] BUG: Ensure too many advanced indices raises an exception The number of indices is limited to 2*MAXDIMS currently to allow mixing integer indices, e.g. with new indices `np.newaxis` (one removes output dimensions, the other adds new ones). This means that more than MAXDIMS advanced indices can be passed on to the advanced indexing machinery (`MapIterNew`), which did not check for this possibility. 
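For illustration only (not part of the patch itself), the failure mode mirrors the new test
added below: once more boolean or array indices are passed than NPY_MAXDIMS allows
(32 on builds of this era; treat the exact limit and message as implementation details),
indexing now raises IndexError instead of reaching the advanced indexing machinery
unchecked. A minimal sketch, assuming a 32-dimensional array as in the test:

    import numpy as np

    # 40 scalar booleans count as advanced (0-d boolean) indices; with this
    # change they are rejected with a clear IndexError rather than overrunning
    # the fixed-size buffers in MapIterNew.
    arr = np.ones((1,) * 32)
    try:
        arr[(True,) * 40]
    except IndexError as exc:
        print(exc)
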
Closes gh-18145
---
 numpy/core/src/multiarray/mapping.c | 16 +++++++++++++++-
 numpy/core/tests/test_indexing.py   | 17 +++++++++++++++++
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index d64962f87150..0ebb337b0898 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -2328,7 +2328,7 @@ PyArray_MapIterNext(PyArrayMapIterObject *mit)
 * @param Number of indices
 * @param The array that is being iterated
 *
- * @return 0 on success -1 on failure
+ * @return 0 on success -1 on failure (broadcasting or too many fancy indices)
 */
static int
mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
@@ -2369,6 +2369,19 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
         }
     }

+    /* Before continuing, ensure that there are not too many fancy indices */
+    if (indices[i].type & HAS_FANCY) {
+        assert(indices[i].type == HAS_FANCY ||
+               indices[i].type == HAS_0D_BOOL);
+        if (NPY_UNLIKELY(j >= NPY_MAXDIMS)) {
+            PyErr_Format(PyExc_IndexError,
+                    "too many advanced (array) indices. This probably "
+                    "means you are indexing with too many booleans. "
+                    "(more than %d found)", NPY_MAXDIMS);
+            return -1;
+        }
+    }
+
     /* (iterating) fancy index, store the iterator */
     if (indices[i].type == HAS_FANCY) {
         mit->fancy_strides[j] = PyArray_STRIDE(arr, curr_dim);
@@ -2655,6 +2668,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
     /* For shape reporting on error */
     PyArrayObject *original_extra_op = extra_op;

+    /* NOTE: MAXARGS is the actual limit (2*NPY_MAXDIMS is index number one) */
     PyArrayObject *index_arrays[NPY_MAXDIMS];
     PyArray_Descr *intp_descr;
     PyArray_Descr *dtypes[NPY_MAXDIMS];  /* borrowed references */
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 667c49240e4a..73dbc429c89c 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -3,6 +3,8 @@
 import functools
 import operator

+import pytest
+
 import numpy as np
 from numpy.core._multiarray_tests import array_indexing
 from itertools import product
@@ -547,6 +549,21 @@ def test_character_assignment(self):
         assert_array_equal(arr[0], np.array("asdfg", dtype="c"))
         assert arr[0, 1] == b"s"  # make sure not all were set to "a" for both

+    @pytest.mark.parametrize("index",
+            [True, False, np.array([0])])
+    @pytest.mark.parametrize("num", [32, 40])
+    @pytest.mark.parametrize("original_ndim", [1, 32])
+    def test_too_many_advanced_indices(self, index, num, original_ndim):
+        # These are limitations based on the number of arguments we can process.
+        # For `num=32` (and all boolean cases), the result is actually defined;
+        # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
+        arr = np.ones((1,) * original_ndim)
+        with pytest.raises(IndexError):
+            arr[(index,) * num]
+        with pytest.raises(IndexError):
+            arr[(index,) * num] = 1.
+
+
 class TestFieldIndexing:
     def test_scalar_return_type(self):
         # Field access on an array should return an array, even if it

From b038c35ff03d70780711be1116db53b1c72b224c Mon Sep 17 00:00:00 2001
From: Illviljan <14371165+Illviljan@users.noreply.github.com>
Date: Mon, 11 Jan 2021 22:31:20 +0100
Subject: [PATCH 0324/1270] faster tril_indices

nonzero is slow. Use np.indices and np.broadcast_to to speed it up.
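As a rough sketch of the idea (an illustration, not part of the patch), both
constructions produce the same index tuple; the new one avoids calling nonzero
on the materialized boolean mask:

    import numpy as np

    n, m, k = 4, 5, 0
    mask = np.tri(n, m, k=k, dtype=bool)

    # old approach: nonzero on the boolean mask
    rows_old, cols_old = np.nonzero(mask)

    # new approach: boolean-index sparse index grids broadcast to the mask shape
    rows_new, cols_new = (np.broadcast_to(inds, mask.shape)[mask]
                          for inds in np.indices(mask.shape, sparse=True))

    assert np.array_equal(rows_old, rows_new)
    assert np.array_equal(cols_old, cols_new)
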
--- numpy/lib/twodim_base.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 2b4cbdfbbc11..8e008ba716f5 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -894,7 +894,10 @@ def tril_indices(n, k=0, m=None): [-10, -10, -10, -10]]) """ - return nonzero(tri(n, m, k=k, dtype=bool)) + tri = np.tri(n, m=m, k=k, dtype=bool) + + return tuple(np.broadcast_to(inds, tri.shape)[tri] + for inds in np.indices(tri.shape, sparse=True)) def _trilu_indices_form_dispatcher(arr, k=None): @@ -1010,7 +1013,10 @@ def triu_indices(n, k=0, m=None): [ 12, 13, 14, -1]]) """ - return nonzero(~tri(n, m, k=k-1, dtype=bool)) + tri = ~np.tri(n, m, k=k - 1, dtype=bool) + + return tuple(np.broadcast_to(inds, tri.shape)[tri] + for inds in np.indices(tri.shape, sparse=True)) @array_function_dispatch(_trilu_indices_form_dispatcher) From 275110674bd119eae350053d779e795cd66b35cf Mon Sep 17 00:00:00 2001 From: Alex Henrie Date: Tue, 12 Jan 2021 17:08:31 -0700 Subject: [PATCH 0325/1270] MAINT: Remove redundant null check before free --- numpy/core/src/npysort/timsort.c.src | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/numpy/core/src/npysort/timsort.c.src b/numpy/core/src/npysort/timsort.c.src index 3fdd46f61c51..5298f5a1d057 100644 --- a/numpy/core/src/npysort/timsort.c.src +++ b/numpy/core/src/npysort/timsort.c.src @@ -507,9 +507,7 @@ timsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr)) ret = 0; cleanup: - if (buffer.pw != NULL) { - free(buffer.pw); - } + free(buffer.pw); return ret; } From 124ad55c34117b1f1562dbd6d4e5641292c53a4b Mon Sep 17 00:00:00 2001 From: Alex Henrie Date: Tue, 12 Jan 2021 17:16:36 -0700 Subject: [PATCH 0326/1270] BUG: Initialize value of no_castable_output used in ufunc_loop_matches --- numpy/core/src/umath/ufunc_type_resolution.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 3ce06322f589..be48be079e92 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -2020,7 +2020,7 @@ type_tuple_type_resolver(PyUFuncObject *self, int n_specified = 0; int specified_types[NPY_MAXARGS], types[NPY_MAXARGS]; const char *ufunc_name; - int no_castable_output, use_min_scalar; + int no_castable_output = 0, use_min_scalar; /* For making a better error message on coercion error */ char err_dst_typecode = '-', err_src_typecode = '-'; From c9c911e10504b8430873dba67dd602309f6f5f35 Mon Sep 17 00:00:00 2001 From: Alex Henrie Date: Wed, 13 Jan 2021 14:08:26 -0700 Subject: [PATCH 0327/1270] MAINT: Make keyword arrays static --- numpy/core/src/multiarray/compiled_base.c | 4 ++-- numpy/core/src/multiarray/datetime_busday.c | 12 ++++++------ numpy/core/src/multiarray/methods.c | 2 +- numpy/core/src/multiarray/multiarraymodule.c | 2 +- numpy/core/src/multiarray/scalartypes.c.src | 10 +++++----- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index fa5d7db75e88..de793f87c156 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1037,7 +1037,7 @@ arr_ravel_multi_index(PyObject *self, PyObject *args, PyObject *kwds) NpyIter *iter = NULL; - char *kwlist[] = {"multi_index", "dims", "mode", "order", NULL}; + static char *kwlist[] = {"multi_index", "dims", "mode", "order", 
NULL}; memset(op, 0, sizeof(op)); dtype[0] = NULL; @@ -1232,7 +1232,7 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds) int i, ret_ndim; npy_intp ret_dims[NPY_MAXDIMS], ret_strides[NPY_MAXDIMS]; - char *kwlist[] = {"indices", "shape", "order", NULL}; + static char *kwlist[] = {"indices", "shape", "order", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&:unravel_index", kwlist, diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c index 2cf1575515bc..f0564146d9e6 100644 --- a/numpy/core/src/multiarray/datetime_busday.c +++ b/numpy/core/src/multiarray/datetime_busday.c @@ -934,8 +934,8 @@ NPY_NO_EXPORT PyObject * array_busday_offset(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) { - char *kwlist[] = {"dates", "offsets", "roll", - "weekmask", "holidays", "busdaycal", "out", NULL}; + static char *kwlist[] = {"dates", "offsets", "roll", + "weekmask", "holidays", "busdaycal", "out", NULL}; PyObject *dates_in = NULL, *offsets_in = NULL, *out_in = NULL; @@ -1065,8 +1065,8 @@ NPY_NO_EXPORT PyObject * array_busday_count(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) { - char *kwlist[] = {"begindates", "enddates", - "weekmask", "holidays", "busdaycal", "out", NULL}; + static char *kwlist[] = {"begindates", "enddates", + "weekmask", "holidays", "busdaycal", "out", NULL}; PyObject *dates_begin_in = NULL, *dates_end_in = NULL, *out_in = NULL; @@ -1210,8 +1210,8 @@ NPY_NO_EXPORT PyObject * array_is_busday(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) { - char *kwlist[] = {"dates", - "weekmask", "holidays", "busdaycal", "out", NULL}; + static char *kwlist[] = {"dates", + "weekmask", "holidays", "busdaycal", "out", NULL}; PyObject *dates_in = NULL, *out_in = NULL; diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 8bcf591a27e9..04ce53ed73d5 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -2289,7 +2289,7 @@ array_dot(PyArrayObject *self, PyObject *args, PyObject *kwds) { PyObject *a = (PyObject *)self, *b, *o = NULL; PyArrayObject *ret; - char* kwlist[] = {"b", "out", NULL }; + static char* kwlist[] = {"b", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:dot", kwlist, &b, &o)) { diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index dfd27a0bcef3..2c00c498be71 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -2319,7 +2319,7 @@ array_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwds) { PyObject *v, *a, *o = NULL; PyArrayObject *ret; - char* kwlist[] = {"a", "b", "out", NULL }; + static char* kwlist[] = {"a", "b", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O:matrixproduct", kwlist, &a, &v, &o)) { diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index e480628e710c..10f304fe7133 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -2711,7 +2711,7 @@ static PyObject * /* TODO: include type name in error message, which is not @name@ */ PyObject *obj = NULL; - char *kwnames[] = {"", NULL}; /* positional-only */ + static char *kwnames[] = {"", NULL}; /* positional-only */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O", kwnames, &obj)) { return NULL; } @@ -2799,7 +2799,7 @@ static PyObject * object_arrtype_new(PyTypeObject 
*NPY_UNUSED(type), PyObject *args, PyObject *kwds) { PyObject *obj = Py_None; - char *kwnames[] = {"", NULL}; /* positional-only */ + static char *kwnames[] = {"", NULL}; /* positional-only */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O:object_", kwnames, &obj)) { return NULL; } @@ -2825,7 +2825,7 @@ static PyObject * PyObject *obj = NULL, *meta_obj = NULL; Py@Name@ScalarObject *ret; - char *kwnames[] = {"", "", NULL}; /* positional-only */ + static char *kwnames[] = {"", "", NULL}; /* positional-only */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", kwnames, &obj, &meta_obj)) { return NULL; } @@ -2884,7 +2884,7 @@ bool_arrtype_new(PyTypeObject *NPY_UNUSED(type), PyObject *args, PyObject *kwds) PyObject *obj = NULL; PyArrayObject *arr; - char *kwnames[] = {"", NULL}; /* positional-only */ + static char *kwnames[] = {"", NULL}; /* positional-only */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O:bool_", kwnames, &obj)) { return NULL; } @@ -2995,7 +2995,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds) PyObject *obj, *arr; PyObject *new = NULL; - char *kwnames[] = {"", NULL}; /* positional-only */ + static char *kwnames[] = {"", NULL}; /* positional-only */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:void", kwnames, &obj)) { return NULL; } From a968a6b55713c5ec956a060169ae4317ca2c3110 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 14 Jan 2021 09:48:32 +0200 Subject: [PATCH 0328/1270] BLD: try a pypy37 windows 64-bit build --- azure-pipelines.yml | 5 +++++ azure-steps-windows.yml | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 5f7afdaaff36..4979452e89af 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -206,6 +206,11 @@ stages: PYTHON_ARCH: 'x64' TEST_MODE: full BITS: 64 + PyPy37-64bit-full: + PYTHON_VERSION: 'PyPy3.7' + PYTHON_ARCH: 'x64' + TEST_MODE: full + BITS: 64 Python38-32bit-fast: PYTHON_VERSION: '3.8' PYTHON_ARCH: 'x86' diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml index 28762f5d9528..6a69db7539b3 100644 --- a/azure-steps-windows.yml +++ b/azure-steps-windows.yml @@ -6,7 +6,7 @@ steps: architecture: $(PYTHON_ARCH) condition: not(contains(variables['PYTHON_VERSION'], 'PyPy')) - powershell: | - $url = "http://buildbot.pypy.org/nightly/py3.6/pypy-c-jit-latest-win32.zip" + $url = "http://buildbot.pypy.org/nightly/py3.7/pypy-c-jit-latest-win64.zip" $output = "pypy.zip" $wc = New-Object System.Net.WebClient $wc.DownloadFile($url, $output) From 2caecfb70024cddafb13ef2c113f9dc1e3e0acc5 Mon Sep 17 00:00:00 2001 From: Weh Andreas Date: Thu, 14 Jan 2021 11:38:42 +0100 Subject: [PATCH 0329/1270] Use sinus based formula for `chebpts1` --- numpy/polynomial/chebyshev.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 1149cdffa22c..4d0a4f483bbf 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -1952,8 +1952,8 @@ def chebpts1(npts): if _npts < 1: raise ValueError("npts must be >= 1") - x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts) - return np.cos(x) + x = 0.5 * np.pi / _npts * np.arange(-_npts+1, _npts+1, 2) + return np.sin(x) def chebpts2(npts): From 6d596fdf6dd6f7c9b93207f9e704c22f03438781 Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 15 Jan 2021 10:08:37 +0200 Subject: [PATCH 0330/1270] BLD: run PyPy tests in fast mode --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/azure-pipelines.yml b/azure-pipelines.yml index 4979452e89af..b56d596a594c 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -209,7 +209,7 @@ stages: PyPy37-64bit-full: PYTHON_VERSION: 'PyPy3.7' PYTHON_ARCH: 'x64' - TEST_MODE: full + TEST_MODE: fast BITS: 64 Python38-32bit-fast: PYTHON_VERSION: '3.8' From a891c07d040067d42e35b25c558f0969cca2715c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 15 Jan 2021 18:22:57 +0100 Subject: [PATCH 0331/1270] TST: Clear the mypy cache before running any typing tests --- numpy/typing/tests/test_typing.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 361688c5d194..18520a7571c4 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -2,6 +2,7 @@ import itertools import os import re +import shutil from collections import defaultdict from typing import Optional, IO, Dict, List @@ -25,6 +26,15 @@ CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache") +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.fixture(scope="session", autouse=True) +def clear_cache() -> None: + """Clears the mypy cache before running any of the typing tests.""" + if os.path.isdir(CACHE_DIR): + shutil.rmtree(CACHE_DIR) + + def get_test_cases(directory): for root, _, files in os.walk(directory): for fname in files: From bad3b6f78a39aa89e0bbab17fbb92ce1c6d9ca96 Mon Sep 17 00:00:00 2001 From: Illviljan <14371165+Illviljan@users.noreply.github.com> Date: Fri, 15 Jan 2021 19:31:08 +0100 Subject: [PATCH 0332/1270] use same tri format --- numpy/lib/twodim_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 8e008ba716f5..05e92b59095e 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -894,7 +894,7 @@ def tril_indices(n, k=0, m=None): [-10, -10, -10, -10]]) """ - tri = np.tri(n, m=m, k=k, dtype=bool) + tri = np.tri(n, m, k=k, dtype=bool) return tuple(np.broadcast_to(inds, tri.shape)[tri] for inds in np.indices(tri.shape, sparse=True)) From b6313fdaa4d129e4b58684b0bede9696edde92d5 Mon Sep 17 00:00:00 2001 From: Illviljan <14371165+Illviljan@users.noreply.github.com> Date: Fri, 15 Jan 2021 20:18:47 +0100 Subject: [PATCH 0333/1270] import indices, broadcast_to --- numpy/lib/twodim_base.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 05e92b59095e..960797b68a64 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -6,11 +6,12 @@ from numpy.core.numeric import ( asanyarray, arange, zeros, greater_equal, multiply, ones, asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal, - nonzero + nonzero, indices ) from numpy.core.overrides import set_array_function_like_doc, set_module from numpy.core import overrides from numpy.core import iinfo +from numpy.lib.stride_tricks import broadcast_to __all__ = [ @@ -894,10 +895,10 @@ def tril_indices(n, k=0, m=None): [-10, -10, -10, -10]]) """ - tri = np.tri(n, m, k=k, dtype=bool) + tri_ = tri(n, m, k=k, dtype=bool) - return tuple(np.broadcast_to(inds, tri.shape)[tri] - for inds in np.indices(tri.shape, sparse=True)) + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) def _trilu_indices_form_dispatcher(arr, k=None): @@ -1013,10 +1014,10 @@ def triu_indices(n, k=0, m=None): [ 12, 13, 14, -1]]) """ - tri = 
~np.tri(n, m, k=k - 1, dtype=bool) + tri_ = ~tri(n, m, k=k - 1, dtype=bool) - return tuple(np.broadcast_to(inds, tri.shape)[tri] - for inds in np.indices(tri.shape, sparse=True)) + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) @array_function_dispatch(_trilu_indices_form_dispatcher) From c6fc6884d3c9bc68872a7adaae2974143a910eac Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 19 Oct 2020 16:03:18 +0200 Subject: [PATCH 0334/1270] DEP: Formally deprecate the use of `typeDict` `typeDict` is a deprecated alias for `sctypeDict` and has been so for >= 14 years (https://github.com/numpy/numpy/commit/668950285c407593a368336ff2e737c5da84af7d) --- numpy/__init__.py | 4 ++++ numpy/__init__.pyi | 1 - numpy/core/numerictypes.py | 2 +- numpy/core/numerictypes.pyi | 2 +- tools/functions_missing_types.py | 1 + 5 files changed, 7 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index a242bb7dfaae..a0c9a44bc226 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -192,6 +192,10 @@ ) for n, n2 in [("long", "int"), ("unicode", "str")] }) + __deprecated_attrs__["typeDict"] = ( + getattr(core.numerictypes, "typeDict"), + "`np.typeDict` is a deprecated alias for `np.sctypeDict`." + ) from .core import round, abs, max, min # now that numpy modules are imported, can initialize limits diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 656048173c7d..a26b70af8d8f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -558,7 +558,6 @@ trim_zeros: Any triu: Any triu_indices: Any triu_indices_from: Any -typeDict: Any typecodes: Any typename: Any union1d: Any diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index e705dd3ea855..5a9c50ee3418 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -91,7 +91,7 @@ from numpy.core.overrides import set_module # we add more at the bottom -__all__ = ['sctypeDict', 'typeDict', 'sctypes', +__all__ = ['sctypeDict', 'sctypes', 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', 'issubdtype', 'datetime_data', 'datetime_as_string', diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi index 192015ff13b4..238495fd39e3 100644 --- a/numpy/core/numerictypes.pyi +++ b/numpy/core/numerictypes.pyi @@ -26,4 +26,4 @@ def find_common_type( ) -> dtype: ... 
# TODO: Add annotations for the following objects: -# typeDict, nbytes, cast, ScalarType & typecodes +# nbytes, cast, ScalarType & typecodes diff --git a/tools/functions_missing_types.py b/tools/functions_missing_types.py index c2fe156f0716..b283f5a35640 100755 --- a/tools/functions_missing_types.py +++ b/tools/functions_missing_types.py @@ -45,6 +45,7 @@ "safe_eval", "set_numeric_ops", "test", + "typeDict", # Builtins "bool", "complex", From a4260ab10fff84710c3ae4a67271f03f823ac75c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 19 Oct 2020 16:11:07 +0200 Subject: [PATCH 0335/1270] MAINT,DEP: Replace references to `typeDict` with `sctypeDict` --- benchmarks/benchmarks/common.py | 2 +- numpy/core/numerictypes.py | 2 +- numpy/core/records.py | 2 +- numpy/core/tests/test_multiarray.py | 10 +++++----- numpy/core/tests/test_regression.py | 4 ++-- numpy/core/tests/test_scalarmath.py | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index b95d09192e5e..0c40e85b0612 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -21,7 +21,7 @@ 'int64', 'float64', 'complex64', 'longfloat', 'complex128', ] -if 'complex256' in numpy.typeDict: +if 'complex256' in numpy.sctypeDict: TYPES1.append('complex256') diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 5a9c50ee3418..62cebc7757d5 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -5,7 +5,7 @@ Exported symbols include: Dictionary with all registered number types (including aliases): - typeDict + sctypeDict Type objects (not all will be available, depends on platform): see variable sctypes for which ones you have diff --git a/numpy/core/records.py b/numpy/core/records.py index 00d456658bc4..5cc82ca6ef04 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -70,7 +70,7 @@ # of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' # are equally allowed -numfmt = nt.typeDict +numfmt = nt.sctypeDict # taken from OrderedDict recipes in the Python documentation # https://docs.python.org/3.3/library/collections.html#ordereddict-examples-and-recipes diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index b30fcb812aa5..19b065b465aa 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -207,7 +207,7 @@ def test_warnonwrite(self): a[2] = 10 # only warn once assert_(len(w) == 1) - + @pytest.mark.parametrize(["flag", "flag_value", "writeable"], [("writeable", True, True), # Delete _warn_on_write after deprecation and simplify @@ -1418,11 +1418,11 @@ def testassign(arr, v): a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')]) a[['a', 'b']] = a[['b', 'a']] assert_equal(a[0].item(), (2,1)) - + def test_scalar_assignment(self): with assert_raises(ValueError): - arr = np.arange(25).reshape(5, 5) - arr.itemset(3) + arr = np.arange(25).reshape(5, 5) + arr.itemset(3) def test_structuredscalar_indexing(self): # test gh-7262 @@ -7214,7 +7214,7 @@ def test_roundtrip_half(self): self._check_roundtrip(x) def test_roundtrip_single_types(self): - for typ in np.typeDict.values(): + for typ in np.sctypeDict.values(): dtype = np.dtype(typ) if dtype.char in 'Mm': diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 831e48e8b529..3c7d6f668c19 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -1531,7 +1531,7 @@ def test_fromstring_crash(self): 
np.fromstring(b'aa, aa, 1.0', sep=',') def test_ticket_1539(self): - dtypes = [x for x in np.typeDict.values() + dtypes = [x for x in np.sctypeDict.values() if (issubclass(x, np.number) and not issubclass(x, np.timedelta64))] a = np.array([], np.bool_) # not x[0] because it is unordered @@ -2332,7 +2332,7 @@ def test_invalid_structured_dtypes(self): def test_correct_hash_dict(self): # gh-8887 - __hash__ would be None despite tp_hash being set - all_types = set(np.typeDict.values()) - {np.void} + all_types = set(np.sctypeDict.values()) - {np.void} for t in all_types: val = t() diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 0b615edfadcd..e097235fdc0c 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -404,13 +404,13 @@ def test_iinfo_long_values(self): assert_(res == tgt) for code in np.typecodes['AllInteger']: - res = np.typeDict[code](np.iinfo(code).max) + res = np.sctypeDict[code](np.iinfo(code).max) tgt = np.iinfo(code).max assert_(res == tgt) def test_int_raise_behaviour(self): def overflow_error_func(dtype): - np.typeDict[dtype](np.iinfo(dtype).max + 1) + np.sctypeDict[dtype](np.iinfo(dtype).max + 1) for code in 'lLqQ': assert_raises(OverflowError, overflow_error_func, code) From 8f94cc6aecaa5aab1a42be99008a6db76d7eee86 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 19 Oct 2020 16:19:47 +0200 Subject: [PATCH 0336/1270] DEP: Add a deprecation note --- numpy/core/numerictypes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 62cebc7757d5..7e970820882c 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -542,6 +542,7 @@ def sctype2char(sctype): 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} # backwards compatibility --- deprecated name +# Formal deprecation: Numpy 1.20.0, 2020-10-19 typeDict = sctypeDict # b -> boolean From 5c9bd6e13ac87d961df82197bb82746d5e279f52 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 19 Oct 2020 16:21:14 +0200 Subject: [PATCH 0337/1270] TST: Add a deprecation test --- numpy/core/tests/test_deprecations.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 5498e1cf996f..cec837d0898b 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -698,6 +698,9 @@ def test_type_aliases(self): self.assert_deprecated(lambda: np.long) self.assert_deprecated(lambda: np.unicode) + # from np.core.numerictypes + self.assert_deprecated(lambda: np.typeDict) + class TestMatrixInOuter(_DeprecationTestCase): # 2020-05-13 NumPy 1.20.0 From 8238ceaf4cb92cdfcdfe95a0e6dc35a650ebeba9 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 19 Oct 2020 16:44:00 +0200 Subject: [PATCH 0338/1270] DEP: Added a release note --- doc/release/upcoming_changes/17586.deprecation.rst | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 doc/release/upcoming_changes/17586.deprecation.rst diff --git a/doc/release/upcoming_changes/17586.deprecation.rst b/doc/release/upcoming_changes/17586.deprecation.rst new file mode 100644 index 000000000000..845615315447 --- /dev/null +++ b/doc/release/upcoming_changes/17586.deprecation.rst @@ -0,0 +1,7 @@ +``np.typeDict`` has been formally deprecated +-------------------------------------------- +``np.typeDict`` is a deprecated alias for ``np.sctypeDict`` and +has been so for over 14 years (6689502_). 
+A deprecation warning will now be issued whenever getting ``np.typeDict``. + +.. _6689502: https://github.com/numpy/numpy/commit/668950285c407593a368336ff2e737c5da84af7d From edcb98e1c641141480159d84abd1c7831395766d Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 19 Oct 2020 16:46:20 +0200 Subject: [PATCH 0339/1270] MAINT: Remove a redundant use of `sctypeDict` See https://github.com/numpy/numpy/pull/17586#discussion_r507799149 Co-Authored-By: Eric Wieser <425260+eric-wieser@users.noreply.github.com> --- numpy/core/tests/test_scalarmath.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index e097235fdc0c..3e8cad5e2ece 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -410,9 +410,9 @@ def test_iinfo_long_values(self): def test_int_raise_behaviour(self): def overflow_error_func(dtype): - np.sctypeDict[dtype](np.iinfo(dtype).max + 1) + dtype(np.iinfo(dtype).max + 1) - for code in 'lLqQ': + for code in [np.int_, np.uint, np.longlong, np.ulonglong]: assert_raises(OverflowError, overflow_error_func, code) def test_int_from_infinite_longdouble(self): From 74df99926a2d561cd756d6df87f27af264213933 Mon Sep 17 00:00:00 2001 From: Bas van Beek <43369155+BvB93@users.noreply.github.com> Date: Mon, 19 Oct 2020 17:00:18 +0200 Subject: [PATCH 0340/1270] Apply suggestions from code review * Add a deprecation comment to `numpy/__init__.py` * Replace a `getattr()` operation with `core.numerictypes.typeDict` * Remove a redundant call to `np.sctypeDict` Co-authored-by: Eric Wieser --- numpy/__init__.py | 3 ++- numpy/core/numerictypes.py | 2 +- numpy/core/tests/test_scalarmath.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index a0c9a44bc226..015fb52ab752 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -192,8 +192,9 @@ ) for n, n2 in [("long", "int"), ("unicode", "str")] }) + # Numpy 1.20.0, 2020-10-19 __deprecated_attrs__["typeDict"] = ( - getattr(core.numerictypes, "typeDict"), + core.numerictypes.typeDict, "`np.typeDict` is a deprecated alias for `np.sctypeDict`." 
) diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 7e970820882c..93af5c95d986 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -542,7 +542,7 @@ def sctype2char(sctype): 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} # backwards compatibility --- deprecated name -# Formal deprecation: Numpy 1.20.0, 2020-10-19 +# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py) typeDict = sctypeDict # b -> boolean diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 3e8cad5e2ece..f5b99dd86bb3 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -404,7 +404,7 @@ def test_iinfo_long_values(self): assert_(res == tgt) for code in np.typecodes['AllInteger']: - res = np.sctypeDict[code](np.iinfo(code).max) + res = np.dtype(code).type(np.iinfo(code).max) tgt = np.iinfo(code).max assert_(res == tgt) From c47e9621ebf76f8085ff5ec8b01c07921d14f6a7 Mon Sep 17 00:00:00 2001 From: David Carlier Date: Thu, 14 Jan 2021 20:25:49 +0000 Subject: [PATCH 0341/1270] ENH: cpu features detection implementation on FreeBSD ARM --- numpy/core/src/common/npy_cpu_features.c.src | 25 +++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src index 69bbc83a2e38..4f3a95c717a1 100644 --- a/numpy/core/src/common/npy_cpu_features.c.src +++ b/numpy/core/src/common/npy_cpu_features.c.src @@ -500,7 +500,7 @@ npy__cpu_init_features_arm8(void) npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = 1; } -#ifdef __linux__ +#if defined(__linux__) || defined(__FreeBSD__) /* * we aren't sure of what kind kernel or clib we deal with * so we play it safe @@ -509,10 +509,23 @@ npy__cpu_init_features_arm8(void) #include "npy_cpuinfo_parser.h" __attribute__((weak)) unsigned long getauxval(unsigned long); // linker should handle it +#ifdef __FreeBSD__ +__attribute__((weak)) int elf_aux_info(int, void *, int); // linker should handle it + +static unsigned long getauxval(unsigned long k) +{ + unsigned long val = 0ul; + if (elf_aux_info == 0 || elf_aux_info((int)k, (void *)&val, (int)sizeof(val)) != 0) { + return 0ul; + } + return val; +} +#endif static int npy__cpu_init_features_linux(void) { unsigned long hwcap = 0, hwcap2 = 0; + #ifdef __linux__ if (getauxval != 0) { hwcap = getauxval(NPY__HWCAP); #ifdef __arm__ @@ -539,7 +552,14 @@ npy__cpu_init_features_linux(void) close(fd); } } + #else + hwcap = getauxval(NPY__HWCAP); + #ifdef __arm__ + hwcap2 = getauxval(NPY__HWCAP2); + #endif + #endif if (hwcap == 0 && hwcap2 == 0) { + #ifdef __linux__ /* * try parsing with /proc/cpuinfo, if sandboxed * failback to compiler definitions @@ -547,6 +567,9 @@ npy__cpu_init_features_linux(void) if(!get_feature_from_proc_cpuinfo(&hwcap, &hwcap2)) { return 0; } + #else + return 0; + #endif } #ifdef __arm__ // Detect Arm8 (aarch32 state) From 0e3aaeb275a023c0f13df25ad55bb3b20acc1cd5 Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Sun, 17 Jan 2021 22:53:57 +0200 Subject: [PATCH 0342/1270] ENH: [f2py] Add external attribute support. 
Closes #17859 --- numpy/f2py/crackfortran.py | 4 ++++ numpy/f2py/tests/test_crackfortran.py | 29 +++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 16a9a6fab94e..d278457969a7 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1611,6 +1611,10 @@ def updatevars(typespec, selector, attrspec, entitydecl): edecl['charselector'] = copy.copy(charselect) edecl['typename'] = typename edecl['attrspec'] = copy.copy(attrspec) + if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']: + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(e) if m.group('after'): m1 = lenarraypattern.match(markouterparen(m.group('after'))) if m1: diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index 735804024c9e..827c71ae9965 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -86,3 +86,32 @@ def test_defaultPublic(self, tmp_path): assert 'public' not in mod['vars']['a']['attrspec'] assert 'private' not in mod['vars']['seta']['attrspec'] assert 'public' in mod['vars']['seta']['attrspec'] + +class TestExternal(util.F2PyTest): + # issue gh-17859: add external attribute support + code = """ + integer(8) function external_as_statement(fcn) + implicit none + external fcn + integer(8) :: fcn + external_as_statement = fcn(0) + end + + integer(8) function external_as_attribute(fcn) + implicit none + integer(8), external :: fcn + external_as_attribute = fcn(0) + end + """ + + def test_external_as_statement(self): + def incr(x): + return x + 123 + r = self.module.external_as_statement(incr) + assert r == 123 + + def test_external_as_attribute(self): + def incr(x): + return x + 123 + r = self.module.external_as_attribute(incr) + assert r == 123 From 8e2f7034438d6e1732ce63306a805e3fc3ec7ce7 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 18 Jan 2021 08:10:08 +0000 Subject: [PATCH 0343/1270] MAINT: Bump hypothesis from 6.0.0 to 6.0.2 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.0.0 to 6.0.2. 
- [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.0.0...hypothesis-python-6.0.2) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index b6aaa89951f2..52109a5d5fbd 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.0.0 +hypothesis==6.0.2 pytest==6.2.1 pytz==2020.5 pytest-cov==2.10.1 From d2e77689db58cb4aab91b5330a3336dd24930ade Mon Sep 17 00:00:00 2001 From: Touqir Sajed Date: Mon, 18 Jan 2021 17:59:04 +0600 Subject: [PATCH 0344/1270] Added support for SIMD operations for int types in numpy.count_nonzero function --- numpy/core/src/multiarray/item_selection.c | 287 ++++++++++++++++++++- 1 file changed, 278 insertions(+), 9 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 8e4b2ebe120e..2d1d6db83b04 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2131,18 +2131,22 @@ count_nonzero_bytes_384(const npy_uint64 * w) #if NPY_SIMD +/* + +*/ + /* Count the zero bytes between `*d` and `end`, updating `*d` to point to where to keep counting from. */ static NPY_INLINE NPY_GCC_OPT_3 npyv_u8 count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_count) { - const npyv_u8 vone = npyv_setall_u8(1); - const npyv_u8 vzero = npyv_zero_u8(); + const npyv_u8 vone = npyv_setall_u8(1); + const npyv_u8 vzero = npyv_zero_u8(); - npy_intp lane_max = 0; - npyv_u8 vsum8 = npyv_zero_u8(); + npy_intp lane_max = 0; + npyv_u8 vsum8 = npyv_zero_u8(); while (*d < end && lane_max <= max_count - 1) { // we count zeros because `cmpeq` cheaper than `cmpneq` for most archs - npyv_u8 vt = npyv_cvt_u8_b8(npyv_cmpeq_u8(npyv_load_u8(*d), vzero)); + npyv_u8 vt = npyv_cvt_u8_b8(npyv_cmpeq_u8(npyv_load_u8(*d), vzero)); vt = npyv_and_u8(vt, vone); vsum8 = npyv_add_u8(vsum8, vt); *d += npyv_nlanes_u8; @@ -2155,8 +2159,8 @@ static NPY_INLINE NPY_GCC_OPT_3 npyv_u16x2 count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end, npy_uint16 max_count) { npyv_u16x2 vsum16; - vsum16.val[0] = vsum16.val[1] = npyv_zero_u16(); - npy_intp lane_max = 0; + vsum16.val[0] = vsum16.val[1] = npyv_zero_u16(); // Setting a vector of 0s (16 maybe) + npy_intp lane_max = 0; // scalar 0 while (*d < end && lane_max <= max_count - NPY_MAX_UINT8) { npyv_u8 vsum8 = count_zero_bytes_u8(d, end, NPY_MAX_UINT8); npyv_u16x2 part = npyv_expand_u16_u8(vsum8); @@ -2202,7 +2206,252 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) return unrollx - zero_count; } +#define MAX(x, y) (((x) > (y)) ? (x) : (y)) +#define MIN(x, y) (((x) < (y)) ? 
(x) : (y)) + + +static NPY_INLINE NPY_GCC_OPT_3 npy_uintp +count_nonzero_int16_simd(npy_int16 *d, npy_uintp unrollx) +{ + npy_uintp zero_count = 0; + const npy_uintp innerloop_jump = NPY_MAX_UINT16; + const npy_int16 *end = d + unrollx; + + const npyv_u16 vone = npyv_setall_u16(1); + const npyv_u16 vzero = npyv_zero_u16(); + + npy_int16 *target = d; + npy_uint16 sums[npyv_nlanes_u16]; + + while (dtype_num == NPY_BOOL) { + + +#if NPY_SIMD + if (dtype->type_num == NPY_INT16 || dtype->type_num == NPY_UINT16) { + return count_nonzero_int16(PyArray_NDIM(self), (npy_int16 *) PyArray_DATA(self), + PyArray_DIMS(self), PyArray_STRIDES(self)); + } + + if (dtype->type_num == NPY_INT32 || dtype->type_num == NPY_UINT32) { + return count_nonzero_int32(PyArray_NDIM(self), (npy_int32 *) PyArray_DATA(self), + PyArray_DIMS(self), PyArray_STRIDES(self)); + } + + if (dtype->type_num == NPY_INT64 || dtype->type_num == NPY_UINT64) { + return count_nonzero_int64(PyArray_NDIM(self), (npy_int64 *) PyArray_DATA(self), + PyArray_DIMS(self), PyArray_STRIDES(self)); + } + +#endif + + if (dtype->type_num == NPY_BOOL || dtype->type_num == NPY_INT8 || dtype->type_num == NPY_UINT8) { return count_boolean_trues(PyArray_NDIM(self), PyArray_DATA(self), PyArray_DIMS(self), PyArray_STRIDES(self)); } + nonzero = PyArray_DESCR(self)->f->nonzero; /* If it's a trivial one-dimensional loop, don't use an iterator */ From c716a120cba2c8c2b972433604b085d87122823e Mon Sep 17 00:00:00 2001 From: Touqir Sajed Date: Mon, 18 Jan 2021 18:01:55 +0600 Subject: [PATCH 0345/1270] Added tests for i1,i2,i4,i8 types for numpy.count_nonzero function --- numpy/core/tests/test_numeric.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 280874d21695..6de9e3764cd9 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -1257,20 +1257,30 @@ def test_nonzero_onedim(self): assert_equal(np.count_nonzero(x), 4) assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) - x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], - dtype=[('a', 'i4'), ('b', 'i2')]) + # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], + # dtype=[('a', 'i4'), ('b', 'i2')]) + x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) assert_equal(np.count_nonzero(x['a']), 3) assert_equal(np.count_nonzero(x['b']), 4) + assert_equal(np.count_nonzero(x['c']), 3) + assert_equal(np.count_nonzero(x['d']), 4) assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) def test_nonzero_twodim(self): x = np.array([[0, 1, 0], [2, 0, 3]]) - assert_equal(np.count_nonzero(x), 3) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) x = np.eye(3) - assert_equal(np.count_nonzero(x), 3) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) x = np.array([[(0, 1), (0, 0), (1, 11)], From 26a2f584dc79cb0ef02ca41f4785152b8041c71b Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Mon, 18 Jan 2021 15:19:46 +0200 Subject: [PATCH 0346/1270] 
BUG: Fix detecting Fortran 90+ source format. --- numpy/distutils/fcompiler/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py index 4730a5a0999a..812461538dc9 100644 --- a/numpy/distutils/fcompiler/__init__.py +++ b/numpy/distutils/fcompiler/__init__.py @@ -976,7 +976,7 @@ def is_free_format(file): with open(file, encoding='latin1') as f: line = f.readline() n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line): + if _has_f_header(line) or _has_fix_header(line): n = 0 elif _has_f90_header(line): n = 0 From a2f6fbb5b068a4e589bd95b8774d3624062c0995 Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Mon, 18 Jan 2021 15:22:54 +0200 Subject: [PATCH 0347/1270] BUG: subroutines using assumed shape arrays must be treated as F90 codes. Don't split inline comments. --- numpy/f2py/rules.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index f1490527eafd..12ed1f1ca039 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1186,7 +1186,7 @@ def buildmodule(m, um): for nb in nb_list: api, wrap = buildapi(nb) if wrap: - if ismoduleroutine(nb): + if ismoduleroutine(nb) or issubroutine_wrap(nb): funcwrappers2.append(wrap) else: funcwrappers.append(wrap) @@ -1310,7 +1310,11 @@ def buildmodule(m, um): '! It contains Fortran 90 wrappers to fortran functions.\n') lines = [] for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): - if len(l) > 72 and l[0] == ' ': + i = l.find('!') + if i >= 0 and i < 72: + # don't split comment lines + lines.append(l + '\n') + elif len(l) > 72 and l[0] == ' ': lines.append(l[:72] + '&\n &') l = l[72:] while len(l) > 66: From 36439a7d0e0896703823c68d116b9900e49df998 Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Mon, 18 Jan 2021 15:24:24 +0200 Subject: [PATCH 0348/1270] BUG: f2py specific __user__ modules should not be used in Fortran sources to be compiled. Closes #17797. 
--- numpy/f2py/func2subr.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index e9976f43cf41..21d4c009cc26 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -130,7 +130,7 @@ def add(line, ret=ret): l = l + ', ' + fortranname if need_interface: for line in rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): + if line.lstrip().startswith('use ') and '__user__' not in line: add(line) args = args[1:] @@ -222,7 +222,7 @@ def add(line, ret=ret): if need_interface: for line in rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): + if line.lstrip().startswith('use ') and '__user__' not in line: add(line) dumped_args = [] @@ -247,7 +247,10 @@ def add(line, ret=ret): pass else: add('interface') - add(rout['saved_interface'].lstrip()) + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' in line: + continue + add(line) add('end interface') sargs = ', '.join([a for a in args if a not in extra_args]) From 92fd7edc7033ee49669f63675c12cbbefe53a8bc Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 18 Jan 2021 16:03:27 +0100 Subject: [PATCH 0349/1270] MAINT: Renamed `Like` to `Like_co` --- numpy/__init__.pyi | 136 +++++++++++++++++------------------ numpy/core/arrayprint.pyi | 10 +-- numpy/core/fromnumeric.pyi | 32 ++++----- numpy/core/function_base.pyi | 4 +- numpy/typing/__init__.py | 20 +++--- numpy/typing/_scalars.py | 28 ++++---- 6 files changed, 115 insertions(+), 115 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 656048173c7d..39a9f55b9171 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -11,13 +11,13 @@ from numpy.typing import ( DTypeLike, _Shape, _ShapeLike, - _CharLike, - _BoolLike, - _IntLike, - _FloatLike, - _ComplexLike, - _TD64Like, - _NumberLike, + _CharLike_co, + _BoolLike_co, + _IntLike_co, + _FloatLike_co, + _ComplexLike_co, + _FloatLike_co, + _NumberLike_co, _SupportsDType, _VoidDTypeLike, NBitBase, @@ -858,11 +858,11 @@ _PartitionKind = Literal["introselect"] _SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"] _SortSide = Literal["left", "right"] -_ArrayLikeBool = Union[_BoolLike, Sequence[_BoolLike], ndarray] +_ArrayLikeBool = Union[_BoolLike_co, Sequence[_BoolLike_co], ndarray] _ArrayLikeIntOrBool = Union[ - _IntLike, + _IntLike_co, ndarray, - Sequence[_IntLike], + Sequence[_IntLike_co], Sequence[Sequence[Any]], # TODO: wait for support for recursive types ] @@ -1073,7 +1073,7 @@ class _ArrayOrScalarCommon: axis: None = ..., out: None = ..., keepdims: Literal[False] = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> number: ... @overload @@ -1082,7 +1082,7 @@ class _ArrayOrScalarCommon: axis: Optional[_ShapeLike] = ..., out: None = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> Union[number, ndarray]: ... @overload @@ -1091,7 +1091,7 @@ class _ArrayOrScalarCommon: axis: Optional[_ShapeLike] = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _NdArraySubClass: ... @overload @@ -1124,7 +1124,7 @@ class _ArrayOrScalarCommon: axis: None = ..., out: None = ..., keepdims: Literal[False] = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> number: ... 
@overload @@ -1133,7 +1133,7 @@ class _ArrayOrScalarCommon: axis: Optional[_ShapeLike] = ..., out: None = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> Union[number, ndarray]: ... @overload @@ -1142,7 +1142,7 @@ class _ArrayOrScalarCommon: axis: Optional[_ShapeLike] = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _NdArraySubClass: ... def newbyteorder(self: _ArraySelf, __new_order: _ByteOrder = ...) -> _ArraySelf: ... @@ -1153,7 +1153,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike = ..., out: None = ..., keepdims: Literal[False] = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> number: ... @overload @@ -1163,7 +1163,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> Union[number, ndarray]: ... @overload @@ -1173,7 +1173,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _NdArraySubClass: ... @overload @@ -1234,7 +1234,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike = ..., out: None = ..., keepdims: Literal[False] = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> number: ... @overload @@ -1244,7 +1244,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> Union[number, ndarray]: ... @overload @@ -1254,13 +1254,13 @@ class _ArrayOrScalarCommon: dtype: DTypeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _NdArraySubClass: ... @overload def take( self, - indices: _IntLike, + indices: _IntLike_co, axis: Optional[int] = ..., out: None = ..., mode: _ModeKind = ..., @@ -1577,10 +1577,10 @@ class number(generic, Generic[_NBit_co]): # type: ignore __rpow__: _NumberOp __truediv__: _NumberOp __rtruediv__: _NumberOp - __lt__: _ComparisonOp[_NumberLike] - __le__: _ComparisonOp[_NumberLike] - __gt__: _ComparisonOp[_NumberLike] - __ge__: _ComparisonOp[_NumberLike] + __lt__: _ComparisonOp[_NumberLike_co] + __le__: _ComparisonOp[_NumberLike_co] + __gt__: _ComparisonOp[_NumberLike_co] + __ge__: _ComparisonOp[_NumberLike_co] class bool_(generic): def __init__(self, __value: object = ...) -> None: ... @@ -1619,10 +1619,10 @@ class bool_(generic): __rmod__: _BoolMod __divmod__: _BoolDivMod __rdivmod__: _BoolDivMod - __lt__: _ComparisonOp[_NumberLike] - __le__: _ComparisonOp[_NumberLike] - __gt__: _ComparisonOp[_NumberLike] - __ge__: _ComparisonOp[_NumberLike] + __lt__: _ComparisonOp[_NumberLike_co] + __le__: _ComparisonOp[_NumberLike_co] + __gt__: _ComparisonOp[_NumberLike_co] + __ge__: _ComparisonOp[_NumberLike_co] class object_(generic): def __init__(self, __value: object = ...) -> None: ... 
@@ -1635,21 +1635,21 @@ class datetime64(generic): @overload def __init__( self, - __value: Union[None, datetime64, _CharLike, dt.datetime] = ..., - __format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ..., + __value: Union[None, datetime64, _CharLike_co, dt.datetime] = ..., + __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ..., ) -> None: ... @overload def __init__( self, __value: int, - __format: Union[_CharLike, Tuple[_CharLike, _IntLike]] + __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] ) -> None: ... - def __add__(self, other: _TD64Like) -> datetime64: ... - def __radd__(self, other: _TD64Like) -> datetime64: ... + def __add__(self, other: _FloatLike_co) -> datetime64: ... + def __radd__(self, other: _FloatLike_co) -> datetime64: ... @overload def __sub__(self, other: datetime64) -> timedelta64: ... @overload - def __sub__(self, other: _TD64Like) -> datetime64: ... + def __sub__(self, other: _FloatLike_co) -> datetime64: ... def __rsub__(self, other: datetime64) -> timedelta64: ... __lt__: _ComparisonOp[datetime64] __le__: _ComparisonOp[datetime64] @@ -1658,13 +1658,13 @@ class datetime64(generic): # Support for `__index__` was added in python 3.8 (bpo-20092) if sys.version_info >= (3, 8): - _IntValue = Union[SupportsInt, _CharLike, SupportsIndex] - _FloatValue = Union[None, _CharLike, SupportsFloat, SupportsIndex] - _ComplexValue = Union[None, _CharLike, SupportsFloat, SupportsComplex, SupportsIndex] + _IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex] + _FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex] + _ComplexValue = Union[None, _CharLike_co, SupportsFloat, SupportsComplex, SupportsIndex] else: - _IntValue = Union[SupportsInt, _CharLike] - _FloatValue = Union[None, _CharLike, SupportsFloat] - _ComplexValue = Union[None, _CharLike, SupportsFloat, SupportsComplex] + _IntValue = Union[SupportsInt, _CharLike_co] + _FloatValue = Union[None, _CharLike_co, SupportsFloat] + _ComplexValue = Union[None, _CharLike_co, SupportsFloat, SupportsComplex] class integer(number[_NBit_co]): # type: ignore # NOTE: `__index__` is technically defined in the bottom-most @@ -1672,20 +1672,20 @@ class integer(number[_NBit_co]): # type: ignore def __index__(self) -> int: ... __truediv__: _IntTrueDiv[_NBit_co] __rtruediv__: _IntTrueDiv[_NBit_co] - def __mod__(self, value: _IntLike) -> integer: ... - def __rmod__(self, value: _IntLike) -> integer: ... + def __mod__(self, value: _IntLike_co) -> integer: ... + def __rmod__(self, value: _IntLike_co) -> integer: ... def __invert__(self: _IntType) -> _IntType: ... # Ensure that objects annotated as `integer` support bit-wise operations - def __lshift__(self, other: _IntLike) -> integer: ... - def __rlshift__(self, other: _IntLike) -> integer: ... - def __rshift__(self, other: _IntLike) -> integer: ... - def __rrshift__(self, other: _IntLike) -> integer: ... - def __and__(self, other: _IntLike) -> integer: ... - def __rand__(self, other: _IntLike) -> integer: ... - def __or__(self, other: _IntLike) -> integer: ... - def __ror__(self, other: _IntLike) -> integer: ... - def __xor__(self, other: _IntLike) -> integer: ... - def __rxor__(self, other: _IntLike) -> integer: ... + def __lshift__(self, other: _IntLike_co) -> integer: ... + def __rlshift__(self, other: _IntLike_co) -> integer: ... + def __rshift__(self, other: _IntLike_co) -> integer: ... + def __rrshift__(self, other: _IntLike_co) -> integer: ... + def __and__(self, other: _IntLike_co) -> integer: ... 
+ def __rand__(self, other: _IntLike_co) -> integer: ... + def __or__(self, other: _IntLike_co) -> integer: ... + def __ror__(self, other: _IntLike_co) -> integer: ... + def __xor__(self, other: _IntLike_co) -> integer: ... + def __rxor__(self, other: _IntLike_co) -> integer: ... class signedinteger(integer[_NBit_co]): def __init__(self, __value: _IntValue = ...) -> None: ... @@ -1730,8 +1730,8 @@ longlong = signedinteger[_NBitLongLong] class timedelta64(generic): def __init__( self, - __value: Union[None, int, _CharLike, dt.timedelta, timedelta64] = ..., - __format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ..., + __value: Union[None, int, _CharLike_co, dt.timedelta, timedelta64] = ..., + __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ..., ) -> None: ... def __int__(self) -> int: ... def __float__(self) -> float: ... @@ -1739,12 +1739,12 @@ class timedelta64(generic): def __neg__(self: _ArraySelf) -> _ArraySelf: ... def __pos__(self: _ArraySelf) -> _ArraySelf: ... def __abs__(self: _ArraySelf) -> _ArraySelf: ... - def __add__(self, other: _TD64Like) -> timedelta64: ... - def __radd__(self, other: _TD64Like) -> timedelta64: ... - def __sub__(self, other: _TD64Like) -> timedelta64: ... - def __rsub__(self, other: _TD64Like) -> timedelta64: ... - def __mul__(self, other: _FloatLike) -> timedelta64: ... - def __rmul__(self, other: _FloatLike) -> timedelta64: ... + def __add__(self, other: _FloatLike_co) -> timedelta64: ... + def __radd__(self, other: _FloatLike_co) -> timedelta64: ... + def __sub__(self, other: _FloatLike_co) -> timedelta64: ... + def __rsub__(self, other: _FloatLike_co) -> timedelta64: ... + def __mul__(self, other: _FloatLike_co) -> timedelta64: ... + def __rmul__(self, other: _FloatLike_co) -> timedelta64: ... __truediv__: _TD64Div[float64] __floordiv__: _TD64Div[int64] def __rtruediv__(self, other: timedelta64) -> float64: ... @@ -1753,10 +1753,10 @@ class timedelta64(generic): def __rmod__(self, other: timedelta64) -> timedelta64: ... def __divmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... def __rdivmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... - __lt__: _ComparisonOp[Union[timedelta64, _IntLike, _BoolLike]] - __le__: _ComparisonOp[Union[timedelta64, _IntLike, _BoolLike]] - __gt__: _ComparisonOp[Union[timedelta64, _IntLike, _BoolLike]] - __ge__: _ComparisonOp[Union[timedelta64, _IntLike, _BoolLike]] + __lt__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] + __le__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] + __gt__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] + __ge__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] class unsignedinteger(integer[_NBit_co]): # NOTE: `uint64 + signedinteger -> float64` @@ -1873,7 +1873,7 @@ longcomplex = complexfloating[_NBitLongDouble, _NBitLongDouble] class flexible(generic): ... # type: ignore class void(flexible): - def __init__(self, __value: Union[_IntLike, bytes]): ... + def __init__(self, __value: Union[_IntLike_co, bytes]): ... @property def real(self: _ArraySelf) -> _ArraySelf: ... 
@property diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi index 6aaae0320173..b2565b3451bd 100644 --- a/numpy/core/arrayprint.pyi +++ b/numpy/core/arrayprint.pyi @@ -21,7 +21,7 @@ from numpy import ( longdouble, clongdouble, ) -from numpy.typing import ArrayLike, _CharLike, _FloatLike +from numpy.typing import ArrayLike, _CharLike_co, _FloatLike_co if sys.version_info > (3, 8): from typing import Literal, TypedDict @@ -40,13 +40,13 @@ class _FormatDict(TypedDict, total=False): complexfloat: Callable[[complexfloating[Any, Any]], str] longcomplexfloat: Callable[[clongdouble], str] void: Callable[[void], str] - numpystr: Callable[[_CharLike], str] + numpystr: Callable[[_CharLike_co], str] object: Callable[[object], str] all: Callable[[object], str] int_kind: Callable[[integer[Any]], str] float_kind: Callable[[floating[Any]], str] complex_kind: Callable[[complexfloating[Any, Any]], str] - str_kind: Callable[[_CharLike], str] + str_kind: Callable[[_CharLike_co], str] class _FormatOptions(TypedDict): precision: int @@ -96,7 +96,7 @@ def array2string( legacy: Optional[Literal[False, "1.13"]] = ..., ) -> str: ... def format_float_scientific( - x: _FloatLike, + x: _FloatLike_co, precision: Optional[int] = ..., unique: bool = ..., trim: Literal["k", ".", "0", "-"] = ..., @@ -105,7 +105,7 @@ def format_float_scientific( exp_digits: Optional[int] = ..., ) -> str: ... def format_float_positional( - x: _FloatLike, + x: _FloatLike_co, precision: Optional[int] = ..., unique: bool = ..., fractional: bool = ..., diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi index 3b147e1d732f..87c85e26bf42 100644 --- a/numpy/core/fromnumeric.pyi +++ b/numpy/core/fromnumeric.pyi @@ -23,9 +23,9 @@ from numpy.typing import ( ArrayLike, _ShapeLike, _Shape, - _IntLike, - _BoolLike, - _NumberLike, + _IntLike_co, + _BoolLike_co, + _NumberLike_co, ) if sys.version_info >= (3, 8): @@ -98,7 +98,7 @@ def choose( ) -> _ScalarIntOrBool: ... @overload def choose( - a: Union[_IntLike, _BoolLike], choices: ArrayLike, out: Optional[ndarray] = ..., mode: _ModeKind = ... + a: Union[_IntLike_co, _BoolLike_co], choices: ArrayLike, out: Optional[ndarray] = ..., mode: _ModeKind = ... ) -> Union[integer, bool_]: ... @overload def choose( @@ -250,7 +250,7 @@ def sum( dtype: DTypeLike = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _Number: ... @overload @@ -260,7 +260,7 @@ def sum( dtype: DTypeLike = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> Union[number, ndarray]: ... @overload @@ -324,7 +324,7 @@ def amax( axis: Optional[_ShapeLike] = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _Number: ... @overload @@ -333,7 +333,7 @@ def amax( axis: None = ..., out: Optional[ndarray] = ..., keepdims: Literal[False] = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> number: ... @overload @@ -342,7 +342,7 @@ def amax( axis: Optional[_ShapeLike] = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> Union[number, ndarray]: ... 
@overload @@ -351,7 +351,7 @@ def amin( axis: Optional[_ShapeLike] = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _Number: ... @overload @@ -360,7 +360,7 @@ def amin( axis: None = ..., out: Optional[ndarray] = ..., keepdims: Literal[False] = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> number: ... @overload @@ -369,7 +369,7 @@ def amin( axis: Optional[_ShapeLike] = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> Union[number, ndarray]: ... @@ -387,7 +387,7 @@ def prod( dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _Number: ... @overload @@ -397,7 +397,7 @@ def prod( dtype: DTypeLike = ..., out: None = ..., keepdims: Literal[False] = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> number: ... @overload @@ -407,7 +407,7 @@ def prod( dtype: DTypeLike = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., - initial: _NumberLike = ..., + initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> Union[number, ndarray]: ... def cumprod( @@ -424,7 +424,7 @@ def around( ) -> _Number: ... @overload def around( - a: _NumberLike, decimals: int = ..., out: Optional[ndarray] = ... + a: _NumberLike_co, decimals: int = ..., out: Optional[ndarray] = ... ) -> number: ... @overload def around( diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi index 1490bed4aff3..d353543f6d47 100644 --- a/numpy/core/function_base.pyi +++ b/numpy/core/function_base.pyi @@ -2,7 +2,7 @@ import sys from typing import overload, Tuple, Union, Sequence, Any from numpy import ndarray, inexact -from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike +from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co if sys.version_info >= (3, 8): from typing import SupportsIndex, Literal @@ -15,7 +15,7 @@ else: # TODO: wait for support for recursive types _ArrayLikeNested = Sequence[Sequence[Any]] _ArrayLikeNumber = Union[ - _NumberLike, Sequence[_NumberLike], ndarray, _SupportsArray, _ArrayLikeNested + _NumberLike_co, Sequence[_NumberLike_co], ndarray, _SupportsArray, _ArrayLikeNested ] @overload def linspace( diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 86bba57be326..1f656ad668f7 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -283,16 +283,16 @@ class _8Bit(_16Bit): ... 
# type: ignore[misc] _ObjectCodes, ) from ._scalars import ( - _CharLike, - _BoolLike, - _UIntLike, - _IntLike, - _FloatLike, - _ComplexLike, - _TD64Like, - _NumberLike, - _ScalarLike, - _VoidLike, + _CharLike_co, + _BoolLike_co, + _UIntLike_co, + _IntLike_co, + _FloatLike_co, + _ComplexLike_co, + _TD64Like_co, + _NumberLike_co, + _ScalarLike_co, + _VoidLike_co, ) from ._shape import _Shape, _ShapeLike from ._dtype_like import _SupportsDType, _VoidDTypeLike, DTypeLike diff --git a/numpy/typing/_scalars.py b/numpy/typing/_scalars.py index 90b2eff7b464..516b996dc007 100644 --- a/numpy/typing/_scalars.py +++ b/numpy/typing/_scalars.py @@ -2,22 +2,22 @@ import numpy as np -# NOTE: `_StrLike` and `_BytesLike` are pointless, as `np.str_` and `np.bytes_` -# are already subclasses of their builtin counterpart +# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and +# `np.bytes_` are already subclasses of their builtin counterpart -_CharLike = Union[str, bytes] +_CharLike_co = Union[str, bytes] -# The 6 `Like` type-aliases below represent all scalars that can be +# The 6 `Like_co` type-aliases below represent all scalars that can be # coerced into `` (with the casting rule `same_kind`) -_BoolLike = Union[bool, np.bool_] -_UIntLike = Union[_BoolLike, np.unsignedinteger] -_IntLike = Union[_BoolLike, int, np.integer] -_FloatLike = Union[_IntLike, float, np.floating] -_ComplexLike = Union[_FloatLike, complex, np.complexfloating] -_TD64Like = Union[_IntLike, np.timedelta64] +_BoolLike_co = Union[bool, np.bool_] +_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger] +_IntLike_co = Union[_BoolLike_co, int, np.integer] +_FloatLike_co = Union[_IntLike_co, float, np.floating] +_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating] +_TD64Like_co = Union[_IntLike_co, np.timedelta64] -_NumberLike = Union[int, float, complex, np.number, np.bool_] -_ScalarLike = Union[ +_NumberLike_co = Union[int, float, complex, np.number, np.bool_] +_ScalarLike_co = Union[ int, float, complex, @@ -26,5 +26,5 @@ np.generic, ] -# `_VoidLike` is technically not a scalar, but it's close enough -_VoidLike = Union[Tuple[Any, ...], np.void] +# `_VoidLike_co` is technically not a scalar, but it's close enough +_VoidLike_co = Union[Tuple[Any, ...], np.void] From 5b94b02ea3ea5869f6fe85c6718b065323682c1c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 18 Jan 2021 16:03:53 +0100 Subject: [PATCH 0350/1270] MAINT: Renamed `_ArrayLike` to `_ArrayLike_co` --- numpy/typing/__init__.py | 22 +++++++++++----------- numpy/typing/_array_like.py | 24 ++++++++++++------------ 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 1f656ad668f7..97b87a682c99 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -301,17 +301,17 @@ class _8Bit(_16Bit): ... 
# type: ignore[misc] _ArrayLike, _NestedSequence, _SupportsArray, - _ArrayLikeBool, - _ArrayLikeUInt, - _ArrayLikeInt, - _ArrayLikeFloat, - _ArrayLikeComplex, - _ArrayLikeTD64, - _ArrayLikeDT64, - _ArrayLikeObject, - _ArrayLikeVoid, - _ArrayLikeStr, - _ArrayLikeBytes, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + _ArrayLikeObject_co, + _ArrayLikeVoid_co, + _ArrayLikeStr_co, + _ArrayLikeBytes_co, ) if __doc__ is not None: diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index d6473442c37b..35413393c33a 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -78,41 +78,41 @@ def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... ], ] -# `ArrayLike`: array-like objects that can be coerced into `X` +# `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` -_ArrayLikeBool = _ArrayLike[ +_ArrayLikeBool_co = _ArrayLike[ "dtype[bool_]", bool, ] -_ArrayLikeUInt = _ArrayLike[ +_ArrayLikeUInt_co = _ArrayLike[ "dtype[Union[bool_, unsignedinteger[Any]]]", bool, ] -_ArrayLikeInt = _ArrayLike[ +_ArrayLikeInt_co = _ArrayLike[ "dtype[Union[bool_, integer[Any]]]", Union[bool, int], ] -_ArrayLikeFloat = _ArrayLike[ +_ArrayLikeFloat_co = _ArrayLike[ "dtype[Union[bool_, integer[Any], floating[Any]]]", Union[bool, int, float], ] -_ArrayLikeComplex = _ArrayLike[ +_ArrayLikeComplex_co = _ArrayLike[ "dtype[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]", Union[bool, int, float, complex], ] -_ArrayLikeTD64 = _ArrayLike[ +_ArrayLikeTD64_co = _ArrayLike[ "dtype[Union[bool_, integer[Any], timedelta64]]", Union[bool, int], ] -_ArrayLikeDT64 = _NestedSequence[_SupportsArray["dtype[datetime64]"]] -_ArrayLikeObject = _NestedSequence[_SupportsArray["dtype[object_]"]] +_ArrayLikeDT64_co = _NestedSequence[_SupportsArray["dtype[datetime64]"]] +_ArrayLikeObject_co = _NestedSequence[_SupportsArray["dtype[object_]"]] -_ArrayLikeVoid = _NestedSequence[_SupportsArray["dtype[void]"]] -_ArrayLikeStr = _ArrayLike[ +_ArrayLikeVoid_co = _NestedSequence[_SupportsArray["dtype[void]"]] +_ArrayLikeStr_co = _ArrayLike[ "dtype[str_]", str, ] -_ArrayLikeBytes = _ArrayLike[ +_ArrayLikeBytes_co = _ArrayLike[ "dtype[bytes_]", bytes, ] From 7081c2eb2522ac22c4e26a5a6e8a35c4cf498d88 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 18 Jan 2021 16:05:10 +0100 Subject: [PATCH 0351/1270] STY: Cleaned up the `numpy.typing` imports --- numpy/__init__.pyi | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 39a9f55b9171..afb85e7b462b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -7,10 +7,19 @@ from contextlib import ContextDecorator from numpy.core._internal import _ctypes from numpy.typing import ( + # Arrays ArrayLike, + + # DTypes DTypeLike, + _SupportsDType, + _VoidDTypeLike, + + # Shapes _Shape, _ShapeLike, + + # Scalars _CharLike_co, _BoolLike_co, _IntLike_co, @@ -18,8 +27,8 @@ from numpy.typing import ( _ComplexLike_co, _FloatLike_co, _NumberLike_co, - _SupportsDType, - _VoidDTypeLike, + + # `number` precision NBitBase, _256Bit, _128Bit, @@ -39,8 +48,8 @@ from numpy.typing import ( _NBitSingle, _NBitDouble, _NBitLongDouble, -) -from numpy.typing import ( + + # Character codes _BoolCodes, _UInt8Codes, _UInt16Codes, From 2951e733d9ffa03276d6859721b57209234bea12 Mon Sep 17 00:00:00 2001 From: Illviljan 
<14371165+Illviljan@users.noreply.github.com> Date: Mon, 18 Jan 2021 16:48:38 +0100 Subject: [PATCH 0352/1270] Add triu_indices and tril_indices --- benchmarks/benchmarks/bench_core.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 1c028542db04..f302f262d840 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -93,6 +93,12 @@ def time_triu_l10x10(self): def time_tril_l10x10(self): np.tril(self.l10x10) + def time_triu_indices_500(self): + np.triu_indices(500) + + def time_tril_indices_500(self): + np.tril_indices(500) + class Temporaries(Benchmark): def setup(self): From 2a5647a8e82f5a604f9101a02e82bf287cba1bc8 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 18 Jan 2021 16:11:05 +0100 Subject: [PATCH 0353/1270] MAINT: Simplify a union `_BoolLike_co` is already a subtype of `_IntLike_co`, no nead the use an explicit union here --- numpy/__init__.pyi | 16 ++++++++-------- numpy/core/fromnumeric.pyi | 3 +-- numpy/typing/_callable.py | 24 ++++++++++++------------ 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index afb85e7b462b..be87a166e03f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -25,7 +25,7 @@ from numpy.typing import ( _IntLike_co, _FloatLike_co, _ComplexLike_co, - _FloatLike_co, + _TD64Like_co, _NumberLike_co, # `number` precision @@ -1653,12 +1653,12 @@ class datetime64(generic): __value: int, __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] ) -> None: ... - def __add__(self, other: _FloatLike_co) -> datetime64: ... - def __radd__(self, other: _FloatLike_co) -> datetime64: ... + def __add__(self, other: _TD64Like_co) -> datetime64: ... + def __radd__(self, other: _TD64Like_co) -> datetime64: ... @overload def __sub__(self, other: datetime64) -> timedelta64: ... @overload - def __sub__(self, other: _FloatLike_co) -> datetime64: ... + def __sub__(self, other: _TD64Like_co) -> datetime64: ... def __rsub__(self, other: datetime64) -> timedelta64: ... __lt__: _ComparisonOp[datetime64] __le__: _ComparisonOp[datetime64] @@ -1748,10 +1748,10 @@ class timedelta64(generic): def __neg__(self: _ArraySelf) -> _ArraySelf: ... def __pos__(self: _ArraySelf) -> _ArraySelf: ... def __abs__(self: _ArraySelf) -> _ArraySelf: ... - def __add__(self, other: _FloatLike_co) -> timedelta64: ... - def __radd__(self, other: _FloatLike_co) -> timedelta64: ... - def __sub__(self, other: _FloatLike_co) -> timedelta64: ... - def __rsub__(self, other: _FloatLike_co) -> timedelta64: ... + def __add__(self, other: _TD64Like_co) -> timedelta64: ... + def __radd__(self, other: _TD64Like_co) -> timedelta64: ... + def __sub__(self, other: _TD64Like_co) -> timedelta64: ... + def __rsub__(self, other: _TD64Like_co) -> timedelta64: ... def __mul__(self, other: _FloatLike_co) -> timedelta64: ... def __rmul__(self, other: _FloatLike_co) -> timedelta64: ... __truediv__: _TD64Div[float64] diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi index 87c85e26bf42..fc7f28a59e88 100644 --- a/numpy/core/fromnumeric.pyi +++ b/numpy/core/fromnumeric.pyi @@ -24,7 +24,6 @@ from numpy.typing import ( _ShapeLike, _Shape, _IntLike_co, - _BoolLike_co, _NumberLike_co, ) @@ -98,7 +97,7 @@ def choose( ) -> _ScalarIntOrBool: ... @overload def choose( - a: Union[_IntLike_co, _BoolLike_co], choices: ArrayLike, out: Optional[ndarray] = ..., mode: _ModeKind = ... 
+ a: _IntLike_co, choices: ArrayLike, out: Optional[ndarray] = ..., mode: _ModeKind = ... ) -> Union[integer, bool_]: ... @overload def choose( diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 8f464cc755ce..693b6dfed0a8 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -37,11 +37,11 @@ ) from ._nbit import _NBitInt from ._scalars import ( - _BoolLike, - _IntLike, - _FloatLike, - _ComplexLike, - _NumberLike, + _BoolLike_co, + _IntLike_co, + _FloatLike_co, + _ComplexLike_co, + _NumberLike_co, ) from . import NBitBase from ._array_like import ArrayLike @@ -72,7 +72,7 @@ class _BoolOp(Protocol[_GenericType_co]): @overload - def __call__(self, __other: _BoolLike) -> _GenericType_co: ... + def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ... @overload # platform dependent def __call__(self, __other: int) -> int_: ... @overload @@ -84,7 +84,7 @@ def __call__(self, __other: _NumberType) -> _NumberType: ... class _BoolBitOp(Protocol[_GenericType_co]): @overload - def __call__(self, __other: _BoolLike) -> _GenericType_co: ... + def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ... @overload # platform dependent def __call__(self, __other: int) -> int_: ... @overload @@ -105,7 +105,7 @@ def __call__(self, __other: _NumberType) -> _NumberType: ... class _BoolTrueDiv(Protocol): @overload - def __call__(self, __other: Union[float, _IntLike]) -> float64: ... + def __call__(self, __other: Union[float, _IntLike_co]) -> float64: ... @overload def __call__(self, __other: complex) -> complex128: ... @overload @@ -113,7 +113,7 @@ def __call__(self, __other: _NumberType) -> _NumberType: ... class _BoolMod(Protocol): @overload - def __call__(self, __other: _BoolLike) -> int8: ... + def __call__(self, __other: _BoolLike_co) -> int8: ... @overload # platform dependent def __call__(self, __other: int) -> int_: ... @overload @@ -125,7 +125,7 @@ def __call__(self, __other: _FloatType) -> _FloatType: ... class _BoolDivMod(Protocol): @overload - def __call__(self, __other: _BoolLike) -> _2Tuple[int8]: ... + def __call__(self, __other: _BoolLike_co) -> _2Tuple[int8]: ... @overload # platform dependent def __call__(self, __other: int) -> _2Tuple[int_]: ... @overload @@ -139,7 +139,7 @@ class _TD64Div(Protocol[_NumberType_co]): @overload def __call__(self, __other: timedelta64) -> _NumberType_co: ... @overload - def __call__(self, __other: _FloatLike) -> timedelta64: ... + def __call__(self, __other: _FloatLike_co) -> timedelta64: ... class _IntTrueDiv(Protocol[_NBit_co]): @overload @@ -314,7 +314,7 @@ def __call__( ) -> complexfloating[Union[_NBit_co, _NBit], Union[_NBit_co, _NBit]]: ... class _NumberOp(Protocol): - def __call__(self, __other: _NumberLike) -> number: ... + def __call__(self, __other: _NumberLike_co) -> number: ... class _ComparisonOp(Protocol[_T]): @overload From 1ef11c93a52e18ee2c45d4173ec8e685ceb53e9a Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Mon, 18 Jan 2021 20:23:18 +0200 Subject: [PATCH 0354/1270] Add test for gh17797. 
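
gh17797 concerns an F90 function that takes both an external callback and an
assumed-shape array; such routines have to go through the Fortran 90 wrapper
rather than the F77 one. As a rough Python-side sketch of the behaviour the new
test checks (the module handle below is hypothetical -- it stands for whatever
extension f2py builds from the F90 source added in test_callback.py):

    import numpy as np

    def incr(x):
        # callback handed to the Fortran routine
        return x + 123

    y = np.array([1, 2, 3], dtype=np.int64)
    # r = gh17797_module.gh17797(incr, y)   # hypothetical built-extension handle
    # assert r == 123 + 1 + 2 + 3           # f(0) plus sum(y)
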
--- numpy/f2py/auxfuncs.py | 5 +++++ numpy/f2py/crackfortran.py | 11 ++++++++--- numpy/f2py/rules.py | 13 ++++++++++--- numpy/f2py/tests/test_callback.py | 24 ++++++++++++++++++++++++ 4 files changed, 47 insertions(+), 6 deletions(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 80b150655eb0..5250fea84031 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -257,6 +257,7 @@ def ismodule(rout): def isfunction(rout): return 'block' in rout and 'function' == rout['block'] + def isfunction_wrap(rout): if isintent_c(rout): return 0 @@ -284,6 +285,10 @@ def hasassumedshape(rout): return False +def requiresf90wrapper(rout): + return ismoduleroutine(rout) or hasassumedshape(rout) + + def isroutine(rout): return isfunction(rout) or issubroutine(rout) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index d278457969a7..2aa8dc4204a2 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -3113,7 +3113,7 @@ def crack2fortrangen(block, tab='\n', as_interface=False): result = ' result (%s)' % block['result'] if block['result'] not in argsl: argsl.append(block['result']) - body = crack2fortrangen(block['body'], tab + tabchar) + body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) vars = vars2fortran( block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) mess = '' @@ -3231,8 +3231,13 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): show(vars) outmess('vars2fortran: No definition for argument "%s".\n' % a) continue - if a == block['name'] and not block['block'] == 'function': - continue + if a == block['name']: + if block['block'] == 'function': + if block.get('result'): + # skip declaring function if its result is already declared + continue + else: + continue if 'typespec' not in vars[a]: if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: if a in args: diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 12ed1f1ca039..05fba9c4fad2 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -73,7 +73,7 @@ issubroutine, issubroutine_wrap, isthreadsafe, isunsigned, isunsigned_char, isunsigned_chararray, isunsigned_long_long, isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, - l_and, l_not, l_or, outmess, replace, stripcomma, + l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper ) from . import capi_maps @@ -1184,9 +1184,12 @@ def buildmodule(m, um): nb1['args'] = a nb_list.append(nb1) for nb in nb_list: + # requiresf90wrapper must be called before buildapi as it + # rewrites assumed shape arrays as automatic arrays. 
+ isf90 = requiresf90wrapper(nb) api, wrap = buildapi(nb) if wrap: - if ismoduleroutine(nb) or issubroutine_wrap(nb): + if isf90: funcwrappers2.append(wrap) else: funcwrappers.append(wrap) @@ -1288,7 +1291,11 @@ def buildmodule(m, um): 'C It contains Fortran 77 wrappers to fortran functions.\n') lines = [] for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): - if l and l[0] == ' ': + i = l.find('!') + if i >= 0 and i < 66: + # don't split comment lines + lines.append(l + '\n') + elif l and l[0] == ' ': while len(l) >= 66: lines.append(l[:66] + '\n &') l = l[66:] diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 81650a8191e8..acf8c2392cb6 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -211,3 +211,27 @@ class TestF77CallbackPythonTLS(TestF77Callback): compiler-provided """ options = ["-DF2PY_USE_PYTHON_TLS"] + + +class TestF90Callback(util.F2PyTest): + + suffix = '.f90' + + code = """ +function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) +end function gh17797 + """ + + def test_gh17797(self): + + def incr(x): + return x + 123 + + y = np.array([1, 2, 3], dtype=np.int64) + r = self.module.gh17797(incr, y) + assert r == 123 + 1 + 2 + 3 From 624e9b42381b81442a59f51f8c0d57c787b3366e Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Mon, 18 Jan 2021 21:30:42 +0200 Subject: [PATCH 0355/1270] retrigger checks From 065f50706192d62fc00ff39660ea051539845f12 Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Tue, 19 Jan 2021 11:00:28 +0200 Subject: [PATCH 0356/1270] Apply reviewers comments. --- numpy/f2py/crackfortran.py | 10 +++++----- numpy/f2py/tests/test_callback.py | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 2aa8dc4204a2..1149633c0dfa 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -3232,11 +3232,11 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): outmess('vars2fortran: No definition for argument "%s".\n' % a) continue if a == block['name']: - if block['block'] == 'function': - if block.get('result'): - # skip declaring function if its result is already declared - continue - else: + if block['block'] != 'function' or block.get('result'): + # 1) skip declaring a variable that name matches with + # subroutine name + # 2) skip declaring function when its type is + # declared via `result` construction continue if 'typespec' not in vars[a]: if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index acf8c2392cb6..6a59b6398a08 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -218,13 +218,13 @@ class TestF90Callback(util.F2PyTest): suffix = '.f90' code = """ -function gh17797(f, y) result(r) - external f - integer(8) :: r, f - integer(8), dimension(:) :: y - r = f(0) - r = r + sum(y) -end function gh17797 + function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) + end function gh17797 """ def test_gh17797(self): From 15cf37d5394e69fc1847b1efa8d5253de4890cbe Mon Sep 17 00:00:00 2001 From: Touqir Sajed Date: Tue, 19 Jan 2021 16:19:11 +0600 Subject: [PATCH 0357/1270] Merged count_nonzero_int16/int32/int64 into count_nonzero_int and added benchmarks --- benchmarks/benchmarks/bench_core.py | 2 +- 
numpy/core/src/multiarray/item_selection.c | 206 +++++++-------------- 2 files changed, 67 insertions(+), 141 deletions(-) diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 1c028542db04..279c6f475920 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -136,7 +136,7 @@ class CountNonzero(Benchmark): params = [ [1, 2, 3], [100, 10000, 1000000], - [bool, int, str, object] + [bool, np.int8, np.int16, np.int32, np.int64, str, object] ] def setup(self, numaxes, size, dtype): diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 2d1d6db83b04..01438e27d63a 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2206,9 +2206,6 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) return unrollx - zero_count; } -#define MAX(x, y) (((x) > (y)) ? (x) : (y)) -#define MIN(x, y) (((x) < (y)) ? (x) : (y)) - static NPY_INLINE NPY_GCC_OPT_3 npy_uintp count_nonzero_int16_simd(npy_int16 *d, npy_uintp unrollx) @@ -2225,7 +2222,7 @@ count_nonzero_int16_simd(npy_int16 *d, npy_uintp unrollx) while (dtype_num == NPY_INT16 || dtype->type_num == NPY_UINT16) { - return count_nonzero_int16(PyArray_NDIM(self), (npy_int16 *) PyArray_DATA(self), - PyArray_DIMS(self), PyArray_STRIDES(self)); - } +// #if NPY_SIMD +// if (dtype->type_num == NPY_INT16 || dtype->type_num == NPY_UINT16) { +// return count_nonzero_int16(PyArray_NDIM(self), (npy_int16 *) PyArray_DATA(self), +// PyArray_DIMS(self), PyArray_STRIDES(self)); +// } - if (dtype->type_num == NPY_INT32 || dtype->type_num == NPY_UINT32) { - return count_nonzero_int32(PyArray_NDIM(self), (npy_int32 *) PyArray_DATA(self), - PyArray_DIMS(self), PyArray_STRIDES(self)); - } +// if (dtype->type_num == NPY_INT32 || dtype->type_num == NPY_UINT32) { +// return count_nonzero_int32(PyArray_NDIM(self), (npy_int32 *) PyArray_DATA(self), +// PyArray_DIMS(self), PyArray_STRIDES(self)); +// } - if (dtype->type_num == NPY_INT64 || dtype->type_num == NPY_UINT64) { - return count_nonzero_int64(PyArray_NDIM(self), (npy_int64 *) PyArray_DATA(self), - PyArray_DIMS(self), PyArray_STRIDES(self)); - } +// if (dtype->type_num == NPY_INT64 || dtype->type_num == NPY_UINT64) { +// return count_nonzero_int64(PyArray_NDIM(self), (npy_int64 *) PyArray_DATA(self), +// PyArray_DIMS(self), PyArray_STRIDES(self)); +// } -#endif +// #endif + + if (dtype->type_num >= NPY_INT16 && dtype->type_num <= NPY_UINT64) { + return count_nonzero_int(PyArray_NDIM(self), (void *) PyArray_DATA(self), + PyArray_DIMS(self), PyArray_STRIDES(self), dtype->type_num); + } if (dtype->type_num == NPY_BOOL || dtype->type_num == NPY_INT8 || dtype->type_num == NPY_UINT8) { return count_boolean_trues(PyArray_NDIM(self), PyArray_DATA(self), From 2b41cbf3e46e6d16e84f0fa800500346789dba6d Mon Sep 17 00:00:00 2001 From: Touqir Sajed Date: Tue, 19 Jan 2021 16:22:54 +0600 Subject: [PATCH 0358/1270] Removed commented out code from PyArray_CountNonzero --- numpy/core/src/multiarray/item_selection.c | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 01438e27d63a..373286d2305f 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2467,25 +2467,6 @@ PyArray_CountNonzero(PyArrayObject *self) /* Special low-overhead version specific to the boolean type */ dtype = PyArray_DESCR(self); - 
-// #if NPY_SIMD -// if (dtype->type_num == NPY_INT16 || dtype->type_num == NPY_UINT16) { -// return count_nonzero_int16(PyArray_NDIM(self), (npy_int16 *) PyArray_DATA(self), -// PyArray_DIMS(self), PyArray_STRIDES(self)); -// } - -// if (dtype->type_num == NPY_INT32 || dtype->type_num == NPY_UINT32) { -// return count_nonzero_int32(PyArray_NDIM(self), (npy_int32 *) PyArray_DATA(self), -// PyArray_DIMS(self), PyArray_STRIDES(self)); -// } - -// if (dtype->type_num == NPY_INT64 || dtype->type_num == NPY_UINT64) { -// return count_nonzero_int64(PyArray_NDIM(self), (npy_int64 *) PyArray_DATA(self), -// PyArray_DIMS(self), PyArray_STRIDES(self)); -// } - -// #endif - if (dtype->type_num >= NPY_INT16 && dtype->type_num <= NPY_UINT64) { return count_nonzero_int(PyArray_NDIM(self), (void *) PyArray_DATA(self), PyArray_DIMS(self), PyArray_STRIDES(self), dtype->type_num); From 5db1d5fb64ff35429205aec4b6927a4b2c6b552b Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Tue, 19 Jan 2021 13:16:25 +0200 Subject: [PATCH 0359/1270] Apply reviewer comments. --- numpy/f2py/rules.py | 6 ++---- numpy/f2py/tests/test_callback.py | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 05fba9c4fad2..4e1cf0c7daa0 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1291,8 +1291,7 @@ def buildmodule(m, um): 'C It contains Fortran 77 wrappers to fortran functions.\n') lines = [] for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): - i = l.find('!') - if i >= 0 and i < 66: + if 0 <= l.find('!') < 66: # don't split comment lines lines.append(l + '\n') elif l and l[0] == ' ': @@ -1317,8 +1316,7 @@ def buildmodule(m, um): '! It contains Fortran 90 wrappers to fortran functions.\n') lines = [] for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): - i = l.find('!') - if i >= 0 and i < 72: + if 0 <= l.find('!') < 72: # don't split comment lines lines.append(l + '\n') elif len(l) > 72 and l[0] == ' ': diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 6a59b6398a08..f847dd49fd2a 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -217,7 +217,7 @@ class TestF90Callback(util.F2PyTest): suffix = '.f90' - code = """ + code = textwrap.dedent(""" function gh17797(f, y) result(r) external f integer(8) :: r, f @@ -225,7 +225,7 @@ class TestF90Callback(util.F2PyTest): r = f(0) r = r + sum(y) end function gh17797 - """ + """) def test_gh17797(self): From e4402bd8558db43b22fc612216fd7935d83d1297 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Tue, 19 Jan 2021 20:27:27 +0800 Subject: [PATCH 0360/1270] Optimize the sub function two-operands by using SIMD. 
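
The two hand-rolled SSE paths for the stride-0 cases are folded into a single
`@name@_sum_of_products_muladd` helper built on the universal intrinsics
(`npyv_muladd_*`). As a rough NumPy-level sketch of what the helper computes per
call (array length and dtype below are illustrative only):

    import numpy as np

    scalar = 2.0                              # the stride-0 operand
    data = np.arange(8, dtype=np.float64)     # the contiguous operand
    data_out = np.ones(8, dtype=np.float64)   # the accumulator / output
    data_out += scalar * data                 # equivalent of *_sum_of_products_muladd

The vector path issues four `npyv_muladd` operations per unrolled iteration with a
partial-load tail loop, and falls back to the unrolled scalar loop when universal
intrinsics are not available for the type.
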
--- .../core/src/multiarray/einsum_sumprod.c.src | 320 ++++-------------- 1 file changed, 75 insertions(+), 245 deletions(-) diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index d1b76de4e437..333b8e188355 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -20,28 +20,6 @@ #include "simd/simd.h" #include "common.h" -#ifdef NPY_HAVE_SSE_INTRINSICS -#define EINSUM_USE_SSE1 1 -#else -#define EINSUM_USE_SSE1 0 -#endif - -#ifdef NPY_HAVE_SSE2_INTRINSICS -#define EINSUM_USE_SSE2 1 -#else -#define EINSUM_USE_SSE2 0 -#endif - -#if EINSUM_USE_SSE1 -#include -#endif - -#if EINSUM_USE_SSE2 -#include -#endif - -#define EINSUM_IS_SSE_ALIGNED(x) ((((npy_intp)x)&0xf) == 0) - // ARM/Neon don't have instructions for aligned memory access #ifdef NPY_HAVE_NEON #define EINSUM_IS_ALIGNED(x) 0 @@ -311,6 +289,77 @@ finish_after_unrolled_loop: #elif @nop@ == 2 && !@complex@ +// calculate the multiply and add operation such as dataout = data*scalar+dataout +static NPY_GCC_OPT_3 void +@name@_sum_of_products_muladd(@type@ *data, @type@ *data_out, @temptype@ scalar, npy_intp count) +{ +#if @NPYV_CHK@ // NPYV check for @type@ + /* Use aligned instructions if possible */ + const int is_aligned = EINSUM_IS_ALIGNED(data) && EINSUM_IS_ALIGNED(data_out); + const int vstep = npyv_nlanes_@sfx@; + const npyv_@sfx@ v_scalar = npyv_setall_@sfx@(scalar); + /**begin repeat2 + * #cond = if(is_aligned), else# + * #ld = loada, load# + * #st = storea, store# + */ + @cond@ { + const npy_intp vstepx4 = vstep * 4; + for (; count >= vstepx4; count -= vstepx4, data += vstepx4, data_out += vstepx4) { + /**begin repeat3 + * #i = 0, 1, 2, 3# + */ + npyv_@sfx@ b@i@ = npyv_@ld@_@sfx@(data + vstep * @i@); + npyv_@sfx@ c@i@ = npyv_@ld@_@sfx@(data_out + vstep * @i@); + /**end repeat3**/ + /**begin repeat3 + * #i = 0, 1, 2, 3# + */ + npyv_@sfx@ abc@i@ = npyv_muladd_@sfx@(v_scalar, b@i@, c@i@); + /**end repeat3**/ + /**begin repeat3 + * #i = 0, 1, 2, 3# + */ + npyv_@st@_@sfx@(data_out + vstep * @i@, abc@i@); + /**end repeat3**/ + } + } + /**end repeat2**/ + for (; count > 0; count -= vstep, data += vstep, data_out += vstep) { + npyv_@sfx@ a = npyv_load_tillz_@sfx@(data, count); + npyv_@sfx@ b = npyv_load_tillz_@sfx@(data_out, count); + npyv_store_till_@sfx@(data_out, count, npyv_muladd_@sfx@(a, v_scalar, b)); + } + npyv_cleanup(); +#else +#ifndef NPY_DISABLE_OPTIMIZATION + for (; count >= 4; count -= 4, data += 4, data_out += 4) { + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + const @type@ b@i@ = @from@(data[@i@]); + const @type@ c@i@ = @from@(data_out[@i@]); + /**end repeat2**/ + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + const @type@ abc@i@ = scalar * b@i@ + c@i@; + /**end repeat2**/ + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + data_out[@i@] = @to@(abc@i@); + /**end repeat2**/ + } +#endif // !NPY_DISABLE_OPTIMIZATION + for (; count > 0; --count, ++data, ++data_out) { + const @type@ b = @from@(*data); + const @type@ c = @from@(*data_out); + *data_out = @to@(scalar * b + c); + } +#endif // NPYV check for @type@ +} + static void @name@_sum_of_products_contig_two(int nop, char **dataptr, npy_intp const *NPY_UNUSED(strides), npy_intp count) @@ -403,242 +452,23 @@ static void @type@ *data1 = (@type@ *)dataptr[1]; @type@ *data_out = (@type@ *)dataptr[2]; -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, b, value0_sse; -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, b, value0_sse; -#endif - 
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outcontig_two (%d)\n", (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - data_out[@i@] = @to@(value0 * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ - case 0: - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - value0_sse = _mm_set_ps1(value0); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(value0_sse, _mm_load_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_load_ps(data_out+@i@)); - _mm_store_ps(data_out+@i@, b); -/**end repeat2**/ - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - if (count > 0) { - goto finish_after_unrolled_loop; - } - else { - return; - } - } -#elif EINSUM_USE_SSE2 && @float64@ - value0_sse = _mm_set1_pd(value0); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(value0_sse, _mm_load_pd(data1+@i@)); - b = _mm_add_pd(a, _mm_load_pd(data_out+@i@)); - _mm_store_pd(data_out+@i@, b); -/**end repeat2**/ - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - if (count > 0) { - goto finish_after_unrolled_loop; - } - else { - return; - } - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(value0_sse, _mm_loadu_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@)); - _mm_storeu_ps(data_out+@i@, b); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(value0_sse, _mm_loadu_pd(data1+@i@)); - b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@)); - _mm_storeu_pd(data_out+@i@, b); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - data_out[@i@] = @to@(value0 * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ -#endif - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - if (count > 0) { - goto finish_after_unrolled_loop; - } + @name@_sum_of_products_muladd(data1, data_out, value0, count); + } static void @name@_sum_of_products_contig_stride0_outcontig_two(int nop, char **dataptr, npy_intp const *NPY_UNUSED(strides), npy_intp count) { - @type@ *data0 = (@type@ *)dataptr[0]; @temptype@ value1 = @from@(*(@type@ *)dataptr[1]); + @type@ *data0 = (@type@ *)dataptr[0]; @type@ *data_out = (@type@ *)dataptr[2]; -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, b, value1_sse; -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, b, value1_sse; -#endif - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outcontig_two (%d)\n", (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - data_out[@i@] = @to@(@from@(data0[@i@])* - value1 + - @from@(data_out[@i@])); -/**end repeat2**/ - case 0: - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - value1_sse = _mm_set_ps1(value1); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && 
EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_load_ps(data0+@i@), value1_sse); - b = _mm_add_ps(a, _mm_load_ps(data_out+@i@)); - _mm_store_ps(data_out+@i@, b); -/**end repeat2**/ - data0 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#elif EINSUM_USE_SSE2 && @float64@ - value1_sse = _mm_set1_pd(value1); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(_mm_load_pd(data0+@i@), value1_sse); - b = _mm_add_pd(a, _mm_load_pd(data_out+@i@)); - _mm_store_pd(data_out+@i@, b); -/**end repeat2**/ - data0 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), value1_sse); - b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@)); - _mm_storeu_ps(data_out+@i@, b); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), value1_sse); - b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@)); - _mm_storeu_pd(data_out+@i@, b); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - data_out[@i@] = @to@(@from@(data0[@i@])* - value1 + - @from@(data_out[@i@])); -/**end repeat2**/ -#endif - data0 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; + @name@_sum_of_products_muladd(data0, data_out, value1, count); } static NPY_GCC_OPT_3 void From 8b8bbdfa5b5331811841176f64470521a890a7c0 Mon Sep 17 00:00:00 2001 From: pmav99 Date: Tue, 19 Jan 2021 16:31:04 +0200 Subject: [PATCH 0361/1270] STY: Use explicit reexports for numpy.typing objects `mypy --strict` is disabling `implicit_reexport`. Consequently, when we try to import `ArrayLike` and `DTypeLike` from `numpy.typing`, mypy throws an error. With this commit we add explicit "reexports" for these two objects. Fixes #18190 --- numpy/typing/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 86bba57be326..47dbe4fbefed 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -295,9 +295,9 @@ class _8Bit(_16Bit): ... 
# type: ignore[misc] _VoidLike, ) from ._shape import _Shape, _ShapeLike -from ._dtype_like import _SupportsDType, _VoidDTypeLike, DTypeLike +from ._dtype_like import _SupportsDType, _VoidDTypeLike, DTypeLike as DTypeLike from ._array_like import ( - ArrayLike, + ArrayLike as ArrayLike, _ArrayLike, _NestedSequence, _SupportsArray, From 00067212c85e4e74a81d4bc3344b51b9364a08e0 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 15 Jan 2021 18:21:57 +0100 Subject: [PATCH 0362/1270] MAINT: Changed the `NBitBase` variancy in `number` from co- to invariant --- numpy/__init__.pyi | 182 +++++++++++++++++++------------------- numpy/typing/_callable.py | 126 +++++++++++++------------- 2 files changed, 154 insertions(+), 154 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index be87a166e03f..dbf8077831e1 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1527,8 +1527,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. _ScalarType = TypeVar("_ScalarType", bound=generic) -_NBit_co = TypeVar("_NBit_co", covariant=True, bound=NBitBase) -_NBit_co2 = TypeVar("_NBit_co2", covariant=True, bound=NBitBase) +_NBit1 = TypeVar("_NBit1", bound=NBitBase) +_NBit2 = TypeVar("_NBit2", bound=NBitBase) class generic(_ArrayOrScalarCommon): @abstractmethod @@ -1562,7 +1562,7 @@ class generic(_ArrayOrScalarCommon): @property def dtype(self: _ScalarType) -> dtype[_ScalarType]: ... -class number(generic, Generic[_NBit_co]): # type: ignore +class number(generic, Generic[_NBit1]): # type: ignore @property def real(self: _ArraySelf) -> _ArraySelf: ... @property @@ -1675,12 +1675,12 @@ else: _FloatValue = Union[None, _CharLike_co, SupportsFloat] _ComplexValue = Union[None, _CharLike_co, SupportsFloat, SupportsComplex] -class integer(number[_NBit_co]): # type: ignore +class integer(number[_NBit1]): # type: ignore # NOTE: `__index__` is technically defined in the bottom-most # sub-classes (`int64`, `uint32`, etc) def __index__(self) -> int: ... - __truediv__: _IntTrueDiv[_NBit_co] - __rtruediv__: _IntTrueDiv[_NBit_co] + __truediv__: _IntTrueDiv[_NBit1] + __rtruediv__: _IntTrueDiv[_NBit1] def __mod__(self, value: _IntLike_co) -> integer: ... def __rmod__(self, value: _IntLike_co) -> integer: ... def __invert__(self: _IntType) -> _IntType: ... @@ -1696,32 +1696,32 @@ class integer(number[_NBit_co]): # type: ignore def __xor__(self, other: _IntLike_co) -> integer: ... def __rxor__(self, other: _IntLike_co) -> integer: ... -class signedinteger(integer[_NBit_co]): +class signedinteger(integer[_NBit1]): def __init__(self, __value: _IntValue = ...) -> None: ... 
- __add__: _SignedIntOp[_NBit_co] - __radd__: _SignedIntOp[_NBit_co] - __sub__: _SignedIntOp[_NBit_co] - __rsub__: _SignedIntOp[_NBit_co] - __mul__: _SignedIntOp[_NBit_co] - __rmul__: _SignedIntOp[_NBit_co] - __floordiv__: _SignedIntOp[_NBit_co] - __rfloordiv__: _SignedIntOp[_NBit_co] - __pow__: _SignedIntOp[_NBit_co] - __rpow__: _SignedIntOp[_NBit_co] - __lshift__: _SignedIntBitOp[_NBit_co] - __rlshift__: _SignedIntBitOp[_NBit_co] - __rshift__: _SignedIntBitOp[_NBit_co] - __rrshift__: _SignedIntBitOp[_NBit_co] - __and__: _SignedIntBitOp[_NBit_co] - __rand__: _SignedIntBitOp[_NBit_co] - __xor__: _SignedIntBitOp[_NBit_co] - __rxor__: _SignedIntBitOp[_NBit_co] - __or__: _SignedIntBitOp[_NBit_co] - __ror__: _SignedIntBitOp[_NBit_co] - __mod__: _SignedIntMod[_NBit_co] - __rmod__: _SignedIntMod[_NBit_co] - __divmod__: _SignedIntDivMod[_NBit_co] - __rdivmod__: _SignedIntDivMod[_NBit_co] + __add__: _SignedIntOp[_NBit1] + __radd__: _SignedIntOp[_NBit1] + __sub__: _SignedIntOp[_NBit1] + __rsub__: _SignedIntOp[_NBit1] + __mul__: _SignedIntOp[_NBit1] + __rmul__: _SignedIntOp[_NBit1] + __floordiv__: _SignedIntOp[_NBit1] + __rfloordiv__: _SignedIntOp[_NBit1] + __pow__: _SignedIntOp[_NBit1] + __rpow__: _SignedIntOp[_NBit1] + __lshift__: _SignedIntBitOp[_NBit1] + __rlshift__: _SignedIntBitOp[_NBit1] + __rshift__: _SignedIntBitOp[_NBit1] + __rrshift__: _SignedIntBitOp[_NBit1] + __and__: _SignedIntBitOp[_NBit1] + __rand__: _SignedIntBitOp[_NBit1] + __xor__: _SignedIntBitOp[_NBit1] + __rxor__: _SignedIntBitOp[_NBit1] + __or__: _SignedIntBitOp[_NBit1] + __ror__: _SignedIntBitOp[_NBit1] + __mod__: _SignedIntMod[_NBit1] + __rmod__: _SignedIntMod[_NBit1] + __divmod__: _SignedIntDivMod[_NBit1] + __rdivmod__: _SignedIntDivMod[_NBit1] int8 = signedinteger[_8Bit] int16 = signedinteger[_16Bit] @@ -1767,33 +1767,33 @@ class timedelta64(generic): __gt__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] __ge__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] -class unsignedinteger(integer[_NBit_co]): +class unsignedinteger(integer[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` def __init__(self, __value: _IntValue = ...) -> None: ... 
- __add__: _UnsignedIntOp[_NBit_co] - __radd__: _UnsignedIntOp[_NBit_co] - __sub__: _UnsignedIntOp[_NBit_co] - __rsub__: _UnsignedIntOp[_NBit_co] - __mul__: _UnsignedIntOp[_NBit_co] - __rmul__: _UnsignedIntOp[_NBit_co] - __floordiv__: _UnsignedIntOp[_NBit_co] - __rfloordiv__: _UnsignedIntOp[_NBit_co] - __pow__: _UnsignedIntOp[_NBit_co] - __rpow__: _UnsignedIntOp[_NBit_co] - __lshift__: _UnsignedIntBitOp[_NBit_co] - __rlshift__: _UnsignedIntBitOp[_NBit_co] - __rshift__: _UnsignedIntBitOp[_NBit_co] - __rrshift__: _UnsignedIntBitOp[_NBit_co] - __and__: _UnsignedIntBitOp[_NBit_co] - __rand__: _UnsignedIntBitOp[_NBit_co] - __xor__: _UnsignedIntBitOp[_NBit_co] - __rxor__: _UnsignedIntBitOp[_NBit_co] - __or__: _UnsignedIntBitOp[_NBit_co] - __ror__: _UnsignedIntBitOp[_NBit_co] - __mod__: _UnsignedIntMod[_NBit_co] - __rmod__: _UnsignedIntMod[_NBit_co] - __divmod__: _UnsignedIntDivMod[_NBit_co] - __rdivmod__: _UnsignedIntDivMod[_NBit_co] + __add__: _UnsignedIntOp[_NBit1] + __radd__: _UnsignedIntOp[_NBit1] + __sub__: _UnsignedIntOp[_NBit1] + __rsub__: _UnsignedIntOp[_NBit1] + __mul__: _UnsignedIntOp[_NBit1] + __rmul__: _UnsignedIntOp[_NBit1] + __floordiv__: _UnsignedIntOp[_NBit1] + __rfloordiv__: _UnsignedIntOp[_NBit1] + __pow__: _UnsignedIntOp[_NBit1] + __rpow__: _UnsignedIntOp[_NBit1] + __lshift__: _UnsignedIntBitOp[_NBit1] + __rlshift__: _UnsignedIntBitOp[_NBit1] + __rshift__: _UnsignedIntBitOp[_NBit1] + __rrshift__: _UnsignedIntBitOp[_NBit1] + __and__: _UnsignedIntBitOp[_NBit1] + __rand__: _UnsignedIntBitOp[_NBit1] + __xor__: _UnsignedIntBitOp[_NBit1] + __rxor__: _UnsignedIntBitOp[_NBit1] + __or__: _UnsignedIntBitOp[_NBit1] + __ror__: _UnsignedIntBitOp[_NBit1] + __mod__: _UnsignedIntMod[_NBit1] + __rmod__: _UnsignedIntMod[_NBit1] + __divmod__: _UnsignedIntDivMod[_NBit1] + __rdivmod__: _UnsignedIntDivMod[_NBit1] uint8 = unsignedinteger[_8Bit] uint16 = unsignedinteger[_16Bit] @@ -1808,29 +1808,29 @@ uint0 = unsignedinteger[_NBitIntP] uint = unsignedinteger[_NBitInt] ulonglong = unsignedinteger[_NBitLongLong] -class inexact(number[_NBit_co]): ... # type: ignore +class inexact(number[_NBit1]): ... # type: ignore _IntType = TypeVar("_IntType", bound=integer) _FloatType = TypeVar('_FloatType', bound=floating) -class floating(inexact[_NBit_co]): +class floating(inexact[_NBit1]): def __init__(self, __value: _FloatValue = ...) -> None: ... 
- __add__: _FloatOp[_NBit_co] - __radd__: _FloatOp[_NBit_co] - __sub__: _FloatOp[_NBit_co] - __rsub__: _FloatOp[_NBit_co] - __mul__: _FloatOp[_NBit_co] - __rmul__: _FloatOp[_NBit_co] - __truediv__: _FloatOp[_NBit_co] - __rtruediv__: _FloatOp[_NBit_co] - __floordiv__: _FloatOp[_NBit_co] - __rfloordiv__: _FloatOp[_NBit_co] - __pow__: _FloatOp[_NBit_co] - __rpow__: _FloatOp[_NBit_co] - __mod__: _FloatMod[_NBit_co] - __rmod__: _FloatMod[_NBit_co] - __divmod__: _FloatDivMod[_NBit_co] - __rdivmod__: _FloatDivMod[_NBit_co] + __add__: _FloatOp[_NBit1] + __radd__: _FloatOp[_NBit1] + __sub__: _FloatOp[_NBit1] + __rsub__: _FloatOp[_NBit1] + __mul__: _FloatOp[_NBit1] + __rmul__: _FloatOp[_NBit1] + __truediv__: _FloatOp[_NBit1] + __rtruediv__: _FloatOp[_NBit1] + __floordiv__: _FloatOp[_NBit1] + __rfloordiv__: _FloatOp[_NBit1] + __pow__: _FloatOp[_NBit1] + __rpow__: _FloatOp[_NBit1] + __mod__: _FloatMod[_NBit1] + __rmod__: _FloatMod[_NBit1] + __divmod__: _FloatDivMod[_NBit1] + __rdivmod__: _FloatDivMod[_NBit1] float16 = floating[_16Bit] float32 = floating[_32Bit] @@ -1847,25 +1847,25 @@ longfloat = floating[_NBitLongDouble] # It is used to clarify why `complex128`s precision is `_64Bit`, the latter # describing the two 64 bit floats representing its real and imaginary component -class complexfloating(inexact[_NBit_co], Generic[_NBit_co, _NBit_co2]): +class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): def __init__(self, __value: _ComplexValue = ...) -> None: ... @property - def real(self) -> floating[_NBit_co]: ... # type: ignore[override] - @property - def imag(self) -> floating[_NBit_co2]: ... # type: ignore[override] - def __abs__(self) -> floating[_NBit_co]: ... # type: ignore[override] - __add__: _ComplexOp[_NBit_co] - __radd__: _ComplexOp[_NBit_co] - __sub__: _ComplexOp[_NBit_co] - __rsub__: _ComplexOp[_NBit_co] - __mul__: _ComplexOp[_NBit_co] - __rmul__: _ComplexOp[_NBit_co] - __truediv__: _ComplexOp[_NBit_co] - __rtruediv__: _ComplexOp[_NBit_co] - __floordiv__: _ComplexOp[_NBit_co] - __rfloordiv__: _ComplexOp[_NBit_co] - __pow__: _ComplexOp[_NBit_co] - __rpow__: _ComplexOp[_NBit_co] + def real(self) -> floating[_NBit1]: ... # type: ignore[override] + @property + def imag(self) -> floating[_NBit2]: ... # type: ignore[override] + def __abs__(self) -> floating[_NBit1]: ... # type: ignore[override] + __add__: _ComplexOp[_NBit1] + __radd__: _ComplexOp[_NBit1] + __sub__: _ComplexOp[_NBit1] + __rsub__: _ComplexOp[_NBit1] + __mul__: _ComplexOp[_NBit1] + __rmul__: _ComplexOp[_NBit1] + __truediv__: _ComplexOp[_NBit1] + __rtruediv__: _ComplexOp[_NBit1] + __floordiv__: _ComplexOp[_NBit1] + __rfloordiv__: _ComplexOp[_NBit1] + __pow__: _ComplexOp[_NBit1] + __rpow__: _ComplexOp[_NBit1] complex64 = complexfloating[_32Bit, _32Bit] complex128 = complexfloating[_64Bit, _64Bit] diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 693b6dfed0a8..77e3ee0c4dc5 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -61,8 +61,8 @@ _T = TypeVar("_T") _2Tuple = Tuple[_T, _T] - _NBit_co = TypeVar("_NBit_co", covariant=True, bound=NBitBase) - _NBit = TypeVar("_NBit", bound=NBitBase) + _NBit1 = TypeVar("_NBit1", bound=NBitBase) + _NBit2 = TypeVar("_NBit2", bound=NBitBase) _IntType = TypeVar("_IntType", bound=integer) _FloatType = TypeVar("_FloatType", bound=floating) @@ -141,9 +141,9 @@ def __call__(self, __other: timedelta64) -> _NumberType_co: ... @overload def __call__(self, __other: _FloatLike_co) -> timedelta64: ... 
- class _IntTrueDiv(Protocol[_NBit_co]): + class _IntTrueDiv(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> floating[_NBit_co]: ... + def __call__(self, __other: bool) -> floating[_NBit1]: ... @overload def __call__(self, __other: int) -> floating[_NBitInt]: ... @overload @@ -151,12 +151,12 @@ def __call__(self, __other: float) -> float64: ... @overload def __call__(self, __other: complex) -> complex128: ... @overload - def __call__(self, __other: integer[_NBit]) -> floating[Union[_NBit_co, _NBit]]: ... + def __call__(self, __other: integer[_NBit2]) -> floating[Union[_NBit1, _NBit2]]: ... - class _UnsignedIntOp(Protocol[_NBit_co]): + class _UnsignedIntOp(Protocol[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` @overload - def __call__(self, __other: bool) -> unsignedinteger[_NBit_co]: ... + def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ... @overload def __call__( self, __other: Union[int, signedinteger[Any]] @@ -167,24 +167,24 @@ def __call__(self, __other: float) -> float64: ... def __call__(self, __other: complex) -> complex128: ... @overload def __call__( - self, __other: unsignedinteger[_NBit] - ) -> unsignedinteger[Union[_NBit_co, _NBit]]: ... + self, __other: unsignedinteger[_NBit2] + ) -> unsignedinteger[Union[_NBit1, _NBit2]]: ... - class _UnsignedIntBitOp(Protocol[_NBit_co]): + class _UnsignedIntBitOp(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> unsignedinteger[_NBit_co]: ... + def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ... @overload def __call__(self, __other: int) -> signedinteger[Any]: ... @overload def __call__(self, __other: signedinteger[Any]) -> signedinteger[Any]: ... @overload def __call__( - self, __other: unsignedinteger[_NBit] - ) -> unsignedinteger[Union[_NBit_co, _NBit]]: ... + self, __other: unsignedinteger[_NBit2] + ) -> unsignedinteger[Union[_NBit1, _NBit2]]: ... - class _UnsignedIntMod(Protocol[_NBit_co]): + class _UnsignedIntMod(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> unsignedinteger[_NBit_co]: ... + def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ... @overload def __call__( self, __other: Union[int, signedinteger[Any]] @@ -193,12 +193,12 @@ def __call__( def __call__(self, __other: float) -> float64: ... @overload def __call__( - self, __other: unsignedinteger[_NBit] - ) -> unsignedinteger[Union[_NBit_co, _NBit]]: ... + self, __other: unsignedinteger[_NBit2] + ) -> unsignedinteger[Union[_NBit1, _NBit2]]: ... - class _UnsignedIntDivMod(Protocol[_NBit_co]): + class _UnsignedIntDivMod(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit_co]]: ... + def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ... @overload def __call__( self, __other: Union[int, signedinteger[Any]] @@ -207,111 +207,111 @@ def __call__( def __call__(self, __other: float) -> _2Tuple[float64]: ... @overload def __call__( - self, __other: unsignedinteger[_NBit] - ) -> _2Tuple[unsignedinteger[Union[_NBit_co, _NBit]]]: ... + self, __other: unsignedinteger[_NBit2] + ) -> _2Tuple[unsignedinteger[Union[_NBit1, _NBit2]]]: ... - class _SignedIntOp(Protocol[_NBit_co]): + class _SignedIntOp(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> signedinteger[_NBit_co]: ... + def __call__(self, __other: bool) -> signedinteger[_NBit1]: ... @overload - def __call__(self, __other: int) -> signedinteger[Union[_NBit_co, _NBitInt]]: ... 
+ def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ... @overload def __call__(self, __other: float) -> float64: ... @overload def __call__(self, __other: complex) -> complex128: ... @overload def __call__( - self, __other: signedinteger[_NBit] - ) -> signedinteger[Union[_NBit_co, _NBit]]: ... + self, __other: signedinteger[_NBit2] + ) -> signedinteger[Union[_NBit1, _NBit2]]: ... - class _SignedIntBitOp(Protocol[_NBit_co]): + class _SignedIntBitOp(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> signedinteger[_NBit_co]: ... + def __call__(self, __other: bool) -> signedinteger[_NBit1]: ... @overload - def __call__(self, __other: int) -> signedinteger[Union[_NBit_co, _NBitInt]]: ... + def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ... @overload def __call__( - self, __other: signedinteger[_NBit] - ) -> signedinteger[Union[_NBit_co, _NBit]]: ... + self, __other: signedinteger[_NBit2] + ) -> signedinteger[Union[_NBit1, _NBit2]]: ... - class _SignedIntMod(Protocol[_NBit_co]): + class _SignedIntMod(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> signedinteger[_NBit_co]: ... + def __call__(self, __other: bool) -> signedinteger[_NBit1]: ... @overload - def __call__(self, __other: int) -> signedinteger[Union[_NBit_co, _NBitInt]]: ... + def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ... @overload def __call__(self, __other: float) -> float64: ... @overload def __call__( - self, __other: signedinteger[_NBit] - ) -> signedinteger[Union[_NBit_co, _NBit]]: ... + self, __other: signedinteger[_NBit2] + ) -> signedinteger[Union[_NBit1, _NBit2]]: ... - class _SignedIntDivMod(Protocol[_NBit_co]): + class _SignedIntDivMod(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit_co]]: ... + def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ... @overload - def __call__(self, __other: int) -> _2Tuple[signedinteger[Union[_NBit_co, _NBitInt]]]: ... + def __call__(self, __other: int) -> _2Tuple[signedinteger[Union[_NBit1, _NBitInt]]]: ... @overload def __call__(self, __other: float) -> _2Tuple[float64]: ... @overload def __call__( - self, __other: signedinteger[_NBit] - ) -> _2Tuple[signedinteger[Union[_NBit_co, _NBit]]]: ... + self, __other: signedinteger[_NBit2] + ) -> _2Tuple[signedinteger[Union[_NBit1, _NBit2]]]: ... - class _FloatOp(Protocol[_NBit_co]): + class _FloatOp(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> floating[_NBit_co]: ... + def __call__(self, __other: bool) -> floating[_NBit1]: ... @overload - def __call__(self, __other: int) -> floating[Union[_NBit_co, _NBitInt]]: ... + def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ... @overload def __call__(self, __other: float) -> float64: ... @overload def __call__(self, __other: complex) -> complex128: ... @overload def __call__( - self, __other: Union[integer[_NBit], floating[_NBit]] - ) -> floating[Union[_NBit_co, _NBit]]: ... + self, __other: Union[integer[_NBit2], floating[_NBit2]] + ) -> floating[Union[_NBit1, _NBit2]]: ... - class _FloatMod(Protocol[_NBit_co]): + class _FloatMod(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> floating[_NBit_co]: ... + def __call__(self, __other: bool) -> floating[_NBit1]: ... @overload - def __call__(self, __other: int) -> floating[Union[_NBit_co, _NBitInt]]: ... + def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ... 
@overload def __call__(self, __other: float) -> float64: ... @overload def __call__( - self, __other: Union[integer[_NBit], floating[_NBit]] - ) -> floating[Union[_NBit_co, _NBit]]: ... + self, __other: Union[integer[_NBit2], floating[_NBit2]] + ) -> floating[Union[_NBit1, _NBit2]]: ... - class _FloatDivMod(Protocol[_NBit_co]): + class _FloatDivMod(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> _2Tuple[floating[_NBit_co]]: ... + def __call__(self, __other: bool) -> _2Tuple[floating[_NBit1]]: ... @overload - def __call__(self, __other: int) -> _2Tuple[floating[Union[_NBit_co, _NBitInt]]]: ... + def __call__(self, __other: int) -> _2Tuple[floating[Union[_NBit1, _NBitInt]]]: ... @overload def __call__(self, __other: float) -> _2Tuple[float64]: ... @overload def __call__( - self, __other: Union[integer[_NBit], floating[_NBit]] - ) -> _2Tuple[floating[Union[_NBit_co, _NBit]]]: ... + self, __other: Union[integer[_NBit2], floating[_NBit2]] + ) -> _2Tuple[floating[Union[_NBit1, _NBit2]]]: ... - class _ComplexOp(Protocol[_NBit_co]): + class _ComplexOp(Protocol[_NBit1]): @overload - def __call__(self, __other: bool) -> complexfloating[_NBit_co, _NBit_co]: ... + def __call__(self, __other: bool) -> complexfloating[_NBit1, _NBit1]: ... @overload - def __call__(self, __other: int) -> complexfloating[Union[_NBit_co, _NBitInt], Union[_NBit_co, _NBitInt]]: ... + def __call__(self, __other: int) -> complexfloating[Union[_NBit1, _NBitInt], Union[_NBit1, _NBitInt]]: ... @overload def __call__(self, __other: Union[float, complex]) -> complex128: ... @overload def __call__( self, __other: Union[ - integer[_NBit], - floating[_NBit], - complexfloating[_NBit, _NBit], + integer[_NBit2], + floating[_NBit2], + complexfloating[_NBit2, _NBit2], ] - ) -> complexfloating[Union[_NBit_co, _NBit], Union[_NBit_co, _NBit]]: ... + ) -> complexfloating[Union[_NBit1, _NBit2], Union[_NBit1, _NBit2]]: ... class _NumberOp(Protocol): def __call__(self, __other: _NumberLike_co) -> number: ... From 4ef10a55103ddf98730305db11461457cfc93e6e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 15 Jan 2021 18:47:33 +0100 Subject: [PATCH 0363/1270] DOC,TST: Update the `NBitBase` example --- numpy/typing/__init__.py | 6 ++++-- numpy/typing/tests/data/reveal/nbit_base_example.py | 5 +++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 3cfdfe50a3eb..3c15c7c0b918 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -184,13 +184,15 @@ class NBitBase: .. code-block:: python + >>> from __future__ import annotations >>> from typing import TypeVar, TYPE_CHECKING >>> import numpy as np >>> import numpy.typing as npt - >>> T = TypeVar("T", bound=npt.NBitBase) + >>> T1 = TypeVar("T1", bound=npt.NBitBase) + >>> T2 = TypeVar("T2", bound=npt.NBitBase) - >>> def add(a: "np.floating[T]", b: "np.integer[T]") -> "np.floating[T]": + >>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[Union[T1, T2]]: ... 
return a + b >>> a = np.float16() diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.py b/numpy/typing/tests/data/reveal/nbit_base_example.py index 99fb71560a24..d34f6f69a31d 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.py +++ b/numpy/typing/tests/data/reveal/nbit_base_example.py @@ -2,9 +2,10 @@ import numpy as np import numpy.typing as npt -T = TypeVar("T", bound=npt.NBitBase) +T1 = TypeVar("T1", bound=npt.NBitBase) +T2 = TypeVar("T2", bound=npt.NBitBase) -def add(a: np.floating[T], b: np.integer[T]) -> np.floating[T]: +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[Union[T1, T2]]: return a + b i8: np.int64 From db018a5d52b9773107325a8221a88eea0cbd79aa Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 15 Jan 2021 18:56:58 +0100 Subject: [PATCH 0364/1270] MAINT: Adjusted the return-type of `number` ops to `Any` --- numpy/typing/_callable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 77e3ee0c4dc5..e1fdee3ba194 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -314,7 +314,7 @@ def __call__( ) -> complexfloating[Union[_NBit1, _NBit2], Union[_NBit1, _NBit2]]: ... class _NumberOp(Protocol): - def __call__(self, __other: _NumberLike_co) -> number: ... + def __call__(self, __other: _NumberLike_co) -> Any: ... class _ComparisonOp(Protocol[_T]): @overload From 515c3416e0480b6219d6b8737642c3b27d7157f4 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 15 Jan 2021 18:59:36 +0100 Subject: [PATCH 0365/1270] TST: Added a tests for `number` invariancy --- numpy/typing/tests/data/fail/scalars.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/typing/tests/data/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.py index f097408757f6..0aeff398fc87 100644 --- a/numpy/typing/tests/data/fail/scalars.py +++ b/numpy/typing/tests/data/fail/scalars.py @@ -1,5 +1,6 @@ import numpy as np +f2: np.float16 f8: np.float64 # Construction @@ -74,3 +75,8 @@ def __float__(self): f8.squeeze(axis=1) # E: incompatible type f8.squeeze(axis=(0, 1)) # E: incompatible type f8.transpose(1) # E: incompatible type + +def func(a: np.float32) -> None: ... + +func(f2) # E: incompatible type +func(f8) # E: incompatible type From 0a32e68bced83ce045f043c916a1b0cfa78df967 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 15 Jan 2021 19:13:46 +0100 Subject: [PATCH 0366/1270] DOC: Added a missing `Union` import --- numpy/typing/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 3c15c7c0b918..a1d943235115 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -185,7 +185,7 @@ class NBitBase: .. 
code-block:: python >>> from __future__ import annotations - >>> from typing import TypeVar, TYPE_CHECKING + >>> from typing import TypeVar, Union, TYPE_CHECKING >>> import numpy as np >>> import numpy.typing as npt From f2faf842686616cb8e8c298a496fa621ec127c91 Mon Sep 17 00:00:00 2001 From: Eertmans Date: Tue, 19 Jan 2021 19:35:44 +0100 Subject: [PATCH 0367/1270] Fixing typo in docstring --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index e00bc4d989d5..7d652ce891e9 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -1092,7 +1092,7 @@ cdef class Generator: 0.0 # may vary >>> abs(sigma - np.std(s, ddof=1)) - 0.1 # may vary + 0.0 # may vary Display the histogram of the samples, along with the probability density function: From cafda248a5856a6424324929c1708ea8048d89d6 Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Tue, 19 Jan 2021 20:52:54 +0200 Subject: [PATCH 0368/1270] BUG: threads.h existence test requires GLIBC > 2.12. (#18180) * defined(__STDC_NO_THREADS__) can be trusted only when using glibc > 2.12 * Add __GLIBC__ defined check. --- numpy/f2py/cfuncs.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 26b43e7e6964..40496ccf10cd 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -549,7 +549,12 @@ #define F2PY_THREAD_LOCAL_DECL __declspec(thread) #elif defined(__STDC_VERSION__) \\ && (__STDC_VERSION__ >= 201112L) \\ - && !defined(__STDC_NO_THREADS__) + && !defined(__STDC_NO_THREADS__) \\ + && (!defined(__GLIBC__) || __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 12)) +/* __STDC_NO_THREADS__ was first defined in a maintenance release of glibc 2.12, + see https://lists.gnu.org/archive/html/commit-hurd/2012-07/msg00180.html, + so `!defined(__STDC_NO_THREADS__)` may give false positive for the existence + of `threads.h` when using an older release of glibc 2.12 */ #include #define F2PY_THREAD_LOCAL_DECL thread_local #elif defined(__GNUC__) \\ From c4014c7fb4f1753f296bac842098decf061e39bb Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Tue, 19 Jan 2021 21:35:29 +0200 Subject: [PATCH 0369/1270] Applied reviewers comments: add ident --- numpy/f2py/tests/test_callback.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index f847dd49fd2a..37736af21285 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -217,15 +217,16 @@ class TestF90Callback(util.F2PyTest): suffix = '.f90' - code = textwrap.dedent(""" - function gh17797(f, y) result(r) - external f - integer(8) :: r, f - integer(8), dimension(:) :: y - r = f(0) - r = r + sum(y) - end function gh17797 - """) + code = textwrap.dedent( + """ + function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) + end function gh17797 + """) def test_gh17797(self): From f4a67d39964e9c0743101a9ff532b62a07413b9c Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 20 Jan 2021 10:33:42 +0200 Subject: [PATCH 0370/1270] BLD: update OpenBLAS to af2b0d02 --- tools/openblas_support.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/openblas_support.py b/tools/openblas_support.py index dff19274ed49..1300795bb9a1 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -13,7 +13,7 @@ 
from urllib.error import HTTPError OPENBLAS_V = '0.3.13' -OPENBLAS_LONG = 'v0.3.13' +OPENBLAS_LONG = 'v0.3.13-62-gaf2b0d02' BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs' BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download' ARCHITECTURES = ['', 'windows', 'darwin', 'aarch64', 'x86_64', From 6d3f8d06bd5ccf9ba665baec90c25032aec08813 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 20 Jan 2021 17:41:59 +0800 Subject: [PATCH 0371/1270] Add new intrinsics sum_u8/u16/u64. --- numpy/core/src/_simd/_simd.dispatch.c.src | 7 +++--- numpy/core/src/common/simd/avx2/arithmetic.h | 21 ++++++++++++++++ .../core/src/common/simd/avx512/arithmetic.h | 25 +++++++++++++++++++ numpy/core/src/common/simd/neon/arithmetic.h | 23 +++++++++++++++++ numpy/core/src/common/simd/sse/arithmetic.h | 19 ++++++++++++++ numpy/core/src/common/simd/vsx/arithmetic.h | 19 ++++++++++++++ numpy/core/tests/test_simd.py | 5 +++- 7 files changed, 115 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index af42192a96a0..a861cd94496a 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -16,6 +16,7 @@ * #sfx = u8, s8, u16, s16, u32, s32, u64, s64, f32, f64# * #bsfx = b8, b8, b16, b16, b32, b32, b64, b64, b32, b64# * #esfx = u16, s8, u32, s16, u32, s32, u64, s64, f32, f64# + * #sum_ret = u32, 0, u32, 0, u32, 0, u64, 0, f32, f64# * #expand_sup =1, 0, 1, 0, 0, 0, 0, 0, 0, 0# * #simd_sup = 1, 1, 1, 1, 1, 1, 1, 1, 1, NPY_SIMD_F64# * #fp_only = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# @@ -23,7 +24,7 @@ * #mul_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 1# * #div_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #fused_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# - * #sum_sup = 0, 0, 0, 0, 1, 0, 0, 0, 1, 1# + * #sum_sup = 1, 0, 1, 0, 1, 0, 1, 0, 1, 1# * #rev64_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 0# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# @@ -362,7 +363,7 @@ SIMD_IMPL_INTRIN_3(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@, v@sfx@) #endif // fused_sup #if @sum_sup@ -SIMD_IMPL_INTRIN_1(sum_@sfx@, @sfx@, v@sfx@) +SIMD_IMPL_INTRIN_1(sum_@sfx@, @sum_ret@, v@sfx@) #endif // sum_sup /*************************** @@ -452,7 +453,7 @@ static PyMethodDef simd__intrinsics_methods[] = { * #mul_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 1# * #div_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #fused_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# - * #sum_sup = 0, 0, 0, 0, 1, 0, 0, 0, 1, 1# + * #sum_sup = 1, 0, 1, 0, 1, 0, 1, 0, 1, 1# * #rev64_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 0# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index 3a3a82798829..c2153718d449 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -118,6 +118,14 @@ #endif // !NPY_HAVE_FMA3 // Horizontal add: Calculates the sum of all vector elements. 
+ +NPY_FINLINE npy_uint32 npyv_sum_u8(__m256i a) +{ + __m256i half = _mm256_sad_epu8(a, _mm256_setzero_si256()); + __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(quarter, _mm_unpackhi_epi64(quarter, quarter))); +} + NPY_FINLINE npy_uint32 npyv_sum_u32(__m256i a) { __m256i s0 = _mm256_hadd_epi32(a, a); @@ -127,6 +135,19 @@ NPY_FINLINE npy_uint32 npyv_sum_u32(__m256i a) return _mm_cvtsi128_si32(s1); } +NPY_FINLINE npy_uint32 npyv_sum_u16(__m256i a) +{ + npyv_u32x2 res = npyv_expand_u32_u16(a); + return (unsigned)npyv_sum_u32(_mm256_add_epi32(res.val[0], res.val[1])); +} + +NPY_FINLINE npy_uint64 npyv_sum_u64(__m256i a) +{ + npy_uint64 NPY_DECL_ALIGNED(32) idx[2]; + _mm_store_si128((__m128i*)idx, _mm_add_epi64(_mm256_castsi256_si128(a), _mm256_extracti128_si256(a, 1))); + return idx[0] + idx[1]; +} + NPY_FINLINE float npyv_sum_f32(__m256 a) { __m256 sum_halves = _mm256_hadd_ps(a, a); diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index 6f668f439a89..c85f425f3013 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -145,6 +145,17 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) * intel compiler/GCC 7.1/Clang 4, we still need to support older GCC. ***************************/ +NPY_FINLINE npy_uint32 npyv_sum_u8(__m512i a) +{ + __m512i a16 = _mm512_add_epi16(_mm512_cvtepu8_epi16(npyv512_lower_si256(a)), + _mm512_cvtepu8_epi16(npyv512_higher_si256(a))); + a16 = _mm512_cvtepi16_epi32(_mm256_add_epi16(npyv512_lower_si256(a16), npyv512_higher_si256(a16))); + __m256i a8 = _mm256_add_epi32(npyv512_lower_si256(a16), npyv512_higher_si256(a16)); + __m128i a4 = _mm_add_epi32(_mm256_castsi256_si128(a8), _mm256_extracti128_si256(a8, 1)); + a4 = _mm_hadd_epi32(a4, a4); + return (npy_uint32)_mm_cvtsi128_si32(_mm_hadd_epi32(a4, a4)); +} + NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { __m256i half = _mm256_add_epi32(npyv512_lower_si256(a), npyv512_higher_si256(a)); @@ -153,6 +164,20 @@ NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) return _mm_cvtsi128_si32(_mm_hadd_epi32(quarter, quarter)); } +NPY_FINLINE npy_uint32 npyv_sum_u16(__m512i a) +{ + npyv_u32x2 res = npyv_expand_u32_u16(a); + return (unsigned)npyv_sum_u32(_mm512_add_epi32(res.val[0], res.val[1])); +} + +NPY_FINLINE npy_uint64 npyv_sum_u64(__m512i a) +{ + npy_uint64 NPY_DECL_ALIGNED(64) idx[2]; + __m256i half = _mm256_add_epi64(npyv512_lower_si256(a), npyv512_higher_si256(a)); + _mm_store_si128((__m128i*)idx, _mm_add_epi64(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1))); + return idx[0] + idx[1]; +} + #ifdef NPY_HAVE_AVX512F_REDUCE #define npyv_sum_f32 _mm512_reduce_add_ps #define npyv_sum_f64 _mm512_reduce_add_pd diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h index 1c8bde15a318..b5da3076cd02 100644 --- a/numpy/core/src/common/simd/neon/arithmetic.h +++ b/numpy/core/src/common/simd/neon/arithmetic.h @@ -133,10 +133,33 @@ // Horizontal add: Calculates the sum of all vector elements. 
#if NPY_SIMD_F64 + #define npyv_sum_u8 vaddvq_u8 + #define npyv_sum_u16 vaddvq_u16 #define npyv_sum_u32 vaddvq_u32 + #define npyv_sum_u16 vaddvq_u64 #define npyv_sum_f32 vaddvq_f32 #define npyv_sum_f64 vaddvq_f64 #else + + NPY_FINLINE npy_uint32 npyv_sum_u8(npyv_u8 a) + { + uint32x4_t t0 = vpaddlq_u16(vpaddlq_u8(a)); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); + } + + NPY_FINLINE npy_uint32 npyv_sum_u16(npyv_u16 a) + { + uint32x4_t t0 = vpaddlq_u16(a); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); + } + + NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) + { + return vget_lane_u64(vadd_u64(vget_low_u64(a), vget_high_u64(a)),0); + } + NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { uint32x2_t a0 = vpadd_u32(vget_low_u32(a), vget_high_u32(a)); diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index faf5685d98d8..7dba6ea8ceca 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -150,6 +150,12 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b) // Horizontal add: Calculates the sum of all vector elements. +NPY_FINLINE npy_uint32 npyv_sum_u8(__m128i a) +{ + __m128i half = _mm_sad_epu8(a, _mm_setzero_si128()); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(half, _mm_unpackhi_epi64(half, half))); +} + NPY_FINLINE npy_uint32 npyv_sum_u32(__m128i a) { __m128i t = _mm_add_epi32(a, _mm_srli_si128(a, 8)); @@ -157,6 +163,19 @@ NPY_FINLINE npy_uint32 npyv_sum_u32(__m128i a) return (unsigned)_mm_cvtsi128_si32(t); } +NPY_FINLINE npy_uint32 npyv_sum_u16(__m128i a) +{ + npyv_u32x2 res = npyv_expand_u32_u16(a); + return (unsigned)npyv_sum_u32(_mm_add_epi32(res.val[0], res.val[1])); +} + +NPY_FINLINE npy_uint64 npyv_sum_u64(__m128i a) +{ + npy_uint64 NPY_DECL_ALIGNED(32) idx[2]; + npyv_storea_u64(idx, a); + return idx[0] + idx[1]; +} + NPY_FINLINE float npyv_sum_f32(__m128 a) { #ifdef NPY_HAVE_SSE3 diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h index 1288a52a7b6d..fa114389a374 100644 --- a/numpy/core/src/common/simd/vsx/arithmetic.h +++ b/numpy/core/src/common/simd/vsx/arithmetic.h @@ -118,6 +118,25 @@ // Horizontal add: Calculates the sum of all vector elements. 
+NPY_FINLINE npy_uint32 npyv_sum_u8(npyv_u8 a) +{ + const npyv_u32 zero4 = npyv_zero_u32(); + npyv_u32 sum4 = vec_sum4s(a, zero4); + return (npy_uint32)vec_extract(vec_sums(sum4, zero4), 3); +} + +NPY_FINLINE npy_uint32 npyv_sum_u16(npyv_u16 a) +{ + const npyv_u32 zero4 = npyv_zero_u32(); + const npyv_u32 v4 = vec_mergeh(vec_adds(a, vec_sld(a, a, 8)), zero4); + return vec_extract(vec_sums(v4, zero4), 3); +} + +NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) +{ + return vec_extract(vec_add(a, vec_permi(a, a, 3)), 0); +} + NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { const npyv_u32 rs = vec_add(a, vec_sld(a, a, 8)); diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 23a5bb6c3064..ae98c47f7e86 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -736,11 +736,14 @@ def test_arithmetic_div(self): def test_arithmetic_reduce_sum(self): """ Test reduce sum intrinics: + npyv_sum_u8 + npyv_sum_u16 npyv_sum_u32 + npyv_sum_u64 npyv_sum_f32 npyv_sum_f64 """ - if self.sfx not in ("u32", "f32", "f64"): + if self.sfx not in ("u8", "u16", "u32", "u64", "f32", "f64"): return # reduce sum data = self._data() From a054ffd20ba2cd865a53e6ab149ae7d24acc97d9 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 20 Jan 2021 15:29:21 -0600 Subject: [PATCH 0372/1270] BUG: Keep ignoring most errors during array-protocol lookup Closes (the later point) in gh-17965 and reverts parts of gh-17817. Shapely did rely on being able to raise a NotImplementedError which then got ignored in the attribute lookup. Arguably, this should probably just raise an AttributeError to achieve that behaviour, but it means we can't just rip the band-aid off here. Since 1.20 is practically released, just reverting most of the change (leaving only recursion and memory error which are both arguably pretty fatal). Ignoring most errors should be deprecated (and I am happy to do so), but it is not important enough for 1.20 or very important in itself. Closes gh-17965 --- numpy/core/src/multiarray/ctors.c | 19 +++++++++++++++++-- numpy/core/tests/test_array_coercion.py | 12 +++++++----- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 58571b678aef..ef105ff2d3f8 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -2124,7 +2124,16 @@ PyArray_FromInterface(PyObject *origin) if (iface == NULL) { if (PyErr_Occurred()) { - return NULL; + if (PyErr_ExceptionMatches(PyExc_RecursionError) || + PyErr_ExceptionMatches(PyExc_MemoryError)) { + /* RecursionError and MemoryError are considered fatal */ + return NULL; + } + /* + * This probably be deprecated, but at least shapely raised + * a NotImplementedError expecting it to be cleared (gh-17965) + */ + PyErr_Clear(); } return Py_NotImplemented; } @@ -2392,7 +2401,13 @@ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) array_meth = PyArray_LookupSpecial_OnInstance(op, "__array__"); if (array_meth == NULL) { if (PyErr_Occurred()) { - return NULL; + if (PyErr_ExceptionMatches(PyExc_RecursionError) || + PyErr_ExceptionMatches(PyExc_MemoryError)) { + /* RecursionError and MemoryError are considered fatal */ + return NULL; + } + /* This probably be deprecated. 
*/ + PyErr_Clear(); } return Py_NotImplemented; } diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py index 08b32dfccddc..8f709dbe110b 100644 --- a/numpy/core/tests/test_array_coercion.py +++ b/numpy/core/tests/test_array_coercion.py @@ -702,17 +702,19 @@ def test_too_large_array_error_paths(self): @pytest.mark.parametrize("attribute", ["__array_interface__", "__array__", "__array_struct__"]) - def test_bad_array_like_attributes(self, attribute): - # Check that errors during attribute retrieval are raised unless - # they are Attribute errors. + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_attributes(self, attribute, error): + # RecursionError and MemoryError are considered fatal. All errors + # (except AttributeError) should probably be raised in the future, + # but shapely made use of it, so it will require a deprecation. class BadInterface: def __getattr__(self, attr): if attr == attribute: - raise RuntimeError + raise error super().__getattr__(attr) - with pytest.raises(RuntimeError): + with pytest.raises(error): np.array(BadInterface()) @pytest.mark.parametrize("error", [RecursionError, MemoryError]) From 5479875b384303b9e3f88eef61f4d2266ec4a94e Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 21 Jan 2021 09:47:37 +0800 Subject: [PATCH 0373/1270] add neon sum_u8/u16/u64 --- numpy/core/src/common/simd/neon/arithmetic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h index b5da3076cd02..90f62d063b9c 100644 --- a/numpy/core/src/common/simd/neon/arithmetic.h +++ b/numpy/core/src/common/simd/neon/arithmetic.h @@ -136,7 +136,7 @@ #define npyv_sum_u8 vaddvq_u8 #define npyv_sum_u16 vaddvq_u16 #define npyv_sum_u32 vaddvq_u32 - #define npyv_sum_u16 vaddvq_u64 + #define npyv_sum_u64 vaddvq_u64 #define npyv_sum_f32 vaddvq_f32 #define npyv_sum_f64 vaddvq_f64 #else From 00fbbc9308eb4517f06db6b065e6e1d4885c241f Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Thu, 21 Jan 2021 10:48:08 +0800 Subject: [PATCH 0374/1270] Optimize the avx512 sum_u8/sum_u16. 
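
The reductions keep the usual widen-then-reduce shape: the unsigned
8-bit lanes are widened (8 -> 16 -> 32 bits) before the horizontal
additions so no partial sum can wrap, and when the compiler exposes the
AVX512F reduction helpers (NPY_HAVE_AVX512F_REDUCE), npyv_sum_u32 and
npyv_sum_u64 map directly onto _mm512_reduce_add_epi32 and
_mm512_reduce_add_epi64.  For reference, a plain-C sketch of the
semantics an npyv_sum_u8 implementation is expected to provide; the
helper name and the nlanes parameter are illustrative only and not part
of this patch:

    /* Scalar model: add every unsigned 8-bit lane into a 32-bit total. */
    static inline npy_uint32
    scalar_sum_u8(const npy_uint8 *lanes, int nlanes)
    {
        npy_uint32 total = 0;
        for (int i = 0; i < nlanes; i++) {
            total += lanes[i];
        }
        return total;
    }

This is the "sum of all vector elements" behaviour exercised by
test_arithmetic_reduce_sum in numpy/core/tests/test_simd.py.
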
--- .../core/src/common/simd/avx512/arithmetic.h | 52 ++++++++++--------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index c85f425f3013..69c3caf93788 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -147,8 +147,8 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) NPY_FINLINE npy_uint32 npyv_sum_u8(__m512i a) { - __m512i a16 = _mm512_add_epi16(_mm512_cvtepu8_epi16(npyv512_lower_si256(a)), - _mm512_cvtepu8_epi16(npyv512_higher_si256(a))); + npyv_u16x2 res = npyv_expand_u16_u8(a); + __m512i a16 = npyv_add_u16(res.val[0], res.val[1]); a16 = _mm512_cvtepi16_epi32(_mm256_add_epi16(npyv512_lower_si256(a16), npyv512_higher_si256(a16))); __m256i a8 = _mm256_add_epi32(npyv512_lower_si256(a16), npyv512_higher_si256(a16)); __m128i a4 = _mm_add_epi32(_mm256_castsi256_si128(a8), _mm256_extracti128_si256(a8, 1)); @@ -156,32 +156,29 @@ NPY_FINLINE npy_uint32 npyv_sum_u8(__m512i a) return (npy_uint32)_mm_cvtsi128_si32(_mm_hadd_epi32(a4, a4)); } -NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) -{ - __m256i half = _mm256_add_epi32(npyv512_lower_si256(a), npyv512_higher_si256(a)); - __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); - quarter = _mm_hadd_epi32(quarter, quarter); - return _mm_cvtsi128_si32(_mm_hadd_epi32(quarter, quarter)); -} - -NPY_FINLINE npy_uint32 npyv_sum_u16(__m512i a) -{ - npyv_u32x2 res = npyv_expand_u32_u16(a); - return (unsigned)npyv_sum_u32(_mm512_add_epi32(res.val[0], res.val[1])); -} - -NPY_FINLINE npy_uint64 npyv_sum_u64(__m512i a) -{ - npy_uint64 NPY_DECL_ALIGNED(64) idx[2]; - __m256i half = _mm256_add_epi64(npyv512_lower_si256(a), npyv512_higher_si256(a)); - _mm_store_si128((__m128i*)idx, _mm_add_epi64(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1))); - return idx[0] + idx[1]; -} - #ifdef NPY_HAVE_AVX512F_REDUCE + #define npyv_sum_u32 _mm512_reduce_add_epi32 + #define npyv_sum_u64 _mm512_reduce_add_epi64 #define npyv_sum_f32 _mm512_reduce_add_ps #define npyv_sum_f64 _mm512_reduce_add_pd #else + + NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) + { + __m256i half = _mm256_add_epi32(npyv512_lower_si256(a), npyv512_higher_si256(a)); + __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); + quarter = _mm_hadd_epi32(quarter, quarter); + return _mm_cvtsi128_si32(_mm_hadd_epi32(quarter, quarter)); + } + + NPY_FINLINE npy_uint64 npyv_sum_u64(__m512i a) + { + npy_uint64 NPY_DECL_ALIGNED(64) idx[2]; + __m256i half = _mm256_add_epi64(npyv512_lower_si256(a), npyv512_higher_si256(a)); + _mm_store_si128((__m128i*)idx, _mm_add_epi64(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1))); + return idx[0] + idx[1]; + } + NPY_FINLINE float npyv_sum_f32(npyv_f32 a) { __m512 h64 = _mm512_shuffle_f32x4(a, a, _MM_SHUFFLE(3, 2, 3, 2)); @@ -194,6 +191,7 @@ NPY_FINLINE npy_uint64 npyv_sum_u64(__m512i a) __m512 sum4 = _mm512_add_ps(sum8, h4); return _mm_cvtss_f32(_mm512_castps512_ps128(sum4)); } + NPY_FINLINE double npyv_sum_f64(npyv_f64 a) { __m512d h64 = _mm512_shuffle_f64x2(a, a, _MM_SHUFFLE(3, 2, 3, 2)); @@ -206,4 +204,10 @@ NPY_FINLINE npy_uint64 npyv_sum_u64(__m512i a) } #endif +NPY_FINLINE npy_uint32 npyv_sum_u16(__m512i a) +{ + npyv_u32x2 res = npyv_expand_u32_u16(a); + return (unsigned)npyv_sum_u32(_mm512_add_epi32(res.val[0], res.val[1])); +} + #endif // _NPY_SIMD_AVX512_ARITHMETIC_H From 
a6289b5c2b60b15371605ab95f5ed53dd60d7f14 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 21 Jan 2021 18:35:14 +0100 Subject: [PATCH 0375/1270] TST: Run mypy once and cache the results --- numpy/typing/tests/test_typing.py | 78 +++++++++++++------------------ 1 file changed, 33 insertions(+), 45 deletions(-) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 18520a7571c4..ef169b4ed4ec 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -25,15 +25,40 @@ MYPY_INI = os.path.join(DATA_DIR, "mypy.ini") CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache") +#: A dictionary with file names as keys and lists of the mypy stdout as values. +#: To-be populated by `run_mypy`. +OUTPUT_MYPY: Dict[str, List[str]] = {} + @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") -@pytest.fixture(scope="session", autouse=True) -def clear_cache() -> None: - """Clears the mypy cache before running any of the typing tests.""" +@pytest.fixture(scope="module", autouse=True) +def run_mypy() -> None: + """Clears the cache and run mypy before running any of the typing tests. + + The mypy results are cached in `OUTPUT_MYPY` for further use. + + """ if os.path.isdir(CACHE_DIR): shutil.rmtree(CACHE_DIR) + for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR): + # Run mypy + stdout, stderr, _ = api.run([ + "--config-file", + MYPY_INI, + "--cache-dir", + CACHE_DIR, + directory, + ]) + assert not stderr, directory + stdout = stdout.replace('*', '') + + # Parse the output + key = lambda n: n.split(':', 1)[0] + iterator = itertools.groupby(stdout.split("\n"), key=key) + OUTPUT_MYPY.update((os.path.abspath(k), list(v)) for k, v in iterator) + def get_test_cases(directory): for root, _, files in os.walk(directory): @@ -54,15 +79,7 @@ def get_test_cases(directory): @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) def test_success(path): - stdout, stderr, exitcode = api.run([ - "--config-file", - MYPY_INI, - "--cache-dir", - CACHE_DIR, - path, - ]) - assert exitcode == 0, stdout - assert re.match(r"Success: no issues found in \d+ source files?", stdout.strip()) + assert path not in OUTPUT_MYPY @pytest.mark.slow @@ -71,29 +88,12 @@ def test_success(path): def test_fail(path): __tracebackhide__ = True - stdout, stderr, exitcode = api.run([ - "--config-file", - MYPY_INI, - "--cache-dir", - CACHE_DIR, - path, - ]) - assert exitcode != 0 - with open(path) as fin: lines = fin.readlines() errors = defaultdict(lambda: "") - error_lines = stdout.rstrip("\n").split("\n") - assert re.match( - r"Found \d+ errors? in \d+ files? 
\(checked \d+ source files?\)", - error_lines[-1].strip(), - ) - for error_line in error_lines[:-1]: - error_line = error_line.strip() - if not error_line: - continue - + assert path in OUTPUT_MYPY + for error_line in OUTPUT_MYPY[path]: match = re.match( r"^.+\.py:(?P\d+): (error|note): .+$", error_line, @@ -215,23 +215,11 @@ def _parse_reveals(file: IO[str]) -> List[str]: def test_reveal(path): __tracebackhide__ = True - stdout, stderr, exitcode = api.run([ - "--config-file", - MYPY_INI, - "--cache-dir", - CACHE_DIR, - path, - ]) - with open(path) as fin: lines = _parse_reveals(fin) - stdout_list = stdout.replace('*', '').split("\n") - for error_line in stdout_list: - error_line = error_line.strip() - if not error_line: - continue - + assert path in OUTPUT_MYPY + for error_line in OUTPUT_MYPY[path]: match = re.match( r"^.+\.py:(?P\d+): note: .+$", error_line, From 33273e4ae770cac1ee0cb304ad5a0056bb59ad56 Mon Sep 17 00:00:00 2001 From: Bas van Beek <43369155+BvB93@users.noreply.github.com> Date: Fri, 22 Jan 2021 00:44:58 +0100 Subject: [PATCH 0376/1270] ENH: Add dtype support to the array comparison ops (#18128) * ENH: Added `_ArrayLikeNumber` * ENH: Added dtype support to the array comparison ops * MAINT: Made `dtype` and `ndarray` covariant The dtypes scalar-type and ndarrays' dtype are now covariant instead of invariant. This change is necasary in order to ensure that all generic subclasses can be used as underlying scalar type. * TST: Updated the comparison typing tests * MAINT: Fixed an issue where certain `array > arraylike` operations where neglected More specifically operations between array-likes of `timedelta64` and `ndarray`s that can be cast into `timedelta64`. For example: ar_i = np.array([1]) seq_m = [np.timedelta64()] ar_i > seq_m --- numpy/__init__.pyi | 137 ++++++++++++++---- numpy/typing/__init__.py | 5 + numpy/typing/_array_like.py | 18 ++- numpy/typing/_callable.py | 16 +- numpy/typing/tests/data/fail/comparisons.py | 28 ++++ numpy/typing/tests/data/pass/comparisons.py | 96 +++++++++--- numpy/typing/tests/data/reveal/comparisons.py | 77 +++++----- 7 files changed, 282 insertions(+), 95 deletions(-) create mode 100644 numpy/typing/tests/data/fail/comparisons.py diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index dbf8077831e1..3d92a543b32c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -9,8 +9,16 @@ from numpy.core._internal import _ctypes from numpy.typing import ( # Arrays ArrayLike, + _ArrayND, + _ArrayOrScalar, + _NestedSequence, + _RecursiveSequence, + _ArrayLikeNumber_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, # DTypes + DTypeLike, _SupportsDType, _VoidDTypeLike, @@ -127,6 +135,7 @@ from typing import ( Iterable, List, Mapping, + NoReturn, Optional, overload, Sequence, @@ -584,19 +593,19 @@ where: Any who: Any _NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) -_DTypeScalar = TypeVar("_DTypeScalar", bound=generic) +_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) _ByteOrder = Literal["S", "<", ">", "=", "|", "L", "B", "N", "I"] -class dtype(Generic[_DTypeScalar]): +class dtype(Generic[_DTypeScalar_co]): names: Optional[Tuple[str, ...]] # Overload for subclass of generic @overload def __new__( cls, - dtype: Type[_DTypeScalar], + dtype: Type[_DTypeScalar_co], align: bool = ..., copy: bool = ..., - ) -> dtype[_DTypeScalar]: ... + ) -> dtype[_DTypeScalar_co]: ... # Overloads for string aliases, Python types, and some assorted # other special cases. 
Order is sometimes important because of the # subtype relationships @@ -711,10 +720,10 @@ class dtype(Generic[_DTypeScalar]): @overload def __new__( cls, - dtype: dtype[_DTypeScalar], + dtype: dtype[_DTypeScalar_co], align: bool = ..., copy: bool = ..., - ) -> dtype[_DTypeScalar]: ... + ) -> dtype[_DTypeScalar_co]: ... # TODO: handle _SupportsDType better @overload def __new__( @@ -791,7 +800,7 @@ class dtype(Generic[_DTypeScalar]): @property def str(self) -> builtins.str: ... @property - def type(self) -> Type[_DTypeScalar]: ... + def type(self) -> Type[_DTypeScalar_co]: ... class _flagsobj: aligned: bool @@ -1319,6 +1328,7 @@ class _ArrayOrScalarCommon: ) -> _NdArraySubClass: ... _DType = TypeVar("_DType", bound=dtype[Any]) +_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) # TODO: Set the `bound` to something more suitable once we # have proper shape support @@ -1327,7 +1337,7 @@ _ShapeType = TypeVar("_ShapeType", bound=Any) _BufferType = Union[ndarray, bytes, bytearray, memoryview] _Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"] -class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property def base(self) -> Optional[ndarray]: ... @property @@ -1352,7 +1362,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): order: _OrderKACF = ..., ) -> _ArraySelf: ... @overload - def __array__(self, __dtype: None = ...) -> ndarray[Any, _DType]: ... + def __array__(self, __dtype: None = ...) -> ndarray[Any, _DType_co]: ... @overload def __array__(self, __dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... @property @@ -1464,10 +1474,77 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): def __iter__(self) -> Any: ... def __contains__(self, key) -> bool: ... def __index__(self) -> int: ... - def __lt__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... - def __le__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... - def __gt__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... - def __ge__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... + + # The last overload is for catching recursive objects whose + # nesting is too deep. + # The first overload is for catching `bytes` (as they are a subtype of + # `Sequence[int]`) and `str`. As `str` is a recusive sequence of + # strings, it will pass through the final overload otherwise + + @overload + def __lt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __lt__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __lt__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __lt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __lt__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + @overload + def __lt__( + self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> _ArrayOrScalar[bool_]: ... + + @overload + def __le__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __le__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __le__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... 
+ @overload + def __le__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __le__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + @overload + def __le__( + self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> _ArrayOrScalar[bool_]: ... + + @overload + def __gt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __gt__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __gt__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __gt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __gt__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + @overload + def __gt__( + self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> _ArrayOrScalar[bool_]: ... + + @overload + def __ge__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ge__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __ge__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __ge__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __ge__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + @overload + def __ge__( + self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> _ArrayOrScalar[bool_]: ... + def __matmul__(self, other: ArrayLike) -> Any: ... # NOTE: `ndarray` does not implement `__imatmul__` def __rmatmul__(self, other: ArrayLike) -> Any: ... @@ -1516,7 +1593,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): def __ior__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property - def dtype(self) -> _DType: ... + def dtype(self) -> _DType_co: ... # NOTE: while `np.generic` is not technically an instance of `ABCMeta`, # the `@abstractmethod` decorator is herein used to (forcefully) deny @@ -1586,10 +1663,10 @@ class number(generic, Generic[_NBit1]): # type: ignore __rpow__: _NumberOp __truediv__: _NumberOp __rtruediv__: _NumberOp - __lt__: _ComparisonOp[_NumberLike_co] - __le__: _ComparisonOp[_NumberLike_co] - __gt__: _ComparisonOp[_NumberLike_co] - __ge__: _ComparisonOp[_NumberLike_co] + __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] class bool_(generic): def __init__(self, __value: object = ...) -> None: ... 
@@ -1628,10 +1705,10 @@ class bool_(generic): __rmod__: _BoolMod __divmod__: _BoolDivMod __rdivmod__: _BoolDivMod - __lt__: _ComparisonOp[_NumberLike_co] - __le__: _ComparisonOp[_NumberLike_co] - __gt__: _ComparisonOp[_NumberLike_co] - __ge__: _ComparisonOp[_NumberLike_co] + __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] class object_(generic): def __init__(self, __value: object = ...) -> None: ... @@ -1660,10 +1737,10 @@ class datetime64(generic): @overload def __sub__(self, other: _TD64Like_co) -> datetime64: ... def __rsub__(self, other: datetime64) -> timedelta64: ... - __lt__: _ComparisonOp[datetime64] - __le__: _ComparisonOp[datetime64] - __gt__: _ComparisonOp[datetime64] - __ge__: _ComparisonOp[datetime64] + __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] # Support for `__index__` was added in python 3.8 (bpo-20092) if sys.version_info >= (3, 8): @@ -1762,10 +1839,10 @@ class timedelta64(generic): def __rmod__(self, other: timedelta64) -> timedelta64: ... def __divmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... def __rdivmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... - __lt__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] - __le__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] - __gt__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] - __ge__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] + __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] class unsignedinteger(integer[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index a1d943235115..4ec1f4b2fcae 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -302,18 +302,23 @@ class _8Bit(_16Bit): ... # type: ignore[misc] ArrayLike as ArrayLike, _ArrayLike, _NestedSequence, + _RecursiveSequence, _SupportsArray, + _ArrayND, + _ArrayOrScalar, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt_co, _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeNumber_co, _ArrayLikeTD64_co, _ArrayLikeDT64_co, _ArrayLikeObject_co, _ArrayLikeVoid_co, _ArrayLikeStr_co, _ArrayLikeBytes_co, + ) if __doc__ is not None: diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index 35413393c33a..133f38800591 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -12,6 +12,7 @@ integer, floating, complexfloating, + number, timedelta64, datetime64, object_, @@ -33,15 +34,17 @@ HAVE_PROTOCOL = True _T = TypeVar("_T") +_ScalarType = TypeVar("_ScalarType", bound=generic) _DType = TypeVar("_DType", bound="dtype[Any]") +_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]") if TYPE_CHECKING or HAVE_PROTOCOL: # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None`) of the to-be returned array. 
# Concrete implementations of the protocol are responsible for adding # any and all remaining overloads - class _SupportsArray(Protocol[_DType]): - def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... + class _SupportsArray(Protocol[_DType_co]): + def __array__(self, dtype: None = ...) -> ndarray[Any, _DType_co]: ... else: _SupportsArray = Any @@ -100,6 +103,10 @@ def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... "dtype[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]", Union[bool, int, float, complex], ] +_ArrayLikeNumber_co = _ArrayLike[ + "dtype[Union[bool_, number[Any]]]", + Union[bool, int, float, complex], +] _ArrayLikeTD64_co = _ArrayLike[ "dtype[Union[bool_, integer[Any], timedelta64]]", Union[bool, int], @@ -116,3 +123,10 @@ def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... "dtype[bytes_]", bytes, ] + +if TYPE_CHECKING: + _ArrayND = ndarray[Any, dtype[_ScalarType]] + _ArrayOrScalar = Union[_ScalarType, _ArrayND[_ScalarType]] +else: + _ArrayND = Any + _ArrayOrScalar = Any diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index e1fdee3ba194..1591ca144591 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -8,6 +8,8 @@ """ +from __future__ import annotations + import sys from typing import ( Union, @@ -21,6 +23,7 @@ from numpy import ( ndarray, + dtype, generic, bool_, timedelta64, @@ -44,7 +47,7 @@ _NumberLike_co, ) from . import NBitBase -from ._array_like import ArrayLike +from ._array_like import ArrayLike, _ArrayOrScalar if sys.version_info >= (3, 8): from typing import Protocol @@ -58,8 +61,9 @@ HAVE_PROTOCOL = True if TYPE_CHECKING or HAVE_PROTOCOL: - _T = TypeVar("_T") - _2Tuple = Tuple[_T, _T] + _T1 = TypeVar("_T1") + _T2 = TypeVar("_T2") + _2Tuple = Tuple[_T1, _T1] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) @@ -316,11 +320,11 @@ def __call__( class _NumberOp(Protocol): def __call__(self, __other: _NumberLike_co) -> Any: ... - class _ComparisonOp(Protocol[_T]): + class _ComparisonOp(Protocol[_T1, _T2]): @overload - def __call__(self, __other: _T) -> bool_: ... + def __call__(self, __other: _T1) -> bool_: ... @overload - def __call__(self, __other: ArrayLike) -> Union[ndarray, bool_]: ... + def __call__(self, __other: _T2) -> _ArrayOrScalar[bool_]: ... 
else: _BoolOp = Any diff --git a/numpy/typing/tests/data/fail/comparisons.py b/numpy/typing/tests/data/fail/comparisons.py new file mode 100644 index 000000000000..cad1c6555de3 --- /dev/null +++ b/numpy/typing/tests/data/fail/comparisons.py @@ -0,0 +1,28 @@ +from typing import Any +import numpy as np + +AR_i: np.ndarray[Any, np.dtype[np.int64]] +AR_f: np.ndarray[Any, np.dtype[np.float64]] +AR_c: np.ndarray[Any, np.dtype[np.complex128]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] + +AR_f > AR_m # E: Unsupported operand types +AR_c > AR_m # E: Unsupported operand types + +AR_m > AR_f # E: Unsupported operand types +AR_m > AR_c # E: Unsupported operand types + +AR_i > AR_M # E: Unsupported operand types +AR_f > AR_M # E: Unsupported operand types +AR_m > AR_M # E: Unsupported operand types + +AR_M > AR_i # E: Unsupported operand types +AR_M > AR_f # E: Unsupported operand types +AR_M > AR_m # E: Unsupported operand types + +# Unfortunately `NoReturn` errors are not the most descriptive +_1 = AR_i > str() # E: Need type annotation +_2 = AR_i > bytes() # E: Need type annotation +_3 = str() > AR_M # E: Need type annotation +_4 = bytes() > AR_M # E: Need type annotation diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index b298117a62af..ce41de43596e 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +from typing import Any import numpy as np c16 = np.complex128() @@ -20,11 +23,62 @@ f = float() i = int() -AR = np.array([0], dtype=np.int64) -AR.setflags(write=False) - SEQ = (0, 1, 2, 3, 4) +AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True]) +AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) +AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) +AR_f: np.ndarray[Any, np.dtype[np.float_]] = np.array([1.0]) +AR_c: np.ndarray[Any, np.dtype[np.complex_]] = np.array([1.0j]) +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) +AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) + +# Arrays + +AR_b > AR_b +AR_b > AR_u +AR_b > AR_i +AR_b > AR_f +AR_b > AR_c + +AR_u > AR_b +AR_u > AR_u +AR_u > AR_i +AR_u > AR_f +AR_u > AR_c + +AR_i > AR_b +AR_i > AR_u +AR_i > AR_i +AR_i > AR_f +AR_i > AR_c + +AR_f > AR_b +AR_f > AR_u +AR_f > AR_i +AR_f > AR_f +AR_f > AR_c + +AR_c > AR_b +AR_c > AR_u +AR_c > AR_i +AR_c > AR_f +AR_c > AR_c + +AR_m > AR_b +AR_m > AR_u +AR_m > AR_i +AR_b > AR_m +AR_u > AR_m +AR_i > AR_m + +AR_M > AR_M + +AR_O > AR_O +1 > AR_O +AR_O > 1 + # Time structures dt > dt @@ -33,7 +87,7 @@ td > i td > i4 td > i8 -td > AR +td > AR_i td > SEQ # boolean @@ -51,7 +105,7 @@ b_ > c b_ > c16 b_ > c8 -b_ > AR +b_ > AR_i b_ > SEQ # Complex @@ -67,7 +121,7 @@ c16 > c c16 > f c16 > i -c16 > AR +c16 > AR_i c16 > SEQ c16 > c16 @@ -81,7 +135,7 @@ c > c16 f > c16 i > c16 -AR > c16 +AR_i > c16 SEQ > c16 c8 > c16 @@ -95,7 +149,7 @@ c8 > c c8 > f c8 > i -c8 > AR +c8 > AR_i c8 > SEQ c16 > c8 @@ -109,7 +163,7 @@ c > c8 f > c8 i > c8 -AR > c8 +AR_i > c8 SEQ > c8 # Float @@ -123,7 +177,7 @@ f8 > c f8 > f f8 > i -f8 > AR +f8 > AR_i f8 > SEQ f8 > f8 @@ -135,7 +189,7 @@ c > f8 f > f8 i > f8 -AR > f8 +AR_i > f8 SEQ > f8 f4 > f8 @@ -147,7 +201,7 @@ f4 > c f4 > f f4 > i -f4 > AR +f4 > AR_i f4 > SEQ f8 > f4 @@ -159,7 +213,7 @@ c > f4 f > f4 i > f4 
-AR > f4 +AR_i > f4 SEQ > f4 # Int @@ -173,7 +227,7 @@ i8 > c i8 > f i8 > i -i8 > AR +i8 > AR_i i8 > SEQ u8 > u8 @@ -184,7 +238,7 @@ u8 > c u8 > f u8 > i -u8 > AR +u8 > AR_i u8 > SEQ i8 > i8 @@ -196,7 +250,7 @@ c > i8 f > i8 i > i8 -AR > i8 +AR_i > i8 SEQ > i8 u8 > u8 @@ -207,7 +261,7 @@ c > u8 f > u8 i > u8 -AR > u8 +AR_i > u8 SEQ > u8 i4 > i8 @@ -215,7 +269,7 @@ i4 > i i4 > b_ i4 > b -i4 > AR +i4 > AR_i i4 > SEQ u4 > i8 @@ -225,7 +279,7 @@ u4 > i u4 > b_ u4 > b -u4 > AR +u4 > AR_i u4 > SEQ i8 > i4 @@ -233,7 +287,7 @@ i > i4 b_ > i4 b > i4 -AR > i4 +AR_i > i4 SEQ > i4 i8 > u4 @@ -243,5 +297,5 @@ b_ > u4 b > u4 i > u4 -AR > u4 +AR_i > u4 SEQ > u4 diff --git a/numpy/typing/tests/data/reveal/comparisons.py b/numpy/typing/tests/data/reveal/comparisons.py index 507f713c7d5d..5053a9e8215e 100644 --- a/numpy/typing/tests/data/reveal/comparisons.py +++ b/numpy/typing/tests/data/reveal/comparisons.py @@ -33,8 +33,13 @@ reveal_type(td > i) # E: numpy.bool_ reveal_type(td > i4) # E: numpy.bool_ reveal_type(td > i8) # E: numpy.bool_ -reveal_type(td > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(td > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] + +reveal_type(td > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(td > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > td) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > td) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] # boolean @@ -51,8 +56,8 @@ reveal_type(b_ > c) # E: numpy.bool_ reveal_type(b_ > c16) # E: numpy.bool_ reveal_type(b_ > c8) # E: numpy.bool_ -reveal_type(b_ > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(b_ > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(b_ > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(b_ > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] # Complex @@ -67,8 +72,8 @@ reveal_type(c16 > c) # E: numpy.bool_ reveal_type(c16 > f) # E: numpy.bool_ reveal_type(c16 > i) # E: numpy.bool_ -reveal_type(c16 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(c16 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(c16 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(c16 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(c16 > c16) # E: numpy.bool_ reveal_type(f8 > c16) # E: numpy.bool_ @@ -81,8 +86,8 @@ reveal_type(c > c16) # E: numpy.bool_ reveal_type(f > c16) # E: numpy.bool_ reveal_type(i > c16) # E: numpy.bool_ -reveal_type(AR > c16) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > c16) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > c16) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > c16) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(c8 > c16) # E: numpy.bool_ reveal_type(c8 > f8) # E: numpy.bool_ @@ -95,8 +100,8 @@ reveal_type(c8 > c) # E: numpy.bool_ reveal_type(c8 > f) # E: numpy.bool_ reveal_type(c8 > i) # E: numpy.bool_ -reveal_type(c8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(c8 > SEQ) # E: 
Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(c8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(c8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(c16 > c8) # E: numpy.bool_ reveal_type(f8 > c8) # E: numpy.bool_ @@ -109,8 +114,8 @@ reveal_type(c > c8) # E: numpy.bool_ reveal_type(f > c8) # E: numpy.bool_ reveal_type(i > c8) # E: numpy.bool_ -reveal_type(AR > c8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > c8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > c8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > c8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] # Float @@ -123,8 +128,8 @@ reveal_type(f8 > c) # E: numpy.bool_ reveal_type(f8 > f) # E: numpy.bool_ reveal_type(f8 > i) # E: numpy.bool_ -reveal_type(f8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(f8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(f8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(f8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(f8 > f8) # E: numpy.bool_ reveal_type(i8 > f8) # E: numpy.bool_ @@ -135,8 +140,8 @@ reveal_type(c > f8) # E: numpy.bool_ reveal_type(f > f8) # E: numpy.bool_ reveal_type(i > f8) # E: numpy.bool_ -reveal_type(AR > f8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > f8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > f8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > f8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(f4 > f8) # E: numpy.bool_ reveal_type(f4 > i8) # E: numpy.bool_ @@ -147,8 +152,8 @@ reveal_type(f4 > c) # E: numpy.bool_ reveal_type(f4 > f) # E: numpy.bool_ reveal_type(f4 > i) # E: numpy.bool_ -reveal_type(f4 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(f4 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(f4 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(f4 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(f8 > f4) # E: numpy.bool_ reveal_type(i8 > f4) # E: numpy.bool_ @@ -159,8 +164,8 @@ reveal_type(c > f4) # E: numpy.bool_ reveal_type(f > f4) # E: numpy.bool_ reveal_type(i > f4) # E: numpy.bool_ -reveal_type(AR > f4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > f4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > f4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > f4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] # Int @@ -173,8 +178,8 @@ reveal_type(i8 > c) # E: numpy.bool_ reveal_type(i8 > f) # E: numpy.bool_ reveal_type(i8 > i) # E: numpy.bool_ -reveal_type(i8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(i8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(i8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(i8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(u8 > u8) # E: numpy.bool_ reveal_type(u8 > i4) # E: numpy.bool_ @@ -184,8 +189,8 @@ reveal_type(u8 > c) # E: numpy.bool_ reveal_type(u8 > f) # E: numpy.bool_ reveal_type(u8 > i) # E: numpy.bool_ -reveal_type(u8 > AR) # E: 
Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(u8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(u8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(u8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(i8 > i8) # E: numpy.bool_ reveal_type(u8 > i8) # E: numpy.bool_ @@ -196,8 +201,8 @@ reveal_type(c > i8) # E: numpy.bool_ reveal_type(f > i8) # E: numpy.bool_ reveal_type(i > i8) # E: numpy.bool_ -reveal_type(AR > i8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > i8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > i8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > i8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(u8 > u8) # E: numpy.bool_ reveal_type(i4 > u8) # E: numpy.bool_ @@ -207,16 +212,16 @@ reveal_type(c > u8) # E: numpy.bool_ reveal_type(f > u8) # E: numpy.bool_ reveal_type(i > u8) # E: numpy.bool_ -reveal_type(AR > u8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > u8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > u8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > u8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(i4 > i8) # E: numpy.bool_ reveal_type(i4 > i4) # E: numpy.bool_ reveal_type(i4 > i) # E: numpy.bool_ reveal_type(i4 > b_) # E: numpy.bool_ reveal_type(i4 > b) # E: numpy.bool_ -reveal_type(i4 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(i4 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(i4 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(i4 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(u4 > i8) # E: numpy.bool_ reveal_type(u4 > i4) # E: numpy.bool_ @@ -225,16 +230,16 @@ reveal_type(u4 > i) # E: numpy.bool_ reveal_type(u4 > b_) # E: numpy.bool_ reveal_type(u4 > b) # E: numpy.bool_ -reveal_type(u4 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(u4 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(u4 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(u4 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(i8 > i4) # E: numpy.bool_ reveal_type(i4 > i4) # E: numpy.bool_ reveal_type(i > i4) # E: numpy.bool_ reveal_type(b_ > i4) # E: numpy.bool_ reveal_type(b > i4) # E: numpy.bool_ -reveal_type(AR > i4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > i4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > i4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > i4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(i8 > u4) # E: numpy.bool_ reveal_type(i4 > u4) # E: numpy.bool_ @@ -243,5 +248,5 @@ reveal_type(b_ > u4) # E: numpy.bool_ reveal_type(b > u4) # E: numpy.bool_ reveal_type(i > u4) # E: numpy.bool_ -reveal_type(AR > u4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > u4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > u4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > u4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] From 605f82c977f2e00250d26a779fcd521ba6f38848 Mon Sep 17 00:00:00 2001 
From: Bas van Beek Date: Fri, 22 Jan 2021 01:17:30 +0100 Subject: [PATCH 0377/1270] MAINT: Manually assign the full path rather than using `os.path.abspath` Fixes an issue with the azure (windows) tests where `abspath` would point to the wrong directory --- numpy/typing/tests/test_typing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index ef169b4ed4ec..a0d92feb9d1c 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -42,6 +42,7 @@ def run_mypy() -> None: if os.path.isdir(CACHE_DIR): shutil.rmtree(CACHE_DIR) + root = os.path.dirname(os.path.dirname(np.__file__)) for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR): # Run mypy stdout, stderr, _ = api.run([ @@ -57,7 +58,7 @@ def run_mypy() -> None: # Parse the output key = lambda n: n.split(':', 1)[0] iterator = itertools.groupby(stdout.split("\n"), key=key) - OUTPUT_MYPY.update((os.path.abspath(k), list(v)) for k, v in iterator) + OUTPUT_MYPY.update((os.path.join(root, k), list(v)) for k, v in iterator) def get_test_cases(directory): From ec90d3c175e6cb1efd0ab8172b909f6fde8c277d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 21 Jan 2021 11:42:03 -0700 Subject: [PATCH 0378/1270] MAINT: Update pavement.py to work with versioneer. --- pavement.py | 48 ++++++++++++++++++++---------------------------- 1 file changed, 20 insertions(+), 28 deletions(-) diff --git a/pavement.py b/pavement.py index 373354432452..66c2cf953eaf 100644 --- a/pavement.py +++ b/pavement.py @@ -26,6 +26,7 @@ import sys import shutil import hashlib +import textwrap # The paver package needs to be installed to run tasks import paver @@ -49,25 +50,13 @@ installersdir=os.path.join("release", "installers")),) -#----------------------------- -# Generate the release version -#----------------------------- +#------------------------ +# Get the release version +#------------------------ sys.path.insert(0, os.path.dirname(__file__)) try: - setup_py = __import__("setup") - FULLVERSION = setup_py.VERSION - # This is duplicated from setup.py - if os.path.exists('.git'): - GIT_REVISION = setup_py.git_version() - elif os.path.exists('numpy/version.py'): - # must be a source distribution, use existing version file - from numpy.version import git_revision as GIT_REVISION - else: - GIT_REVISION = "Unknown" - - if not setup_py.ISRELEASED: - FULLVERSION += '.dev0+' + GIT_REVISION[:7] + from setup import FULLVERSION finally: sys.path.pop(0) @@ -210,22 +199,25 @@ def write_release_task(options, filename='README'): with open(notes) as fnotes: freadme.write(fnotes.read()) - freadme.writelines(""" -Checksums -========= + freadme.writelines(textwrap.dedent( + """ + Checksums + ========= -MD5 ---- -:: + MD5 + --- + :: -""") + """)) freadme.writelines([f' {c}\n' for c in compute_md5(idirs)]) - freadme.writelines(""" -SHA256 ------- -:: -""") + freadme.writelines(textwrap.dedent( + """ + SHA256 + ------ + :: + + """)) freadme.writelines([f' {c}\n' for c in compute_sha256(idirs)]) # generate md file using pandoc before signing From 518edc70744faa0bd1443c9f9ce76a1c01d7749e Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 22 Jan 2021 08:45:21 +0200 Subject: [PATCH 0379/1270] TST: raise memory limit for test --- numpy/lib/tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index aa449976435a..a823b12a266a 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ 
-574,7 +574,7 @@ def test_unicode_and_bytes_fmt(self, fmt, iotype): @pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work") @pytest.mark.slow - @requires_memory(free_bytes=7e9) + @requires_memory(free_bytes=8e9) def test_large_zip(self): def check_large_zip(memoryerror_raised): memoryerror_raised.value = False From 617193440550cd50b7426a2c2008fdb60944c226 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 22 Jan 2021 16:30:21 +0800 Subject: [PATCH 0380/1270] treat u8/u16 as overflow protection. --- numpy/core/src/_simd/_simd.dispatch.c.src | 15 +++++-- numpy/core/src/common/simd/avx2/arithmetic.h | 29 +++++++------ .../core/src/common/simd/avx512/arithmetic.h | 41 +++++++++++-------- numpy/core/src/common/simd/neon/arithmetic.h | 8 ++-- numpy/core/src/common/simd/sse/arithmetic.h | 8 ++-- numpy/core/src/common/simd/sse/sse.h | 1 + numpy/core/src/common/simd/sse/utils.h | 19 +++++++++ numpy/core/src/common/simd/vsx/arithmetic.h | 21 ++++++---- numpy/core/tests/test_simd.py | 20 +++++++-- 9 files changed, 110 insertions(+), 52 deletions(-) create mode 100644 numpy/core/src/common/simd/sse/utils.h diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index a861cd94496a..49378b5180c7 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -16,7 +16,7 @@ * #sfx = u8, s8, u16, s16, u32, s32, u64, s64, f32, f64# * #bsfx = b8, b8, b16, b16, b32, b32, b64, b64, b32, b64# * #esfx = u16, s8, u32, s16, u32, s32, u64, s64, f32, f64# - * #sum_ret = u32, 0, u32, 0, u32, 0, u64, 0, f32, f64# + * #sum_ret = u16, 0, u32, 0, u32, 0, u64, 0, f32, f64# * #expand_sup =1, 0, 1, 0, 0, 0, 0, 0, 0, 0# * #simd_sup = 1, 1, 1, 1, 1, 1, 1, 1, 1, NPY_SIMD_F64# * #fp_only = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# @@ -24,7 +24,8 @@ * #mul_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 1# * #div_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #fused_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# - * #sum_sup = 1, 0, 1, 0, 1, 0, 1, 0, 1, 1# + * #sumup_sup = 1, 0, 1, 0, 0, 0, 0, 0, 0, 0# + * #sum_sup = 0, 0, 0, 0, 1, 0, 1, 0, 1, 1# * #rev64_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 0# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# @@ -366,6 +367,10 @@ SIMD_IMPL_INTRIN_3(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@, v@sfx@) SIMD_IMPL_INTRIN_1(sum_@sfx@, @sum_ret@, v@sfx@) #endif // sum_sup +#if @sumup_sup@ +SIMD_IMPL_INTRIN_1(sumup_@sfx@, @sum_ret@, v@sfx@) +#endif // sumup_sup + /*************************** * Math ***************************/ @@ -453,7 +458,8 @@ static PyMethodDef simd__intrinsics_methods[] = { * #mul_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 1# * #div_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #fused_sup = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# - * #sum_sup = 1, 0, 1, 0, 1, 0, 1, 0, 1, 1# + * #sumup_sup = 1, 0, 1, 0, 0, 0, 0, 0, 0, 0# + * #sum_sup = 0, 0, 0, 0, 1, 0, 1, 0, 1, 1# * #rev64_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 0# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# @@ -575,6 +581,9 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@) SIMD_INTRIN_DEF(sum_@sfx@) #endif // sum_sup +#if @sumup_sup@ +SIMD_INTRIN_DEF(sumup_@sfx@) +#endif // sumup_sup /*************************** * Math ***************************/ diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index c2153718d449..770d4230c902 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -5,6 +5,7 @@ #ifndef 
_NPY_SIMD_AVX2_ARITHMETIC_H #define _NPY_SIMD_AVX2_ARITHMETIC_H +#include "../sse/utils.h" /*************************** * Addition ***************************/ @@ -119,11 +120,12 @@ // Horizontal add: Calculates the sum of all vector elements. -NPY_FINLINE npy_uint32 npyv_sum_u8(__m256i a) +NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { - __m256i half = _mm256_sad_epu8(a, _mm256_setzero_si256()); - __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); - return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(quarter, _mm_unpackhi_epi64(quarter, quarter))); + __m256i four = _mm256_sad_epu8(a, _mm256_setzero_si256()); + __m128i two = _mm_add_epi16(_mm256_castsi256_si128(four), _mm256_extracti128_si256(four, 1)); + __m128i one = _mm_add_epi16(two, _mm_unpackhi_epi64(two, two)); + return (npy_uint16)_mm_cvtsi128_si32(one); } NPY_FINLINE npy_uint32 npyv_sum_u32(__m256i a) @@ -135,20 +137,23 @@ NPY_FINLINE npy_uint32 npyv_sum_u32(__m256i a) return _mm_cvtsi128_si32(s1); } -NPY_FINLINE npy_uint32 npyv_sum_u16(__m256i a) +NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) { - npyv_u32x2 res = npyv_expand_u32_u16(a); - return (unsigned)npyv_sum_u32(_mm256_add_epi32(res.val[0], res.val[1])); + const npyv_u16 even_mask = _mm256_set1_epi32(0x0000FFFF); + __m256i even = _mm256_and_si256(a, even_mask); + __m256i odd = _mm256_srli_epi32(a, 16); + __m256i eight = _mm256_add_epi32(even, odd); + return npyv_sum_u32(eight); } -NPY_FINLINE npy_uint64 npyv_sum_u64(__m256i a) +NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) { - npy_uint64 NPY_DECL_ALIGNED(32) idx[2]; - _mm_store_si128((__m128i*)idx, _mm_add_epi64(_mm256_castsi256_si128(a), _mm256_extracti128_si256(a, 1))); - return idx[0] + idx[1]; + __m256i two = _mm256_add_epi64(a, _mm256_shuffle_epi32(a, _MM_SHUFFLE(1, 0, 3, 2))); + __m128i one = _mm_add_epi64(_mm256_castsi256_si128(two), _mm256_extracti128_si256(two, 1)); + return (npy_uint64)npyv128_cvtsi128_si64(one); } -NPY_FINLINE float npyv_sum_f32(__m256 a) +NPY_FINLINE float npyv_sum_f32(npyv_u32 a) { __m256 sum_halves = _mm256_hadd_ps(a, a); sum_halves = _mm256_hadd_ps(sum_halves, sum_halves); diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index 69c3caf93788..ea7dc0c3c0e9 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -6,7 +6,7 @@ #define _NPY_SIMD_AVX512_ARITHMETIC_H #include "../avx2/utils.h" - +#include "../sse/utils.h" /*************************** * Addition ***************************/ @@ -145,15 +145,19 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) * intel compiler/GCC 7.1/Clang 4, we still need to support older GCC. 
***************************/ -NPY_FINLINE npy_uint32 npyv_sum_u8(__m512i a) +NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { - npyv_u16x2 res = npyv_expand_u16_u8(a); - __m512i a16 = npyv_add_u16(res.val[0], res.val[1]); - a16 = _mm512_cvtepi16_epi32(_mm256_add_epi16(npyv512_lower_si256(a16), npyv512_higher_si256(a16))); - __m256i a8 = _mm256_add_epi32(npyv512_lower_si256(a16), npyv512_higher_si256(a16)); - __m128i a4 = _mm_add_epi32(_mm256_castsi256_si128(a8), _mm256_extracti128_si256(a8, 1)); - a4 = _mm_hadd_epi32(a4, a4); - return (npy_uint32)_mm_cvtsi128_si32(_mm_hadd_epi32(a4, a4)); +#ifdef NPY_HAVE_AVX512BW + __m512i eight = _mm512_sad_epu8(a, _mm512_setzero_si512()); + __m256i four = _mm256_add_epi16(npyv512_lower_si256(eight), npyv512_higher_si256(eight)); +#else + __m256i lo_four = _mm256_sad_epu8(npyv512_lower_si256(a), _mm256_setzero_si256()); + __m256i hi_four = _mm256_sad_epu8(npyv512_higher_si256(a), _mm256_setzero_si256()); + __m256i four = _mm256_add_epi16(lo_four, hi_four); +#endif + __m128i two = _mm_add_epi16(_mm256_castsi256_si128(four), _mm256_extracti128_si256(four, 1)); + __m128i one = _mm_add_epi16(two, _mm_unpackhi_epi64(two, two)); + return (npy_uint16)_mm_cvtsi128_si32(one); } #ifdef NPY_HAVE_AVX512F_REDUCE @@ -171,12 +175,12 @@ NPY_FINLINE npy_uint32 npyv_sum_u8(__m512i a) return _mm_cvtsi128_si32(_mm_hadd_epi32(quarter, quarter)); } - NPY_FINLINE npy_uint64 npyv_sum_u64(__m512i a) + NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) { - npy_uint64 NPY_DECL_ALIGNED(64) idx[2]; - __m256i half = _mm256_add_epi64(npyv512_lower_si256(a), npyv512_higher_si256(a)); - _mm_store_si128((__m128i*)idx, _mm_add_epi64(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1))); - return idx[0] + idx[1]; + __m256i four = _mm256_add_epi64(npyv512_lower_si256(a), npyv512_higher_si256(a)); + __m256i two = _mm256_add_epi64(four, _mm256_shuffle_epi32(four, _MM_SHUFFLE(1, 0, 3, 2))); + __m128i one = _mm_add_epi64(_mm256_castsi256_si128(two), _mm256_extracti128_si256(two, 1)); + return (npy_uint64)npyv128_cvtsi128_si64(one); } NPY_FINLINE float npyv_sum_f32(npyv_f32 a) @@ -204,10 +208,13 @@ NPY_FINLINE npy_uint32 npyv_sum_u8(__m512i a) } #endif -NPY_FINLINE npy_uint32 npyv_sum_u16(__m512i a) +NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) { - npyv_u32x2 res = npyv_expand_u32_u16(a); - return (unsigned)npyv_sum_u32(_mm512_add_epi32(res.val[0], res.val[1])); + const npyv_u16 even_mask = _mm512_set1_epi32(0x0000FFFF); + __m512i even = _mm512_and_si512(a, even_mask); + __m512i odd = _mm512_srli_epi32(a, 16); + __m512i ff = _mm512_add_epi32(even, odd); + return npyv_sum_u32(ff); } #endif // _NPY_SIMD_AVX512_ARITHMETIC_H diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h index 90f62d063b9c..81207ea5e606 100644 --- a/numpy/core/src/common/simd/neon/arithmetic.h +++ b/numpy/core/src/common/simd/neon/arithmetic.h @@ -133,22 +133,22 @@ // Horizontal add: Calculates the sum of all vector elements. 
#if NPY_SIMD_F64 - #define npyv_sum_u8 vaddvq_u8 - #define npyv_sum_u16 vaddvq_u16 + #define npyv_sumup_u8 vaddlvq_u8 + #define npyv_sumup_u16 vaddlvq_u16 #define npyv_sum_u32 vaddvq_u32 #define npyv_sum_u64 vaddvq_u64 #define npyv_sum_f32 vaddvq_f32 #define npyv_sum_f64 vaddvq_f64 #else - NPY_FINLINE npy_uint32 npyv_sum_u8(npyv_u8 a) + NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { uint32x4_t t0 = vpaddlq_u16(vpaddlq_u8(a)); uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); return vget_lane_u32(vpadd_u32(t1, t1), 0); } - NPY_FINLINE npy_uint32 npyv_sum_u16(npyv_u16 a) + NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) { uint32x4_t t0 = vpaddlq_u16(a); uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index 7dba6ea8ceca..968de75457f1 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -150,26 +150,26 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b) // Horizontal add: Calculates the sum of all vector elements. -NPY_FINLINE npy_uint32 npyv_sum_u8(__m128i a) +NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { __m128i half = _mm_sad_epu8(a, _mm_setzero_si128()); return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(half, _mm_unpackhi_epi64(half, half))); } -NPY_FINLINE npy_uint32 npyv_sum_u32(__m128i a) +NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { __m128i t = _mm_add_epi32(a, _mm_srli_si128(a, 8)); t = _mm_add_epi32(t, _mm_srli_si128(t, 4)); return (unsigned)_mm_cvtsi128_si32(t); } -NPY_FINLINE npy_uint32 npyv_sum_u16(__m128i a) +NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) { npyv_u32x2 res = npyv_expand_u32_u16(a); return (unsigned)npyv_sum_u32(_mm_add_epi32(res.val[0], res.val[1])); } -NPY_FINLINE npy_uint64 npyv_sum_u64(__m128i a) +NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) { npy_uint64 NPY_DECL_ALIGNED(32) idx[2]; npyv_storea_u64(idx, a); diff --git a/numpy/core/src/common/simd/sse/sse.h b/numpy/core/src/common/simd/sse/sse.h index dc0b62f7394e..0bb404312867 100644 --- a/numpy/core/src/common/simd/sse/sse.h +++ b/numpy/core/src/common/simd/sse/sse.h @@ -62,6 +62,7 @@ typedef struct { __m128d val[3]; } npyv_f64x3; #define npyv_nlanes_f32 4 #define npyv_nlanes_f64 2 +#include "utils.h" #include "memory.h" #include "misc.h" #include "reorder.h" diff --git a/numpy/core/src/common/simd/sse/utils.h b/numpy/core/src/common/simd/sse/utils.h new file mode 100644 index 000000000000..fbb96937705f --- /dev/null +++ b/numpy/core/src/common/simd/sse/utils.h @@ -0,0 +1,19 @@ +#ifndef NPY_SIMD + #error "Not a standalone header" +#endif + +#ifndef _NPY_SIMD_SSE_UTILS_H +#define _NPY_SIMD_SSE_UTILS_H + +#if !defined(__x86_64__) && !defined(_M_X64) +NPY_FINLINE npy_uint64 npyv128_cvtsi128_si64(npyv_u64 a) +{ + npy_uint64 NPY_DECL_ALIGNED(32) idx[2]; + npyv_storea_u64(idx, a); + return idx[0]; +} +#else + #define npyv128_cvtsi128_si64 _mm_cvtsi128_si64 +#endif + +#endif // _NPY_SIMD_SSE_UTILS_H diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h index fa114389a374..30d77c6fefa5 100644 --- a/numpy/core/src/common/simd/vsx/arithmetic.h +++ b/numpy/core/src/common/simd/vsx/arithmetic.h @@ -118,23 +118,26 @@ // Horizontal add: Calculates the sum of all vector elements. 
-NPY_FINLINE npy_uint32 npyv_sum_u8(npyv_u8 a) +NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { - const npyv_u32 zero4 = npyv_zero_u32(); - npyv_u32 sum4 = vec_sum4s(a, zero4); - return (npy_uint32)vec_extract(vec_sums(sum4, zero4), 3); + const npyv_u32 zero = npyv_zero_u32(); + npyv_u32 four = vec_sum4s(a, zero); + npyv_u32 one = vec_sums((npyv_s32)sum4, (npyv_s32)zero4); + return (npy_uint16)vec_extract(one, 3); } -NPY_FINLINE npy_uint32 npyv_sum_u16(npyv_u16 a) +NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) { - const npyv_u32 zero4 = npyv_zero_u32(); - const npyv_u32 v4 = vec_mergeh(vec_adds(a, vec_sld(a, a, 8)), zero4); - return vec_extract(vec_sums(v4, zero4), 3); + const npyv_s32 zero = npyv_zero_s32(); + npyv_u32x2 eight = npyv_expand_u32_u16(a); + npyv_u32 four = vec_add(eight.val[0], eight.val[1]); + npyv_s32 one = vec_sums((npyv_s32)four, zero); + return (npy_uint32)vec_extract(one, 3); } NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) { - return vec_extract(vec_add(a, vec_permi(a, a, 3)), 0); + return vec_extract(vec_add(a, vec_mergel(a, a)), 0); } NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index ae98c47f7e86..c67e44fa73ab 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -736,14 +736,12 @@ def test_arithmetic_div(self): def test_arithmetic_reduce_sum(self): """ Test reduce sum intrinics: - npyv_sum_u8 - npyv_sum_u16 npyv_sum_u32 npyv_sum_u64 npyv_sum_f32 npyv_sum_f64 """ - if self.sfx not in ("u8", "u16", "u32", "u64", "f32", "f64"): + if self.sfx not in ("u32", "u64", "f32", "f64"): return # reduce sum data = self._data() @@ -753,6 +751,22 @@ def test_arithmetic_reduce_sum(self): vsum = self.sum(vdata) assert vsum == data_sum + def test_arithmetic_reduce_sumup(self): + """ + Test overflow protect reduce sumup intrinics: + npyv_sumup_u8 + npyv_sumup_u16 + """ + if self.sfx not in ("u8", "u16"): + return + rdata = (0, self.nlanes, self._int_min(), self._int_max()-self.nlanes) + for r in rdata: + data = self._data(r) + vdata = self.load(data) + data_sum = sum(data) + vsum = self.sumup(vdata) + assert vsum == data_sum + def test_mask_conditional(self): """ Conditional addition and subtraction for all supported data types. From 7a0a6e36c474ba4849aa67166649f48400467b76 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 22 Jan 2021 16:43:47 +0800 Subject: [PATCH 0381/1270] correct the input paramater of f32. 
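The change is mechanical, but worth spelling out: callers of the universal intrinsics only ever see the platform-neutral `npyv_` typedefs, so the reduction signatures should use `npyv_f32`/`npyv_f64` (and `npyv_u32`) rather than the raw `__m256`/`__m128d` spellings. A minimal, hypothetical usage sketch -- not part of this patch, and it assumes the usual `simd/simd.h` include plus the existing `npyv_load_f32` loader:

```
#include "simd/simd.h"

#if NPY_SIMD
/* Illustration only: sum one SIMD vector of floats starting at src.
 * The caller never names __m256/__m128, which is why npyv_sum_f32
 * should take npyv_f32 in its signature. */
static float sum_one_vector(const float *src)
{
    npyv_f32 v = npyv_load_f32(src);  /* loads npyv_nlanes_f32 lanes */
    return npyv_sum_f32(v);           /* horizontal reduction */
}
#endif
```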
--- numpy/core/src/common/simd/avx2/arithmetic.h | 6 +++--- numpy/core/src/common/simd/sse/arithmetic.h | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index 770d4230c902..9e13d63240be 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -128,7 +128,7 @@ NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) return (npy_uint16)_mm_cvtsi128_si32(one); } -NPY_FINLINE npy_uint32 npyv_sum_u32(__m256i a) +NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { __m256i s0 = _mm256_hadd_epi32(a, a); s0 = _mm256_hadd_epi32(s0, s0); @@ -153,7 +153,7 @@ NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) return (npy_uint64)npyv128_cvtsi128_si64(one); } -NPY_FINLINE float npyv_sum_f32(npyv_u32 a) +NPY_FINLINE float npyv_sum_f32(npyv_f32 a) { __m256 sum_halves = _mm256_hadd_ps(a, a); sum_halves = _mm256_hadd_ps(sum_halves, sum_halves); @@ -163,7 +163,7 @@ NPY_FINLINE float npyv_sum_f32(npyv_u32 a) return _mm_cvtss_f32(sum); } -NPY_FINLINE double npyv_sum_f64(__m256d a) +NPY_FINLINE double npyv_sum_f64(npyv_f64 a) { __m256d sum_halves = _mm256_hadd_pd(a, a); __m128d lo = _mm256_castpd256_pd128(sum_halves); diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index 968de75457f1..92a53e630371 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -176,7 +176,7 @@ NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) return idx[0] + idx[1]; } -NPY_FINLINE float npyv_sum_f32(__m128 a) +NPY_FINLINE float npyv_sum_f32(npyv_f32 a) { #ifdef NPY_HAVE_SSE3 __m128 sum_halves = _mm_hadd_ps(a, a); @@ -190,7 +190,7 @@ NPY_FINLINE float npyv_sum_f32(__m128 a) #endif } -NPY_FINLINE double npyv_sum_f64(__m128d a) +NPY_FINLINE double npyv_sum_f64(npyv_f64 a) { #ifdef NPY_HAVE_SSE3 return _mm_cvtsd_f64(_mm_hadd_pd(a, a)); From 9fa688a9c433aad96c9d53c7fea09d54efbe5b68 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Fri, 22 Jan 2021 17:52:43 +0800 Subject: [PATCH 0382/1270] fix npyv_storea_u64 undefined error. 
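For context, the `npyv_storea_u64` reference in `sse/utils.h` appears to be undefined because of include order: the earlier patch in this series makes `sse/sse.h` pull in `utils.h` before `memory.h`, and the aligned load/store helpers are only provided later, so `utils.h` can only rely on raw SSE intrinsics such as `_mm_store_si128`. A rough sketch of that ordering constraint (paraphrasing the includes shown above, not new code):

```
/* sse/sse.h -- include order after the earlier patch in this series */
#include "utils.h"    /* only raw _mm_* intrinsics are usable here    */
#include "memory.h"   /* npyv_storea_u64 etc. defined here (assumed   */
                      /* from the existing layout of the SSE headers) */
#include "misc.h"
```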
--- numpy/core/src/common/simd/sse/utils.h | 4 ++-- numpy/core/src/common/simd/vsx/arithmetic.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/common/simd/sse/utils.h b/numpy/core/src/common/simd/sse/utils.h index fbb96937705f..5e03e12a3421 100644 --- a/numpy/core/src/common/simd/sse/utils.h +++ b/numpy/core/src/common/simd/sse/utils.h @@ -6,10 +6,10 @@ #define _NPY_SIMD_SSE_UTILS_H #if !defined(__x86_64__) && !defined(_M_X64) -NPY_FINLINE npy_uint64 npyv128_cvtsi128_si64(npyv_u64 a) +NPY_FINLINE npy_uint64 npyv128_cvtsi128_si64(__m128i a) { npy_uint64 NPY_DECL_ALIGNED(32) idx[2]; - npyv_storea_u64(idx, a); + _mm_store_si128((__m128i *)idx, a); return idx[0]; } #else diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h index 30d77c6fefa5..97d5efe61317 100644 --- a/numpy/core/src/common/simd/vsx/arithmetic.h +++ b/numpy/core/src/common/simd/vsx/arithmetic.h @@ -122,7 +122,7 @@ NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { const npyv_u32 zero = npyv_zero_u32(); npyv_u32 four = vec_sum4s(a, zero); - npyv_u32 one = vec_sums((npyv_s32)sum4, (npyv_s32)zero4); + npyv_u32 one = vec_sums((npyv_s32)four, (npyv_s32)zero); return (npy_uint16)vec_extract(one, 3); } From ef0a3f068b042f676515382e917576234858fa0e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 22 Jan 2021 16:49:50 +0100 Subject: [PATCH 0383/1270] TST: Alias `OUTPUT_MYPY` so that it appears in the local namespace --- numpy/typing/tests/test_typing.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index a0d92feb9d1c..0e1faf781792 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -58,7 +58,9 @@ def run_mypy() -> None: # Parse the output key = lambda n: n.split(':', 1)[0] iterator = itertools.groupby(stdout.split("\n"), key=key) - OUTPUT_MYPY.update((os.path.join(root, k), list(v)) for k, v in iterator) + OUTPUT_MYPY.update( + (os.path.join(root, k), list(v)) for k, v in iterator if k + ) def get_test_cases(directory): @@ -80,7 +82,9 @@ def get_test_cases(directory): @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) def test_success(path): - assert path not in OUTPUT_MYPY + # Alias `OUTPUT_MYPY` so that it appears in the local namespace + output_mypy = OUTPUT_MYPY + assert path not in output_mypy @pytest.mark.slow @@ -93,8 +97,10 @@ def test_fail(path): lines = fin.readlines() errors = defaultdict(lambda: "") - assert path in OUTPUT_MYPY - for error_line in OUTPUT_MYPY[path]: + + output_mypy = OUTPUT_MYPY + assert path in output_mypy + for error_line in output_mypy[path]: match = re.match( r"^.+\.py:(?P\d+): (error|note): .+$", error_line, @@ -219,8 +225,9 @@ def test_reveal(path): with open(path) as fin: lines = _parse_reveals(fin) - assert path in OUTPUT_MYPY - for error_line in OUTPUT_MYPY[path]: + output_mypy = OUTPUT_MYPY + assert path in output_mypy + for error_line in output_mypy[path]: match = re.match( r"^.+\.py:(?P\d+): note: .+$", error_line, From 15ff83a5807742e58247ad4f6bc8c53606c26ccc Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Fri, 22 Jan 2021 11:00:06 -0500 Subject: [PATCH 0384/1270] DOC: typo in post-loop return --- numpy/core/_add_newdocs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 2cbfe52bee2a..8eb42caf5f1f 
100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -377,7 +377,7 @@ ... while not it.finished: ... it[0] = lamdaexpr(*it[1:]) ... it.iternext() - ... return it.operands[0] + ... return it.operands[0] >>> a = np.arange(5) >>> b = np.ones(5) From e81141b9f8f9c10f136012e08eafda12249043cb Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 22 Jan 2021 18:11:32 +0100 Subject: [PATCH 0385/1270] TST: Enable the `show_absolute_path` option in mypy.ini --- numpy/typing/tests/data/mypy.ini | 1 + numpy/typing/tests/test_typing.py | 5 +---- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index 35cfbec89ec2..548f762612fc 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -1,5 +1,6 @@ [mypy] plugins = numpy.typing.mypy_plugin +show_absolute_path = True [mypy-numpy] ignore_errors = True diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 0e1faf781792..f41aa2b6bbf1 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -42,7 +42,6 @@ def run_mypy() -> None: if os.path.isdir(CACHE_DIR): shutil.rmtree(CACHE_DIR) - root = os.path.dirname(os.path.dirname(np.__file__)) for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR): # Run mypy stdout, stderr, _ = api.run([ @@ -58,9 +57,7 @@ def run_mypy() -> None: # Parse the output key = lambda n: n.split(':', 1)[0] iterator = itertools.groupby(stdout.split("\n"), key=key) - OUTPUT_MYPY.update( - (os.path.join(root, k), list(v)) for k, v in iterator if k - ) + OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k) def get_test_cases(directory): From 201011fb56b07eafb7763bcc6c5e26abb30ffd88 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 22 Jan 2021 18:27:21 +0100 Subject: [PATCH 0386/1270] MAINT: Do not split at the first `:` character if the path contains a windows drive --- numpy/typing/tests/test_typing.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index f41aa2b6bbf1..324312a92af6 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -30,6 +30,15 @@ OUTPUT_MYPY: Dict[str, List[str]] = {} +def _key_func(key: str) -> str: + """Split at the first occurance of the ``:`` character. + + Windows drive-letters (*e.g.* ``C:``) are ignored herein. 
+ """ + drive, tail = os.path.splitdrive(key) + return os.path.join(drive, tail.split(":", 1)[0]) + + @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.fixture(scope="module", autouse=True) @@ -55,8 +64,7 @@ def run_mypy() -> None: stdout = stdout.replace('*', '') # Parse the output - key = lambda n: n.split(':', 1)[0] - iterator = itertools.groupby(stdout.split("\n"), key=key) + iterator = itertools.groupby(stdout.split("\n"), key=_key_func) OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k) From 164959a9049c05901dd4a2cc66c07a4df37527c1 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 23 Jan 2021 13:59:19 +0100 Subject: [PATCH 0387/1270] MAINT: warn when shuffling unrecognized objects Closes gh-18206 --- numpy/random/_generator.pyx | 15 ++++++++++++--- numpy/random/mtrand.pyx | 13 +++++++++++-- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 7d652ce891e9..5a7d4a21a4f7 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -2,6 +2,7 @@ #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 import operator import warnings +from collections.abc import MutableSequence from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer from cpython cimport (Py_INCREF, PyFloat_AsDouble) @@ -4347,14 +4348,14 @@ cdef class Generator: """ shuffle(x, axis=0) - Modify a sequence in-place by shuffling its contents. + Modify an array or sequence in-place by shuffling its contents. The order of sub-arrays is changed but their contents remains the same. Parameters ---------- - x : array_like - The array or list to be shuffled. + x : ndarray or MutableSequence + The array, list or mutable sequence to be shuffled. axis : int, optional The axis which `x` is shuffled along. Default is 0. It is only supported on `ndarray` objects. @@ -4428,6 +4429,14 @@ cdef class Generator: x[i] = buf else: # Untyped path. + if not isinstance(x, (np.ndarray, MutableSequence)): + # See gh-18206. We may decide to deprecate here in the future. + warnings.warn( + "`x` isn't a recognized object; `shuffle` is not guaranteed " + "to behave correctly. E.g., non-numpy array/tensor objects " + "with view semantics may contain duplicates after shuffling." + ) + if axis != 0: raise NotImplementedError("Axis argument is only supported " "on ndarray objects") diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index d43e7f5aa6d9..9ee3d9ff3774 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -2,6 +2,7 @@ #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 import operator import warnings +from collections.abc import MutableSequence import numpy as np @@ -4402,8 +4403,8 @@ cdef class RandomState: Parameters ---------- - x : array_like - The array or list to be shuffled. + x : ndarray or MutableSequence + The array, list or mutable sequence to be shuffled. Returns ------- @@ -4468,6 +4469,14 @@ cdef class RandomState: x[i] = buf else: # Untyped path. + if not isinstance(x, (np.ndarray, MutableSequence)): + # See gh-18206. We may decide to deprecate here in the future. + warnings.warn( + "`x` isn't a recognized object; `shuffle` is not guaranteed " + "to behave correctly. E.g., non-numpy array/tensor objects " + "with view semantics may contain duplicates after shuffling." 
+ ) + with self.lock: for i in reversed(range(1, n)): j = random_interval(&self._bitgen, i) From 768acb7cf856bb49d4d183f8e9cbd456ecc32475 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 23 Jan 2021 14:16:17 +0100 Subject: [PATCH 0388/1270] BUG: shuffling empty array with axis=1 was broken This would trigger: ``` NotImplementedError: Axis argument is only supported on ndarray objects ``` because empty arrays and array scalars would take the "untyped" path. The bug exists only for Generator, not in RandomState (it doesn't have axis keyword for `shuffle`), but update both because it keeps implementations in sync and the change results in more understandable code also for RandomState. --- numpy/random/_generator.pyx | 15 ++++++++++++--- numpy/random/mtrand.pyx | 11 ++++++++--- numpy/random/tests/test_generator_mt19937.py | 15 ++++++++++++++- numpy/random/tests/test_randomstate.py | 6 +++++- 4 files changed, 39 insertions(+), 8 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 5a7d4a21a4f7..ba1713dfa1f8 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -4415,7 +4415,15 @@ cdef class Generator: with self.lock, nogil: _shuffle_raw_wrap(&self._bitgen, n, 1, itemsize, stride, x_ptr, buf_ptr) - elif isinstance(x, np.ndarray) and x.ndim and x.size: + elif isinstance(x, np.ndarray): + if axis >= x.ndim: + raise np.AxisError(f"Cannot shuffle along axis {axis} for " + f"array of dimension {x.ndim}") + + if x.size == 0: + # shuffling is a no-op + return + x = np.swapaxes(x, 0, axis) buf = np.empty_like(x[0, ...]) with self.lock: @@ -4429,12 +4437,13 @@ cdef class Generator: x[i] = buf else: # Untyped path. - if not isinstance(x, (np.ndarray, MutableSequence)): + if not isinstance(x, MutableSequence): # See gh-18206. We may decide to deprecate here in the future. warnings.warn( "`x` isn't a recognized object; `shuffle` is not guaranteed " "to behave correctly. E.g., non-numpy array/tensor objects " - "with view semantics may contain duplicates after shuffling." + "with view semantics may contain duplicates after shuffling.", + UserWarning, stacklevel=2 ) if axis != 0: diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 9ee3d9ff3774..814630c034b0 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4457,7 +4457,11 @@ cdef class RandomState: self._shuffle_raw(n, sizeof(np.npy_intp), stride, x_ptr, buf_ptr) else: self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr) - elif isinstance(x, np.ndarray) and x.ndim and x.size: + elif isinstance(x, np.ndarray): + if x.size == 0: + # shuffling is a no-op + return + buf = np.empty_like(x[0, ...]) with self.lock: for i in reversed(range(1, n)): @@ -4469,12 +4473,13 @@ cdef class RandomState: x[i] = buf else: # Untyped path. - if not isinstance(x, (np.ndarray, MutableSequence)): + if not isinstance(x, MutableSequence): # See gh-18206. We may decide to deprecate here in the future. warnings.warn( "`x` isn't a recognized object; `shuffle` is not guaranteed " "to behave correctly. E.g., non-numpy array/tensor objects " - "with view semantics may contain duplicates after shuffling." 
+ "with view semantics may contain duplicates after shuffling.", + UserWarning, stacklevel=2 ) with self.lock: diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index c4fb5883c925..47c81584caa6 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -960,6 +960,14 @@ def test_shuffle_custom_axis(self): random.shuffle(actual, axis=-1) assert_array_equal(actual, desired) + def test_shuffle_custom_axis_empty(self): + random = Generator(MT19937(self.seed)) + desired = np.array([]).reshape((0, 6)) + for axis in (0, 1): + actual = np.array([]).reshape((0, 6)) + random.shuffle(actual, axis=axis) + assert_array_equal(actual, desired) + def test_shuffle_axis_nonsquare(self): y1 = np.arange(20).reshape(2, 10) y2 = y1.copy() @@ -993,6 +1001,11 @@ def test_shuffle_exceptions(self): arr = [[1, 2, 3], [4, 5, 6]] assert_raises(NotImplementedError, random.shuffle, arr, 1) + arr = np.array(3) + assert_raises(TypeError, random.shuffle, arr) + arr = np.ones((3, 2)) + assert_raises(np.AxisError, random.shuffle, arr, 2) + def test_permutation(self): random = Generator(MT19937(self.seed)) alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] @@ -1004,7 +1017,7 @@ def test_permutation(self): arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T actual = random.permutation(arr_2d) assert_array_equal(actual, np.atleast_2d(desired).T) - + bad_x_str = "abcd" assert_raises(np.AxisError, random.permutation, bad_x_str) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index b70a043472cc..7f5f0805050e 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -642,7 +642,7 @@ def test_choice_nan_probabilities(self): a = np.array([42, 1, 2]) p = [None, None, None] assert_raises(ValueError, random.choice, a, p=p) - + def test_choice_p_non_contiguous(self): p = np.ones(10) / 5 p[1::2] = 3.0 @@ -699,6 +699,10 @@ def test_shuffle_masked(self): assert_equal( sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + def test_shuffle_invalid_objects(self): + x = np.array(3) + assert_raises(TypeError, random.shuffle, x) + def test_permutation(self): random.seed(self.seed) alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] From ffde0de1c87a07b1dd98b5563c97801d95db713d Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 23 Jan 2021 17:27:17 +0000 Subject: [PATCH 0389/1270] Commit code review suggestions by seberg [ci skip] Co-authored-by: Sebastian Berg --- doc/neps/nep-0023-backwards-compatibility.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index d4f77a7976e2..a034a01f5943 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -52,15 +52,16 @@ General principles When considering proposed changes that are backwards incompatible, the main principles the NumPy developers use when making a decision are: -1. Changes need to benefit users more than they harm them. +1. Changes need to benefit more than they harm users. 2. NumPy is widely used so breaking changes should by default be assumed to be fairly harmful. -3. Decisions should be based on data and actual effects on users and downstream - packages rather than, e.g., appealing to the docs or for stylistic reasons. -4. Silently getting a wrong answer is much worse than getting a loud error. +3. 
Decisions should be based on how they affect users and downstream packages. + This should be based on usage data where possible. It does not matter whether + this use contradicts the documentation or best practices. +4. The possibility of an incorrect result is much worse than an error or even crash. When assessing the costs of proposed changes, keep in mind that most users do -not read the mailing list, do not look at deprecation warnings, and sometimes +not read the mailing list, do not notice deprecation warnings, and sometimes wait more than one or two years before upgrading from their old version. And that NumPy has millions of users, so "no one will do or use this" is very likely incorrect. From a6edae724424e424f8390c75715ea2273645dfe3 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 23 Jan 2021 19:00:05 +0100 Subject: [PATCH 0390/1270] DOC: update for review comments on NEP 23 [ci skip] --- doc/neps/nep-0023-backwards-compatibility.rst | 49 +++++++++++-------- 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index a034a01f5943..b37499af9d3b 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -33,12 +33,17 @@ contributions. This NEP tries to address that by providing a policy as well as examples and rationales for when it is or isn't a good idea to break backwards compatibility. +In addition, this NEP can serve as documentation for users about how the NumPy +project treats backwards compatibility, and the speed at which they can expect +changes to be made. + In scope for this NEP are: - Principles of NumPy's approach to backwards compatibility. - How to deprecate functionality, and when to remove already deprecated functionality. - Decision making process for deprecations and removals. +- How to ensure that users are well informed about any change. Out of scope are: @@ -66,8 +71,9 @@ wait more than one or two years before upgrading from their old version. And that NumPy has millions of users, so "no one will do or use this" is very likely incorrect. -Benefits include improved functionality, usability and performance, as well as -lower maintenance cost and improved future extensibility. +Benefits of proposed changes can include improved functionality, usability and +performance, as well as lower maintenance cost and improved future +extensibility. Fixes for clear bugs are exempt from this backwards compatibility policy. However in case of serious impact on users (e.g. a downstream library doesn't @@ -81,13 +87,14 @@ Strategies related to deprecations Getting hard data on the impact of a deprecation of often difficult. Strategies that can be used to assess such impact include: -- Use a code search engine ([1]_) or static ([2]_) or dynamic ([3]_) code +- Use a code search engine ([1]_, [2]_) or static ([3]_) or dynamic ([4]_) code analysis tools to determine where and how the functionality is used. - Testing prominent downstream libraries against a development build of NumPy containing the proposed change to get real-world data on its impact. - Making a change in master and reverting it, if needed, before a release. We - do encourage other packages to test against NumPy's master branch, so this - often turns up issues quickly. + do encourage other packages to test against NumPy's master branch (and if + that's too burdensome, then at least to test pre-releases), so this often + turns up issues quickly. 
If the impact is unclear or significant, it is often good to consider alternatives to deprecations. For example, discouraging use in documentation @@ -110,13 +117,11 @@ Deprecations: - shall include the version number of the release in which the functionality was deprecated. - shall include information on alternatives to the deprecated functionality, or a - reason for the deprecation if no clear alternative is available. -- shall use ``VisibleDeprecationWarning`` rather than ``DeprecationWarning`` - for cases of relevance to end users. For cases only relevant to - downstream libraries, a regular ``DeprecationWarning`` is fine. - *Rationale: regular deprecation warnings are invisible by default; library - authors should be aware how deprecations work and test for them, but we can't - expect this from all users.* + reason for the deprecation if no clear alternative is available (note that + release notes can include longer messages if needed). +- shall use ``DeprecationWarning`` by default, and ``VisibleDeprecation`` + for changes that need attention again after already having been deprecated or + needing extra attention for some reason. - shall be listed in the release notes of the release where the deprecation is first present. - shall not be introduced in micro (or bug fix) releases. @@ -166,8 +171,8 @@ For backwards incompatible changes that aren't "deprecate and remove" but for which code will start behaving differently, a ``FutureWarning`` should be used. Release notes, mentioning version number and using ``stacklevel`` should be done in the same way as for deprecation warnings. A ``.. versionchanged::`` -directive can be used in the documentation to indicate when the behavior -changed: +directive shall be used in the documentation after the behaviour change was +made to indicate when the behavior changed: .. code-block:: python @@ -209,8 +214,9 @@ deprecated functionality does not need discussion on the mailing list. Functionality with more strict deprecation policies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- ``numpy.random`` has its own backwards compatibility policy, - see `NEP 19 `_. +- ``numpy.random`` has its own backwards compatibility policy with additional + requirements on top of the ones in this NEP, see + `NEP 19 `_. - The file format for ``.npy`` and ``.npz`` files must not be changed in a backwards incompatible way. @@ -287,8 +293,9 @@ removing them happened in 2013 (gh-2880, rejected) and then again in 2019 (:ref:`NEP32`, accepted without significant complaints). Given that they were clearly outside of NumPy's scope, moving them to a -separate ``numpy-financial`` package and removing them from NumPy after a -deprecation period made sense. +separate ``numpy-financial`` package (which gave users an easy uption to update +their code - just a simply `pip install numpy-financial`) and removing them +from NumPy after a deprecation period made sense. Alternatives @@ -320,9 +327,11 @@ References and Footnotes .. [1] https://searchcode.com/ -.. [2] https://github.com/Quansight-Labs/python-api-inspect +.. [2] https://sourcegraph.com/search + +.. [3] https://github.com/Quansight-Labs/python-api-inspect -.. [3] https://github.com/data-apis/python-record-api +.. 
[4] https://github.com/data-apis/python-record-api Copyright --------- From 7670cb43442c068617f0b36d4aa1c1d4bbf11e21 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 23 Jan 2021 20:21:17 +0100 Subject: [PATCH 0391/1270] Address more review comments on the backwards compat NEP [ci skip] --- doc/neps/nep-0023-backwards-compatibility.rst | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index b37499af9d3b..0445c4b04f12 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -82,7 +82,10 @@ to be delayed for one or more releases. Strategies related to deprecations ----------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Impact assessment +````````````````` Getting hard data on the impact of a deprecation of often difficult. Strategies that can be used to assess such impact include: @@ -96,6 +99,9 @@ that can be used to assess such impact include: that's too burdensome, then at least to test pre-releases), so this often turns up issues quickly. +Alternatives to deprecations +```````````````````````````` + If the impact is unclear or significant, it is often good to consider alternatives to deprecations. For example, discouraging use in documentation only, or moving the documentation for the functionality to a less prominent @@ -109,8 +115,10 @@ Implementing deprecations and removals Deprecation warnings are necessary in all cases where functionality will eventually be removed. If there is no intent to remove functionality, -then it should not be deprecated either. A "please don't use this" in the -documentation or other type of warning should be used instead. +then it should not be deprecated either. A "please don't use this for new code" +in the documentation or other type of warning should be used instead, and the +documentation can be organized such that the preferred alternative is more +prominently shown. Deprecations: @@ -200,7 +208,7 @@ made to indicate when the behavior changed: Decision making -~~~~~~~~~~~~~~~ +--------------- In concrete cases where this policy needs to be applied, decisions are made according to the `NumPy governance model @@ -217,8 +225,9 @@ Functionality with more strict deprecation policies - ``numpy.random`` has its own backwards compatibility policy with additional requirements on top of the ones in this NEP, see `NEP 19 `_. -- The file format for ``.npy`` and ``.npz`` files must not be changed in a backwards - incompatible way. +- The file format for ``.npy`` and ``.npz`` files is strictly versioned + independent of the NumPy version; existing format versions must remain + backwards compatible even if a newer format version is introduced. 
Example cases From 5cbfefb4c69b130f8b01408615e9f7b42b707beb Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 23 Jan 2021 19:36:22 +0000 Subject: [PATCH 0392/1270] Remove duplicate axis check Co-authored-by: Eric Wieser --- numpy/random/_generator.pyx | 4 ---- 1 file changed, 4 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ba1713dfa1f8..3033a1495372 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -4416,10 +4416,6 @@ cdef class Generator: _shuffle_raw_wrap(&self._bitgen, n, 1, itemsize, stride, x_ptr, buf_ptr) elif isinstance(x, np.ndarray): - if axis >= x.ndim: - raise np.AxisError(f"Cannot shuffle along axis {axis} for " - f"array of dimension {x.ndim}") - if x.size == 0: # shuffling is a no-op return From 1f928ec9a7c531b6eded12020415b7f8a007e2ac Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Sat, 23 Jan 2021 15:11:19 -0800 Subject: [PATCH 0393/1270] DOC: Formatting consistency. Single back ticks are use for references, so this should be either double backticks, or quotes. As `'K'` is used later in the same sentence, and below in the `Notes` section, settle for single quotes. --- numpy/core/_add_newdocs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 8eb42caf5f1f..6073166a0a74 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -821,7 +821,7 @@ ===== ========= =================================================== When ``copy=False`` and a copy is made for other reasons, the result is - the same as if ``copy=True``, with some exceptions for `A`, see the + the same as if ``copy=True``, with some exceptions for 'A', see the Notes section. The default order is 'K'. subok : bool, optional If True, then sub-classes will be passed-through, otherwise From 9129e6d8478119b6b97840d5e352709656348452 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Sat, 23 Jan 2021 15:29:24 -0800 Subject: [PATCH 0394/1270] DOC: Double backticks for inline code example. Single backticks are supposed to be use for reference to other object, In this context double backticks (verbatim) appear to be better suited. --- numpy/core/numeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index c95c48d7139d..e776bd43bbd4 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -299,7 +299,7 @@ def full(shape, fill_value, dtype=None, order='C', *, like=None): Fill value. dtype : data-type, optional The desired data-type for the array The default, None, means - `np.array(fill_value).dtype`. + ``np.array(fill_value).dtype``. order : {'C', 'F'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. 
From 333fd949081b279da31e589d178d3ac609d0597b Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 24 Jan 2021 08:11:34 +0200 Subject: [PATCH 0395/1270] fix from review --- numpy/lib/tests/test_io.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index a823b12a266a..534ab683c584 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -574,13 +574,13 @@ def test_unicode_and_bytes_fmt(self, fmt, iotype): @pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work") @pytest.mark.slow - @requires_memory(free_bytes=8e9) + @requires_memory(free_bytes=7e9) def test_large_zip(self): def check_large_zip(memoryerror_raised): memoryerror_raised.value = False try: # The test takes at least 6GB of memory, writes a file larger - # than 4GB + # than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile`` test_data = np.asarray([np.random.rand( np.random.randint(50,100),4) for i in range(800000)], dtype=object) @@ -599,6 +599,9 @@ def check_large_zip(memoryerror_raised): p.join() if memoryerror_raised.value: raise MemoryError("Child process raised a MemoryError exception") + # -9 indicates a SIGKILL, probably an OOM. + if p.exitcode == -9: + pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") assert p.exitcode == 0 class LoadTxtBase: From d3653bc6be7a655c2322b2d95f6209626e438171 Mon Sep 17 00:00:00 2001 From: Amarnath1904 <51597733+Amarnath1904@users.noreply.github.com> Date: Sun, 24 Jan 2021 15:48:18 +0530 Subject: [PATCH 0396/1270] STY: unify imports in __init__.py (#18187) minor style cleanup --- numpy/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index a242bb7dfaae..7dadb6491c05 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -109,8 +109,9 @@ import sys import warnings -from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning -from ._globals import _NoValue +from ._globals import ( + ModuleDeprecationWarning, VisibleDeprecationWarning, _NoValue +) # We first need to detect if we're being called as part of the numpy setup # procedure itself in a reliable manner. @@ -397,4 +398,3 @@ def _mac_os_check(): from ._version import get_versions __version__ = get_versions()['version'] del get_versions - From b97ef1fc36d7547a369928c640cc7246bff1a6ae Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 24 Jan 2021 12:50:17 +0100 Subject: [PATCH 0397/1270] Adopt copy-edits suggested by charris [ci skip] --- doc/neps/nep-0023-backwards-compatibility.rst | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index 0445c4b04f12..af5bdab29444 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -25,10 +25,10 @@ Motivation and Scope NumPy has a very large user base. Those users rely on NumPy being stable and the code they write that uses NumPy functionality to keep working. NumPy is also actively maintained and improved -- and sometimes improvements -require or are made easier by breaking backwards compatibility. +require, or are made easier, by breaking backwards compatibility. Finally, there are trade-offs in stability for existing users vs. avoiding errors or having a better user experience for new users. 
These competing -needs often give rise to long debates and to delays in accepting or rejecting +needs often give rise to long debates and delay accepting or rejecting contributions. This NEP tries to address that by providing a policy as well as examples and rationales for when it is or isn't a good idea to break backwards compatibility. @@ -58,27 +58,27 @@ When considering proposed changes that are backwards incompatible, the main principles the NumPy developers use when making a decision are: 1. Changes need to benefit more than they harm users. -2. NumPy is widely used so breaking changes should by default be assumed to be - fairly harmful. -3. Decisions should be based on how they affect users and downstream packages. - This should be based on usage data where possible. It does not matter whether +2. NumPy is widely used, so breaking changes should be assumed by default to be + harmful. +3. Decisions should be based on how they affect users and downstream packages + and should be based on usage data where possible. It does not matter whether this use contradicts the documentation or best practices. -4. The possibility of an incorrect result is much worse than an error or even crash. +4. The possibility of an incorrect result is worse than an error or even crash. When assessing the costs of proposed changes, keep in mind that most users do not read the mailing list, do not notice deprecation warnings, and sometimes wait more than one or two years before upgrading from their old version. And -that NumPy has millions of users, so "no one will do or use this" is very -likely incorrect. +that NumPy has millions of users, so "no one will do or use this" is likely +incorrect. Benefits of proposed changes can include improved functionality, usability and performance, as well as lower maintenance cost and improved future extensibility. Fixes for clear bugs are exempt from this backwards compatibility policy. -However in case of serious impact on users (e.g. a downstream library doesn't -build anymore or would start giving incorrect results), even bug fixes may have -to be delayed for one or more releases. +However, in case of serious impact on users even bug fixes may have to be +delayed for one or more releases. For example, if a downstream library would no +longer build or would give incorrect results." Strategies related to deprecations @@ -92,11 +92,11 @@ that can be used to assess such impact include: - Use a code search engine ([1]_, [2]_) or static ([3]_) or dynamic ([4]_) code analysis tools to determine where and how the functionality is used. -- Testing prominent downstream libraries against a development build of NumPy +- Test prominent downstream libraries against a development build of NumPy containing the proposed change to get real-world data on its impact. -- Making a change in master and reverting it, if needed, before a release. We - do encourage other packages to test against NumPy's master branch (and if - that's too burdensome, then at least to test pre-releases), so this often +- Make a change in master and revert it before release if it causes problems. + We encourage other packages to test against NumPy's master branch and if + that's too burdensome, then at least to test pre-releases. This often turns up issues quickly. Alternatives to deprecations @@ -115,7 +115,7 @@ Implementing deprecations and removals Deprecation warnings are necessary in all cases where functionality will eventually be removed. 
If there is no intent to remove functionality, -then it should not be deprecated either. A "please don't use this for new code" +then it should not be deprecated. A "please don't use this for new code" in the documentation or other type of warning should be used instead, and the documentation can be organized such that the preferred alternative is more prominently shown. @@ -125,14 +125,14 @@ Deprecations: - shall include the version number of the release in which the functionality was deprecated. - shall include information on alternatives to the deprecated functionality, or a - reason for the deprecation if no clear alternative is available (note that - release notes can include longer messages if needed). + reason for the deprecation if no clear alternative is available. Note that + release notes can include longer messages if needed. - shall use ``DeprecationWarning`` by default, and ``VisibleDeprecation`` for changes that need attention again after already having been deprecated or needing extra attention for some reason. - shall be listed in the release notes of the release where the deprecation is first present. -- shall not be introduced in micro (or bug fix) releases. +- shall not be introduced in micro (bug fix) releases. - shall set a ``stacklevel``, so the warning appears to come from the correct place. - shall be mentioned in the documentation for the functionality. A @@ -169,11 +169,11 @@ above the warning, helps when grepping): Removal of deprecated functionality: -- shall be done after at least 2 releases (assuming the current 6-monthly +- shall be done after at least 2 releases assuming the current 6-monthly release cycle; if that changes, there shall be at least 1 year between - deprecation and removal). + deprecation and removal. - shall be listed in the release notes of the release where the removal happened. -- can be done in any minor (but not bugfix) release. +- can be done in any minor, but not bugfix, release. For backwards incompatible changes that aren't "deprecate and remove" but for which code will start behaving differently, a ``FutureWarning`` should be @@ -214,7 +214,7 @@ In concrete cases where this policy needs to be applied, decisions are made acco to the `NumPy governance model `_. -All deprecations must be proposed on the mailing list, in order to give everyone +All deprecations must be proposed on the mailing list in order to give everyone with an interest in NumPy development a chance to comment. Removal of deprecated functionality does not need discussion on the mailing list. @@ -225,7 +225,7 @@ Functionality with more strict deprecation policies - ``numpy.random`` has its own backwards compatibility policy with additional requirements on top of the ones in this NEP, see `NEP 19 `_. -- The file format for ``.npy`` and ``.npz`` files is strictly versioned +- The file format of ``.npy`` and ``.npz`` files is strictly versioned independent of the NumPy version; existing format versions must remain backwards compatible even if a newer format version is introduced. @@ -298,13 +298,13 @@ on the mailing list where opinion was divided (but a majority in favor). The financial functions didn't cause a lot of overhead, however there were still multiple issues and PRs a year for them which cost maintainer time to deal with. And they cluttered up the ``numpy`` namespace. 
Discussion on -removing them happened in 2013 (gh-2880, rejected) and then again in 2019 +removing them was discussed in 2013 (gh-2880, rejected) and in 2019 (:ref:`NEP32`, accepted without significant complaints). Given that they were clearly outside of NumPy's scope, moving them to a -separate ``numpy-financial`` package (which gave users an easy uption to update -their code - just a simply `pip install numpy-financial`) and removing them -from NumPy after a deprecation period made sense. +separate ``numpy-financial`` package and removing them from NumPy after a +deprecation period made sense. That also gave users an easy way to update +their code by doing `pip install numpy-financial`. Alternatives From 3c7cd655c78ed7663e0dd672ed705364a09c8e16 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 24 Jan 2021 10:59:30 -0700 Subject: [PATCH 0398/1270] MAINT: Ignore ComplexWarning in ``test_iter_copy_casts`` ComplexWarning is showing up in the aarch64 wheel nightly build tests. I don't see them locally, do not know why. --- numpy/core/tests/test_nditer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 5e6472ae5536..94f61baca12f 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -1365,6 +1365,7 @@ def test_iter_copy(): @pytest.mark.parametrize("dtype", np.typecodes["All"]) @pytest.mark.parametrize("loop_dtype", np.typecodes["All"]) +@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning") def test_iter_copy_casts(dtype, loop_dtype): # Ensure the dtype is never flexible: if loop_dtype.lower() == "m": From e854a2da30c0631d7b9020b81696de39ab13140d Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Sun, 24 Jan 2021 16:27:38 -0800 Subject: [PATCH 0399/1270] DOC: Misc single to double backticks fixes. As usual, single backtics are for references, double for verbatim. Rewrap the paragraph when necessary, Change also a couple of mode=xxx to mode='xxx' as well. --- numpy/core/fromnumeric.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 52df1aad99f9..c8de48ff88be 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -319,34 +319,34 @@ def choose(a, choices, out=None, mode='raise'): But this omits some subtleties. Here is a fully general summary: - Given an "index" array (`a`) of integers and a sequence of `n` arrays + Given an "index" array (`a`) of integers and a sequence of ``n`` arrays (`choices`), `a` and each choice array are first broadcast, as necessary, to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` - for each `i`. Then, a new array with shape ``Ba.shape`` is created as + for each ``i``. 
Then, a new array with shape ``Ba.shape`` is created as follows: - * if ``mode=raise`` (the default), then, first of all, each element of - `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that - `i` (in that range) is the value at the `(j0, j1, ..., jm)` position - in `Ba` - then the value at the same position in the new array is the - value in `Bchoices[i]` at that same position; + * if ``mode='raise'`` (the default), then, first of all, each element of + ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose + that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)`` + position in ``Ba`` - then the value at the same position in the new array + is the value in ``Bchoices[i]`` at that same position; - * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) + * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed) integer; modular arithmetic is used to map integers outside the range `[0, n-1]` back into that range; and then the new array is constructed as above; - * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) - integer; negative integers are mapped to 0; values greater than `n-1` - are mapped to `n-1`; and then the new array is constructed as above. + * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed) + integer; negative integers are mapped to 0; values greater than ``n-1`` + are mapped to ``n-1``; and then the new array is constructed as above. Parameters ---------- a : int array - This array must contain integers in `[0, n-1]`, where `n` is the number - of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any - integers are permissible. + This array must contain integers in ``[0, n-1]``, where ``n`` is the + number of choices, unless ``mode=wrap`` or ``mode=clip``, in which + cases any integers are permissible. choices : sequence of arrays Choice arrays. `a` and all of the choices must be broadcastable to the same shape. If `choices` is itself an array (not recommended), then @@ -355,12 +355,12 @@ def choose(a, choices, out=None, mode='raise'): out : array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. Note that `out` is always - buffered if `mode='raise'`; use other modes for better performance. + buffered if ``mode='raise'``; use other modes for better performance. mode : {'raise' (default), 'wrap', 'clip'}, optional - Specifies how indices outside `[0, n-1]` will be treated: + Specifies how indices outside ``[0, n-1]`` will be treated: * 'raise' : an exception is raised - * 'wrap' : value becomes value mod `n` + * 'wrap' : value becomes value mod ``n`` * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 Returns ------- From d244aa9bae95d6061feaec4a9873ef4992c26245 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Mon, 25 Jan 2021 10:42:05 +0800 Subject: [PATCH 0400/1270] improve sumup intrinsics.
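Judging from the return types in the diff below, ``npyv_sum_*`` reduces a vector in the lane's own width, while ``npyv_sumup_*`` widens first so that narrow unsigned lanes cannot overflow. A rough NumPy sketch of that distinction (plain Python for illustration only, not the C intrinsics themselves)::

    import numpy as np

    lanes = np.full(16, 255, dtype=np.uint8)   # one 128-bit vector's worth of u8 lanes
    same_width = lanes.sum(dtype=np.uint8)     # accumulate in uint8: wraps around to 240
    widened = lanes.sum(dtype=np.uint16)       # accumulate in uint16 ("sumup"-style): 4080
    print(same_width, widened)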
--- numpy/core/src/_simd/_simd.dispatch.c.src | 5 +-- numpy/core/src/common/simd/avx2/arithmetic.h | 41 +++++++++--------- .../core/src/common/simd/avx512/arithmetic.h | 36 ++++++++-------- numpy/core/src/common/simd/neon/arithmetic.h | 42 +++++++++++-------- numpy/core/src/common/simd/sse/arithmetic.h | 40 ++++++++++-------- numpy/core/src/common/simd/sse/utils.h | 4 +- numpy/core/src/common/simd/vsx/arithmetic.h | 41 +++++++++--------- numpy/core/tests/test_simd.py | 8 +--- 8 files changed, 115 insertions(+), 102 deletions(-) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index 49378b5180c7..e5b58a8d296e 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -16,7 +16,6 @@ * #sfx = u8, s8, u16, s16, u32, s32, u64, s64, f32, f64# * #bsfx = b8, b8, b16, b16, b32, b32, b64, b64, b32, b64# * #esfx = u16, s8, u32, s16, u32, s32, u64, s64, f32, f64# - * #sum_ret = u16, 0, u32, 0, u32, 0, u64, 0, f32, f64# * #expand_sup =1, 0, 1, 0, 0, 0, 0, 0, 0, 0# * #simd_sup = 1, 1, 1, 1, 1, 1, 1, 1, 1, NPY_SIMD_F64# * #fp_only = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# @@ -364,11 +363,11 @@ SIMD_IMPL_INTRIN_3(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@, v@sfx@) #endif // fused_sup #if @sum_sup@ -SIMD_IMPL_INTRIN_1(sum_@sfx@, @sum_ret@, v@sfx@) +SIMD_IMPL_INTRIN_1(sum_@sfx@, @sfx@, v@sfx@) #endif // sum_sup #if @sumup_sup@ -SIMD_IMPL_INTRIN_1(sumup_@sfx@, @sum_ret@, v@sfx@) +SIMD_IMPL_INTRIN_1(sumup_@sfx@, @esfx@, v@sfx@) #endif // sumup_sup /*************************** diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index 9e13d63240be..c4c5f2093844 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -118,16 +118,10 @@ } #endif // !NPY_HAVE_FMA3 -// Horizontal add: Calculates the sum of all vector elements. 
- -NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) -{ - __m256i four = _mm256_sad_epu8(a, _mm256_setzero_si256()); - __m128i two = _mm_add_epi16(_mm256_castsi256_si128(four), _mm256_extracti128_si256(four, 1)); - __m128i one = _mm_add_epi16(two, _mm_unpackhi_epi64(two, two)); - return (npy_uint16)_mm_cvtsi128_si32(one); -} - +/*************************** + * Summation + ***************************/ +// reduce sum across vector NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { __m256i s0 = _mm256_hadd_epi32(a, a); @@ -137,15 +131,6 @@ NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) return _mm_cvtsi128_si32(s1); } -NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) -{ - const npyv_u16 even_mask = _mm256_set1_epi32(0x0000FFFF); - __m256i even = _mm256_and_si256(a, even_mask); - __m256i odd = _mm256_srli_epi32(a, 16); - __m256i eight = _mm256_add_epi32(even, odd); - return npyv_sum_u32(eight); -} - NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) { __m256i two = _mm256_add_epi64(a, _mm256_shuffle_epi32(a, _MM_SHUFFLE(1, 0, 3, 2))); @@ -172,6 +157,24 @@ NPY_FINLINE double npyv_sum_f64(npyv_f64 a) return _mm_cvtsd_f64(sum); } +// extend sum across vector +NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) +{ + __m256i four = _mm256_sad_epu8(a, _mm256_setzero_si256()); + __m128i two = _mm_add_epi16(_mm256_castsi256_si128(four), _mm256_extracti128_si256(four, 1)); + __m128i one = _mm_add_epi16(two, _mm_unpackhi_epi64(two, two)); + return (npy_uint16)_mm_cvtsi128_si32(one); +} + +NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) +{ + const npyv_u16 even_mask = _mm256_set1_epi32(0x0000FFFF); + __m256i even = _mm256_and_si256(a, even_mask); + __m256i odd = _mm256_srli_epi32(a, 16); + __m256i eight = _mm256_add_epi32(even, odd); + return npyv_sum_u32(eight); +} + #endif // _NPY_SIMD_AVX2_ARITHMETIC_H diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index ea7dc0c3c0e9..a6e448baeed0 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -130,7 +130,7 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) #define npyv_nmulsub_f64 _mm512_fnmsub_pd /*************************** - * Reduce Sum: Calculates the sum of all vector elements. + * Summation: Calculates the sum of all vector elements. * there are three ways to implement reduce sum for AVX512: * 1- split(256) /add /split(128) /add /hadd /hadd /extract * 2- shuff(cross) /add /shuff(cross) /add /shuff /add /shuff /add /extract @@ -144,29 +144,13 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) * The third one is almost the same as the second one but only works for * intel compiler/GCC 7.1/Clang 4, we still need to support older GCC. 
***************************/ - -NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) -{ -#ifdef NPY_HAVE_AVX512BW - __m512i eight = _mm512_sad_epu8(a, _mm512_setzero_si512()); - __m256i four = _mm256_add_epi16(npyv512_lower_si256(eight), npyv512_higher_si256(eight)); -#else - __m256i lo_four = _mm256_sad_epu8(npyv512_lower_si256(a), _mm256_setzero_si256()); - __m256i hi_four = _mm256_sad_epu8(npyv512_higher_si256(a), _mm256_setzero_si256()); - __m256i four = _mm256_add_epi16(lo_four, hi_four); -#endif - __m128i two = _mm_add_epi16(_mm256_castsi256_si128(four), _mm256_extracti128_si256(four, 1)); - __m128i one = _mm_add_epi16(two, _mm_unpackhi_epi64(two, two)); - return (npy_uint16)_mm_cvtsi128_si32(one); -} - +// reduce sum across vector #ifdef NPY_HAVE_AVX512F_REDUCE #define npyv_sum_u32 _mm512_reduce_add_epi32 #define npyv_sum_u64 _mm512_reduce_add_epi64 #define npyv_sum_f32 _mm512_reduce_add_ps #define npyv_sum_f64 _mm512_reduce_add_pd #else - NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { __m256i half = _mm256_add_epi32(npyv512_lower_si256(a), npyv512_higher_si256(a)); @@ -208,6 +192,22 @@ NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) } #endif +// extend sum across vector +NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) +{ +#ifdef NPY_HAVE_AVX512BW + __m512i eight = _mm512_sad_epu8(a, _mm512_setzero_si512()); + __m256i four = _mm256_add_epi16(npyv512_lower_si256(eight), npyv512_higher_si256(eight)); +#else + __m256i lo_four = _mm256_sad_epu8(npyv512_lower_si256(a), _mm256_setzero_si256()); + __m256i hi_four = _mm256_sad_epu8(npyv512_higher_si256(a), _mm256_setzero_si256()); + __m256i four = _mm256_add_epi16(lo_four, hi_four); +#endif + __m128i two = _mm_add_epi16(_mm256_castsi256_si128(four), _mm256_extracti128_si256(four, 1)); + __m128i one = _mm_add_epi16(two, _mm_unpackhi_epi64(two, two)); + return (npy_uint16)_mm_cvtsi128_si32(one); +} + NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) { const npyv_u16 even_mask = _mm512_set1_epi32(0x0000FFFF); diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h index 81207ea5e606..af34299a0ed9 100644 --- a/numpy/core/src/common/simd/neon/arithmetic.h +++ b/numpy/core/src/common/simd/neon/arithmetic.h @@ -131,30 +131,16 @@ { return vfmsq_f64(vnegq_f64(c), a, b); } #endif // NPY_SIMD_F64 -// Horizontal add: Calculates the sum of all vector elements. 
+/*************************** + * Summation + ***************************/ +// reduce sum across vector #if NPY_SIMD_F64 - #define npyv_sumup_u8 vaddlvq_u8 - #define npyv_sumup_u16 vaddlvq_u16 #define npyv_sum_u32 vaddvq_u32 #define npyv_sum_u64 vaddvq_u64 #define npyv_sum_f32 vaddvq_f32 #define npyv_sum_f64 vaddvq_f64 #else - - NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) - { - uint32x4_t t0 = vpaddlq_u16(vpaddlq_u8(a)); - uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); - return vget_lane_u32(vpadd_u32(t1, t1), 0); - } - - NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) - { - uint32x4_t t0 = vpaddlq_u16(a); - uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); - return vget_lane_u32(vpadd_u32(t1, t1), 0); - } - NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) { return vget_lane_u64(vadd_u64(vget_low_u64(a), vget_high_u64(a)),0); @@ -173,4 +159,24 @@ } #endif +// extend sum across vector +#if NPY_SIMD_F64 + #define npyv_sumup_u8 vaddlvq_u8 + #define npyv_sumup_u16 vaddlvq_u16 +#else + NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) + { + uint32x4_t t0 = vpaddlq_u16(vpaddlq_u8(a)); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); + } + + NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) + { + uint32x4_t t0 = vpaddlq_u16(a); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); + } +#endif + #endif // _NPY_SIMD_NEON_ARITHMETIC_H diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index 92a53e630371..fcb0a1716cc6 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -148,14 +148,10 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b) } #endif // !NPY_HAVE_FMA3 -// Horizontal add: Calculates the sum of all vector elements. 
- -NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) -{ - __m128i half = _mm_sad_epu8(a, _mm_setzero_si128()); - return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(half, _mm_unpackhi_epi64(half, half))); -} - +/*************************** + * Summation + ***************************/ +// reduce sum across vector NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { __m128i t = _mm_add_epi32(a, _mm_srli_si128(a, 8)); @@ -163,17 +159,10 @@ NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) return (unsigned)_mm_cvtsi128_si32(t); } -NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) -{ - npyv_u32x2 res = npyv_expand_u32_u16(a); - return (unsigned)npyv_sum_u32(_mm_add_epi32(res.val[0], res.val[1])); -} - NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) { - npy_uint64 NPY_DECL_ALIGNED(32) idx[2]; - npyv_storea_u64(idx, a); - return idx[0] + idx[1]; + __m128i one = _mm_add_epi64(a, _mm_unpackhi_epi64(a, a)); + return (npy_uint64)npyv128_cvtsi128_si64(one); } NPY_FINLINE float npyv_sum_f32(npyv_f32 a) @@ -199,6 +188,23 @@ NPY_FINLINE double npyv_sum_f64(npyv_f64 a) #endif } +// extend sum across vector +NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) +{ + __m128i two = _mm_sad_epu8(a, _mm_setzero_si128()); + __m128i one = _mm_add_epi16(two, _mm_unpackhi_epi64(two, two)); + return (npy_uint16)_mm_cvtsi128_si32(one); +} + +NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) +{ + const __m128i even_mask = _mm_set1_epi32(0x0000FFFF); + __m128i even = _mm_and_si128(a, even_mask); + __m128i odd = _mm_srli_epi32(a, 16); + __m128i four = _mm_add_epi32(even, odd); + return npyv_sum_u32(four); +} + #endif // _NPY_SIMD_SSE_ARITHMETIC_H diff --git a/numpy/core/src/common/simd/sse/utils.h b/numpy/core/src/common/simd/sse/utils.h index 5e03e12a3421..c23def11d44c 100644 --- a/numpy/core/src/common/simd/sse/utils.h +++ b/numpy/core/src/common/simd/sse/utils.h @@ -6,9 +6,9 @@ #define _NPY_SIMD_SSE_UTILS_H #if !defined(__x86_64__) && !defined(_M_X64) -NPY_FINLINE npy_uint64 npyv128_cvtsi128_si64(__m128i a) +NPY_FINLINE npy_int64 npyv128_cvtsi128_si64(__m128i a) { - npy_uint64 NPY_DECL_ALIGNED(32) idx[2]; + npy_int64 NPY_DECL_ALIGNED(16) idx[2]; _mm_store_si128((__m128i *)idx, a); return idx[0]; } diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h index 97d5efe61317..339677857b7a 100644 --- a/numpy/core/src/common/simd/vsx/arithmetic.h +++ b/numpy/core/src/common/simd/vsx/arithmetic.h @@ -116,25 +116,10 @@ #define npyv_nmulsub_f32 vec_nmadd // equivalent to -(a*b + c) #define npyv_nmulsub_f64 vec_nmadd -// Horizontal add: Calculates the sum of all vector elements. 
- -NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) -{ - const npyv_u32 zero = npyv_zero_u32(); - npyv_u32 four = vec_sum4s(a, zero); - npyv_u32 one = vec_sums((npyv_s32)four, (npyv_s32)zero); - return (npy_uint16)vec_extract(one, 3); -} - -NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) -{ - const npyv_s32 zero = npyv_zero_s32(); - npyv_u32x2 eight = npyv_expand_u32_u16(a); - npyv_u32 four = vec_add(eight.val[0], eight.val[1]); - npyv_s32 one = vec_sums((npyv_s32)four, zero); - return (npy_uint32)vec_extract(one, 3); -} - +/*************************** + * Summation + ***************************/ +// reduce sum across vector NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) { return vec_extract(vec_add(a, vec_mergel(a, a)), 0); @@ -157,4 +142,22 @@ NPY_FINLINE double npyv_sum_f64(npyv_f64 a) return vec_extract(a, 0) + vec_extract(a, 1); } +// extend sum across vector +NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) +{ + const npyv_u32 zero = npyv_zero_u32(); + npyv_u32 four = vec_sum4s(a, zero); + npyv_s32 one = vec_sums((npyv_s32)four, (npyv_s32)zero); + return (npy_uint16)vec_extract(one, 3); +} + +NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) +{ + const npyv_s32 zero = npyv_zero_s32(); + npyv_u32x2 eight = npyv_expand_u32_u16(a); + npyv_u32 four = vec_add(eight.val[0], eight.val[1]); + npyv_s32 one = vec_sums((npyv_s32)four, zero); + return (npy_uint32)vec_extract(one, 3); +} + #endif // _NPY_SIMD_VSX_ARITHMETIC_H diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index c67e44fa73ab..3efd328dac20 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -736,10 +736,7 @@ def test_arithmetic_div(self): def test_arithmetic_reduce_sum(self): """ Test reduce sum intrinics: - npyv_sum_u32 - npyv_sum_u64 - npyv_sum_f32 - npyv_sum_f64 + npyv_sum_##sfx """ if self.sfx not in ("u32", "u64", "f32", "f64"): return @@ -754,8 +751,7 @@ def test_arithmetic_reduce_sum(self): def test_arithmetic_reduce_sumup(self): """ Test overflow protect reduce sumup intrinics: - npyv_sumup_u8 - npyv_sumup_u16 + npyv_sumup_##sfx """ if self.sfx not in ("u8", "u16"): return From 3eef647fcc486c87da719fa480232fd351384a35 Mon Sep 17 00:00:00 2001 From: James Gerity Date: Sun, 24 Jan 2021 23:30:17 -0500 Subject: [PATCH 0401/1270] DOC: Drop '1-D' from docstring --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 3033a1495372..60017e8ce424 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -599,7 +599,7 @@ cdef class Generator: """ choice(a, size=None, replace=True, p=None, axis=0, shuffle=True) - Generates a random sample from a given 1-D array + Generates a random sample from a given array Parameters ---------- From 25885174d684d4eb9e162c09fd91e8296241df6e Mon Sep 17 00:00:00 2001 From: James Gerity Date: Sun, 24 Jan 2021 23:45:09 -0500 Subject: [PATCH 0402/1270] DOC: add example of sampling from 2-D array --- numpy/random/_generator.pyx | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 60017e8ce424..cba13ed59d8e 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -665,6 +665,13 @@ cdef class Generator: array([3,1,0]) # random >>> #This is equivalent to rng.permutation(np.arange(5))[:3] + Generate a uniform random sample from a 2-D array, without + replacement: + + >>> rng.choice([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 2, replace=False) + 
array([[3, 4, 5], + [0, 1, 2]]) + Generate a non-uniform random sample from np.arange(5) of size 3 without replacement: From 7541543234fda5a15f150919cf3c273330c7c728 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 25 Jan 2021 07:02:15 +0000 Subject: [PATCH 0403/1270] MAINT: Bump pydata-sphinx-theme from 0.4.1 to 0.4.2 Bumps [pydata-sphinx-theme](https://github.com/pandas-dev/pydata-sphinx-theme) from 0.4.1 to 0.4.2. - [Release notes](https://github.com/pandas-dev/pydata-sphinx-theme/releases) - [Changelog](https://github.com/pandas-dev/pydata-sphinx-theme/blob/master/docs/changelog.rst) - [Commits](https://github.com/pandas-dev/pydata-sphinx-theme/compare/v0.4.1...v0.4.2) Signed-off-by: dependabot-preview[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 64ab9a41fe7c..26be985bbf4c 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -4,4 +4,4 @@ ipython scipy matplotlib pandas -pydata-sphinx-theme==0.4.1 +pydata-sphinx-theme==0.4.2 From f5e1a0699c34ec783dfe944cbdbeefbe27f204b3 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 25 Jan 2021 07:02:28 +0000 Subject: [PATCH 0404/1270] MAINT: Bump mypy from 0.790 to 0.800 Bumps [mypy](https://github.com/python/mypy) from 0.790 to 0.800. - [Release notes](https://github.com/python/mypy/releases) - [Commits](https://github.com/python/mypy/compare/v0.790...v0.800) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 52109a5d5fbd..812d9a0d22da 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -11,5 +11,5 @@ cffi # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # - Mypy doesn't currently work on Python 3.9 -mypy==0.790; platform_python_implementation != "PyPy" +mypy==0.800; platform_python_implementation != "PyPy" typing_extensions From c7cc6c5df4d1a24f35688983d5ad5163dcb190ca Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 25 Jan 2021 07:03:26 +0000 Subject: [PATCH 0405/1270] MAINT: Bump hypothesis from 6.0.2 to 6.0.3 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.0.2 to 6.0.3. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.0.2...hypothesis-python-6.0.3) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 52109a5d5fbd..5519a1403575 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.0.2 +hypothesis==6.0.3 pytest==6.2.1 pytz==2020.5 pytest-cov==2.10.1 From 8e8070f4f72f7025bccfbc4dd83bd2b82c569ba3 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 25 Jan 2021 07:03:45 +0000 Subject: [PATCH 0406/1270] MAINT: Bump pytest-cov from 2.10.1 to 2.11.1 Bumps [pytest-cov](https://github.com/pytest-dev/pytest-cov) from 2.10.1 to 2.11.1. 
- [Release notes](https://github.com/pytest-dev/pytest-cov/releases) - [Changelog](https://github.com/pytest-dev/pytest-cov/blob/master/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest-cov/compare/v2.10.1...v2.11.1) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 52109a5d5fbd..72e0c3da694c 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -4,7 +4,7 @@ setuptools<49.2.0 hypothesis==6.0.2 pytest==6.2.1 pytz==2020.5 -pytest-cov==2.10.1 +pytest-cov==2.11.1 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' # for numpy.random.test.test_extending cffi From 7da7789a6508a60d0ad7662ac69bcee9c478c239 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 25 Jan 2021 11:59:46 +0100 Subject: [PATCH 0407/1270] TST: Fix two failing typing tests Mypy 0.800 changed one of its error messages; the `fail` tests have now been altered to reflect this change --- numpy/typing/tests/data/fail/array_constructors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/fail/array_constructors.py b/numpy/typing/tests/data/fail/array_constructors.py index 9cb59fe5f72e..f13fdacb26c9 100644 --- a/numpy/typing/tests/data/fail/array_constructors.py +++ b/numpy/typing/tests/data/fail/array_constructors.py @@ -7,10 +7,10 @@ np.require(a, requirements="TEST") # E: incompatible type np.zeros("test") # E: incompatible type -np.zeros() # E: Too few arguments +np.zeros() # E: Missing positional argument np.ones("test") # E: incompatible type -np.ones() # E: Too few arguments +np.ones() # E: Missing positional argument np.array(0, float, True) # E: Too many positional From c6e6dbc06af6d715ac679619e6e7882b7c0d3f5b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 22 Jan 2021 17:42:07 +0100 Subject: [PATCH 0408/1270] MAINT: Add (covariant) aliases for certain `ndarray`s --- numpy/__init__.pyi | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 3d92a543b32c..b7ce0c790078 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1337,6 +1337,13 @@ _ShapeType = TypeVar("_ShapeType", bound=Any) _BufferType = Union[ndarray, bytes, bytearray, memoryview] _Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"] +_ArrayUInt_co = _ArrayND[Union[bool_, unsignedinteger[Any]]] +_ArrayInt_co = _ArrayND[Union[bool_, integer[Any]]] +_ArrayFloat_co = _ArrayND[Union[bool_, integer[Any], floating[Any]]] +_ArrayComplex_co = _ArrayND[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]] +_ArrayNumber_co = _ArrayND[Union[bool_, number[Any]]] +_ArrayTD64_co = _ArrayND[Union[bool_, integer[Any], timedelta64]] + class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property def base(self) -> Optional[ndarray]: ... @@ -1484,9 +1491,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __lt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __lt__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... @overload - def __lt__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... 
+ def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... @overload def __lt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... @overload @@ -1500,9 +1507,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __le__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __le__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... @overload - def __le__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... @overload def __le__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... @overload @@ -1516,9 +1523,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __gt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __gt__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... @overload - def __gt__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... @overload def __gt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... @overload @@ -1532,9 +1539,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __ge__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ge__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... @overload - def __ge__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... @overload def __ge__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... 
@overload From aa578c76e3e0886313361fa62125788dae28a291 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 22 Jan 2021 17:43:51 +0100 Subject: [PATCH 0409/1270] ENH: Add dtype-support to 15 ufunc-based `ndarray` magic methods --- numpy/__init__.pyi | 352 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 334 insertions(+), 18 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b7ce0c790078..e744a15c6ccc 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -13,12 +13,17 @@ from numpy.typing import ( _ArrayOrScalar, _NestedSequence, _RecursiveSequence, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, _ArrayLikeNumber_co, _ArrayLikeTD64_co, _ArrayLikeDT64_co, + _ArrayLikeObject_co, # DTypes - DTypeLike, _SupportsDType, _VoidDTypeLike, @@ -1333,8 +1338,11 @@ _DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) # TODO: Set the `bound` to something more suitable once we # have proper shape support _ShapeType = TypeVar("_ShapeType", bound=Any) - +_NumberType = TypeVar("_NumberType", bound=number[Any]) _BufferType = Union[ndarray, bytes, bytearray, memoryview] + +_T = TypeVar("_T") +_2Tuple = Tuple[_T, _T] _Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"] _ArrayUInt_co = _ArrayND[Union[bool_, unsignedinteger[Any]]] @@ -1552,29 +1560,335 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): other: _RecursiveSequence, ) -> _ArrayOrScalar[bool_]: ... - def __matmul__(self, other: ArrayLike) -> Any: ... + # Unary ops + @overload + def __abs__(self: _ArrayND[bool_]) -> _ArrayOrScalar[bool_]: ... + @overload + def __abs__(self: _ArrayND[complexfloating[_NBit1, _NBit1]]) -> _ArrayOrScalar[floating[_NBit1]]: ... + @overload + def __abs__(self: _ArrayND[_NumberType]) -> _ArrayOrScalar[_NumberType]: ... + @overload + def __abs__(self: _ArrayND[timedelta64]) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __abs__(self: _ArrayND[object_]) -> Any: ... + + @overload + def __invert__(self: _ArrayND[bool_]) -> _ArrayOrScalar[bool_]: ... + @overload + def __invert__(self: _ArrayND[_IntType]) -> _ArrayOrScalar[_IntType]: ... + @overload + def __invert__(self: _ArrayND[object_]) -> Any: ... + + @overload + def __pos__(self: _ArrayND[_NumberType]) -> _ArrayOrScalar[_NumberType]: ... + @overload + def __pos__(self: _ArrayND[timedelta64]) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __pos__(self: _ArrayND[object_]) -> Any: ... + + @overload + def __neg__(self: _ArrayND[_NumberType]) -> _ArrayOrScalar[_NumberType]: ... + @overload + def __neg__(self: _ArrayND[timedelta64]) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __neg__(self: _ArrayND[object_]) -> Any: ... + + # Binary ops # NOTE: `ndarray` does not implement `__imatmul__` - def __rmatmul__(self, other: ArrayLike) -> Any: ... - def __neg__(self: _ArraySelf) -> Any: ... - def __pos__(self: _ArraySelf) -> Any: ... - def __abs__(self: _ArraySelf) -> Any: ... - def __mod__(self, other: ArrayLike) -> Any: ... - def __rmod__(self, other: ArrayLike) -> Any: ... - def __divmod__(self, other: ArrayLike) -> Tuple[Any, Any]: ... - def __rdivmod__(self, other: ArrayLike) -> Tuple[Any, Any]: ... - def __add__(self, other: ArrayLike) -> Any: ... - def __radd__(self, other: ArrayLike) -> Any: ... - def __sub__(self, other: ArrayLike) -> Any: ... - def __rsub__(self, other: ArrayLike) -> Any: ... - def __mul__(self, other: ArrayLike) -> Any: ... - def __rmul__(self, other: ArrayLike) -> Any: ... 
+ @overload + def __matmul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __matmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... + @overload + def __matmul__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __matmul__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __matmul__( + self: _ArrayNumber_co, + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rmatmul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rmatmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... + @overload + def __rmatmul__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rmatmul__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rmatmul__( + self: _ArrayNumber_co, + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __mod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __mod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __mod__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __mod__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __mod__( + self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rmod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... 
# type: ignore[misc] + @overload + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __rmod__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rmod__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rmod__( + self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __divmod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __divmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _2Tuple[_ArrayOrScalar[int8]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[_ArrayOrScalar[unsignedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[_ArrayOrScalar[signedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayOrScalar[floating[Any]]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> Union[Tuple[int64, timedelta64], Tuple[_ArrayND[int64], _ArrayND[timedelta64]]]: ... + @overload + def __divmod__( + self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], + other: _RecursiveSequence, + ) -> _2Tuple[Any]: ... + + @overload + def __rdivmod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rdivmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _2Tuple[_ArrayOrScalar[int8]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[_ArrayOrScalar[unsignedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[_ArrayOrScalar[signedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayOrScalar[floating[Any]]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> Union[Tuple[int64, timedelta64], Tuple[_ArrayND[int64], _ArrayND[timedelta64]]]: ... + @overload + def __rdivmod__( + self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], + other: _RecursiveSequence, + ) -> _2Tuple[Any]: ... + + @overload + def __add__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __add__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... 
# type: ignore[misc] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayOrScalar[datetime64]: ... + @overload + def __add__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[datetime64]: ... + @overload + def __add__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __add__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __add__( + self: _ArrayND[Union[bool_, number[Any], timedelta64, datetime64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __radd__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __radd__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayOrScalar[datetime64]: ... + @overload + def __radd__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[datetime64]: ... + @overload + def __radd__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __radd__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __radd__( + self: _ArrayND[Union[bool_, number[Any], timedelta64, datetime64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __sub__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __sub__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[datetime64]: ... + @overload + def __sub__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __sub__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __sub__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... 
+ @overload + def __sub__( + self: _ArrayND[Union[bool_, number[Any], timedelta64, datetime64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rsub__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rsub__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayOrScalar[datetime64]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __rsub__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rsub__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rsub__( + self: _ArrayND[Union[bool_, number[Any], timedelta64, datetime64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __mul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __mul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __mul__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __mul__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __mul__( + self: _ArrayND[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rmul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... 
# type: ignore[misc] + @overload + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __rmul__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rmul__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rmul__( + self: _ArrayND[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + def __floordiv__(self, other: ArrayLike) -> Any: ... def __rfloordiv__(self, other: ArrayLike) -> Any: ... def __pow__(self, other: ArrayLike) -> Any: ... def __rpow__(self, other: ArrayLike) -> Any: ... def __truediv__(self, other: ArrayLike) -> Any: ... def __rtruediv__(self, other: ArrayLike) -> Any: ... - def __invert__(self: _ArraySelf) -> Any: ... def __lshift__(self, other: ArrayLike) -> Any: ... def __rlshift__(self, other: ArrayLike) -> Any: ... def __rshift__(self, other: ArrayLike) -> Any: ... @@ -1585,6 +1899,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rxor__(self, other: ArrayLike) -> Any: ... def __or__(self, other: ArrayLike) -> Any: ... def __ror__(self, other: ArrayLike) -> Any: ... + # `np.generic` does not support inplace operations def __iadd__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... def __isub__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... @@ -1598,6 +1913,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __iand__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... def __ixor__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... def __ior__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property def dtype(self) -> _DType_co: ... From 598676ce0ecaa6cd99f423c178d6e712625d20ea Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 23 Jan 2021 03:37:38 +0100 Subject: [PATCH 0410/1270] MAINT: Added a missing overload for `ndarray[Any],ArrayLike[object_]->Any` The previous PR added support for `ndarray[object_],Any->Any` but its inverse was still missing --- numpy/__init__.pyi | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e744a15c6ccc..829ba82e18a4 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1507,6 +1507,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __lt__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... @overload + def __lt__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayOrScalar[bool_]: ... + @overload def __lt__( self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, @@ -1523,6 +1525,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __le__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... @overload + def __le__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayOrScalar[bool_]: ... + @overload def __le__( self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, @@ -1539,6 +1543,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __gt__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... 
@overload + def __gt__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayOrScalar[bool_]: ... + @overload def __gt__( self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, @@ -1555,6 +1561,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __ge__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... @overload + def __ge__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayOrScalar[bool_]: ... + @overload def __ge__( self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, From 896de10ff81e2099f881b2b274b6c3506f748322 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 23 Jan 2021 03:41:18 +0100 Subject: [PATCH 0411/1270] TST: Added new tests for `ndarray` arithmetic --- numpy/typing/tests/data/fail/arithmetic.py | 30 +++ numpy/typing/tests/data/pass/arithmetic.py | 186 ++++++++++++++++--- numpy/typing/tests/data/reveal/arithmetic.py | 178 +++++++++++++++--- numpy/typing/tests/data/reveal/mod.py | 40 ++-- 4 files changed, 371 insertions(+), 63 deletions(-) diff --git a/numpy/typing/tests/data/fail/arithmetic.py b/numpy/typing/tests/data/fail/arithmetic.py index f32eddc4bc8d..bad7040b945a 100644 --- a/numpy/typing/tests/data/fail/arithmetic.py +++ b/numpy/typing/tests/data/fail/arithmetic.py @@ -1,9 +1,39 @@ +from typing import List, Any import numpy as np b_ = np.bool_() dt = np.datetime64(0, "D") td = np.timedelta64(0, "D") +AR_b: np.ndarray[Any, np.dtype[np.bool_]] +AR_f: np.ndarray[Any, np.dtype[np.float64]] +AR_c: np.ndarray[Any, np.dtype[np.complex128]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] + +AR_LIKE_b: List[bool] +AR_LIKE_f: List[float] +AR_LIKE_c: List[complex] +AR_LIKE_m: List[np.timedelta64] +AR_LIKE_M: List[np.datetime64] + +# NOTE: mypys `NoReturn` errors are, unfortunately, not that great +_1 = AR_b - AR_LIKE_b # E: Need type annotation +_2 = AR_LIKE_b - AR_b # E: Need type annotation + +AR_f - AR_LIKE_m # E: Unsupported operand types +AR_f - AR_LIKE_M # E: Unsupported operand types +AR_c - AR_LIKE_m # E: Unsupported operand types +AR_c - AR_LIKE_M # E: Unsupported operand types + +AR_m - AR_LIKE_f # E: Unsupported operand types +AR_M - AR_LIKE_f # E: Unsupported operand types +AR_m - AR_LIKE_c # E: Unsupported operand types +AR_M - AR_LIKE_c # E: Unsupported operand types + +AR_m - AR_LIKE_M # E: Unsupported operand types +AR_LIKE_m - AR_M # E: Unsupported operand types + b_ - b_ # E: No overload variant dt + dt # E: Unsupported operand types diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index ffbaf2975332..4840d1fab1e7 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +from typing import Any import numpy as np c16 = np.complex128(1) @@ -20,8 +23,149 @@ f = float(1) i = int(1) -AR = np.ones(1, dtype=np.float64) -AR.setflags(write=False) + +class Object: + def __array__(self, dtype: None = None) -> np.ndarray[Any, np.dtype[np.object_]]: + ret = np.empty((), dtype=object) + ret[()] = self + return ret + + def __sub__(self, value: Any) -> Object: + return self + + def __rsub__(self, value: Any) -> Object: + return self + + +AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True]) +AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) +AR_i: np.ndarray[Any, np.dtype[np.int64]] = np.array([1]) 
+AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) +AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1j]) +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64(1, "D")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64(1, "D")]) +AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([Object()]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = [np.datetime64(1, "D")] +AR_LIKE_O = [Object()] + +# Array subtractions + +AR_b - AR_LIKE_u +AR_b - AR_LIKE_i +AR_b - AR_LIKE_f +AR_b - AR_LIKE_c +AR_b - AR_LIKE_m +AR_b - AR_LIKE_O + +AR_LIKE_u - AR_b +AR_LIKE_i - AR_b +AR_LIKE_f - AR_b +AR_LIKE_c - AR_b +AR_LIKE_m - AR_b +AR_LIKE_M - AR_b +AR_LIKE_O - AR_b + +AR_u - AR_LIKE_b +AR_u - AR_LIKE_u +AR_u - AR_LIKE_i +AR_u - AR_LIKE_f +AR_u - AR_LIKE_c +AR_u - AR_LIKE_m +AR_u - AR_LIKE_O + +AR_LIKE_b - AR_u +AR_LIKE_u - AR_u +AR_LIKE_i - AR_u +AR_LIKE_f - AR_u +AR_LIKE_c - AR_u +AR_LIKE_m - AR_u +AR_LIKE_M - AR_u +AR_LIKE_O - AR_u + +AR_i - AR_LIKE_b +AR_i - AR_LIKE_u +AR_i - AR_LIKE_i +AR_i - AR_LIKE_f +AR_i - AR_LIKE_c +AR_i - AR_LIKE_m +AR_i - AR_LIKE_O + +AR_LIKE_b - AR_i +AR_LIKE_u - AR_i +AR_LIKE_i - AR_i +AR_LIKE_f - AR_i +AR_LIKE_c - AR_i +AR_LIKE_m - AR_i +AR_LIKE_M - AR_i +AR_LIKE_O - AR_i + +AR_f - AR_LIKE_b +AR_f - AR_LIKE_u +AR_f - AR_LIKE_i +AR_f - AR_LIKE_f +AR_f - AR_LIKE_c +AR_f - AR_LIKE_O + +AR_LIKE_b - AR_f +AR_LIKE_u - AR_f +AR_LIKE_i - AR_f +AR_LIKE_f - AR_f +AR_LIKE_c - AR_f +AR_LIKE_O - AR_f + +AR_c - AR_LIKE_b +AR_c - AR_LIKE_u +AR_c - AR_LIKE_i +AR_c - AR_LIKE_f +AR_c - AR_LIKE_c +AR_c - AR_LIKE_O + +AR_LIKE_b - AR_c +AR_LIKE_u - AR_c +AR_LIKE_i - AR_c +AR_LIKE_f - AR_c +AR_LIKE_c - AR_c +AR_LIKE_O - AR_c + +AR_m - AR_LIKE_b +AR_m - AR_LIKE_u +AR_m - AR_LIKE_i +AR_m - AR_LIKE_m + +AR_LIKE_b - AR_m +AR_LIKE_u - AR_m +AR_LIKE_i - AR_m +AR_LIKE_m - AR_m +AR_LIKE_M - AR_m + +AR_M - AR_LIKE_b +AR_M - AR_LIKE_u +AR_M - AR_LIKE_i +AR_M - AR_LIKE_m +AR_M - AR_LIKE_M + +AR_LIKE_M - AR_M + +AR_O - AR_LIKE_b +AR_O - AR_LIKE_u +AR_O - AR_LIKE_i +AR_O - AR_LIKE_f +AR_O - AR_LIKE_c +AR_O - AR_LIKE_O + +AR_LIKE_b - AR_O +AR_LIKE_u - AR_O +AR_LIKE_i - AR_O +AR_LIKE_f - AR_O +AR_LIKE_c - AR_O +AR_LIKE_O - AR_O # unary ops @@ -34,7 +178,7 @@ -u8 -u4 -td --AR +-AR_f +c16 +c8 @@ -45,7 +189,7 @@ +u8 +u4 +td -+AR ++AR_f abs(c16) abs(c8) @@ -57,7 +201,7 @@ abs(u4) abs(td) abs(b_) -abs(AR) +abs(AR_f) # Time structures @@ -129,7 +273,7 @@ c16 + c c16 + f c16 + i -c16 + AR +c16 + AR_f c16 + c16 f8 + c16 @@ -142,7 +286,7 @@ c + c16 f + c16 i + c16 -AR + c16 +AR_f + c16 c8 + c16 c8 + f8 @@ -155,7 +299,7 @@ c8 + c c8 + f c8 + i -c8 + AR +c8 + AR_f c16 + c8 f8 + c8 @@ -168,7 +312,7 @@ c + c8 f + c8 i + c8 -AR + c8 +AR_f + c8 # Float @@ -181,7 +325,7 @@ f8 + c f8 + f f8 + i -f8 + AR +f8 + AR_f f8 + f8 i8 + f8 @@ -192,7 +336,7 @@ c + f8 f + f8 i + f8 -AR + f8 +AR_f + f8 f4 + f8 f4 + i8 @@ -203,7 +347,7 @@ f4 + c f4 + f f4 + i -f4 + AR +f4 + AR_f f8 + f4 i8 + f4 @@ -214,7 +358,7 @@ c + f4 f + f4 i + f4 -AR + f4 +AR_f + f4 # Int @@ -227,7 +371,7 @@ i8 + c i8 + f i8 + i -i8 + AR +i8 + AR_f u8 + u8 u8 + i4 @@ -237,7 +381,7 @@ u8 + c u8 + f u8 + i -u8 + AR +u8 + AR_f i8 + i8 u8 + i8 @@ -248,7 +392,7 @@ c + i8 f + i8 i + i8 -AR + i8 +AR_f + i8 u8 + u8 i4 + u8 @@ -258,14 +402,14 @@ c + u8 f + u8 i + u8 -AR + u8 +AR_f + u8 i4 + i8 i4 + i4 i4 + i i4 + b_ i4 + b -i4 + AR +i4 + AR_f u4 + i8 u4 + i4 @@ -274,14 +418,14 @@ u4 + i u4 + b_ u4 + b -u4 + 
AR +u4 + AR_f i8 + i4 i4 + i4 i + i4 b_ + i4 b + i4 -AR + i4 +AR_f + i4 i8 + u4 i4 + u4 @@ -290,4 +434,4 @@ b_ + u4 b + u4 i + u4 -AR + u4 +AR_f + u4 diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.py index 8574df9365ee..1a0f595c50c0 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.py +++ b/numpy/typing/tests/data/reveal/arithmetic.py @@ -1,3 +1,4 @@ +from typing import Any, List import numpy as np c16 = np.complex128() @@ -20,8 +21,143 @@ f = float() i = int() -AR = np.array([0], dtype=np.float64) -AR.setflags(write=False) +AR_b: np.ndarray[Any, np.dtype[np.bool_]] +AR_u: np.ndarray[Any, np.dtype[np.uint32]] +AR_i: np.ndarray[Any, np.dtype[np.int64]] +AR_f: np.ndarray[Any, np.dtype[np.float64]] +AR_c: np.ndarray[Any, np.dtype[np.complex128]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] +AR_O: np.ndarray[Any, np.dtype[np.object_]] + +AR_LIKE_b: List[bool] +AR_LIKE_u: List[np.uint32] +AR_LIKE_i: List[int] +AR_LIKE_f: List[float] +AR_LIKE_c: List[complex] +AR_LIKE_m: List[np.timedelta64] +AR_LIKE_M: List[np.datetime64] +AR_LIKE_O: List[np.object_] + +# Array subtraction + +reveal_type(AR_b - AR_LIKE_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_b - AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_b - AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_b - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_b - AR_LIKE_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_b - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_u - AR_b) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_LIKE_i - AR_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_f - AR_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_c - AR_b) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_m - AR_b) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_M - AR_b) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_LIKE_O - AR_b) # E: Any + +reveal_type(AR_u - AR_LIKE_b) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_u - AR_LIKE_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_u - AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_u - AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_u - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_u - AR_LIKE_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_u - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, 
numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_LIKE_u - AR_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_LIKE_i - AR_u) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_f - AR_u) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_c - AR_u) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_m - AR_u) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_M - AR_u) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_LIKE_O - AR_u) # E: Any + +reveal_type(AR_i - AR_LIKE_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_i - AR_LIKE_u) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_i - AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_i - AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_i - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_i - AR_LIKE_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_i - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_u - AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_i - AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_f - AR_i) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_c - AR_i) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_m - AR_i) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_M - AR_i) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_LIKE_O - AR_i) # E: Any + +reveal_type(AR_f - AR_LIKE_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_f - AR_LIKE_u) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_f - AR_LIKE_i) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_f - AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_f - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_f - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_u - AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_i - AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_f - 
AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_c - AR_f) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_O - AR_f) # E: Any + +reveal_type(AR_c - AR_LIKE_b) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c - AR_LIKE_u) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c - AR_LIKE_i) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c - AR_LIKE_f) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_u - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_i - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_f - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_c - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_O - AR_c) # E: Any + +reveal_type(AR_m - AR_LIKE_b) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_m - AR_LIKE_u) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_m - AR_LIKE_i) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_m - AR_LIKE_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_m - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_u - AR_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_i - AR_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_m - AR_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_M - AR_m) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_LIKE_O - AR_m) # E: Any + +reveal_type(AR_M - AR_LIKE_b) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_M - AR_LIKE_u) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_M - AR_LIKE_i) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_M - AR_LIKE_m) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_M - AR_LIKE_M) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_M - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_M - AR_M) # E: Union[numpy.timedelta64, numpy.ndarray[Any, 
numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_O - AR_M) # E: Any + +reveal_type(AR_O - AR_LIKE_b) # E: Any +reveal_type(AR_O - AR_LIKE_u) # E: Any +reveal_type(AR_O - AR_LIKE_i) # E: Any +reveal_type(AR_O - AR_LIKE_f) # E: Any +reveal_type(AR_O - AR_LIKE_c) # E: Any +reveal_type(AR_O - AR_LIKE_m) # E: Any +reveal_type(AR_O - AR_LIKE_M) # E: Any +reveal_type(AR_O - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_O) # E: Any +reveal_type(AR_LIKE_u - AR_O) # E: Any +reveal_type(AR_LIKE_i - AR_O) # E: Any +reveal_type(AR_LIKE_f - AR_O) # E: Any +reveal_type(AR_LIKE_c - AR_O) # E: Any +reveal_type(AR_LIKE_m - AR_O) # E: Any +reveal_type(AR_LIKE_M - AR_O) # E: Any +reveal_type(AR_LIKE_O - AR_O) # E: Any # unary ops @@ -34,7 +170,7 @@ reveal_type(-u8) # E: {uint64} reveal_type(-u4) # E: {uint32} reveal_type(-td) # E: numpy.timedelta64 -reveal_type(-AR) # E: Any +reveal_type(-AR_f) # E: Any reveal_type(+c16) # E: {complex128} reveal_type(+c8) # E: {complex64} @@ -45,7 +181,7 @@ reveal_type(+u8) # E: {uint64} reveal_type(+u4) # E: {uint32} reveal_type(+td) # E: numpy.timedelta64 -reveal_type(+AR) # E: Any +reveal_type(+AR_f) # E: Any reveal_type(abs(c16)) # E: {float64} reveal_type(abs(c8)) # E: {float32} @@ -57,7 +193,7 @@ reveal_type(abs(u4)) # E: {uint32} reveal_type(abs(td)) # E: numpy.timedelta64 reveal_type(abs(b_)) # E: numpy.bool_ -reveal_type(abs(AR)) # E: Any +reveal_type(abs(AR_f)) # E: Any # Time structures @@ -128,7 +264,7 @@ reveal_type(c16 + f) # E: {complex128} reveal_type(c16 + i) # E: {complex128} -reveal_type(c16 + AR) # E: Any +reveal_type(c16 + AR_f) # E: Any reveal_type(c16 + c16) # E: {complex128} reveal_type(f8 + c16) # E: {complex128} @@ -141,7 +277,7 @@ reveal_type(c + c16) # E: {complex128} reveal_type(f + c16) # E: {complex128} reveal_type(i + c16) # E: {complex128} -reveal_type(AR + c16) # E: Any +reveal_type(AR_f + c16) # E: Any reveal_type(c8 + c16) # E: {complex128} reveal_type(c8 + f8) # E: {complex128} @@ -154,7 +290,7 @@ reveal_type(c8 + c) # E: {complex128} reveal_type(c8 + f) # E: {complex128} reveal_type(c8 + i) # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}] -reveal_type(c8 + AR) # E: Any +reveal_type(c8 + AR_f) # E: Any reveal_type(c16 + c8) # E: {complex128} reveal_type(f8 + c8) # E: {complex128} @@ -167,7 +303,7 @@ reveal_type(c + c8) # E: {complex128} reveal_type(f + c8) # E: {complex128} reveal_type(i + c8) # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}] -reveal_type(AR + c8) # E: Any +reveal_type(AR_f + c8) # E: Any # Float @@ -180,7 +316,7 @@ reveal_type(f8 + c) # E: {complex128} reveal_type(f8 + f) # E: {float64} reveal_type(f8 + i) # E: {float64} -reveal_type(f8 + AR) # E: Any +reveal_type(f8 + AR_f) # E: Any reveal_type(f8 + f8) # E: {float64} reveal_type(i8 + f8) # E: {float64} @@ -191,7 +327,7 @@ reveal_type(c + f8) # E: {complex128} reveal_type(f + f8) # E: {float64} reveal_type(i + f8) # E: {float64} -reveal_type(AR + f8) # E: Any +reveal_type(AR_f + f8) # E: Any reveal_type(f4 + f8) # E: {float64} reveal_type(f4 + i8) # E: {float64} @@ -202,7 +338,7 @@ reveal_type(f4 + c) # E: {complex128} reveal_type(f4 + f) # E: {float64} reveal_type(f4 + i) # E: numpy.floating[{_NBitInt}] -reveal_type(f4 + AR) # E: Any +reveal_type(f4 + AR_f) # E: Any reveal_type(f8 + f4) # E: {float64} reveal_type(i8 + f4) # E: {float64} @@ -213,7 +349,7 @@ reveal_type(c + f4) # E: {complex128} reveal_type(f + f4) # E: {float64} reveal_type(i + f4) # E: numpy.floating[{_NBitInt}] -reveal_type(AR + f4) # E: Any +reveal_type(AR_f + f4) # E: Any # Int @@ -226,7 
+362,7 @@ reveal_type(i8 + c) # E: {complex128} reveal_type(i8 + f) # E: {float64} reveal_type(i8 + i) # E: {int64} -reveal_type(i8 + AR) # E: Any +reveal_type(i8 + AR_f) # E: Any reveal_type(u8 + u8) # E: {uint64} reveal_type(u8 + i4) # E: Union[numpy.signedinteger[Any], {float64}] @@ -236,7 +372,7 @@ reveal_type(u8 + c) # E: {complex128} reveal_type(u8 + f) # E: {float64} reveal_type(u8 + i) # E: Union[numpy.signedinteger[Any], {float64}] -reveal_type(u8 + AR) # E: Any +reveal_type(u8 + AR_f) # E: Any reveal_type(i8 + i8) # E: {int64} reveal_type(u8 + i8) # E: Union[numpy.signedinteger[Any], {float64}] @@ -247,7 +383,7 @@ reveal_type(c + i8) # E: {complex128} reveal_type(f + i8) # E: {float64} reveal_type(i + i8) # E: {int64} -reveal_type(AR + i8) # E: Any +reveal_type(AR_f + i8) # E: Any reveal_type(u8 + u8) # E: {uint64} reveal_type(i4 + u8) # E: Union[numpy.signedinteger[Any], {float64}] @@ -257,14 +393,14 @@ reveal_type(c + u8) # E: {complex128} reveal_type(f + u8) # E: {float64} reveal_type(i + u8) # E: Union[numpy.signedinteger[Any], {float64}] -reveal_type(AR + u8) # E: Any +reveal_type(AR_f + u8) # E: Any reveal_type(i4 + i8) # E: {int64} reveal_type(i4 + i4) # E: {int32} reveal_type(i4 + i) # E: {int_} reveal_type(i4 + b_) # E: {int32} reveal_type(i4 + b) # E: {int32} -reveal_type(i4 + AR) # E: Any +reveal_type(i4 + AR_f) # E: Any reveal_type(u4 + i8) # E: Union[numpy.signedinteger[Any], {float64}] reveal_type(u4 + i4) # E: Union[numpy.signedinteger[Any], {float64}] @@ -273,14 +409,14 @@ reveal_type(u4 + i) # E: Union[numpy.signedinteger[Any], {float64}] reveal_type(u4 + b_) # E: {uint32} reveal_type(u4 + b) # E: {uint32} -reveal_type(u4 + AR) # E: Any +reveal_type(u4 + AR_f) # E: Any reveal_type(i8 + i4) # E: {int64} reveal_type(i4 + i4) # E: {int32} reveal_type(i + i4) # E: {int_} reveal_type(b_ + i4) # E: {int32} reveal_type(b + i4) # E: {int32} -reveal_type(AR + i4) # E: Any +reveal_type(AR_f + i4) # E: Any reveal_type(i8 + u4) # E: Union[numpy.signedinteger[Any], {float64}] reveal_type(i4 + u4) # E: Union[numpy.signedinteger[Any], {float64}] @@ -289,4 +425,4 @@ reveal_type(b_ + u4) # E: {uint32} reveal_type(b + u4) # E: {uint32} reveal_type(i + u4) # E: Union[numpy.signedinteger[Any], {float64}] -reveal_type(AR + u4) # E: Any +reveal_type(AR_f + u4) # E: Any diff --git a/numpy/typing/tests/data/reveal/mod.py b/numpy/typing/tests/data/reveal/mod.py index 989ef99fd002..4a913f11a931 100644 --- a/numpy/typing/tests/data/reveal/mod.py +++ b/numpy/typing/tests/data/reveal/mod.py @@ -1,3 +1,4 @@ +from typing import Any import numpy as np f8 = np.float64() @@ -15,21 +16,18 @@ f = float() i = int() -AR = np.array([1], dtype=np.bool_) -AR.setflags(write=False) - -AR2 = np.array([1], dtype=np.timedelta64) -AR2.setflags(write=False) +AR_b: np.ndarray[Any, np.dtype[np.bool_]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] # Time structures reveal_type(td % td) # E: numpy.timedelta64 -reveal_type(AR2 % td) # E: Any -reveal_type(td % AR2) # E: Any +reveal_type(AR_m % td) # E: Any +reveal_type(td % AR_m) # E: Any reveal_type(divmod(td, td)) # E: Tuple[{int64}, numpy.timedelta64] -reveal_type(divmod(AR2, td)) # E: Tuple[Any, Any] -reveal_type(divmod(td, AR2)) # E: Tuple[Any, Any] +reveal_type(divmod(AR_m, td)) # E: Union[Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.timedelta64], Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]]] +reveal_type(divmod(td, AR_m)) # E: 
Union[Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.timedelta64], Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]]] # Bool @@ -40,7 +38,7 @@ reveal_type(b_ % i8) # E: {int64} reveal_type(b_ % u8) # E: {uint64} reveal_type(b_ % f8) # E: {float64} -reveal_type(b_ % AR) # E: Any +reveal_type(b_ % AR_b) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]] reveal_type(divmod(b_, b)) # E: Tuple[{int8}, {int8}] reveal_type(divmod(b_, i)) # E: Tuple[{int_}, {int_}] @@ -49,7 +47,7 @@ reveal_type(divmod(b_, i8)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(b_, u8)) # E: Tuple[{uint64}, {uint64}] reveal_type(divmod(b_, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(b_, AR)) # E: Tuple[Any, Any] +reveal_type(divmod(b_, AR_b)) # E: Tuple[Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]], Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]]] reveal_type(b % b_) # E: {int8} reveal_type(i % b_) # E: {int_} @@ -58,7 +56,7 @@ reveal_type(i8 % b_) # E: {int64} reveal_type(u8 % b_) # E: {uint64} reveal_type(f8 % b_) # E: {float64} -reveal_type(AR % b_) # E: Any +reveal_type(AR_b % b_) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]] reveal_type(divmod(b, b_)) # E: Tuple[{int8}, {int8}] reveal_type(divmod(i, b_)) # E: Tuple[{int_}, {int_}] @@ -67,7 +65,7 @@ reveal_type(divmod(i8, b_)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(u8, b_)) # E: Tuple[{uint64}, {uint64}] reveal_type(divmod(f8, b_)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(AR, b_)) # E: Tuple[Any, Any] +reveal_type(divmod(AR_b, b_)) # E: Tuple[Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]], Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]]] # int @@ -80,7 +78,7 @@ reveal_type(i4 % f8) # E: {float64} reveal_type(i4 % i4) # E: {int32} reveal_type(i4 % f4) # E: {float32} -reveal_type(i8 % AR) # E: Any +reveal_type(i8 % AR_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] reveal_type(divmod(i8, b)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(i8, i)) # E: Tuple[{int64}, {int64}] @@ -91,7 +89,7 @@ reveal_type(divmod(i8, f4)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] reveal_type(divmod(i4, f4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(i8, AR)) # E: Tuple[Any, Any] +reveal_type(divmod(i8, AR_b)) # E: Tuple[Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]], Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]]] reveal_type(b % i8) # E: {int64} reveal_type(i % i8) # E: {int64} @@ -102,7 +100,7 @@ reveal_type(f8 % i4) # E: {float64} reveal_type(i4 % i4) # E: {int32} reveal_type(f4 % i4) # E: {float32} -reveal_type(AR % i8) # E: Any +reveal_type(AR_b % i8) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] reveal_type(divmod(b, i8)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(i, i8)) # E: Tuple[{int64}, {int64}] @@ -113,7 +111,7 @@ reveal_type(divmod(f4, i8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] reveal_type(divmod(f4, i4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(AR, i8)) # E: Tuple[Any, Any] +reveal_type(divmod(AR_b, i8)) # E: Tuple[Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]], Union[numpy.signedinteger[Any], numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[Any]]]]] # float @@ -122,7 +120,7 @@ reveal_type(f8 % f) # E: {float64} reveal_type(i8 % f4) # E: {float64} reveal_type(f4 % f4) # E: {float32} -reveal_type(f8 % AR) # E: Any +reveal_type(f8 % AR_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] reveal_type(divmod(f8, b)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f8, i)) # E: Tuple[{float64}, {float64}] @@ -130,7 +128,7 @@ reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f8, f4)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(f8, AR)) # E: Tuple[Any, Any] +reveal_type(divmod(f8, AR_b)) # E: Tuple[Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]], Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]] reveal_type(b % f8) # E: {float64} reveal_type(i % f8) # E: {float64} @@ -138,7 +136,7 @@ reveal_type(f8 % f8) # E: {float64} reveal_type(f8 % f8) # E: {float64} reveal_type(f4 % f4) # E: {float32} -reveal_type(AR % f8) # E: Any +reveal_type(AR_b % f8) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] reveal_type(divmod(b, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(i, f8)) # E: Tuple[{float64}, {float64}] @@ -146,4 +144,4 @@ reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f4, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(AR, f8)) # E: Tuple[Any, Any] +reveal_type(divmod(AR_b, f8)) # E: Tuple[Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]], Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]] From 1dd2bd66119f73a31eb510615bfb77c193a164fa Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 25 Jan 2021 16:03:54 +0100 Subject: [PATCH 0412/1270] STY: Remove `__all__` in favor of explicit reexports --- numpy/__init__.pyi | 154 ++++++++++++++++----------------------------- 1 file changed, 54 insertions(+), 100 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 3d92a543b32c..4178f49517d6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -159,66 +159,66 @@ else: # Ensures that the stubs are picked up from numpy import ( - char, - ctypeslib, - emath, - fft, - lib, - linalg, - ma, - matrixlib, - polynomial, - random, - rec, - testing, - version, + char as char, + ctypeslib as ctypeslib, + emath as emath, + fft as fft, + lib as lib, + linalg as linalg, + ma as ma, + matrixlib as matrixlib, + polynomial as polynomial, + random as random, + rec as rec, + testing as testing, + version as version, ) from numpy.core.function_base import ( - linspace, - logspace, - geomspace, + linspace as linspace, + logspace as logspace, + geomspace as geomspace, ) from numpy.core.fromnumeric import ( - take, - reshape, - choose, - repeat, - put, - swapaxes, - transpose, - partition, - argpartition, - sort, - argsort, - argmax, - argmin, - searchsorted, - resize, - squeeze, - diagonal, - trace, - ravel, - nonzero, - shape, - compress, - clip, - sum, - all, - any, - cumsum, - ptp, - amax, - amin, - prod, - cumprod, - ndim, - size, - around, - mean, - std, - var, + take as take, + reshape as reshape, + choose as choose, + repeat as repeat, + put as put, + swapaxes as swapaxes, + transpose as transpose, + partition as partition, + argpartition as argpartition, + sort as sort, + argsort as argsort, + 
argmax as argmax, + argmin as argmin, + searchsorted as searchsorted, + resize as resize, + squeeze as squeeze, + diagonal as diagonal, + trace as trace, + ravel as ravel, + nonzero as nonzero, + shape as shape, + compress as compress, + clip as clip, + sum as sum, + all as all, + any as any, + cumsum as cumsum, + ptp as ptp, + amax as amax, + amin as amin, + prod as prod, + cumprod as cumprod, + ndim as ndim, + size as size, + around as around, + mean as mean, + std as std, + var as var, ) from numpy.core._asarray import ( @@ -311,52 +311,6 @@ from numpy.core.shape_base import ( vstack as vstack, ) -# Add an object to `__all__` if their stubs are defined in an external file; -# their stubs will not be recognized otherwise. -# NOTE: This is redundant for objects defined within this file. -__all__ = [ - "linspace", - "logspace", - "geomspace", - "take", - "reshape", - "choose", - "repeat", - "put", - "swapaxes", - "transpose", - "partition", - "argpartition", - "sort", - "argsort", - "argmax", - "argmin", - "searchsorted", - "resize", - "squeeze", - "diagonal", - "trace", - "ravel", - "nonzero", - "shape", - "compress", - "clip", - "sum", - "all", - "any", - "cumsum", - "ptp", - "amax", - "amin", - "prod", - "cumprod", - "ndim", - "size", - "around", - "mean", - "std", - "var", -] DataSource: Any MachAr: Any From bcfb1180374a50c34e11e576efe00d5c676d9f77 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 25 Jan 2021 16:05:01 +0100 Subject: [PATCH 0413/1270] ENH: Add annotations for certain module-level dunders --- numpy/__init__.pyi | 7 +++++++ numpy/typing/tests/data/reveal/modules.py | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 4178f49517d6..27881a7259af 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -311,6 +311,13 @@ from numpy.core.shape_base import ( vstack as vstack, ) +__all__: List[str] +__path__: List[str] +__version__: str +__git_version__: str +__NUMPY_SETUP__: bool +__deprecated_attrs__: Dict[str, Tuple[type, str]] +__expired_functions__: Dict[str, str] DataSource: Any MachAr: Any diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py index 40646315270d..3ff44b6a7204 100644 --- a/numpy/typing/tests/data/reveal/modules.py +++ b/numpy/typing/tests/data/reveal/modules.py @@ -18,3 +18,11 @@ # TODO: Remove when annotations have been added to `np.testing.assert_equal` reveal_type(np.testing.assert_equal) # E: Any + +reveal_type(np.__all__) # E: list[builtins.str] +reveal_type(np.__path__) # E: list[builtins.str] +reveal_type(np.__version__) # E: str +reveal_type(np.__git_version__) # E: str +reveal_type(np.__NUMPY_SETUP__) # E: bool +reveal_type(np.__deprecated_attrs__) # E: dict[builtins.str, Tuple[builtins.type, builtins.str]] +reveal_type(np.__expired_functions__) # E: dict[builtins.str, builtins.str] From 38bda3ce9e6dc075548f378806488ad152c2e46c Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 4 Jan 2021 22:57:06 -0600 Subject: [PATCH 0414/1270] DEP: Deprecate promotion of numbers and bool to string --- doc/release/upcoming_changes/18116.future.rst | 29 ++++++++ numpy/core/src/multiarray/dtypemeta.c | 13 ++++ numpy/core/src/umath/ufunc_type_resolution.c | 55 +++++++++++++-- numpy/core/tests/test_array_coercion.py | 1 + numpy/core/tests/test_deprecations.py | 38 ++++++++++ numpy/core/tests/test_half.py | 6 +- numpy/core/tests/test_numeric.py | 69 +++++++++++-------- numpy/core/tests/test_regression.py | 4 +- numpy/core/tests/test_shape_base.py | 5 +- 
numpy/lib/tests/test_regression.py | 5 +-
 10 files changed, 182 insertions(+), 43 deletions(-)
 create mode 100644 doc/release/upcoming_changes/18116.future.rst

diff --git a/doc/release/upcoming_changes/18116.future.rst b/doc/release/upcoming_changes/18116.future.rst
new file mode 100644
index 000000000000..1341d022f377
--- /dev/null
+++ b/doc/release/upcoming_changes/18116.future.rst
@@ -0,0 +1,29 @@
+Promotion of strings with numbers and bools is deprecated
+---------------------------------------------------------
+Any promotion of numbers and strings is deprecated and will
+give a ``FutureWarning``; the main affected functionalities
+are:
+
+* `numpy.promote_types` and `numpy.result_type` which will raise
+  an error in this case in the future.
+* `numpy.concatenate` will raise an error when concatenating a string
+  and numeric array. You can use ``dtype="S"`` to explicitly request
+  a string result.
+* `numpy.array` and related functions will start returning ``object``
+  arrays because these functions use ``object`` as a fallback when
+  no common dtype can be found. (In this case setting the
+  ``FutureWarning`` to be raised will unfortunately lead to the new
+  behaviour.)
+
+This will mainly affect code such as::
+
+    np.asarray(['string', 0])
+
+and::
+
+    np.concatenate((['string'], [0]))
+
+In both cases, adding ``dtype="U"`` or ``dtype="S"`` will give the
+previous (string) result.
+
+Comparisons, universal functions, and casting are not affected by this.
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 2931977c204e..b2f36d794830 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -407,6 +407,19 @@ string_unicode_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
         Py_INCREF(Py_NotImplemented);
         return (PyArray_DTypeMeta *)Py_NotImplemented;
     }
+    if (other->type_num != NPY_STRING && other->type_num != NPY_UNICODE) {
+        /* Deprecated 2020-12-19, NumPy 1.21. */
+        if (DEPRECATE_FUTUREWARNING(
+                "Promotion of numbers and bools to strings is deprecated. "
+                "In the future, code such as `np.concatenate((['string'], [0]))` "
+                "will raise an error, while `np.asarray(['string', 0])` will "
+                "return an array with `dtype=object`. To avoid the warning "
+                "while retaining a string result use `dtype='U'` (or 'S'). "
+                "To get an array of Python objects use `dtype=object`. "
+                "(Warning added in NumPy 1.21)") < 0) {
+            return NULL;
+        }
+    }
     /*
      * The builtin types are ordered by complexity (aside from object) here.
* Arguably, we should not consider numbers and strings "common", but diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index be48be079e92..c46346118dea 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -111,14 +111,18 @@ raise_no_loop_found_error( return -1; } for (i = 0; i < ufunc->nargs; ++i) { - Py_INCREF(dtypes[i]); - PyTuple_SET_ITEM(dtypes_tup, i, (PyObject *)dtypes[i]); + PyObject *tmp = Py_None; + if (dtypes[i] != NULL) { + tmp = (PyObject *)dtypes[i]; + } + Py_INCREF(tmp); + PyTuple_SET_ITEM(dtypes_tup, i, tmp); } /* produce an error object */ exc_value = PyTuple_Pack(2, ufunc, dtypes_tup); Py_DECREF(dtypes_tup); - if (exc_value == NULL){ + if (exc_value == NULL) { return -1; } PyErr_SetObject(exc_type, exc_value); @@ -329,10 +333,23 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc, } if (type_tup == NULL) { - /* Input types are the result type */ - out_dtypes[0] = PyArray_ResultType(2, operands, 0, NULL); - if (out_dtypes[0] == NULL) { - return -1; + /* + * DEPRECATED NumPy 1.20, 2020-12. + * This check is required to avoid the FutureWarning that + * ResultType will give for number->string promotions. + * (We never supported flexible dtypes here.) + */ + if (!PyArray_ISFLEXIBLE(operands[0]) && + !PyArray_ISFLEXIBLE(operands[1])) { + out_dtypes[0] = PyArray_ResultType(2, operands, 0, NULL); + if (out_dtypes[0] == NULL) { + return -1; + } + } + else { + /* Not doing anything will lead to a loop no found error. */ + out_dtypes[0] = PyArray_DESCR(operands[0]); + Py_INCREF(out_dtypes[0]); } out_dtypes[1] = out_dtypes[0]; Py_INCREF(out_dtypes[1]); @@ -488,6 +505,30 @@ PyUFunc_SimpleUniformOperationTypeResolver( out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0])); } else { + int iop; + npy_bool has_flexible = 0; + npy_bool has_object = 0; + for (iop = 0; iop < ufunc->nin; iop++) { + if (PyArray_ISOBJECT(operands[iop])) { + has_object = 1; + } + if (PyArray_ISFLEXIBLE(operands[iop])) { + has_flexible = 1; + } + } + if (NPY_UNLIKELY(has_flexible && !has_object)) { + /* + * DEPRECATED NumPy 1.20, 2020-12. + * This check is required to avoid the FutureWarning that + * ResultType will give for number->string promotions. + * (We never supported flexible dtypes here.) + */ + for (iop = 0; iop < ufunc->nin; iop++) { + out_dtypes[iop] = PyArray_DESCR(operands[iop]); + Py_INCREF(out_dtypes[iop]); + } + return raise_no_loop_found_error(ufunc, out_dtypes); + } out_dtypes[0] = PyArray_ResultType(ufunc->nin, operands, 0, NULL); } if (out_dtypes[0] == NULL) { diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py index 08b32dfccddc..95159e1ad1c1 100644 --- a/numpy/core/tests/test_array_coercion.py +++ b/numpy/core/tests/test_array_coercion.py @@ -234,6 +234,7 @@ def test_scalar(self, scalar): # Additionally to string this test also runs into a corner case # with datetime promotion (the difference is the promotion order). 
+ @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning") def test_scalar_promotion(self): for sc1, sc2 in product(scalar_instances(), scalar_instances()): sc1, sc2 = sc1.values[0], sc2.values[0] diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 5498e1cf996f..53441d9fef62 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -1100,3 +1100,41 @@ def check_parametrize(base, power, expected): count += 1 assert_(count == 3) self.assert_deprecated(_test_parametrize) + + +class TestStringPromotion(_DeprecationTestCase): + # Deprecated 2020-12-19, NumPy 1.21 + warning_cls = FutureWarning + message = "Promotion of numbers and bools to strings is deprecated." + + @pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG") + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_deprecated(self, dtype, string_dt): + self.assert_deprecated(lambda: np.promote_types(dtype, string_dt)) + + # concatenate has to be able to promote to find the result dtype: + arr1 = np.ones(3, dtype=dtype) + arr2 = np.ones(3, dtype=string_dt) + self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=0)) + self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=None)) + + # coercing to an array is similar, but will fall-back to `object` + # (when raising the FutureWarning, this already happens) + self.assert_deprecated(lambda: np.array([arr1[0], arr2[0]]), + exceptions=()) + + @pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG") + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_not_deprecated(self, dtype, string_dt): + # The ufunc type resolvers run into this, but giving a futurewarning + # here is unnecessary (it ends up as an error anyway), so test that + # no warning is given: + arr1 = np.ones(3, dtype=dtype) + arr2 = np.ones(3, dtype=string_dt) + + # Adding two arrays uses result_type normally, which would fail: + with pytest.raises(TypeError): + self.assert_not_deprecated(lambda: arr1 + arr2) + # np.equal uses a different type resolver: + with pytest.raises(TypeError): + self.assert_not_deprecated(lambda: np.equal(arr1, arr2)) diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py index 1b6fd21e14bb..449a01d211f1 100644 --- a/numpy/core/tests/test_half.py +++ b/numpy/core/tests/test_half.py @@ -71,8 +71,10 @@ def test_half_conversions(self): def test_half_conversion_to_string(self, string_dt): # Currently uses S/U32 (which is sufficient for float32) expected_dt = np.dtype(f"{string_dt}32") - assert np.promote_types(np.float16, string_dt) == expected_dt - assert np.promote_types(string_dt, np.float16) == expected_dt + with pytest.warns(FutureWarning): + assert np.promote_types(np.float16, string_dt) == expected_dt + with pytest.warns(FutureWarning): + assert np.promote_types(string_dt, np.float16) == expected_dt arr = np.ones(3, dtype=np.float16).astype(string_dt) assert arr.dtype == expected_dt diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 280874d21695..f8b388b6f99a 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -847,10 +847,12 @@ def test_promote_types_endian(self): assert_equal(np.promote_types('i8', '>i8'), np.dtype('i8')) - assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) - assert_equal(np.promote_types('U16', '>i8'), np.dtype('U21')) - assert_equal(np.promote_types('i8', '>U16'), np.dtype('U21')) + assert_equal(np.promote_types('U16', '>i8'), 
np.dtype('U21')) + assert_equal(np.promote_types('S5', '>U8'), np.dtype('U8')) @@ -897,32 +899,38 @@ def test_promote_types_strings(self, swap, string_dtype): promote_types = np.promote_types S = string_dtype - # Promote numeric with unsized string: - assert_equal(promote_types('bool', S), np.dtype(S+'5')) - assert_equal(promote_types('b', S), np.dtype(S+'4')) - assert_equal(promote_types('u1', S), np.dtype(S+'3')) - assert_equal(promote_types('u2', S), np.dtype(S+'5')) - assert_equal(promote_types('u4', S), np.dtype(S+'10')) - assert_equal(promote_types('u8', S), np.dtype(S+'20')) - assert_equal(promote_types('i1', S), np.dtype(S+'4')) - assert_equal(promote_types('i2', S), np.dtype(S+'6')) - assert_equal(promote_types('i4', S), np.dtype(S+'11')) - assert_equal(promote_types('i8', S), np.dtype(S+'21')) - # Promote numeric with sized string: - assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5')) - assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('b', S+'1'), np.dtype(S+'4')) - assert_equal(promote_types('b', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3')) - assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5')) - assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10')) - assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20')) - assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30')) - # Promote with object: - assert_equal(promote_types('O', S+'30'), np.dtype('O')) + + with pytest.warns(FutureWarning, + match="Promotion of numbers and bools to strings") as record: + # Promote numeric with unsized string: + assert_equal(promote_types('bool', S), np.dtype(S+'5')) + assert_equal(promote_types('b', S), np.dtype(S+'4')) + assert_equal(promote_types('u1', S), np.dtype(S+'3')) + assert_equal(promote_types('u2', S), np.dtype(S+'5')) + assert_equal(promote_types('u4', S), np.dtype(S+'10')) + assert_equal(promote_types('u8', S), np.dtype(S+'20')) + assert_equal(promote_types('i1', S), np.dtype(S+'4')) + assert_equal(promote_types('i2', S), np.dtype(S+'6')) + assert_equal(promote_types('i4', S), np.dtype(S+'11')) + assert_equal(promote_types('i8', S), np.dtype(S+'21')) + # Promote numeric with sized string: + assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5')) + assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('b', S+'1'), np.dtype(S+'4')) + assert_equal(promote_types('b', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3')) + assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5')) + assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10')) + assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20')) + assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30')) + # Promote with object: + assert_equal(promote_types('O', S+'30'), np.dtype('O')) + + assert len(record) == 22 # each string promotion gave one warning + @pytest.mark.parametrize(["dtype1", "dtype2"], [[np.dtype("V6"), np.dtype("V10")], @@ -972,6 +980,7 @@ def test_promote_identical_types_metadata(self, dtype): assert res.isnative @pytest.mark.slow + @pytest.mark.filterwarnings('ignore:Promotion 
of numbers:FutureWarning') @pytest.mark.parametrize(["dtype1", "dtype2"], itertools.product( list(np.typecodes["All"]) + diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 831e48e8b529..5faa9923c568 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -782,7 +782,9 @@ def test_mem_string_arr(self): # Ticket #514 s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" t = [] - np.hstack((t, s)) + with pytest.warns(FutureWarning, + match="Promotion of numbers and bools to strings"): + np.hstack((t, s)) def test_arr_transpose(self): # Ticket #516 diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py index 9922c91731f2..a0c72f9d0fcb 100644 --- a/numpy/core/tests/test_shape_base.py +++ b/numpy/core/tests/test_shape_base.py @@ -256,7 +256,7 @@ def test_concatenate_axis_None(self): r = np.concatenate((a, b), axis=None) assert_equal(r.size, a.size + len(b)) assert_equal(r.dtype, a.dtype) - r = np.concatenate((a, b, c), axis=None) + r = np.concatenate((a, b, c), axis=None, dtype="U") d = array(['0.0', '1.0', '2.0', '3.0', '0', '1', '2', 'x']) assert_array_equal(r, d) @@ -377,7 +377,8 @@ def test_dtype_with_promotion(self, arrs, string_dt, axis): # Note that U0 and S0 should be deprecated eventually and changed to # actually give the empty string result (together with `np.array`) res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe") - assert res.dtype == np.promote_types("d", string_dt) + # The actual dtype should be identical to a cast (of a double array): + assert res.dtype == np.array(1.).astype(string_dt).dtype @pytest.mark.parametrize("axis", [None, 0]) def test_string_dtype_does_not_inspect(self, axis): diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 55df2a6752c2..94fac7ef0079 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -1,3 +1,5 @@ +import pytest + import os import numpy as np @@ -62,7 +64,8 @@ def test_mem_polymul(self): def test_mem_string_concat(self): # Ticket #469 x = np.array([]) - np.append(x, 'asdasd\tasdasd') + with pytest.warns(FutureWarning): + np.append(x, 'asdasd\tasdasd') def test_poly_div(self): # Ticket #553 From 59ca3b5e97e2186f439f3f2fc82259ba56a3b78f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 25 Jan 2021 20:44:01 +0100 Subject: [PATCH 0415/1270] TST: Add module-based tests to the `pass` tests --- numpy/typing/tests/data/pass/modules.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 numpy/typing/tests/data/pass/modules.py diff --git a/numpy/typing/tests/data/pass/modules.py b/numpy/typing/tests/data/pass/modules.py new file mode 100644 index 000000000000..3ac21531185d --- /dev/null +++ b/numpy/typing/tests/data/pass/modules.py @@ -0,0 +1,23 @@ +import numpy as np + +np.char +np.ctypeslib +np.emath +np.fft +np.lib +np.linalg +np.ma +np.matrixlib +np.polynomial +np.random +np.rec +np.testing +np.version + +np.__all__ +np.__path__ +np.__version__ +np.__git_version__ +np.__NUMPY_SETUP__ +np.__deprecated_attrs__ +np.__expired_functions__ From ecec872d20612e975236e5c1e5a5800ef39c94ad Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Mon, 25 Jan 2021 22:22:49 -0800 Subject: [PATCH 0416/1270] DOC: lib/shape_base numpydoc formatting. Numpydoc parses parameter different depending on whether there is a space before the colon, for it to be properly interpreted as names and types; spaces need to be put on each side. 
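
For instance, with a hypothetical docstring fragment such as::

    Parameters
    ----------
    arr : ndarray
        With spaces around the colon, numpydoc splits this into the
        parameter name ``arr`` and the type ``ndarray``.
    axis: int
        Without a space before the colon there is no `` : `` separator
        to split on, so the whole string is taken as the parameter name
        and the type information is lost.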
--- numpy/lib/shape_base.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index f0596444ea97..9dfeee527c1a 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -69,13 +69,13 @@ def take_along_axis(arr, indices, axis): Parameters ---------- - arr: ndarray (Ni..., M, Nk...) + arr : ndarray (Ni..., M, Nk...) Source array - indices: ndarray (Ni..., J, Nk...) + indices : ndarray (Ni..., J, Nk...) Indices to take along each 1d slice of `arr`. This must match the dimension of arr, but dimensions Ni and Nj only need to broadcast against `arr`. - axis: int + axis : int The axis to take 1d slices along. If axis is None, the input array is treated as if it had first been flattened to 1d, for consistency with `sort` and `argsort`. @@ -190,16 +190,16 @@ def put_along_axis(arr, indices, values, axis): Parameters ---------- - arr: ndarray (Ni..., M, Nk...) + arr : ndarray (Ni..., M, Nk...) Destination array. - indices: ndarray (Ni..., J, Nk...) + indices : ndarray (Ni..., J, Nk...) Indices to change along each 1d slice of `arr`. This must match the dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast against `arr`. - values: array_like (Ni..., J, Nk...) + values : array_like (Ni..., J, Nk...) values to insert at those indices. Its shape and dimension are broadcast to match that of `indices`. - axis: int + axis : int The axis to take 1d slices along. If axis is None, the destination array is treated as if a flattened 1d view had been created of it. From 6af6feb3c2df511e36b0cc4f575de970630c759a Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 26 Jan 2021 09:25:56 +0100 Subject: [PATCH 0417/1270] NEP: accept NEP 23 (backwards compatibility policy) Closes gh-16193 [ci skip] --- doc/neps/nep-0023-backwards-compatibility.rst | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index af5bdab29444..27b79d436317 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -5,10 +5,11 @@ NEP 23 — Backwards compatibility and deprecation policy ======================================================= :Author: Ralf Gommers -:Status: Draft +:Status: Accepted :Type: Process :Created: 2018-07-14 -:Resolution: (required for Accepted | Rejected | Withdrawn) +:Resolution: https://mail.python.org/pipermail/numpy-discussion/2021-January/081423.html + Abstract -------- @@ -325,6 +326,8 @@ Discussion ---------- - `Mailing list discussion on the first version of this NEP in 2018 `__ +- `Mailing list discussion on the Dec 2020 update of this NEP `__ +- `PR with review comments on the the Dec 2020 update of this NEP `__ References and Footnotes From 08f13c5492251b3699178a4f216c564b7df6e995 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 26 Jan 2021 09:34:30 +0100 Subject: [PATCH 0418/1270] NEP: accept NEP 46 (sponsorship guidelines) [ci skip] --- doc/neps/nep-0046-sponsorship-guidelines.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/neps/nep-0046-sponsorship-guidelines.rst b/doc/neps/nep-0046-sponsorship-guidelines.rst index cc273ce2c654..bce125212fa3 100644 --- a/doc/neps/nep-0046-sponsorship-guidelines.rst +++ b/doc/neps/nep-0046-sponsorship-guidelines.rst @@ -5,10 +5,10 @@ NEP 46 — NumPy Sponsorship Guidelines ===================================== :Author: Ralf Gommers -:Status: Draft +:Status: Accepted 
:Type: Process :Created: 2020-12-27 -:Resolution: (required for Accepted | Rejected | Withdrawn) +:Resolution: https://mail.python.org/pipermail/numpy-discussion/2021-January/081424.html Abstract @@ -233,7 +233,8 @@ as how that helps NumFOCUS-affiliated projects (including NumPy). Discussion ---------- -Mailing list thread(s) discussing this NEP: TODO +- `Mailing list thread discussing this NEP `__ +- `PR with review of the NEP draft `__ References and Footnotes From fe3a190dfa22ad845f5974d143a2817138261460 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 6 Jan 2021 20:25:24 +0100 Subject: [PATCH 0419/1270] MAINT: Ensure that the `_SupportsDType` protocol can only take dtypes; not arbitrary dtype-like objects xref https://github.com/numpy/numpy/pull/13578 --- numpy/__init__.pyi | 5 ++--- numpy/typing/_dtype_like.py | 11 +++++++---- numpy/typing/tests/data/fail/dtype.py | 12 +++++++++--- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 3d92a543b32c..14f7acb6a173 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -724,14 +724,13 @@ class dtype(Generic[_DTypeScalar_co]): align: bool = ..., copy: bool = ..., ) -> dtype[_DTypeScalar_co]: ... - # TODO: handle _SupportsDType better @overload def __new__( cls, - dtype: _SupportsDType, + dtype: _SupportsDType[dtype[_DTypeScalar_co]], align: bool = ..., copy: bool = ..., - ) -> dtype[Any]: ... + ) -> dtype[_DTypeScalar_co]: ... # Handle strings that can't be expressed as literals; i.e. s1, s2, ... @overload def __new__( diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py index 1953bd5fcfcc..45edb4d76dc4 100644 --- a/numpy/typing/_dtype_like.py +++ b/numpy/typing/_dtype_like.py @@ -1,5 +1,5 @@ import sys -from typing import Any, List, Sequence, Tuple, Union, TYPE_CHECKING +from typing import Any, List, Sequence, Tuple, Union, TypeVar, TYPE_CHECKING from numpy import dtype from ._shape import _ShapeLike @@ -30,9 +30,12 @@ class _DTypeDict(_DTypeDictBase, total=False): itemsize: int aligned: bool + _DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype) + # A protocol for anything with the dtype attribute - class _SupportsDType(Protocol): - dtype: _DTypeLikeNested + class _SupportsDType(Protocol[_DType_co]): + @property + def dtype(self) -> _DType_co: ... 
else: _DTypeDict = Any @@ -67,7 +70,7 @@ class _SupportsDType(Protocol): # array-scalar types and generic types type, # TODO: enumerate these when we add type hints for numpy scalars # anything with a dtype attribute - _SupportsDType, + "_SupportsDType[np.dtype[Any]]", # character codes, type strings or comma-separated fields, e.g., 'float64' str, _VoidDTypeLike, diff --git a/numpy/typing/tests/data/fail/dtype.py b/numpy/typing/tests/data/fail/dtype.py index 7d4783d8f651..7d419a1d1e5f 100644 --- a/numpy/typing/tests/data/fail/dtype.py +++ b/numpy/typing/tests/data/fail/dtype.py @@ -1,10 +1,16 @@ import numpy as np -class Test: - not_dtype = float +class Test1: + not_dtype = np.dtype(float) -np.dtype(Test()) # E: No overload variant of "dtype" matches + +class Test2: + dtype = float + + +np.dtype(Test1()) # E: No overload variant of "dtype" matches +np.dtype(Test2()) # E: incompatible type np.dtype( # E: No overload variant of "dtype" matches { From 856e09e6a6d113d71da56b3ccdcfab72f921d01f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 6 Jan 2021 20:15:41 +0100 Subject: [PATCH 0420/1270] ENH: Added aliases for commonly used dtype-like objects --- numpy/typing/__init__.py | 17 +++- numpy/typing/_dtype_like.py | 152 +++++++++++++++++++++++++++++++++++- 2 files changed, 165 insertions(+), 4 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 4ec1f4b2fcae..8147789fb7c7 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -297,7 +297,22 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _VoidLike_co, ) from ._shape import _Shape, _ShapeLike -from ._dtype_like import _SupportsDType, _VoidDTypeLike, DTypeLike as DTypeLike +from ._dtype_like import ( + DTypeLike as DTypeLike, + _SupportsDType, + _VoidDTypeLike, + _DTypeLikeBool, + _DTypeLikeUInt, + _DTypeLikeInt, + _DTypeLikeFloat, + _DTypeLikeComplex, + _DTypeLikeTD64, + _DTypeLikeDT64, + _DTypeLikeObject, + _DTypeLikeVoid, + _DTypeLikeStr, + _DTypeLikeBytes, +) from ._array_like import ( ArrayLike as ArrayLike, _ArrayLike, diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py index 45edb4d76dc4..9b79f9b68520 100644 --- a/numpy/typing/_dtype_like.py +++ b/numpy/typing/_dtype_like.py @@ -1,7 +1,7 @@ import sys -from typing import Any, List, Sequence, Tuple, Union, TypeVar, TYPE_CHECKING +from typing import Any, List, Sequence, Tuple, Union, Type, TypeVar, TYPE_CHECKING -from numpy import dtype +import numpy as np from ._shape import _ShapeLike if sys.version_info >= (3, 8): @@ -15,6 +15,48 @@ else: HAVE_PROTOCOL = True +from ._char_codes import ( + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, +) + _DTypeLikeNested = Any # TODO: wait for support for recursive types if TYPE_CHECKING or HAVE_PROTOCOL: @@ -64,7 +106,7 @@ def dtype(self) -> _DType_co: ... # Anything that can be coerced into numpy.dtype. 
# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html DTypeLike = Union[ - dtype, + np.dtype, # default data type (float64) None, # array-scalar types and generic types @@ -82,3 +124,107 @@ def dtype(self) -> _DType_co: ... # therefore not included in the Union defining `DTypeLike`. # # See https://github.com/numpy/numpy/issues/16891 for more details. + +# Aliases for commonly used dtype-like objects. +# Note that the precision of `np.number` subclasses is ignored herein. +_DTypeLikeBool = Union[ + Type[bool], + Type[np.bool_], + "np.dtype[np.bool_]", + "_SupportsDType[np.dtype[np.bool_]]", + _BoolCodes, +] +_DTypeLikeUInt = Union[ + Type[np.unsignedinteger], + "np.dtype[np.unsignedinteger]", + "_SupportsDType[np.dtype[np.unsignedinteger]]", + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, +] +_DTypeLikeInt = Union[ + Type[int], + Type[np.signedinteger], + "np.dtype[np.signedinteger]", + "_SupportsDType[np.dtype[np.signedinteger]]", + _UInt8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, +] +_DTypeLikeFloat = Union[ + Type[float], + Type[np.floating], + "np.dtype[np.floating]", + "_SupportsDType[np.dtype[np.floating]]", + _Float16Codes, + _Float32Codes, + _Float64Codes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, +] +_DTypeLikeComplex = Union[ + Type[complex], + Type[np.complexfloating], + "np.dtype[np.complexfloating]", + "_SupportsDType[np.dtype[np.complexfloating]]", + _Complex64Codes, + _Complex128Codes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, +] +_DTypeLikeDT64 = Union[ + Type[np.timedelta64], + "np.dtype[np.timedelta64]", + "_SupportsDType[np.dtype[np.timedelta64]]", + _TD64Codes, +] +_DTypeLikeTD64 = Union[ + Type[np.datetime64], + "np.dtype[np.datetime64]", + "_SupportsDType[np.dtype[np.datetime64]]", + _DT64Codes, +] +_DTypeLikeStr = Union[ + Type[str], + Type[np.str_], + "np.dtype[np.str_]", + "_SupportsDType[np.dtype[np.str_]]", + _StrCodes, +] +_DTypeLikeBytes = Union[ + Type[bytes], + Type[np.bytes_], + "np.dtype[np.bytes_]", + "_SupportsDType[np.dtype[np.bytes_]]", + _BytesCodes, +] +_DTypeLikeVoid = Union[ + Type[np.void], + "np.dtype[np.void]", + "_SupportsDType[np.dtype[np.void]]", + _VoidCodes, + _VoidDTypeLike, +] +_DTypeLikeObject = Union[ + type, + "np.dtype[np.object_]", + "_SupportsDType[np.dtype[np.object_]]", + _ObjectCodes, +] From 8ef114b9c98ca5a04b67fb072ff4a0f5a06de894 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 26 Jan 2021 18:14:55 +0000 Subject: [PATCH 0421/1270] Fix example in array subclassing doc (#18235) --- doc/source/user/basics.dispatch.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index c0e1cf9ba577..147f86c9837f 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -56,7 +56,7 @@ array([[2., 0., 0., 0., 0.], Notice that the return type is a standard ``numpy.ndarray``. ->>> type(arr) +>>> type(np.multiply(arr, 2)) numpy.ndarray How can we pass our custom array type through this function? 
Numpy allows a From 4a28dff89766b30dfa82f41f45ea03fdc70accd4 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 25 Jan 2021 12:01:42 -0600 Subject: [PATCH 0422/1270] DOC: Clarify the type alias deprecation message This tries to clarify the type alias deprecation message slightly to give more guidance on how to review the replacement (if desired). --- doc/source/release/1.20.0-notes.rst | 67 ++++++++++++++++-------- numpy/__init__.py | 79 +++++++++++++++++++---------- 2 files changed, 99 insertions(+), 47 deletions(-) diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index e26aa0d40579..d191e9ee1787 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -75,32 +75,59 @@ Using the aliases of builtin types like ``np.int`` is deprecated ---------------------------------------------------------------- For a long time, ``np.int`` has been an alias of the builtin ``int``. This is -repeatedly a cause of confusion for newcomers, and is also simply not useful. +repeatedly a cause of confusion for newcomers, and existed mainly for historic +reasons. These aliases have been deprecated. The table below shows the full list of deprecated aliases, along with their exact meaning. Replacing uses of items in the first column with the contents of the second column will work identically and silence the deprecation warning. -In many cases, it may have been intended to use the types from the third column. -Be aware that use of these types may result in subtle but desirable behavior -changes. - -================== ================================= ================================================================== -Deprecated name Identical to Possibly intended numpy type -================== ================================= ================================================================== -``numpy.bool`` ``bool`` `numpy.bool_` -``numpy.int`` ``int`` `numpy.int_` (default int dtype), `numpy.cint` (C ``int``) -``numpy.float`` ``float`` `numpy.float_`, `numpy.double` (equivalent) -``numpy.complex`` ``complex`` `numpy.complex_`, `numpy.cdouble` (equivalent) -``numpy.object`` ``object`` `numpy.object_` -``numpy.str`` ``str`` `numpy.str_` -``numpy.long`` ``int`` (``long`` on Python 2) `numpy.int_` (C ``long``), `numpy.longlong` (largest integer type) -``numpy.unicode`` ``str`` (``unicode`` on Python 2) `numpy.unicode_` -================== ================================= ================================================================== - -Note that for technical reasons these deprecation warnings will only be emitted -on Python 3.7 and above. +The third column lists alternative NumPy names which may occasionally be +preferential. See also :doc:`user/basics.types` for additional details. 
+ +================= ============ ================================================================== +Deprecated name Identical to NumPy scalar type names +================= ============ ================================================================== +``numpy.bool`` ``bool`` `numpy.bool_` +``numpy.int`` ``int`` `numpy.int_` (default), ``numpy.int64``, or ``numpy.int32`` +``numpy.float`` ``float`` `numpy.float64`, `numpy.float_`, `numpy.double` (equivalent) +``numpy.complex`` ``complex`` `numpy.complex128`, `numpy.complex_`, `numpy.cdouble` (equivalent) +``numpy.object`` ``object`` `numpy.object_` +``numpy.str`` ``str`` `numpy.str_` +``numpy.long`` ``int`` `numpy.int_` (C ``long``), `numpy.longlong` (largest integer type) +``numpy.unicode`` ``str`` `numpy.unicode_` +================= ============ ================================================================== + +To give a clear guideline for the vast majority of cases, for the types +``bool``, ``object``, ``str`` (and ``unicode``) using the plain version +is shorter and clear, and generally a good replacement. +For ``float`` and ``complex`` you can use ``float64`` and ``complex128`` +if you wish to be more explicit about the precision. + +For ``np.int`` a direct replacement with ``np.int_`` or ``int`` is also +good, but the precision depends on the computer and operating system. +If you want to be more explicit and review the current use, you have the +following alternatives: + +* ``np.int64`` or ``np.int32`` to specify the precision exactly. + This ensures that results cannot depend on the computer or operating system. +* ``np.int_`` or ``int`` (the default), but be aware that it depends on + the computer and operating system. +* The C types: ``np.cint`` (int), ``np.int_`` (long), ``np.longlong``, +* ``np.intp`` which is 32bit on 32bit machines 64bit on 64bit machines. + This can be the best type to use for indexing. + +When used with ``np.dtype(...)`` or ``dtype=...`` changing it to the +NumPy name as mentioned above will have no effect on the output. +If used as a scalar with:: + + np.float(123) + +changing it can subtly change the result. In this case, the Python version +``float(123)`` or ``int(12.)`` is normally preferable, although the NumPy +version may be useful for consistency with NumPy arrays. For example, since +NumPy behaves differently for things like division by zero. (`gh-14882 `__) diff --git a/numpy/__init__.py b/numpy/__init__.py index a242bb7dfaae..3fadc7a106e8 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -165,33 +165,58 @@ # Deprecations introduced in NumPy 1.20.0, 2020-06-06 import builtins as _builtins - __deprecated_attrs__.update({ - n: ( - getattr(_builtins, n), - "`np.{n}` is a deprecated alias for the builtin `{n}`. " - "Use `{n}` by itself, which is identical in behavior, to silence " - "this warning. " - "If you specifically wanted the numpy scalar type, use `np.{n}_` " - "here." - .format(n=n) - ) - for n in ["bool", "int", "float", "complex", "object", "str"] - }) - __deprecated_attrs__.update({ - n: ( - getattr(compat, n), - "`np.{n}` is a deprecated alias for `np.compat.{n}`. " - "Use `np.compat.{n}` by itself, which is identical in behavior, " - "to silence this warning. " - "In the likely event your code does not need to work on Python 2 " - "you can use the builtin ``{n2}`` for which ``np.compat.{n}`` is " - "itself an alias. " - "If you specifically wanted the numpy scalar type, use `np.{n2}_` " - "here." 
- .format(n=n, n2=n2) - ) - for n, n2 in [("long", "int"), ("unicode", "str")] - }) + + _msg = ( + "`np.{n}` is a deprecated alias for the builtin `{n}`. " + "To silence this warning, use `{n}` by itself. Doing this will not " + "modify any behavior and is safe. {extended_msg}\n" + "Deprecated in NumPy 1.20; for more details and guidance: " + "https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations") + + _specific_msg = ( + "If you specifically wanted the numpy scalar type, use `np.{}` here.") + + _int_extended_msg = ( + "When replacing `np.{}`, you may wish to use e.g. `np.int64` " + "or `np.int32` to specify the precision. If you wish to review " + "your current use check the release note link for " + "additional information.") + + _type_info = [ + ("object", ""), # The NumPy scalar only exists by name. + ("bool", _specific_msg.format("bool_")), + ("float", _specific_msg.format("float64")), + ("complex", _specific_msg.format("complex128")), + ("str", _specific_msg.format("str_")), + ("int", _int_extended_msg.format("int"))] + + for n, extended_msg in _type_info: + __deprecated_attrs__[n] = (getattr(_builtins, n), + _msg.format(n=n, extended_msg=extended_msg)) + + del n, extended_msg + + _msg = ( + "`np.{n}` is a deprecated alias for `np.compat.{n}`. " + "To silence this warning, use `np.compat.{n}` by itself. " + "In the likely event your code does not need to work on Python 2 " + "you can use the builtin `{n2}` for which `np.compat.{n}` is itself " + "an alias. Doing this will not modify any behaviour and is safe. " + "{extended_msg}\n" + "Deprecated in NumPy 1.20; for more details and guidance: " + "https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations") + + __deprecated_attrs__["long"] = ( + getattr(compat, "long"), + _msg.format(n="long", n2="int", + extended_msg=_int_extended_msg.format("long"))) + + __deprecated_attrs__["unicode"] = ( + getattr(compat, "long"), + _msg.format(n="unciode", n2="str", + extended_msg=_specific_msg.format("str_"))) + + del _msg, _specific_msg, _int_extended_msg, _type_info, _builtins from .core import round, abs, max, min # now that numpy modules are imported, can initialize limits From 4eb79d8cb0ffabd64f1aae727ecd1a7b749501d3 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 26 Jan 2021 12:24:36 -0600 Subject: [PATCH 0423/1270] Update doc/source/release/1.20.0-notes.rst Co-authored-by: Eric Wieser --- doc/source/release/1.20.0-notes.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index d191e9ee1787..aab5b34a9992 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -106,7 +106,8 @@ For ``float`` and ``complex`` you can use ``float64`` and ``complex128`` if you wish to be more explicit about the precision. For ``np.int`` a direct replacement with ``np.int_`` or ``int`` is also -good, but the precision depends on the computer and operating system. +good and will not change behavior, but the precision will continue to depend +on the computer and operating system. 
If you want to be more explicit and review the current use, you have the following alternatives: @@ -1000,4 +1001,3 @@ The former result can still be obtained with:: (`gh-16841 `__) - From 08c036c50be6f9bd45b5f07ccc7eca0bfdc77164 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 26 Jan 2021 13:06:45 -0600 Subject: [PATCH 0424/1270] Update numpy/__init__.py Co-authored-by: Eric Wieser --- numpy/__init__.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 3fadc7a106e8..01f970345fe3 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -190,11 +190,10 @@ ("str", _specific_msg.format("str_")), ("int", _int_extended_msg.format("int"))] - for n, extended_msg in _type_info: - __deprecated_attrs__[n] = (getattr(_builtins, n), - _msg.format(n=n, extended_msg=extended_msg)) - - del n, extended_msg + __deprecated_attrs__.update({ + n: (getattr(_builtins, n), _msg.format(n=n, extended_msg=extended_msg)) + for n, extended_msg in _type_info + }) _msg = ( "`np.{n}` is a deprecated alias for `np.compat.{n}`. " @@ -422,4 +421,3 @@ def _mac_os_check(): from ._version import get_versions __version__ = get_versions()['version'] del get_versions - From 7114776a08363a24ae46ea16d659b7b0cdd0c650 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 26 Jan 2021 13:07:42 -0600 Subject: [PATCH 0425/1270] Apply suggestions from code review Co-authored-by: Eric Wieser --- doc/source/release/1.20.0-notes.rst | 7 +++---- numpy/__init__.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index aab5b34a9992..453946390120 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -115,7 +115,7 @@ following alternatives: This ensures that results cannot depend on the computer or operating system. * ``np.int_`` or ``int`` (the default), but be aware that it depends on the computer and operating system. -* The C types: ``np.cint`` (int), ``np.int_`` (long), ``np.longlong``, +* The C types: ``np.cint`` (int), ``np.int_`` (long), ``np.longlong``. * ``np.intp`` which is 32bit on 32bit machines 64bit on 64bit machines. This can be the best type to use for indexing. @@ -127,8 +127,8 @@ If used as a scalar with:: changing it can subtly change the result. In this case, the Python version ``float(123)`` or ``int(12.)`` is normally preferable, although the NumPy -version may be useful for consistency with NumPy arrays. For example, since -NumPy behaves differently for things like division by zero. +version may be useful for consistency with NumPy arrays (for example, +NumPy behaves differently for things like division by zero). (`gh-14882 `__) @@ -1000,4 +1000,3 @@ The former result can still be obtained with:: (`gh-16841 `__) - diff --git a/numpy/__init__.py b/numpy/__init__.py index 01f970345fe3..27b3d33d5b9d 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -179,7 +179,7 @@ _int_extended_msg = ( "When replacing `np.{}`, you may wish to use e.g. `np.int64` " "or `np.int32` to specify the precision. If you wish to review " - "your current use check the release note link for " + "your current use, check the release note link for " "additional information.") _type_info = [ From f2024700546c773970d8599bb37cd87b1f584f5d Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Tue, 26 Jan 2021 11:54:52 -0800 Subject: [PATCH 0426/1270] Fix ref to user guide in release notes. 
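The replacement guidance above, condensed into a sketch (illustration only, not part of any patch in this series)::

    import numpy as np

    # np.int(3) is identical to int(3) and now emits a DeprecationWarning.
    # Typical replacements, depending on intent:
    x = int(3)                             # plain Python integer
    a = np.array([1, 2], dtype=np.int64)   # explicit, platform-independent precision
    b = np.array([1, 2], dtype=np.int_)    # default integer (C long), platform dependent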
--- doc/source/release/1.20.0-notes.rst | 2 +- doc/source/user/basics.types.rst | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index 453946390120..5d3b5f63dbed 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -84,7 +84,7 @@ the first column with the contents of the second column will work identically and silence the deprecation warning. The third column lists alternative NumPy names which may occasionally be -preferential. See also :doc:`user/basics.types` for additional details. +preferential. See also :ref:`basics.types` for additional details. ================= ============ ================================================================== Deprecated name Identical to NumPy scalar type names diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 781dd66e525f..64b6dcf50412 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -1,3 +1,5 @@ +.. _basics.types: + ********** Data types ********** From edab247184252508b51f6012bcf86c438d1f596e Mon Sep 17 00:00:00 2001 From: James Gerity Date: Tue, 26 Jan 2021 17:06:23 -0500 Subject: [PATCH 0427/1270] Mark example as random --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index cba13ed59d8e..93b1d6160675 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -669,7 +669,7 @@ cdef class Generator: replacement: >>> rng.choice([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 2, replace=False) - array([[3, 4, 5], + array([[3, 4, 5], # random [0, 1, 2]]) Generate a non-uniform random sample from np.arange(5) of size From f7d26c28190b430407e35c2169af88935353403c Mon Sep 17 00:00:00 2001 From: James Gerity Date: Tue, 26 Jan 2021 17:08:06 -0500 Subject: [PATCH 0428/1270] Clarify description of example --- numpy/random/_generator.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 93b1d6160675..0a41f13b6d42 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -665,8 +665,8 @@ cdef class Generator: array([3,1,0]) # random >>> #This is equivalent to rng.permutation(np.arange(5))[:3] - Generate a uniform random sample from a 2-D array, without - replacement: + Generate a uniform random sample from a 2-D array along the first + axis (the default), without replacement: >>> rng.choice([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 2, replace=False) array([[3, 4, 5], # random From 78dd98729ab455c89c8ec50da90da4d35a6c4a48 Mon Sep 17 00:00:00 2001 From: Ryan Soklaski Date: Tue, 26 Jan 2021 21:04:10 -0500 Subject: [PATCH 0429/1270] DOC: __array__ accepts a dtype argument --- doc/source/user/basics.dispatch.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index 147f86c9837f..089a7df17063 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -22,8 +22,8 @@ example that has rather narrow utility but illustrates the concepts involved. ... self._i = value ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self): -... return self._i * np.eye(self._N) +... def __array__(self, dtype=None): +... 
return self._i * np.eye(self._N, dtype=dtype) Our custom array can be instantiated like: @@ -84,8 +84,8 @@ For this example we will only handle the method ``__call__`` ... self._i = value ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self): -... return self._i * np.eye(self._N) +... def __array__(self, dtype=None): +... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': ... N = None @@ -133,8 +133,8 @@ conveniently by inheriting from the mixin ... self._i = value ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self): -... return self._i * np.eye(self._N) +... def __array__(self, dtype=None): +... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': ... N = None @@ -171,8 +171,8 @@ functions to our custom variants. ... self._i = value ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self): -... return self._i * np.eye(self._N) +... def __array__(self, dtype=None): +... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': ... N = None From 9b2a8dee2185bcecb27ae06172082a8046b66527 Mon Sep 17 00:00:00 2001 From: Qiyu8 Date: Wed, 27 Jan 2021 11:26:07 +0800 Subject: [PATCH 0430/1270] use more accurate words. --- numpy/core/src/common/simd/avx2/arithmetic.h | 2 +- numpy/core/src/common/simd/avx512/arithmetic.h | 2 +- numpy/core/src/common/simd/neon/arithmetic.h | 2 +- numpy/core/src/common/simd/sse/arithmetic.h | 2 +- numpy/core/src/common/simd/vsx/arithmetic.h | 2 +- numpy/core/tests/test_simd.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index c4c5f2093844..4b8258759266 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -157,7 +157,7 @@ NPY_FINLINE double npyv_sum_f64(npyv_f64 a) return _mm_cvtsd_f64(sum); } -// extend sum across vector +// expand the source vector and performs sum reduce NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { __m256i four = _mm256_sad_epu8(a, _mm256_setzero_si256()); diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index a6e448baeed0..450da7ea5484 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -192,7 +192,7 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) } #endif -// extend sum across vector +// expand the source vector and performs sum reduce NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { #ifdef NPY_HAVE_AVX512BW diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h index af34299a0ed9..69a49f571e1f 100644 --- a/numpy/core/src/common/simd/neon/arithmetic.h +++ b/numpy/core/src/common/simd/neon/arithmetic.h @@ -159,7 +159,7 @@ } #endif -// extend sum across vector +// expand the source vector and performs sum reduce #if NPY_SIMD_F64 #define npyv_sumup_u8 vaddlvq_u8 #define npyv_sumup_u16 vaddlvq_u16 diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index fcb0a1716cc6..c21b7da2d7e4 100644 --- 
a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -188,7 +188,7 @@ NPY_FINLINE double npyv_sum_f64(npyv_f64 a) #endif } -// extend sum across vector +// expand the source vector and performs sum reduce NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { __m128i two = _mm_sad_epu8(a, _mm_setzero_si128()); diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h index 339677857b7a..7c4e32f27ad1 100644 --- a/numpy/core/src/common/simd/vsx/arithmetic.h +++ b/numpy/core/src/common/simd/vsx/arithmetic.h @@ -142,7 +142,7 @@ NPY_FINLINE double npyv_sum_f64(npyv_f64 a) return vec_extract(a, 0) + vec_extract(a, 1); } -// extend sum across vector +// expand the source vector and performs sum reduce NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { const npyv_u32 zero = npyv_zero_u32(); diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 3efd328dac20..1d1a111be83f 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -750,7 +750,7 @@ def test_arithmetic_reduce_sum(self): def test_arithmetic_reduce_sumup(self): """ - Test overflow protect reduce sumup intrinics: + Test extend reduce sum intrinics: npyv_sumup_##sfx """ if self.sfx not in ("u8", "u16"): From 7060d5e28f735199df6b465755ddd1d3bc7ed07e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 27 Jan 2021 17:01:37 +0100 Subject: [PATCH 0431/1270] MAINT: Removed annotations for `__NUMPY_SETUP__`, `__deprecated_attrs__` and `__expired_functions__` Removing them as their "public" nature is somewhat questionable. --- numpy/__init__.pyi | 3 --- numpy/typing/tests/data/fail/modules.py | 4 ++++ numpy/typing/tests/data/pass/modules.py | 3 --- numpy/typing/tests/data/reveal/modules.py | 3 --- 4 files changed, 4 insertions(+), 9 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 27881a7259af..911b496df69f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -315,9 +315,6 @@ __all__: List[str] __path__: List[str] __version__: str __git_version__: str -__NUMPY_SETUP__: bool -__deprecated_attrs__: Dict[str, Tuple[type, str]] -__expired_functions__: Dict[str, str] DataSource: Any MachAr: Any diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.py index 5e2d820abc85..b80fd9edeae2 100644 --- a/numpy/typing/tests/data/fail/modules.py +++ b/numpy/typing/tests/data/fail/modules.py @@ -8,3 +8,7 @@ np.sys # E: Module has no attribute np.os # E: Module has no attribute np.math # E: Module has no attribute + +np.__NUMPY_SETUP__ # E: Module has no attribute +np.__deprecated_attrs__ # E: Module has no attribute +np.__expired_functions__ # E: Module has no attribute diff --git a/numpy/typing/tests/data/pass/modules.py b/numpy/typing/tests/data/pass/modules.py index 3ac21531185d..013841ae74d9 100644 --- a/numpy/typing/tests/data/pass/modules.py +++ b/numpy/typing/tests/data/pass/modules.py @@ -18,6 +18,3 @@ np.__path__ np.__version__ np.__git_version__ -np.__NUMPY_SETUP__ -np.__deprecated_attrs__ -np.__expired_functions__ diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py index 3ff44b6a7204..8e4eab50f3b2 100644 --- a/numpy/typing/tests/data/reveal/modules.py +++ b/numpy/typing/tests/data/reveal/modules.py @@ -23,6 +23,3 @@ reveal_type(np.__path__) # E: list[builtins.str] reveal_type(np.__version__) # E: str reveal_type(np.__git_version__) # E: str -reveal_type(np.__NUMPY_SETUP__) # E: bool 
-reveal_type(np.__deprecated_attrs__) # E: dict[builtins.str, Tuple[builtins.type, builtins.str]] -reveal_type(np.__expired_functions__) # E: dict[builtins.str, builtins.str] From 491d26b2bcbc3b1c57c3310d8a81f694c3450da3 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 27 Jan 2021 22:48:58 +0100 Subject: [PATCH 0432/1270] BLD: fix issue with `bdist_egg`, which made `make dist` in doc/ fail This issue came in because of commit 9b3f65096e a month ago. `bdist_egg` has always been missing from the command list, but that wasn't a problem because missing commands were simply ignored by the validation. After that commit we started raising a RuntimeError instead. --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 6033916c2595..4c7d825df781 100755 --- a/setup.py +++ b/setup.py @@ -257,7 +257,8 @@ def parse_setuppy_commands(): # below and not standalone. Hence they're not added to good_commands. good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py', 'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm', - 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src',) + 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src', + 'bdist_egg') for command in good_commands: if command in args: From f305d5962614e98eea5d10a2140eee7e16ab9aca Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Wed, 27 Jan 2021 17:43:33 -0800 Subject: [PATCH 0433/1270] DOC: Misc numpydoc format fixes Via prototype docstring autoreformatter; and cherry-picked to mostly include spacing issues around colons in parameters and see also. When no space is present numpydoc tend to miss-parse those sections A couple of typos are fixed as well. --- numpy/core/arrayprint.py | 2 +- numpy/core/einsumfunc.py | 8 +++----- numpy/core/fromnumeric.py | 9 +++------ numpy/core/multiarray.py | 6 +++--- numpy/core/numeric.py | 5 ++--- numpy/core/records.py | 12 ++++++------ numpy/core/setup_common.py | 4 ++-- numpy/core/shape_base.py | 2 +- 8 files changed, 21 insertions(+), 27 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 94ec8ed3446a..3fade4824bb3 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -538,7 +538,7 @@ def array2string(a, max_line_width=None, precision=None, separator : str, optional Inserted between elements. prefix : str, optional - suffix: str, optional + suffix : str, optional The length of the prefix and suffix strings are used to respectively align and wrap the output. An array is typically printed as:: diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index e0942becaaa5..18157641aaf4 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -327,7 +327,7 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): Set that represents the rhs side of the overall einsum subscript idx_dict : dictionary Dictionary of index sizes - memory_limit_limit : int + memory_limit : int The maximum number of elements in a temporary array Returns @@ -1061,14 +1061,12 @@ def einsum(*operands, out=None, optimize=False, **kwargs): See Also -------- einsum_path, dot, inner, outer, tensordot, linalg.multi_dot - - einops: + einops : similar verbose interface is provided by `einops `_ package to cover additional operations: transpose, reshape/flatten, repeat/tile, squeeze/unsqueeze and reductions. - - opt_einsum: + opt_einsum : `opt_einsum `_ optimizes contraction order for einsum-like expressions in backend-agnostic manner. 
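For reference, the numpydoc convention these fixes enforce, shown on a hypothetical docstring (not taken from any NumPy source file)::

    def example(a, axis=None):
        """
        Parameters
        ----------
        a : array_like
            Note the space on both sides of the colon after the parameter name;
            without it numpydoc tends to mis-parse the entry.
        axis : int, optional
            Axis along which to operate.

        See Also
        --------
        numpy.sum : "See Also" entries use the same ``name : description`` form,
            and the section title is capitalized as "See Also".
        """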
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index c8de48ff88be..658b1aca584b 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1381,7 +1381,7 @@ def resize(a, new_shape): -------- np.reshape : Reshape an array without changing the total size. np.pad : Enlarge and pad an array. - np.repeat: Repeat elements of an array. + np.repeat : Repeat elements of an array. ndarray.resize : resize an array in-place. Notes @@ -2007,7 +2007,7 @@ def compress(condition, a, axis=None, out=None): -------- take, choose, diag, diagonal, select ndarray.compress : Equivalent method in ndarray - extract: Equivalent method when working on 1-D arrays + extract : Equivalent method when working on 1-D arrays :ref:`ufuncs-output-type` Examples @@ -2475,14 +2475,11 @@ def cumsum(a, axis=None, dtype=None, out=None): result has the same size as `a`, and the same shape as `a` if `axis` is not None or `a` is a 1-d array. - See Also -------- sum : Sum array elements. - trapz : Integration of array values using the composite trapezoidal rule. - - diff : Calculate the n-th discrete difference along given axis. + diff : Calculate the n-th discrete difference along given axis. Notes ----- diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index 07179a627eec..b7277ac240d2 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -1441,7 +1441,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): See Also -------- - busdaycalendar: An object that specifies a custom set of valid days. + busdaycalendar : An object that specifies a custom set of valid days. busday_offset : Applies an offset counted in valid days. busday_count : Counts how many valid days are in a half-open date range. @@ -1516,7 +1516,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, See Also -------- - busdaycalendar: An object that specifies a custom set of valid days. + busdaycalendar : An object that specifies a custom set of valid days. is_busday : Returns a boolean array indicating valid days. busday_count : Counts how many valid days are in a half-open date range. @@ -1598,7 +1598,7 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, See Also -------- - busdaycalendar: An object that specifies a custom set of valid days. + busdaycalendar : An object that specifies a custom set of valid days. is_busday : Returns a boolean array indicating valid days. busday_offset : Applies an offset counted in valid days. diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index e776bd43bbd4..086439656816 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -1427,12 +1427,11 @@ def moveaxis(a, source, destination): See Also -------- - transpose: Permute the dimensions of an array. - swapaxes: Interchange two axes of an array. + transpose : Permute the dimensions of an array. + swapaxes : Interchange two axes of an array. Examples -------- - >>> x = np.zeros((3, 4, 5)) >>> np.moveaxis(x, 0, -1).shape (4, 5, 3) diff --git a/numpy/core/records.py b/numpy/core/records.py index 00d456658bc4..708c11f69653 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -962,16 +962,16 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, Parameters ---------- - obj: any + obj : any Input object. See Notes for details on how various input types are treated. - dtype: data-type, optional + dtype : data-type, optional Valid dtype for array. 
- shape: int or tuple of ints, optional + shape : int or tuple of ints, optional Shape of each array. - offset: int, optional + offset : int, optional Position in the file or buffer to start reading from. - strides: tuple of ints, optional + strides : tuple of ints, optional Buffer (`buf`) is interpreted according to these strides (strides define how many bytes each array element, row, column, etc. occupy in memory). @@ -979,7 +979,7 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, If `dtype` is ``None``, these arguments are passed to `numpy.format_parser` to construct a dtype. See that function for detailed documentation. - copy: bool, optional + copy : bool, optional Whether to copy the input object (True), or to use a reference instead. This option only applies when the input is an ndarray or recarray. Defaults to True. diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index 2d85e071841d..378d93c066c4 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -317,8 +317,8 @@ def pyod(filename): out : seq list of lines of od output - Note - ---- + Notes + ----- We only implement enough to get the necessary information for long double representation, this is not intended as a compatible replacement for od. """ diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index e90358ba5f53..89e98ab3072d 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -607,7 +607,7 @@ def _block_info_recursion(arrays, max_depth, result_ndim, depth=0): The arrays to check max_depth : list of int The number of nested lists - result_ndim: int + result_ndim : int The number of dimensions in thefinal array. Returns From 86228d61f955a648b1e5110d293caadd72f1d6ee Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Wed, 27 Jan 2021 17:57:03 -0800 Subject: [PATCH 0434/1270] DOC: See also -> See Also (casing) Numpydoc seem to suggest Also should be uppercase, and as far as I can tell this is the main spelling found in this code base: $ rg '^ +See also$' | wc -l 109 $ rg '^ +See Also$' | wc -l 814 This commit update one offending file that contain ~90 lowercase `See also`. --- numpy/core/defchararray.py | 182 ++++++++++++++++++------------------- 1 file changed, 91 insertions(+), 91 deletions(-) diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index 9d7b54a1a144..ab1166ad263f 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -273,7 +273,7 @@ def str_len(a): out : ndarray Output array of integers - See also + See Also -------- builtins.len """ @@ -368,7 +368,7 @@ def mod(a, values): out : ndarray Output array of str or unicode, depending on input types - See also + See Also -------- str.__mod__ @@ -398,7 +398,7 @@ def capitalize(a): Output array of str or unicode, depending on input types - See also + See Also -------- str.capitalize @@ -443,7 +443,7 @@ def center(a, width, fillchar=' '): Output array of str or unicode, depending on input types - See also + See Also -------- str.center @@ -485,7 +485,7 @@ def count(a, sub, start=0, end=None): out : ndarray Output array of ints. - See also + See Also -------- str.count @@ -534,7 +534,7 @@ def decode(a, encoding=None, errors=None): ------- out : ndarray - See also + See Also -------- str.decode @@ -580,7 +580,7 @@ def encode(a, encoding=None, errors=None): ------- out : ndarray - See also + See Also -------- str.encode @@ -620,7 +620,7 @@ def endswith(a, suffix, start=0, end=None): out : ndarray Outputs an array of bools. 
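A quick usage illustration for the ``endswith`` docstring touched here (not part of the patch)::

    >>> s = np.array(['foo', 'bar'])
    >>> np.char.endswith(s, 'ar')
    array([False,  True])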
- See also + See Also -------- str.endswith @@ -672,7 +672,7 @@ def expandtabs(a, tabsize=8): out : ndarray Output array of str or unicode, depending on input type - See also + See Also -------- str.expandtabs @@ -708,7 +708,7 @@ def find(a, sub, start=0, end=None): out : ndarray or int Output array of ints. Returns -1 if `sub` is not found. - See also + See Also -------- str.find @@ -737,7 +737,7 @@ def index(a, sub, start=0, end=None): out : ndarray Output array of ints. Returns -1 if `sub` is not found. - See also + See Also -------- find, str.find @@ -765,7 +765,7 @@ def isalnum(a): out : ndarray Output array of str or unicode, depending on input type - See also + See Also -------- str.isalnum """ @@ -791,7 +791,7 @@ def isalpha(a): out : ndarray Output array of bools - See also + See Also -------- str.isalpha """ @@ -817,7 +817,7 @@ def isdigit(a): out : ndarray Output array of bools - See also + See Also -------- str.isdigit """ @@ -844,7 +844,7 @@ def islower(a): out : ndarray Output array of bools - See also + See Also -------- str.islower """ @@ -871,7 +871,7 @@ def isspace(a): out : ndarray Output array of bools - See also + See Also -------- str.isspace """ @@ -897,7 +897,7 @@ def istitle(a): out : ndarray Output array of bools - See also + See Also -------- str.istitle """ @@ -924,7 +924,7 @@ def isupper(a): out : ndarray Output array of bools - See also + See Also -------- str.isupper """ @@ -953,7 +953,7 @@ def join(sep, seq): out : ndarray Output array of str or unicode, depending on input types - See also + See Also -------- str.join """ @@ -988,7 +988,7 @@ def ljust(a, width, fillchar=' '): out : ndarray Output array of str or unicode, depending on input type - See also + See Also -------- str.ljust @@ -1021,7 +1021,7 @@ def lower(a): out : ndarray, {str, unicode} Output array of str or unicode, depending on input type - See also + See Also -------- str.lower @@ -1066,7 +1066,7 @@ def lstrip(a, chars=None): out : ndarray, {str, unicode} Output array of str or unicode, depending on input type - See also + See Also -------- str.lstrip @@ -1127,7 +1127,7 @@ def partition(a, sep): The output array will have an extra dimension with 3 elements per input element. - See also + See Also -------- str.partition @@ -1163,7 +1163,7 @@ def replace(a, old, new, count=None): out : ndarray Output array of str or unicode, depending on input type - See also + See Also -------- str.replace @@ -1197,7 +1197,7 @@ def rfind(a, sub, start=0, end=None): out : ndarray Output array of ints. Return -1 on failure. - See also + See Also -------- str.rfind @@ -1227,7 +1227,7 @@ def rindex(a, sub, start=0, end=None): out : ndarray Output array of ints. - See also + See Also -------- rfind, str.rindex @@ -1258,7 +1258,7 @@ def rjust(a, width, fillchar=' '): out : ndarray Output array of str or unicode, depending on input type - See also + See Also -------- str.rjust @@ -1299,7 +1299,7 @@ def rpartition(a, sep): type. The output array will have an extra dimension with 3 elements per input element. 
- See also + See Also -------- str.rpartition @@ -1339,7 +1339,7 @@ def rsplit(a, sep=None, maxsplit=None): out : ndarray Array of list objects - See also + See Also -------- str.rsplit, split @@ -1378,7 +1378,7 @@ def rstrip(a, chars=None): out : ndarray Output array of str or unicode, depending on input type - See also + See Also -------- str.rstrip @@ -1423,7 +1423,7 @@ def split(a, sep=None, maxsplit=None): out : ndarray Array of list objects - See also + See Also -------- str.split, rsplit @@ -1459,7 +1459,7 @@ def splitlines(a, keepends=None): out : ndarray Array of list objects - See also + See Also -------- str.splitlines @@ -1495,7 +1495,7 @@ def startswith(a, prefix, start=0, end=None): out : ndarray Array of booleans - See also + See Also -------- str.startswith @@ -1528,7 +1528,7 @@ def strip(a, chars=None): out : ndarray Output array of str or unicode, depending on input type - See also + See Also -------- str.strip @@ -1569,7 +1569,7 @@ def swapcase(a): out : ndarray, {str, unicode} Output array of str or unicode, depending on input type - See also + See Also -------- str.swapcase @@ -1609,7 +1609,7 @@ def title(a): out : ndarray Output array of str or unicode, depending on input type - See also + See Also -------- str.title @@ -1654,7 +1654,7 @@ def translate(a, table, deletechars=None): out : ndarray Output array of str or unicode, depending on input type - See also + See Also -------- str.translate @@ -1687,7 +1687,7 @@ def upper(a): out : ndarray, {str, unicode} Output array of str or unicode, depending on input type - See also + See Also -------- str.upper @@ -1726,7 +1726,7 @@ def zfill(a, width): out : ndarray, {str, unicode} Output array of str or unicode, depending on input type - See also + See Also -------- str.zfill @@ -1760,7 +1760,7 @@ def isnumeric(a): out : ndarray, bool Array of booleans of same shape as `a`. - See also + See Also -------- unicode.isnumeric @@ -1792,7 +1792,7 @@ def isdecimal(a): out : ndarray, bool Array of booleans identical in shape to `a`. - See also + See Also -------- unicode.isdecimal @@ -2004,7 +2004,7 @@ def __eq__(self, other): """ Return (self == other) element-wise. - See also + See Also -------- equal """ @@ -2014,7 +2014,7 @@ def __ne__(self, other): """ Return (self != other) element-wise. - See also + See Also -------- not_equal """ @@ -2024,7 +2024,7 @@ def __ge__(self, other): """ Return (self >= other) element-wise. - See also + See Also -------- greater_equal """ @@ -2034,7 +2034,7 @@ def __le__(self, other): """ Return (self <= other) element-wise. - See also + See Also -------- less_equal """ @@ -2044,7 +2044,7 @@ def __gt__(self, other): """ Return (self > other) element-wise. - See also + See Also -------- greater """ @@ -2054,7 +2054,7 @@ def __lt__(self, other): """ Return (self < other) element-wise. - See also + See Also -------- less """ @@ -2065,7 +2065,7 @@ def __add__(self, other): Return (self + other), that is string concatenation, element-wise for a pair of array_likes of str or unicode. - See also + See Also -------- add """ @@ -2076,7 +2076,7 @@ def __radd__(self, other): Return (other + self), that is string concatenation, element-wise for a pair of array_likes of `string_` or `unicode_`. - See also + See Also -------- add """ @@ -2087,7 +2087,7 @@ def __mul__(self, i): Return (self * i), that is string multiple concatenation, element-wise. - See also + See Also -------- multiply """ @@ -2098,7 +2098,7 @@ def __rmul__(self, i): Return (self * i), that is string multiple concatenation, element-wise. 
- See also + See Also -------- multiply """ @@ -2110,7 +2110,7 @@ def __mod__(self, i): (interpolation), element-wise for a pair of array_likes of `string_` or `unicode_`. - See also + See Also -------- mod """ @@ -2145,7 +2145,7 @@ def capitalize(self): Return a copy of `self` with only the first character of each element capitalized. - See also + See Also -------- char.capitalize @@ -2157,7 +2157,7 @@ def center(self, width, fillchar=' '): Return a copy of `self` with its elements centered in a string of length `width`. - See also + See Also -------- center """ @@ -2168,7 +2168,7 @@ def count(self, sub, start=0, end=None): Returns an array with the number of non-overlapping occurrences of substring `sub` in the range [`start`, `end`]. - See also + See Also -------- char.count @@ -2179,7 +2179,7 @@ def decode(self, encoding=None, errors=None): """ Calls `str.decode` element-wise. - See also + See Also -------- char.decode @@ -2190,7 +2190,7 @@ def encode(self, encoding=None, errors=None): """ Calls `str.encode` element-wise. - See also + See Also -------- char.encode @@ -2202,7 +2202,7 @@ def endswith(self, suffix, start=0, end=None): Returns a boolean array which is `True` where the string element in `self` ends with `suffix`, otherwise `False`. - See also + See Also -------- char.endswith @@ -2214,7 +2214,7 @@ def expandtabs(self, tabsize=8): Return a copy of each string element where all tab characters are replaced by one or more spaces. - See also + See Also -------- char.expandtabs @@ -2226,7 +2226,7 @@ def find(self, sub, start=0, end=None): For each element, return the lowest index in the string where substring `sub` is found. - See also + See Also -------- char.find @@ -2237,7 +2237,7 @@ def index(self, sub, start=0, end=None): """ Like `find`, but raises `ValueError` when the substring is not found. - See also + See Also -------- char.index @@ -2250,7 +2250,7 @@ def isalnum(self): are alphanumeric and there is at least one character, false otherwise. - See also + See Also -------- char.isalnum @@ -2263,7 +2263,7 @@ def isalpha(self): are alphabetic and there is at least one character, false otherwise. - See also + See Also -------- char.isalpha @@ -2275,7 +2275,7 @@ def isdigit(self): Returns true for each element if all characters in the string are digits and there is at least one character, false otherwise. - See also + See Also -------- char.isdigit @@ -2288,7 +2288,7 @@ def islower(self): string are lowercase and there is at least one cased character, false otherwise. - See also + See Also -------- char.islower @@ -2301,7 +2301,7 @@ def isspace(self): characters in the string and there is at least one character, false otherwise. - See also + See Also -------- char.isspace @@ -2313,7 +2313,7 @@ def istitle(self): Returns true for each element if the element is a titlecased string and there is at least one character, false otherwise. - See also + See Also -------- char.istitle @@ -2326,7 +2326,7 @@ def isupper(self): string are uppercase and there is at least one character, false otherwise. - See also + See Also -------- char.isupper @@ -2338,7 +2338,7 @@ def join(self, seq): Return a string which is the concatenation of the strings in the sequence `seq`. - See also + See Also -------- char.join @@ -2350,7 +2350,7 @@ def ljust(self, width, fillchar=' '): Return an array with the elements of `self` left-justified in a string of length `width`. 
- See also + See Also -------- char.ljust @@ -2362,7 +2362,7 @@ def lower(self): Return an array with the elements of `self` converted to lowercase. - See also + See Also -------- char.lower @@ -2374,7 +2374,7 @@ def lstrip(self, chars=None): For each element in `self`, return a copy with the leading characters removed. - See also + See Also -------- char.lstrip @@ -2385,7 +2385,7 @@ def partition(self, sep): """ Partition each element in `self` around `sep`. - See also + See Also -------- partition """ @@ -2396,7 +2396,7 @@ def replace(self, old, new, count=None): For each element in `self`, return a copy of the string with all occurrences of substring `old` replaced by `new`. - See also + See Also -------- char.replace @@ -2409,7 +2409,7 @@ def rfind(self, sub, start=0, end=None): where substring `sub` is found, such that `sub` is contained within [`start`, `end`]. - See also + See Also -------- char.rfind @@ -2421,7 +2421,7 @@ def rindex(self, sub, start=0, end=None): Like `rfind`, but raises `ValueError` when the substring `sub` is not found. - See also + See Also -------- char.rindex @@ -2433,7 +2433,7 @@ def rjust(self, width, fillchar=' '): Return an array with the elements of `self` right-justified in a string of length `width`. - See also + See Also -------- char.rjust @@ -2444,7 +2444,7 @@ def rpartition(self, sep): """ Partition each element in `self` around `sep`. - See also + See Also -------- rpartition """ @@ -2455,7 +2455,7 @@ def rsplit(self, sep=None, maxsplit=None): For each element in `self`, return a list of the words in the string, using `sep` as the delimiter string. - See also + See Also -------- char.rsplit @@ -2467,7 +2467,7 @@ def rstrip(self, chars=None): For each element in `self`, return a copy with the trailing characters removed. - See also + See Also -------- char.rstrip @@ -2479,7 +2479,7 @@ def split(self, sep=None, maxsplit=None): For each element in `self`, return a list of the words in the string, using `sep` as the delimiter string. - See also + See Also -------- char.split @@ -2491,7 +2491,7 @@ def splitlines(self, keepends=None): For each element in `self`, return a list of the lines in the element, breaking at line boundaries. - See also + See Also -------- char.splitlines @@ -2503,7 +2503,7 @@ def startswith(self, prefix, start=0, end=None): Returns a boolean array which is `True` where the string element in `self` starts with `prefix`, otherwise `False`. - See also + See Also -------- char.startswith @@ -2515,7 +2515,7 @@ def strip(self, chars=None): For each element in `self`, return a copy with the leading and trailing characters removed. - See also + See Also -------- char.strip @@ -2527,7 +2527,7 @@ def swapcase(self): For each element in `self`, return a copy of the string with uppercase characters converted to lowercase and vice versa. - See also + See Also -------- char.swapcase @@ -2540,7 +2540,7 @@ def title(self): string: words start with uppercase characters, all remaining cased characters are lowercase. - See also + See Also -------- char.title @@ -2554,7 +2554,7 @@ def translate(self, table, deletechars=None): `deletechars` are removed, and the remaining characters have been mapped through the given translation table. - See also + See Also -------- char.translate @@ -2566,7 +2566,7 @@ def upper(self): Return an array with the elements of `self` converted to uppercase. - See also + See Also -------- char.upper @@ -2578,7 +2578,7 @@ def zfill(self, width): Return the numeric string left-filled with zeros in a string of length `width`. 
- See also + See Also -------- char.zfill @@ -2590,7 +2590,7 @@ def isnumeric(self): For each element in `self`, return True if there are only numeric characters in the element. - See also + See Also -------- char.isnumeric @@ -2602,7 +2602,7 @@ def isdecimal(self): For each element in `self`, return True if there are only decimal characters in the element. - See also + See Also -------- char.isdecimal From bb8990e5eba4927716ad8a93e9b92c0f3de69386 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Thu, 28 Jan 2021 09:11:14 -0800 Subject: [PATCH 0435/1270] DOC: cleanup of numpy/polynomial. Numpydoc format says that the colon need o be omitted if there is no type, there were also some empty Examples Sections --- numpy/polynomial/_polybase.py | 3 --- numpy/polynomial/chebyshev.py | 3 --- numpy/polynomial/legendre.py | 6 ------ numpy/polynomial/polyutils.py | 18 +++++++++--------- 4 files changed, 9 insertions(+), 21 deletions(-) diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index ef3f9896da01..b04b8e66b09c 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -757,9 +757,6 @@ def convert(self, domain=None, kind=None, window=None): Conversion between domains and class types can result in numerically ill defined series. - Examples - -------- - """ if kind is None: kind = self.__class__ diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 4d0a4f483bbf..d24fc738fcf4 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -1149,9 +1149,6 @@ def chebval(x, c, tensor=True): ----- The evaluation uses Clenshaw recursion, aka synthetic division. - Examples - -------- - """ c = np.array(c, ndmin=1, copy=True) if c.dtype.char in '?bBhHiIlLqQpP': diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 23ddd07cacbd..cd4da2a79e75 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -605,9 +605,6 @@ def legpow(c, pow, maxpower=16): -------- legadd, legsub, legmulx, legmul, legdiv - Examples - -------- - """ return pu._pow(legmul, c, pow, maxpower) @@ -890,9 +887,6 @@ def legval(x, c, tensor=True): ----- The evaluation uses Clenshaw recursion, aka synthetic division. 
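A minimal usage sketch for ``legval`` (illustration only; the patch itself only removes the empty ``Examples`` placeholders)::

    >>> from numpy.polynomial import legendre as L
    >>> L.legval(2.0, [1, 2, 3])   # 1*P_0(2) + 2*P_1(2) + 3*P_2(2) = 1 + 4 + 16.5
    21.5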
- Examples - -------- - """ c = np.array(c, ndmin=1, copy=False) if c.dtype.char in '?bBhHiIlLqQpP': diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index d81ee975413c..01879ecbcebb 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -509,7 +509,7 @@ def _fromroots(line_f, mul_f, roots): The ``line`` function, such as ``polyline`` mul_f : function(array_like, array_like) -> ndarray The ``mul`` function, such as ``polymul`` - roots : + roots See the ``fromroots`` functions for more detail """ if len(roots) == 0: @@ -537,7 +537,7 @@ def _valnd(val_f, c, *args): ---------- val_f : function(array_like, array_like, tensor: bool) -> array_like The ``val`` function, such as ``polyval`` - c, args : + c, args See the ``vald`` functions for more detail """ args = [np.asanyarray(a) for a in args] @@ -567,7 +567,7 @@ def _gridnd(val_f, c, *args): ---------- val_f : function(array_like, array_like, tensor: bool) -> array_like The ``val`` function, such as ``polyval`` - c, args : + c, args See the ``gridd`` functions for more detail """ for xi in args: @@ -586,7 +586,7 @@ def _div(mul_f, c1, c2): ---------- mul_f : function(array_like, array_like) -> array_like The ``mul`` function, such as ``polymul`` - c1, c2 : + c1, c2 See the ``div`` functions for more detail """ # c1, c2 are trimmed copies @@ -646,7 +646,7 @@ def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): ---------- vander_f : function(array_like, int) -> ndarray The 1d vander function, such as ``polyvander`` - c1, c2 : + c1, c2 See the ``fit`` functions for more detail """ x = np.asarray(x) + 0.0 @@ -732,12 +732,12 @@ def _pow(mul_f, c, pow, maxpower): Parameters ---------- - vander_f : function(array_like, int) -> ndarray - The 1d vander function, such as ``polyvander`` - pow, maxpower : - See the ``pow`` functions for more detail mul_f : function(array_like, array_like) -> ndarray The ``mul`` function, such as ``polymul`` + c : array_like + 1-D array of array of series coefficients + pow, maxpower + See the ``pow`` functions for more detail """ # c is a trimmed copy [c] = as_series([c]) From 7aebdfa50254270085fe6a24206d35d31e43c939 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Thu, 28 Jan 2021 09:04:18 -0800 Subject: [PATCH 0436/1270] DOC: more mist fixes of syntax. Space before colon, or missing colon in see-also, typo in parameter names, casing in See Also. --- numpy/ma/core.py | 13 ++++++------- numpy/ma/extras.py | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 54cb12f17e64..38a0a8b50ddc 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -399,10 +399,10 @@ def _recursive_set_fill_value(fillvalue, dt): Parameters ---------- - fillvalue: scalar or array_like + fillvalue : scalar or array_like Scalar or array representing the fill value. If it is of shorter length than the number of fields in dt, it will be resized. - dt: dtype + dt : dtype The structured dtype for which to create the fill value. Returns @@ -5220,7 +5220,7 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): -------- numpy.ndarray.mean : corresponding function for ndarrays numpy.mean : Equivalent function - numpy.ma.average: Weighted average. + numpy.ma.average : Weighted average. Examples -------- @@ -6913,8 +6913,7 @@ def compressed(x): See Also -------- - ma.MaskedArray.compressed - Equivalent method. + ma.MaskedArray.compressed : Equivalent method. 
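Typical behaviour of ``ma.compressed``, for reference (illustration only, not part of the patch)::

    >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    >>> np.ma.compressed(x)
    array([1, 3])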
""" return asanyarray(x).compressed() @@ -7343,12 +7342,12 @@ def choose(indices, choices, out=None, mode='raise'): Given an array of integers and a list of n choice arrays, this method will create a new array that merges each of the choice arrays. Where a - value in `a` is i, the new array will have the value that choices[i] + value in `index` is i, the new array will have the value that choices[i] contains in the same place. Parameters ---------- - a : ndarray of ints + indices : ndarray of ints This array must contain integers in ``[0, n-1]``, where n is the number of choices. choices : sequence of arrays diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 96e64914a5df..a775a15bfcfc 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1217,7 +1217,7 @@ def union1d(ar1, ar2): The output is always a masked array. See `numpy.union1d` for more details. - See also + See Also -------- numpy.union1d : Equivalent function for ndarrays. From 4c4f201e3d16b57e1ba03a5c12ed6343cca447ae Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 29 Jan 2021 07:49:22 +0100 Subject: [PATCH 0437/1270] DOC: improve description of `NoValue`. [ci skip] --- numpy/_globals.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/numpy/_globals.py b/numpy/_globals.py index 9f44c7729668..4a8c266d30db 100644 --- a/numpy/_globals.py +++ b/numpy/_globals.py @@ -58,8 +58,20 @@ class _NoValueType: """Special keyword value. The instance of this class may be used as the default value assigned to a - deprecated keyword in order to check if it has been given a user defined - value. + keyword if no other obvious default (e.g., `None`) is suitable, + + Common reasons for using this keyword are: + + - A new keyword is added to a function, and that function forwards its + inputs to another function or method which can be defined outside of + NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims`` + keyword was added that could only be forwarded if the user explicitly + specified ``keepdims``; downstream array libraries may not have added + the same keyword, so adding ``x.std(..., keepdims=keepdims)`` + unconditionally could have broken previously working code. + - A keyword is being deprecated, and a deprecation warning must only be + emitted when the keyword is used. + """ __instance = None def __new__(cls): From 32aa366e0b0512ff71bcac49cae767cc169443f9 Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 29 Jan 2021 12:10:59 +0200 Subject: [PATCH 0438/1270] add an 'apt update' --- tools/travis-before-install.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh index 81737af89e64..65aa4ad13108 100755 --- a/tools/travis-before-install.sh +++ b/tools/travis-before-install.sh @@ -9,6 +9,7 @@ free -m df -h ulimit -a +sudo apt update sudo apt install gfortran eatmydata libgfortran5 if [ "$USE_DEBUG" ] From d788788f8233d96e647c64bd4070305146b9ff92 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 29 Jan 2021 12:16:37 +0100 Subject: [PATCH 0439/1270] MAINT: typo fix: `_UInt8Codes` -> `_Int8Codes` --- numpy/typing/_dtype_like.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py index 9b79f9b68520..f86b4a67c625 100644 --- a/numpy/typing/_dtype_like.py +++ b/numpy/typing/_dtype_like.py @@ -154,7 +154,7 @@ def dtype(self) -> _DType_co: ... 
Type[np.signedinteger], "np.dtype[np.signedinteger]", "_SupportsDType[np.dtype[np.signedinteger]]", - _UInt8Codes, + _Int8Codes, _Int16Codes, _Int32Codes, _Int64Codes, From 881dd870c8eb6ac6dd774e2dec78497879971c99 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 29 Jan 2021 14:17:14 -0600 Subject: [PATCH 0440/1270] Update numpy/core/src/multiarray/mapping.c --- numpy/core/src/multiarray/mapping.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 0ebb337b0898..8b9b67387181 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -2371,8 +2371,6 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices, /* Before contunuing, ensure that there are not too fancy indices */ if (indices[i].type & HAS_FANCY) { - assert(indices[i].type == HAS_FANCY || - indices[i].type == HAS_0D_BOOL); if (NPY_UNLIKELY(j >= NPY_MAXDIMS)) { PyErr_Format(PyExc_IndexError, "too many advanced (array) indices. This probably " From 83248736e8dfde53aae8846fdfcd98488ec045e6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 29 Jan 2021 14:23:58 -0700 Subject: [PATCH 0441/1270] MAINT: Remove stray quote. --- doc/neps/nep-0023-backwards-compatibility.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index 27b79d436317..015df8964ea5 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -79,7 +79,7 @@ extensibility. Fixes for clear bugs are exempt from this backwards compatibility policy. However, in case of serious impact on users even bug fixes may have to be delayed for one or more releases. For example, if a downstream library would no -longer build or would give incorrect results." +longer build or would give incorrect results. Strategies related to deprecations From 48808e1a6a775283b2d482d65f93390ddeb534af Mon Sep 17 00:00:00 2001 From: Ryan Polley Date: Sat, 30 Jan 2021 10:50:12 -0600 Subject: [PATCH 0442/1270] DOC: replace 'this platform' with the actual platform in the scalar type documentation (#18085) In the scalar documentation it's unclear that 'this platform' refers to the platform that the doc build job ran on, so replace it with the name of the platform using python's platform module --- numpy/core/_add_newdocs_scalars.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py index b9b151224e61..d31f0037d834 100644 --- a/numpy/core/_add_newdocs_scalars.py +++ b/numpy/core/_add_newdocs_scalars.py @@ -6,6 +6,7 @@ from numpy.core import dtype from numpy.core import numerictypes as _numerictypes from numpy.core.function_base import add_newdoc +import platform ############################################################################## # @@ -49,6 +50,8 @@ def type_aliases_gen(): ]) + + def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): # note: `:field: value` is rST syntax which renders as field lists. 
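A side note on the ``_NoValueType`` docstring expanded in PATCH 0437 above: the sketch below shows the sentinel-default pattern it describes. The ``my_std`` wrapper and the local ``_NoValue`` object are hypothetical illustrations of the idea, not NumPy's actual implementation::

    import numpy as np

    class _NoValueType:
        """Singleton sentinel, distinct from any value a caller could pass."""
        _instance = None

        def __new__(cls):
            if cls._instance is None:
                cls._instance = super().__new__(cls)
            return cls._instance

    _NoValue = _NoValueType()

    def my_std(a, keepdims=_NoValue):
        kwargs = {}
        if keepdims is not _NoValue:
            # Forward ``keepdims`` only when the caller supplied it, so an
            # array-like whose ``std`` method lacks the keyword keeps working.
            kwargs['keepdims'] = keepdims
        return a.std(**kwargs)

    print(my_std(np.arange(6.0)))                 # keyword not forwarded
    print(my_std(np.arange(6.0), keepdims=True))  # forwarded explicitly

Because the sentinel cannot be produced by ordinary caller code, the ``is not _NoValue`` test distinguishes "not passed" from every legitimate value, including ``None``.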
o = getattr(_numerictypes, obj) @@ -56,7 +59,7 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): character_code = dtype(o).char canonical_name_doc = "" if obj == o.__name__ else ":Canonical name: `numpy.{}`\n ".format(obj) alias_doc = ''.join(":Alias: `numpy.{}`\n ".format(alias) for alias in fixed_aliases) - alias_doc += ''.join(":Alias on this platform: `numpy.{}`: {}.\n ".format(alias, doc) + alias_doc += ''.join(":Alias on this platform ({} {}): `numpy.{}`: {}.\n ".format(platform.system(), platform.machine(), alias, doc) for (alias_type, alias, doc) in possible_aliases if alias_type is o) docstring = """ {doc} From ef639359f4862762d97c215990f86a7a622c0f0d Mon Sep 17 00:00:00 2001 From: Mitchell Faas Date: Sun, 31 Jan 2021 00:51:28 +0100 Subject: [PATCH 0443/1270] ENH: Added sanity check to printoptions See issue #18254 --- numpy/core/arrayprint.py | 7 +++++++ numpy/core/tests/test_arrayprint.py | 3 +++ 2 files changed, 10 insertions(+) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 3fade4824bb3..2d772c778fb5 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -78,6 +78,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if legacy not in [None, False, '1.13']: warnings.warn("legacy printing option can currently only be '1.13' or " "`False`", stacklevel=3) + if threshold is not None: # forbid the bad threshold arg suggested by stack overflow, gh-12351 if not isinstance(threshold, numbers.Number): @@ -85,6 +86,12 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if np.isnan(threshold): raise ValueError("threshold must be non-NAN, try " "sys.maxsize for untruncated representation") + + if precision is not None: + # forbid the bad precision arg as suggested by issue #18254 + if not isinstance(precision, int): + raise TypeError('precision must be an integer') + return options diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index a2703d81b2cc..2c5f1577ddab 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -923,6 +923,9 @@ def test_bad_args(self): assert_raises(TypeError, np.set_printoptions, threshold='1') assert_raises(TypeError, np.set_printoptions, threshold=b'1') + assert_raises(TypeError, np.set_printoptions, precision='1') + assert_raises(TypeError, np.set_printoptions, precision=1.5) + def test_unicode_object_array(): expected = "array(['é'], dtype=object)" x = np.array([u'\xe9'], dtype=object) From b0f9fccca3c312c7473a638c5aa0a9faa179a95c Mon Sep 17 00:00:00 2001 From: Nicholas McKibben Date: Sat, 30 Jan 2021 18:42:32 -0700 Subject: [PATCH 0444/1270] FIX: random: C linkage for random distributions --- numpy/core/include/numpy/random/distributions.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/numpy/core/include/numpy/random/distributions.h b/numpy/core/include/numpy/random/distributions.h index c474c4d14a3d..3ffacc8f9ee0 100644 --- a/numpy/core/include/numpy/random/distributions.h +++ b/numpy/core/include/numpy/random/distributions.h @@ -1,6 +1,10 @@ #ifndef _RANDOMDGEN__DISTRIBUTIONS_H_ #define _RANDOMDGEN__DISTRIBUTIONS_H_ +#ifdef __cplusplus +extern "C" { +#endif + #include "Python.h" #include "numpy/npy_common.h" #include @@ -197,4 +201,8 @@ static NPY_INLINE double next_double(bitgen_t *bitgen_state) { return bitgen_state->next_double(bitgen_state->state); } +#ifdef __cplusplus +} +#endif + #endif From 3b6414c60ae869affdee4fb0bee2ec7b6a602efc Mon Sep 17 00:00:00 2001 From: Charles 
Harris Date: Sat, 30 Jan 2021 13:50:57 -0700 Subject: [PATCH 0445/1270] REL: Update master after 1.20.0 release. --- .mailmap | 7 +- doc/RELEASE_WALKTHROUGH.rst.txt | 4 + doc/changelog/1.20.0-changelog.rst | 170 +++++++++++++++++----------- doc/source/release/1.20.0-notes.rst | 4 +- 4 files changed, 115 insertions(+), 70 deletions(-) diff --git a/.mailmap b/.mailmap index f3fd63519e3b..77df36b08d82 100644 --- a/.mailmap +++ b/.mailmap @@ -55,6 +55,7 @@ Ben Nathanson bjnath Benjamin Root Ben Root Benjamin Root weathergod Bernardt Duvenhage bduvenhage +Bernie Gray bernie gray Bertrand Lefebvre bertrand Bertrand Lefebvre Bertrand Bharat Raghunathan Bharat123Rox @@ -76,6 +77,7 @@ Christoph Gohlke cgohlke Christoph Gohlke Christolph Gohlke Chunlin Fang Qiyu8 Chunlin Fang qiyu8 +Chunlin Fang qiyu8 Chunlin Fang Chunlin Colin Snyder <47012605+colinsnyder@users.noreply.github.com> colinsnyder <47012605+colinsnyder@users.noreply.github.com> Daniel B Allan danielballan @@ -138,7 +140,8 @@ Jaime Fernandez Jaime Fernandez Jaime Fernandez jaimefrio Jaime Fernandez Jaime Jakob Jakobson Jakob -Jakob Jacobson jakobjakobson13 <43045863+jakobjakobson13@users.noreply.github.com> +Jakob Jakobson Jakob Jacobson +Jakob Jakobson jakobjakobson13 <43045863+jakobjakobson13@users.noreply.github.com> James Webber jamestwebber Jarrod Millman Jarrod Millman Jason Grout Jason Grout @@ -270,6 +273,8 @@ Tim Cera tim cera Tim Teichmann tteichmann Tim Teichmann tteichmann <44259103+tteichmann@users.noreply.github.com> Tirth Patel tirthasheshpatel +Tobias Pitters CloseChoice +Tobias Pitters <31857876+CloseChoice@users.noreply.github.com> Tom Boyd pezcore Tom Poole tpoole Tony LaTorre tlatorre diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt index 733f681af704..1d3f54edf16b 100644 --- a/doc/RELEASE_WALKTHROUGH.rst.txt +++ b/doc/RELEASE_WALKTHROUGH.rst.txt @@ -252,6 +252,10 @@ work:: $ firefox doc/build/merge/index.html +Update the stable link:: + + $ ln -sfn 1.19 stable + Once everything seems satisfactory, commit and upload the changes:: $ pushd doc/build/merge diff --git a/doc/changelog/1.20.0-changelog.rst b/doc/changelog/1.20.0-changelog.rst index 5db423c41fed..f0c2a27234f7 100644 --- a/doc/changelog/1.20.0-changelog.rst +++ b/doc/changelog/1.20.0-changelog.rst @@ -2,7 +2,7 @@ Contributors ============ -A total of 182 people contributed to this release. People with a "+" by their +A total of 184 people contributed to this release. People with a "+" by their names contributed a patch for the first time. * Aaron Meurer + @@ -18,7 +18,7 @@ names contributed a patch for the first time. * Alexander Kuhn-Regnier + * Allen Downey + * Andras Deak -* Andrea Olivo andryandrew@gmail.com andryandrew + +* Andrea Olivo + * Andrew Eckart + * Anirudh Subramanian * Anthony Byuraev + @@ -29,6 +29,7 @@ names contributed a patch for the first time. * Ben Derrett + * Ben Elliston + * Ben Nathanson + +* Bernie Gray + * Bharat Medasani + * Bharat Raghunathan * Bijesh Mohan + @@ -42,6 +43,7 @@ names contributed a patch for the first time. * Charles Harris * Chris Brown + * Chris Vavaliaris + +* Christoph Gohlke * Chunlin Fang * CloseChoice + * Daniel G. A. Smith + @@ -49,13 +51,14 @@ names contributed a patch for the first time. * Daniel Vanzo + * David Pitchford + * Davide Dal Bosco + +* Derek Homeier * Dima Kogan + * Dmitry Kutlenkov + * Douglas Fenstermacher + * Dustin Spicuzza + * E. 
Madison Bray + * Elia Franzella + -* Enrique Matías Sánchez (Quique) + +* Enrique Matías Sánchez + * Erfan Nariman | Veneficus + * Eric Larson * Eric Moore @@ -63,6 +66,7 @@ names contributed a patch for the first time. * Erik M. Bray * EthanCJ-git + * Etienne Guesnet + +* FX Coudert + * Felix Divo * Frankie Robertson + * Ganesh Kathiresan @@ -76,7 +80,6 @@ names contributed a patch for the first time. * InessaPawson + * Isabela Presedo-Floyd + * Isuru Fernando -* Jakob Jacobson + * Jakob Jakobson + * Jakub Wilk * James Myatt + @@ -155,6 +158,7 @@ names contributed a patch for the first time. * Tina Oberoi * Tirth Patel * Tobias Pitters + +* Tomoki, Karatsu + * Tyler Reddy * Veniamin Petrenko + * Wansoo Kim + @@ -168,7 +172,6 @@ names contributed a patch for the first time. * Zac Hatfield-Dodds * Zuhair Ali-Khan + * @abhilash42 + -* @bernie gray + * @danbeibei + * @dojafrat * @dpitch40 + @@ -178,7 +181,6 @@ names contributed a patch for the first time. * @leeyspaul + * @mitch + * @prateek arora + -* @qiyu8 + * @serge-sans-paille + * @skywalker + * @stphnlyd + @@ -191,19 +193,19 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 650 pull requests were merged for this release. +A total of 684 pull requests were merged for this release. * `#13516 `__: ENH: enable multi-platform SIMD compiler optimizations * `#14779 `__: NEP 36 (fair play) * `#14882 `__: DEP: Deprecate aliases of builtin types in python 3.7+ -* `#15037 `__: BUG: ``np.resize`` negative shape and subclasses edge case fixes -* `#15121 `__: ENH: random: Add the method ``permuted`` to Generator. +* `#15037 `__: BUG: `np.resize` negative shape and subclasses edge case fixes +* `#15121 `__: ENH: random: Add the method `permuted` to Generator. * `#15162 `__: BUG,MAINT: Fix issues with non-reduce broadcasting axes * `#15471 `__: BUG: Ensure PyArray_FromScalar always returns the requested dtype * `#15507 `__: NEP 42: Technical decisions for new DTypes * `#15508 `__: API: Create Preliminary DTypeMeta class and np.dtype subclasses * `#15604 `__: MAINT: Avoid exception in NpzFile destructor if constructor raises... -* `#15666 `__: ENH: Improved ``__str__`` for polynomials +* `#15666 `__: ENH: Improved `__str__` for polynomials * `#15759 `__: BUILD: Remove Accelerate support * `#15791 `__: [DOC] Added tutorial about the numpy.ma module. * `#15852 `__: ENH: Add where argument to np.mean @@ -212,7 +214,7 @@ A total of 650 pull requests were merged for this release. * `#15997 `__: ENH: improve printing of arrays with multi-line reprs * `#16130 `__: DOC: Correct documentation of ``__array__`` when used as output... * `#16134 `__: ENH: Implement concatenate dtype and casting keyword arguments -* `#16156 `__: DEP: Deprecate ``numpy.dual``. +* `#16156 `__: DEP: Deprecate `numpy.dual`. * `#16161 `__: BUG: Potential fix for divmod(1.0, 0.0) to raise divbyzero and... * `#16167 `__: DOC: Increase guidance and detail of np.polynomial docstring * `#16174 `__: DOC: Add transition note to all lib/poly functions @@ -225,7 +227,7 @@ A total of 650 pull requests were merged for this release. * `#16247 `__: ENH:Umath Replace raw SIMD of unary float point(32-64) with NPYV... * `#16248 `__: MRG, ENH: added edge keyword argument to digitize * `#16257 `__: DOC: Update the f2py section of the "Using Python as Glue" page. 
-* `#16260 `__: DOC: Improve ``rec.array`` function documentation (#15853) +* `#16260 `__: DOC: Improve `rec.array` function documentation (#15853) * `#16266 `__: ENH: include dt64/td64 isinstance checks in ``__init__.pxd`` * `#16267 `__: DOC: Clarifications for np.std * `#16273 `__: BUG: Order percentile monotonically @@ -258,7 +260,7 @@ A total of 650 pull requests were merged for this release. * `#16334 `__: ENH: Use AVX-512 for np.isnan, np.infinite, np.isinf and np.signbit * `#16336 `__: BUG: Fix refcounting in add_newdoc * `#16337 `__: CI: Create a link for the circleCI artifact -* `#16348 `__: BUG: Fix dtype leak in ``PyArray_FromAny`` error path +* `#16348 `__: BUG: Fix dtype leak in `PyArray_FromAny` error path * `#16349 `__: BUG: Indentation for docstrings * `#16351 `__: BUG: Fix small leaks in error path and ``empty_like`` with shape * `#16362 `__: MAINT: Streamline download-wheels. @@ -306,7 +308,7 @@ A total of 650 pull requests were merged for this release. * `#16471 `__: BLD: install mingw32 v7.3.0 for win32 * `#16472 `__: DOC: Fixes for 18 broken links * `#16474 `__: MAINT: use zip instead of range in piecewise -* `#16476 `__: ENH: add ``norm=forward,backward`` to numpy.fft functions +* `#16476 `__: ENH: add `norm=forward,backward` to numpy.fft functions * `#16482 `__: SIMD: Optimize the performace of np.packbits in ARM-based machine. * `#16485 `__: BUG: Fix result when a gufunc output broadcasts the inputs. * `#16500 `__: DOC: Point Contributing page to new NEP 45 @@ -337,7 +339,7 @@ A total of 650 pull requests were merged for this release. * `#16572 `__: BUG: fix sin/cos bug when input is strided array * `#16574 `__: MAINT: fix name of first parameter to dtype constructor in type... * `#16581 `__: DOC: Added an example for np.transpose(4d_array) -* `#16583 `__: MAINT: changed ``np.generic`` arguments to positional-only +* `#16583 `__: MAINT: changed np.generic arguments to positional-only * `#16590 `__: DOC: Clarify dtype default for logspace and geomspace * `#16591 `__: DOC: Disallow complex args in arange * `#16592 `__: BUG: Raise TypeError for float->timedelta promotion @@ -362,7 +364,7 @@ A total of 650 pull requests were merged for this release. * `#16633 `__: MAINT: lib: Some code clean up in loadtxt * `#16635 `__: BENCH: remove obsolete goal_time param * `#16639 `__: BUG: Fix uint->timedelta promotion to raise TypeError -* `#16642 `__: MAINT: Replace ``PyUString_GET_SIZE`` with ``PyUnicode_GetLength``. +* `#16642 `__: MAINT: Replace `PyUString_GET_SIZE` with `PyUnicode_GetLength`. * `#16643 `__: REL: Fix outdated docs link * `#16646 `__: TST: add a static typing test for memoryviews as ArrayLikes * `#16647 `__: ENH: Added annotations to 8 functions from np.core.fromnumeric @@ -378,11 +380,11 @@ A total of 650 pull requests were merged for this release. * `#16674 `__: TST: Add extra debugging information to CPU features detection * `#16675 `__: ENH: Add support for file like objects to np.core.records.fromfile * `#16683 `__: DOC: updated gcc minimum recommend version to build from source -* `#16684 `__: MAINT: Allow ``None`` to be passed to certain ``generic`` subclasses +* `#16684 `__: MAINT: Allow `None` to be passed to certain `generic` subclasses * `#16690 `__: DOC: fixed docstring for descr_to_dtype -* `#16691 `__: DOC: Remove "matrix" from ``triu`` docstring. +* `#16691 `__: DOC: Remove "matrix" from `triu` docstring. 
* `#16696 `__: MAINT: add py.typed sentinel to package manifest -* `#16699 `__: MAINT: Fixup quantile tests to not use ``np.float`` +* `#16699 `__: MAINT: Fixup quantile tests to not use `np.float` * `#16702 `__: BLD: Add CPU entry for Emscripten / WebAssembly * `#16704 `__: TST: Disable Python 3.9-dev testing. * `#16706 `__: DOC: Add instruction about stable symlink @@ -395,7 +397,7 @@ A total of 650 pull requests were merged for this release. * `#16730 `__: ENH: Use f90 compiler specified in f2py command line args for... * `#16731 `__: DOC: reword random c-api introduction, cython is documented in... * `#16735 `__: DOC: Tweak a sentence about broadcasting. -* `#16736 `__: DOC: Prepend ``ma.`` to references in ``numpy.ma`` +* `#16736 `__: DOC: Prepend `ma.` to references in ``numpy.ma`` * `#16738 `__: DOC: Remove redundant word * `#16742 `__: DOC: add unique() to See Also of repeat() * `#16743 `__: DOC: add example to unique() and make connection to repeat() @@ -465,7 +467,7 @@ A total of 650 pull requests were merged for this release. * `#16886 `__: DOC: Fix types including curly braces * `#16887 `__: DOC: Remove the links for ``True`` and ``False`` * `#16888 `__: ENH: Integrate the new CPU dispatcher with umath generator -* `#16894 `__: DOC: Fix wrong markups in ``arrays.dtypes`` +* `#16894 `__: DOC: Fix wrong markups in `arrays.dtypes` * `#16896 `__: DOC: Remove links for C codes * `#16897 `__: DOC: Fix the declarations of C fuctions * `#16899 `__: MNT: also use Py_SET_REFCNT instead of Py_REFCNT @@ -484,17 +486,17 @@ A total of 650 pull requests were merged for this release. * `#16941 `__: BUG: Allow array-like types to be coerced as object array elements * `#16943 `__: DEP: Deprecate size-one ragged array coercion * `#16944 `__: Change the name of the folder "icons" to "logo". -* `#16949 `__: ENH: enable colors for ``runtests.py --ipython`` +* `#16949 `__: ENH: enable colors for `runtests.py --ipython` * `#16950 `__: DOC: Clarify input to irfft/irfft2/irfftn * `#16952 `__: MAINT: Bump hypothesis from 5.20.2 to 5.23.2 * `#16953 `__: update numpy/lib/arraypad.py with appropriate chain exception * `#16957 `__: MAINT: Use arm64 instead of aarch64 on travisCI. * `#16962 `__: MAINT: Chain exception in ``distutils/fcompiler/environment.py``. -* `#16966 `__: MAINT: Added the ``order`` parameter to ``np.array()`` +* `#16966 `__: MAINT: Added the `order` parameter to `np.array()` * `#16969 `__: ENH: Add Neon SIMD implementations for add, sub, mul, and div * `#16973 `__: DOC: Fixed typo in lib/recfunctions.py * `#16974 `__: TST: Add pypy win32 CI testing. -* `#16982 `__: ENH: Increase the use of ``Literal`` types +* `#16982 `__: ENH: Increase the use of `Literal` types * `#16986 `__: ENH: Add NumPy declarations to be used by Cython 3.0+ * `#16988 `__: DOC: Add the new NumPy logo to Sphinx pages * `#16991 `__: MAINT: Bump hypothesis from 5.23.2 to 5.23.9 @@ -508,7 +510,7 @@ A total of 650 pull requests were merged for this release. * `#17028 `__: DOC: Disclaimer for FFT library * `#17029 `__: MAINT: Add error return to all casting functionality and NpyIter * `#17033 `__: BUG: fix a compile and a test warning -* `#17036 `__: DOC: Clarify that ``np.char`` comparison functions always return... +* `#17036 `__: DOC: Clarify that `np.char` comparison functions always return... 
* `#17039 `__: DOC: Use a less ambiguous example for array_split * `#17041 `__: MAINT: Bump hypothesis from 5.23.9 to 5.23.12 * `#17048 `__: STY: core._internal style fixups @@ -528,7 +530,7 @@ A total of 650 pull requests were merged for this release. * `#17109 `__: MAINT: Split einsum into multiple files * `#17112 `__: BUG: Handle errors from the PyCapsule API * `#17115 `__: DOC: Fix spacing in vectorize doc -* `#17116 `__: API: Remove ``np.ctypeslib.ctypes_load_library`` +* `#17116 `__: API: Remove `np.ctypeslib.ctypes_load_library` * `#17119 `__: DOC: make spacing consistent in NEP 41 bullet points * `#17121 `__: BUG: core: fix ilp64 blas dot/vdot/... for strides > int32 max * `#17123 `__: ENH: allow running mypy through runtests.py @@ -540,39 +542,39 @@ A total of 650 pull requests were merged for this release. * `#17141 `__: MAINT: Make arrayprint str and repr the ndarray defaults. * `#17142 `__: DOC: NEP-42: Fix a few typos. * `#17143 `__: MAINT: Change handling of the expired financial functions. -* `#17144 `__: ENH: Add annotations to 3 functions in ``np.core.function_base`` +* `#17144 `__: ENH: Add annotations to 3 functions in `np.core.function_base` * `#17145 `__: MAINT, BUG: Replace uses of PyString_AsString. * `#17146 `__: MAINT: ``Replace PyUString_*`` by ``PyUnicode_*`` equivalents. * `#17149 `__: MAINT: Replace PyInt macros with their PyLong replacement * `#17150 `__: ENH: Add support for the abstract scalars to cython code * `#17151 `__: BUG: Fix incorrect cython definition of npy_cfloat -* `#17152 `__: MAINT: Clean up some ``Npy_`` vs ``Py_`` macro usage +* `#17152 `__: MAINT: Clean up some Npy_ vs Py_ macro usage * `#17154 `__: DOC: Remove references to PyCObject * `#17159 `__: DOC: Update numpy4matlab * `#17160 `__: Clean up some more bytes vs unicode handling * `#17161 `__: BUG: Remove Void special case for "safe casting" * `#17163 `__: MAINT: Remove redundant headers -* `#17164 `__: MAINT: Remove ``NPY_COPY_PYOBJECT_PTR`` +* `#17164 `__: MAINT: Remove NPY_COPY_PYOBJECT_PTR * `#17167 `__: BLD: Merge the npysort library into multiarray * `#17168 `__: TST: Add tests mapping out the rules for metadata in promotion * `#17171 `__: BUG: revert trim_zeros changes from gh-16911 -* `#17172 `__: ENH: Make ``np.complexfloating`` generic w.r.t. ``np.floating`` +* `#17172 `__: ENH: Make `np.complexfloating` generic w.r.t. `np.floating` * `#17176 `__: MAINT/ENH: datetime: remove calls to PyUnicode_AsASCIIString,... -* `#17180 `__: ENH: Added missing methods to ``np.flatiter`` +* `#17180 `__: ENH: Added missing methods to `np.flatiter` * `#17181 `__: DOC: Correct error in description of ndarray.base -* `#17182 `__: DOC: Document ``dtype.metadata`` +* `#17182 `__: DOC: Document `dtype.metadata` * `#17186 `__: MAINT: Use utf8 strings in more of datetime -* `#17188 `__: MAINT: Add placeholder stubs for ``ndarray`` and ``generic`` +* `#17188 `__: MAINT: Add placeholder stubs for `ndarray` and `generic` * `#17191 `__: MAINT: Bump hypothesis from 5.26.0 to 5.30.0 * `#17193 `__: MAINT: Remove some callers of functions in numpy.compat * `#17195 `__: ENH: Make the window functions exactly symmetric * `#17197 `__: MAINT: Improve error handling in npy_cpu_init -* `#17199 `__: DOC: Fix the documented signatures of four ``ufunc`` methods -* `#17201 `__: MAINT: Make the ``NPY_CPU_DISPATCH_CALL`` macros expressions not... +* `#17199 `__: DOC: Fix the documented signatures of four `ufunc` methods +* `#17201 `__: MAINT: Make the `NPY_CPU_DISPATCH_CALL` macros expressions not... 
* `#17204 `__: DOC: Fixed headings for tutorials so they appear at new theme... * `#17210 `__: DOC: Canonical_urls -* `#17214 `__: MAINT: Fix various issues with the ``np.generic`` annotations -* `#17219 `__: BLD: enabled negation of library choices in ``NPY_*_ORDER`` +* `#17214 `__: MAINT: Fix various issues with the `np.generic` annotations +* `#17219 `__: BLD: enabled negation of library choices in NPY_*_ORDER * `#17220 `__: BUG, DOC: comment out metadata added via javascript * `#17222 `__: MAINT, DOC: move informational files from numpy.doc.*.py to their... * `#17223 `__: MAINT: use sysconfig not distutils.sysconfig where possible @@ -593,9 +595,9 @@ A total of 650 pull requests were merged for this release. * `#17260 `__: MAINT: Bump pydata-sphinx-theme from 0.3.2 to 0.4.0 * `#17263 `__: DOC: add new glossary terms * `#17264 `__: DOC: remove some glosssary terms -* `#17267 `__: TST: Fix the path to ``mypy.ini`` in ``runtests.py`` +* `#17267 `__: TST: Fix the path to `mypy.ini` in `runtests.py` * `#17268 `__: BUG: sysconfig attributes/distutils issue -* `#17273 `__: ENH: Annotate the arithmetic operations of ``ndarray`` and ``generic`` +* `#17273 `__: ENH: Annotate the arithmetic operations of `ndarray` and `generic` * `#17278 `__: MAINT: Merge together index page content into a single file * `#17279 `__: DOC: Fix a typo in shape_base. * `#17284 `__: ENH: Pass optimizations arguments to asv build @@ -614,23 +616,23 @@ A total of 650 pull requests were merged for this release. * `#17304 `__: BUILD: pin pygments to 2.6.1, 2.7.0 breaks custom NumPyC lexer * `#17307 `__: MAINT: Bump hypothesis from 5.33.0 to 5.35.1 * `#17308 `__: MAINT: Bump pytest from 6.0.1 to 6.0.2 -* `#17309 `__: MAINT: Move the ``fromnumeric`` annotations to their own stub file +* `#17309 `__: MAINT: Move the `fromnumeric` annotations to their own stub file * `#17312 `__: MAINT: Syntax-highlight .src files on github * `#17313 `__: MAINT: Mark vendored/generated files in .gitattributes * `#17315 `__: MAINT: Cleanup f2py/cfuncs.py * `#17319 `__: BUG: Set deprecated fields to null in PyArray_InitArrFuncs * `#17320 `__: BUG: allow registration of hard-coded structured dtypes * `#17326 `__: ENH: Add annotations for five array construction functions -* `#17329 `__: DOC: Fix incorrect ``.. deprecated::`` syntax that led to this... -* `#17330 `__: DOC: improve ``issubdtype`` and scalar type docs -* `#17331 `__: DOC: Remove the tables of scalar types, and use ``..autoclass``... +* `#17329 `__: DOC: Fix incorrect `.. deprecated::` syntax that led to this... +* `#17330 `__: DOC: improve `issubdtype` and scalar type docs +* `#17331 `__: DOC: Remove the tables of scalar types, and use `..autoclass`... * `#17332 `__: DOC, BLD: update lexer highlighting and make numpydocs a regular... 
* `#17334 `__: MAINT: Chaining exceptions in npyio.py * `#17337 `__: NEP: Regenerate table in NEP 29 (add numpy 1.18 and 1.19 to list) * `#17338 `__: DOC: Fix syntax errors in docstrings for versionchanged, versionadded * `#17340 `__: SIMD: Add partial/non-contig load and store intrinsics for 32/64-bit * `#17344 `__: ENH, BLD: Support for the NVIDIA HPC SDK nvfortran compiler -* `#17346 `__: BLD,BUG: Fix a macOS build failure when ``NPY_BLAS_ORDER=""`` +* `#17346 `__: BLD,BUG: Fix a macOS build failure when `NPY_BLAS_ORDER=""` * `#17350 `__: DEV: Add PR prefix labeler and numpy prefix mapping * `#17352 `__: DOC: Guide to writing how-tos * `#17353 `__: DOC: How-to guide for I/O @@ -640,7 +642,7 @@ A total of 650 pull requests were merged for this release. * `#17364 `__: MAINT: Finish replacing PyInt_Check * `#17369 `__: DOC: distutils: Remove an obsolete paragraph. * `#17370 `__: NEP: Edit nep-0042 for more clarity -* `#17372 `__: ENH: Add annotations for remaining ``ndarray`` / ``generic`` non-magic... +* `#17372 `__: ENH: Add annotations for remaining `ndarray` / `generic` non-magic... * `#17373 `__: BUG: Fixes module data docstrings. * `#17375 `__: DOC: Fix default_rng docstring * `#17377 `__: BUG: ensure _UFuncNoLoopError can be pickled @@ -659,12 +661,12 @@ A total of 650 pull requests were merged for this release. * `#17402 `__: DOC: Add arraysetops to an autosummary * `#17404 `__: MAINT: Replace PyUString_ConcatAndDel in nditer_constr.c. * `#17405 `__: MAINT: Replace PyUString_ConcatAndDel in mapping.c. -* `#17406 `__: ENH: Replace the module-level ``__getattr__`` with explicit type... +* `#17406 `__: ENH: Replace the module-level `__getattr__` with explicit type... * `#17407 `__: DOC: in PR template, set expectations for PR review timeline * `#17409 `__: MAINT: Cleanup remaining PyUString_ConcatAndDel use. * `#17410 `__: API: Special case how numpy scalars are coerced to signed integer * `#17411 `__: TST: Mark the typing tests as slow -* `#17412 `__: DOC: Fix a parameter type in the ``putmask`` docs +* `#17412 `__: DOC: Fix a parameter type in the `putmask` docs * `#17418 `__: DOC: adding operational form documentation for array ops * `#17419 `__: DEP: Deprecate coercion to subarray dtypes * `#17421 `__: BUG: Fix memory leak in array-coercion error paths @@ -672,7 +674,7 @@ A total of 650 pull requests were merged for this release. * `#17423 `__: DOC: Remove bogus reference to _a_ * `#17424 `__: DOC: Fix formatting issues in description of .c.src files * `#17427 `__: NEP: nep-0029 typo correction -* `#17429 `__: MAINT: Move aliases for common scalar unions to ``numpy.typing`` +* `#17429 `__: MAINT: Move aliases for common scalar unions to `numpy.typing` * `#17430 `__: BUG: Fix memoryleaks related to NEP 37 function overrides * `#17431 `__: DOC: Fix the links for ``Ellipsis`` * `#17432 `__: DOC: add references to einops and opt_einsum @@ -683,7 +685,7 @@ A total of 650 pull requests were merged for this release. * `#17440 `__: DOC: Cleaner template for PRs * `#17442 `__: MAINT: fix exception chaining in format.py * `#17443 `__: ENH: Warn on unsupported Python 3.10+ -* `#17444 `__: ENH: Add ``Typing :: Typed`` to the PyPi classifier +* `#17444 `__: ENH: Add `Typing :: Typed` to the PyPi classifier * `#17445 `__: DOC: Fix the references for macros * `#17447 `__: NEP: update NEP 42 with discussion of type hinting applications * `#17448 `__: DOC: Remove CoC pages from Sphinx @@ -697,16 +699,16 @@ A total of 650 pull requests were merged for this release. 
* `#17468 `__: DOC: add some missing scalar aliases * `#17472 `__: TST: Fix doctest for full_like * `#17473 `__: MAINT: py3k: remove os.fspath and os.PathLike backports -* `#17474 `__: MAINT: Move the ``np.core.numeric`` annotations to their own stub... -* `#17479 `__: ENH: type ``np.unicode_`` as ``np.str_`` +* `#17474 `__: MAINT: Move the `np.core.numeric` annotations to their own stub... +* `#17479 `__: ENH: type np.unicode_ as np.str_ * `#17481 `__: DOC: Fix the entries for members of structures -* `#17483 `__: DOC: Fix the references for ``random.*`` +* `#17483 `__: DOC: Fix the references for `random.*` * `#17485 `__: BLD: circleCI- merge before build, add -n to sphinx * `#17487 `__: MAINT: Remove duplicate placeholder annotations * `#17497 `__: DOC: Use consistent lowercase on docs landing page * `#17498 `__: MAINT: fix incompatible type comparison in numpy.lib.utils.info * `#17501 `__: BUG: Fix failures in master related to userdtype registeration -* `#17502 `__: BUG: remove ``sys`` from the type stubs +* `#17502 `__: BUG: remove `sys` from the type stubs * `#17503 `__: DOC: Fix empty 'C style guide' page * `#17504 `__: DOC: Rename 'Quickstart tutorial' * `#17508 `__: ENH: Added the Final feature for all constants @@ -724,15 +726,15 @@ A total of 650 pull requests were merged for this release. * `#17537 `__: MAINT: Bump hypothesis from 5.37.0 to 5.37.1 * `#17538 `__: MAINT: Bump pydata-sphinx-theme from 0.4.0 to 0.4.1 * `#17539 `__: MAINT: Bump mypy from 0.782 to 0.790 -* `#17540 `__: ENH: Make ``np.number`` generic with respect to its precision +* `#17540 `__: ENH: Make `np.number` generic with respect to its precision * `#17541 `__: CI: fix conditional for PR merge command -* `#17546 `__: MAINT: explicit disabling ``CCompilerOpt`` in F2PY +* `#17546 `__: MAINT: explicit disabling `CCompilerOpt` in F2PY * `#17548 `__: BUG: Cygwin Workaround for #14787 on affected platforms * `#17549 `__: DOC: Fix the entries of C functions * `#17555 `__: DOC: Fix wrong blockquotes * `#17558 `__: DOC: MAINT: Add NEP 43 links to NEP 42 * `#17559 `__: DOC: Remove directives for some constants -* `#17564 `__: MAINT: Update the annotations in ``np.core.numeric`` +* `#17564 `__: MAINT: Update the annotations in `np.core.numeric` * `#17570 `__: DOC: Add the entry for ``NPY_FEATURE_VERSION`` * `#17571 `__: DOC: Fix typos * `#17572 `__: ENH: Add annotations for three new constants @@ -740,27 +742,27 @@ A total of 650 pull requests were merged for this release. * `#17577 `__: BUG: Respect dtype of all-zero argument to poly1d * `#17578 `__: NEP36: include additional feedback * `#17580 `__: MAINT: Cleanup swig for Python 3. -* `#17581 `__: MAINT: Move the ``np.core.numerictypes`` annotations to their own... +* `#17581 `__: MAINT: Move the `np.core.numerictypes` annotations to their own... * `#17583 `__: MAINT: Bump hypothesis from 5.37.1 to 5.37.3 -* `#17584 `__: ENH: Add annotations for ``np.core._type_aliases`` +* `#17584 `__: ENH: Add annotations for `np.core._type_aliases` * `#17594 `__: DOC: Typo in lexsort docstring * `#17596 `__: DEP,BUG: Coercion/cast of array to a subarray dtype will be fixed * `#17597 `__: TST: Clean up the errors of the typing tests * `#17598 `__: BUG: Fixed file handle leak in array_tofile. -* `#17601 `__: TST: Fix a broken ``np.core.numeric`` test +* `#17601 `__: TST: Fix a broken `np.core.numeric` test * `#17603 `__: MAINT: Mark dead code as intentional for clang. 
* `#17607 `__: DOC: removed old references to submodule licenses * `#17608 `__: DOC: Fix typos (general documentation) * `#17610 `__: Fully qualify license trove classifier * `#17611 `__: BUG: mac dylib treated as part of extra objects by f2py -* `#17613 `__: ENH: Add annotations for 9 ``ndarray``/``generic`` magic methods +* `#17613 `__: ENH: Add annotations for 9 `ndarray`/`generic` magic methods * `#17614 `__: DOC: Fix the document for arrays interface * `#17618 `__: MAINT: Conversion of some strings to f-strings * `#17619 `__: DOC: Fix some references * `#17621 `__: TST: Valid docstring for config_py function show() * `#17622 `__: MAINT: Conversion of some strings to fstrings, part II * `#17623 `__: MAINT: Conversion of some strings to fstrings, part III -* `#17624 `__: DOC: Tidy up references to ``str_`` / ``bytes_`` +* `#17624 `__: DOC: Tidy up references to str_ / bytes_ * `#17625 `__: MAINT: Conversion of some strings to fstrings, part iv * `#17627 `__: DOC: Fix the references for ``__array_*__`` * `#17628 `__: DOC: Add entries for macros @@ -771,8 +773,8 @@ A total of 650 pull requests were merged for this release. * `#17639 `__: MAINT: Bump hypothesis from 5.37.3 to 5.38.0 * `#17641 `__: MAINT, BLD: update to OpenBLAS v0.3.12 * `#17642 `__: DOC: Fix reference to atleast_1d -* `#17643 `__: ENH: Add annotations for ``np.core._ufunc_config`` -* `#17644 `__: ENH: Add annotations for ``np.core.shape_base`` +* `#17643 `__: ENH: Add annotations for `np.core._ufunc_config` +* `#17644 `__: ENH: Add annotations for `np.core.shape_base` * `#17645 `__: BUG: fix np.timedelta64('nat').__format__ throwing an exception * `#17654 `__: BUG: f2py incorrectly translates dimension declarations. * `#17655 `__: BLD: Fix installing Numpy on z/OS @@ -833,13 +835,47 @@ A total of 650 pull requests were merged for this release. * `#17829 `__: MAINT: Bump hypothesis from 5.41.2 to 5.41.3 * `#17830 `__: TST: Add back durations flag for DEBUG builds. * `#17832 `__: BUG: Fix subarray dtype used with too large count in fromfile -* `#17833 `__: BUG: Fix pickling of scalars with ``NPY_LISTPICKLE`` -* `#17838 `__: DOC: Update the ``numpy.typing`` documentation +* `#17833 `__: BUG: Fix pickling of scalars with NPY_LISTPICKLE +* `#17838 `__: DOC: Update the `numpy.typing` documentation * `#17841 `__: DOC: Fixing boilerplate code example -* `#17844 `__: MAINT: Add ``__all__`` to ``numpy.typing`` +* `#17844 `__: MAINT: Add ``__all__`` to `numpy.typing` * `#17848 `__: DOC: Add release note for gh-16161. * `#17855 `__: BUG: Fix incorrect C function prototypes/declarations. * `#17857 `__: MAINT: Prepare for the NumPy 1.20.x branch. * `#17869 `__: BUG, TST: use python-version not PYTHON_VERSION -* `#17875 `__: DOC: Prepare for 1.20.0 release * `#17879 `__: BUG: Fix buffer readflag errors and small leaks +* `#17893 `__: DOC: Prepare for 1.20.0 release +* `#17898 `__: MAINT: Remove remaining uses of Python 3.6. +* `#17899 `__: TST: use latest pypy37 not pypy36 +* `#17901 `__: MAINT: clean up a spurious warning in numpy/typing/setup.py +* `#17904 `__: ENH: Speed up default ``where`` in the reduce-like method +* `#17915 `__: TST: remove stray '+' from f-string upgrade +* `#17916 `__: ENH: add support for fujitsu compiler to numpy. 
+* `#17922 `__: BUG: 'bool' object has no attribute 'ndim' +* `#17931 `__: DOC: Update release notes to mention ``type(dtype) is not np.dtype`` +* `#17990 `__: BUG: Replace f-string in setup.py +* `#18015 `__: BUG: Ignore fewer errors during array-coercion +* `#18016 `__: BUG: Fix a MacOS build failure +* `#18017 `__: TST: Fix crosstalk issues with polynomial str tests. +* `#18018 `__: TST: Ensure tests are not sensitive to execution order +* `#18019 `__: BLD: update to OpenBLAS 0.3.13 +* `#18024 `__: DEP: Futurewarn on requiring __len__ on array-likes +* `#18035 `__: BUG: make a variable volatile to work around clang compiler bug +* `#18049 `__: TST: add back sdist test run +* `#18063 `__: BUG: Fix concatenation when the output is "S" or "U" +* `#18064 `__: BLD, BUG: Fix detecting aarch64 on macOS +* `#18068 `__: REL: Prepare for 1.20.0rc2 release. +* `#18108 `__: BUG, BLD: Generate the main dispatcher config header into the... +* `#18120 `__: BUG, SIMD: Fix _simd module build for 64bit ARM/NEON clang +* `#18127 `__: REL: Update 1.20.x after 1.19.5 release. +* `#18130 `__: BUG: Fix promotion of half and string +* `#18146 `__: BUG, MAINT: improve avx512 mask logical operations +* `#18154 `__: BUG: Promotion between strings and objects was assymetric +* `#18192 `__: MAINT: Use explicit reexports for numpy.typing objects +* `#18201 `__: BUG: Keep ignoring most errors during array-protocol lookup +* `#18219 `__: MAINT: random shuffle: warn on unrecognized objects, fix empty... +* `#18231 `__: BLD: update OpenBLAS to af2b0d02 +* `#18237 `__: DOC: Clarify the type alias deprecation message +* `#18257 `__: BUG: Ensure too many advanced indices raises an exception +* `#18258 `__: MAINT: add an 'apt update' +* `#18259 `__: DOC: Prepare for the NumPy 1.20.0 release. diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index 5d3b5f63dbed..b8b7a0c79a6e 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -3,8 +3,8 @@ ========================== NumPy 1.20.0 Release Notes ========================== -This NumPy release is the largest so made to date, some 648 PRs contributed by -182 people have been merged. See the list of highlights below for more details. +This NumPy release is the largest so made to date, some 684 PRs contributed by +184 people have been merged. See the list of highlights below for more details. The Python versions supported for this release are 3.7-3.9, support for Python 3.6 has been dropped. 
Highlights are From 5eadc233eafb80706d0f626caa1c68a747f8ce9b Mon Sep 17 00:00:00 2001 From: mckib2 Date: Sat, 30 Jan 2021 20:07:17 -0700 Subject: [PATCH 0446/1270] BUG: cffi parser ignores __cplusplus guards --- numpy/random/_examples/cffi/parse.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/numpy/random/_examples/cffi/parse.py b/numpy/random/_examples/cffi/parse.py index 73d8646c7c4a..daff6bdece01 100644 --- a/numpy/random/_examples/cffi/parse.py +++ b/numpy/random/_examples/cffi/parse.py @@ -17,11 +17,20 @@ def parse_distributions_h(ffi, inc_dir): continue s.append(line) ffi.cdef('\n'.join(s)) - + with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid: s = [] in_skip = 0 + ignoring = False for line in fid: + # check for and remove extern "C" guards + if ignoring: + if line.strip().startswith('#endif'): + ignoring = False + continue + if line.strip().startswith('#ifdef __cplusplus'): + ignoring = True + # massage the include file if line.strip().startswith('#'): continue From da39ec1322f93732b7a80402e3a1f1d0ddcf3a7b Mon Sep 17 00:00:00 2001 From: Mitchell Faas Date: Sun, 31 Jan 2021 16:21:47 +0100 Subject: [PATCH 0447/1270] BUG: Now allows for all integer types Fixed a bug where precision couldn't be a non-native integer. See comments on pull request #18263 for more info. --- numpy/core/arrayprint.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 2d772c778fb5..ffcddd9b206f 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -41,6 +41,7 @@ from .numerictypes import (longlong, intc, int_, float_, complex_, bool_, flexible) from .overrides import array_function_dispatch, set_module +from operator import index import warnings import contextlib @@ -89,7 +90,9 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if precision is not None: # forbid the bad precision arg as suggested by issue #18254 - if not isinstance(precision, int): + try: + options['precision'] = index(precision) + except TypeError: raise TypeError('precision must be an integer') return options From c47ddc18e490ae0b1a8422b2ce6e2b7b5781f87e Mon Sep 17 00:00:00 2001 From: Mitchell-Faas <35742861+Mitchell-Faas@users.noreply.github.com> Date: Sun, 31 Jan 2021 16:28:47 +0100 Subject: [PATCH 0448/1270] ENH: Improved error traceback in set_printoptions Co-authored-by: Eric Wieser --- numpy/core/arrayprint.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index ffcddd9b206f..7c43d6f756fc 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -92,8 +92,8 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, # forbid the bad precision arg as suggested by issue #18254 try: options['precision'] = index(precision) - except TypeError: - raise TypeError('precision must be an integer') + except TypeError as e: + raise TypeError('precision must be an integer') from e return options From 3e285e43b4e430b29711e53cded86eab00f85db2 Mon Sep 17 00:00:00 2001 From: Mitchell Faas Date: Sun, 31 Jan 2021 17:06:57 +0100 Subject: [PATCH 0449/1270] MAINT: import refactor --- numpy/core/arrayprint.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 7c43d6f756fc..5c1d6cb63c6c 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -41,7 +41,7 @@ from .numerictypes import (longlong, intc, 
int_, float_, complex_, bool_, flexible) from .overrides import array_function_dispatch, set_module -from operator import index +import operator import warnings import contextlib @@ -91,7 +91,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if precision is not None: # forbid the bad precision arg as suggested by issue #18254 try: - options['precision'] = index(precision) + options['precision'] = operator.index(precision) except TypeError as e: raise TypeError('precision must be an integer') from e From 71ac6b78a14206a8e3b8f5f7e55d0af8ef6b4bf5 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Sun, 31 Jan 2021 08:33:32 -0800 Subject: [PATCH 0450/1270] DOC: Nupydoc format space before `:` in Parameters Missing `s` in two spellings. --- numpy/lib/nanfunctions.py | 2 +- numpy/random/_pickle.py | 6 +++--- tools/refguide_check.py | 12 +++++------- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 409016adb7dd..a02ad779f3a7 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -613,7 +613,7 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): -------- numpy.sum : Sum across array propagating NaNs. isnan : Show which elements are NaN. - isfinite: Show which elements are not NaN or +/-inf. + isfinite : Show which elements are not NaN or +/-inf. Notes ----- diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py index 29ff696448fe..71b01d6cd1e5 100644 --- a/numpy/random/_pickle.py +++ b/numpy/random/_pickle.py @@ -19,7 +19,7 @@ def __generator_ctor(bit_generator_name='MT19937'): Parameters ---------- - bit_generator_name: str + bit_generator_name : str String containing the core BitGenerator Returns @@ -42,7 +42,7 @@ def __bit_generator_ctor(bit_generator_name='MT19937'): Parameters ---------- - bit_generator_name: str + bit_generator_name : str String containing the name of the BitGenerator Returns @@ -65,7 +65,7 @@ def __randomstate_ctor(bit_generator_name='MT19937'): Parameters ---------- - bit_generator_name: str + bit_generator_name : str String containing the core BitGenerator Returns diff --git a/tools/refguide_check.py b/tools/refguide_check.py index ddcc1028d5d8..9a6d1c9f85b4 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -166,9 +166,8 @@ def short_path(path, cwd=None): Parameters ---------- - path: str or None - - cwd: str or None + path : str or None + cwd : str or None Returns ------- @@ -344,8 +343,8 @@ def is_deprecated(f): """ Check if module `f` is deprecated - Parameter - --------- + Parameters + ---------- f : ModuleType Returns @@ -785,8 +784,7 @@ def _run_doctests(tests, full_name, verbose, doctest_warnings): full_name : str verbose : bool - - doctest_warning : bool + doctest_warnings : bool Returns ------- From c538032491490e887007fd3f40453905f7b94b3d Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Sun, 31 Jan 2021 14:11:37 -0800 Subject: [PATCH 0451/1270] DOC: Numpydoc warning incorrect underline length. This should make numpydoc complain less. --- numpy/ctypeslib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index e8f7750fe5ff..7d2b3c2a86ea 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -4,7 +4,7 @@ ============================ See Also ---------- +-------- load_library : Load a C library. ndpointer : Array restype/argtype with verification. as_ctypes : Create a ctypes array from an ndarray. 
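An aside on the ``precision`` validation introduced in PATCHes 0443 and 0447-0449 above: the snippet below (plain Python and NumPy behaviour, not code from these patches) shows why ``operator.index`` is a better check than ``isinstance(precision, int)``. It accepts any object implementing ``__index__``, including NumPy integer scalars, while still rejecting non-integral values such as ``1.5``::

    import operator
    import numpy as np

    # NumPy integer scalars are not subclasses of Python's ``int`` ...
    print(isinstance(np.int16(3), int))   # False
    # ... but they implement ``__index__``, so operator.index accepts them.
    print(operator.index(np.int16(3)))    # 3

    # Floats and other non-integral objects are still rejected.
    try:
        operator.index(1.5)
    except TypeError as exc:
        print("rejected:", exc)

This is the same duck-typed notion of "integer-like" that NumPy already uses for indexing, which is why the corresponding stub annotations later move from ``int`` to ``SupportsIndex``.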
From b62ffc5a3584f6f946b7fbc37336d2cd63ea943d Mon Sep 17 00:00:00 2001 From: tautaus Date: Sun, 31 Jan 2021 21:56:46 -0500 Subject: [PATCH 0452/1270] See #15986. Chain exceptions in linalg --- numpy/linalg/lapack_lite/make_lite.py | 4 ++-- numpy/linalg/tests/test_build.py | 4 ++-- numpy/linalg/tests/test_linalg.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py index cf15b2541361..b145f6c4f8b5 100755 --- a/numpy/linalg/lapack_lite/make_lite.py +++ b/numpy/linalg/lapack_lite/make_lite.py @@ -261,8 +261,8 @@ def runF2C(fortran_filename, output_dir): subprocess.check_call( ["f2c"] + F2C_ARGS + ['-d', output_dir, fortran_filename] ) - except subprocess.CalledProcessError: - raise F2CError + except subprocess.CalledProcessError as e: + raise F2CError from e def scrubF2CSource(c_file): with open(c_file) as fo: diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py index 4859226d99a4..868341ff298c 100644 --- a/numpy/linalg/tests/test_build.py +++ b/numpy/linalg/tests/test_build.py @@ -15,8 +15,8 @@ def __init__(self): try: p = Popen(self.cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() - except OSError: - raise RuntimeError(f'command {self.cmd} cannot be run') + except OSError as e: + raise RuntimeError(f'command {self.cmd} cannot be run') from e def get_dependencies(self, lfile): p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 21fab58e1d42..8a270f194147 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -348,10 +348,10 @@ def check_cases(self, require=set(), exclude=set()): try: case.check(self.do) - except Exception: + except Exception as e: msg = f'In test case: {case!r}\n\n' msg += traceback.format_exc() - raise AssertionError(msg) + raise AssertionError(msg) from e class LinalgSquareTestCase(LinalgTestCase): From 94662713ca707cf75120b197c7c7d0e3f2e62221 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Sun, 31 Jan 2021 22:41:35 -0800 Subject: [PATCH 0453/1270] ENH: defer the import of shutil --- numpy/lib/_datasource.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 7a23b165158c..c790a6462a28 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -35,7 +35,6 @@ """ import os -import shutil import io from numpy.core.overrides import set_module @@ -257,6 +256,8 @@ def __init__(self, destpath=os.curdir): def __del__(self): # Remove temp directories if hasattr(self, '_istmpdest') and self._istmpdest: + import shutil + shutil.rmtree(self._destpath) def _iszip(self, filename): @@ -319,8 +320,9 @@ def _cache(self, path): Creates a copy of the file in the datasource cache. """ - # We import these here because importing urllib is slow and + # We import these here because importing them is slow and # a significant fraction of numpy's total import time. + import shutil from urllib.request import urlopen from urllib.error import URLError From f5983b8ad6d8ed0537664e2a36dd35017eb047d5 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 1 Feb 2021 07:33:21 +0000 Subject: [PATCH 0454/1270] MAINT: Bump hypothesis from 6.0.3 to 6.1.1 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.0.3 to 6.1.1. 
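Returning briefly to the exception-chaining changes in PATCHes 0448 and 0452 above: a minimal sketch (hypothetical helper, standard Python 3 semantics) of what ``raise ... from e`` buys. The original exception is attached as ``__cause__``, so the traceback reports "The above exception was the direct cause of the following exception" instead of the misleading "During handling of the above exception, another exception occurred"::

    def parse_int(text):
        # Hypothetical helper mirroring the pattern applied in the patches.
        try:
            return int(text)
        except ValueError as e:
            raise RuntimeError(f"could not parse {text!r}") from e

    try:
        parse_int("not a number")
    except RuntimeError as err:
        # The original ValueError is kept as the explicit cause.
        print(type(err.__cause__).__name__)   # ValueError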
- [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.0.3...hypothesis-python-6.1.1) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index a0956a4b061e..3f97ce5253ee 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.0.3 +hypothesis==6.1.1 pytest==6.2.1 pytz==2020.5 pytest-cov==2.11.1 From 8936d5b5050fcc196bfbc058c9eb70f9cdacdd56 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 1 Feb 2021 07:35:06 +0000 Subject: [PATCH 0455/1270] MAINT: Bump pydata-sphinx-theme from 0.4.2 to 0.4.3 Bumps [pydata-sphinx-theme](https://github.com/pandas-dev/pydata-sphinx-theme) from 0.4.2 to 0.4.3. - [Release notes](https://github.com/pandas-dev/pydata-sphinx-theme/releases) - [Changelog](https://github.com/pydata/pydata-sphinx-theme/blob/master/docs/changelog.rst) - [Commits](https://github.com/pandas-dev/pydata-sphinx-theme/compare/v0.4.2...v0.4.3) Signed-off-by: dependabot-preview[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 26be985bbf4c..adcd6bce9986 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -4,4 +4,4 @@ ipython scipy matplotlib pandas -pydata-sphinx-theme==0.4.2 +pydata-sphinx-theme==0.4.3 From 472e5e01be709a0b40a727f97208a8222c4b28ef Mon Sep 17 00:00:00 2001 From: Mitchell Faas Date: Mon, 1 Feb 2021 12:35:15 +0100 Subject: [PATCH 0456/1270] MAINT: Updated annotations --- numpy/core/arrayprint.pyi | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi index b2565b3451bd..d2a5fdef963f 100644 --- a/numpy/core/arrayprint.pyi +++ b/numpy/core/arrayprint.pyi @@ -24,9 +24,9 @@ from numpy import ( from numpy.typing import ArrayLike, _CharLike_co, _FloatLike_co if sys.version_info > (3, 8): - from typing import Literal, TypedDict + from typing import Literal, TypedDict, SupportsIndex else: - from typing_extensions import Literal, TypedDict + from typing_extensions import Literal, TypedDict, SupportsIndex _FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] @@ -62,7 +62,7 @@ class _FormatOptions(TypedDict): legacy: Literal[False, "1.13"] def set_printoptions( - precision: Optional[int] = ..., + precision: Optional[SupportsIndex] = ..., threshold: Optional[int] = ..., edgeitems: Optional[int] = ..., linewidth: Optional[int] = ..., @@ -79,7 +79,7 @@ def get_printoptions() -> _FormatOptions: ... def array2string( a: ndarray[Any, Any], max_line_width: Optional[int] = ..., - precision: Optional[int] = ..., + precision: Optional[SupportsIndex] = ..., suppress_small: Optional[bool] = ..., separator: str = ..., prefix: str = ..., @@ -117,20 +117,20 @@ def format_float_positional( def array_repr( arr: ndarray[Any, Any], max_line_width: Optional[int] = ..., - precision: Optional[int] = ..., + precision: Optional[SupportsIndex] = ..., suppress_small: Optional[bool] = ..., ) -> str: ... def array_str( a: ndarray[Any, Any], max_line_width: Optional[int] = ..., - precision: Optional[int] = ..., + precision: Optional[SupportsIndex] = ..., suppress_small: Optional[bool] = ..., ) -> str: ... 
def set_string_function( f: Optional[Callable[[ndarray[Any, Any]], str]], repr: bool = ... ) -> None: ... def printoptions( - precision: Optional[int] = ..., + precision: Optional[SupportsIndex] = ..., threshold: Optional[int] = ..., edgeitems: Optional[int] = ..., linewidth: Optional[int] = ..., From 71db8aac852135a0014688085d1a9f4e3d34490d Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 1 Feb 2021 13:49:42 +0000 Subject: [PATCH 0457/1270] MAINT: Bump pytest from 6.2.1 to 6.2.2 Bumps [pytest](https://github.com/pytest-dev/pytest) from 6.2.1 to 6.2.2. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/master/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/6.2.1...6.2.2) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 3f97ce5253ee..70bc2a365467 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -2,7 +2,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 hypothesis==6.1.1 -pytest==6.2.1 +pytest==6.2.2 pytz==2020.5 pytest-cov==2.11.1 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' From 044550d603b0d7bbe201d703354dd4857bb3606d Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Mon, 1 Feb 2021 09:01:17 -0700 Subject: [PATCH 0458/1270] MAINT: gracefully shuffle memoryviews * allow graceful shuffling of memoryviews, with same behavior as arrays, instead of producing a warning on `memoryview` shuffle --- numpy/random/_generator.pyx | 3 +++ numpy/random/mtrand.pyx | 3 +++ numpy/random/tests/test_random.py | 15 +++++++++++++++ 3 files changed, 21 insertions(+) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 0a41f13b6d42..2976429402a9 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -4398,6 +4398,9 @@ cdef class Generator: char* x_ptr char* buf_ptr + if isinstance(x, memoryview): + x = np.asarray(x) + axis = normalize_axis_index(axis, np.ndim(x)) if type(x) is np.ndarray and x.ndim == 1 and x.size: diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 814630c034b0..daab2c6f10fc 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4436,6 +4436,9 @@ cdef class RandomState: char* x_ptr char* buf_ptr + if isinstance(x, memoryview): + x = np.asarray(x) + if type(x) is np.ndarray and x.ndim == 1 and x.size: # Fast, statically typed path: shuffle the underlying buffer. 
# Only for non-empty, 1d objects of class ndarray (subclasses such diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index c13fc39e3339..5f8b39ef94fe 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -510,6 +510,21 @@ def test_shuffle_masked(self): assert_equal( sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + def test_shuffle_memoryview(self): + # gh-18273 + # allow graceful handling of memoryviews + # (treat the same as arrays) + np.random.seed(self.seed) + a = np.arange(5).data + np.random.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 4, 3, 2]) + rng = np.random.RandomState(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 2, 3, 4]) + rng = np.random.default_rng(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [4, 1, 0, 3, 2]) + def test_beta(self): np.random.seed(self.seed) actual = np.random.beta(.1, .9, size=(3, 2)) From 709b6713502792438d0569f737f3a2d78dbeb74b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 1 Feb 2021 17:13:45 +0100 Subject: [PATCH 0459/1270] ENH: Add annotations for the remaining `np.generic` aliases --- numpy/__init__.pyi | 18 +++++++++++------- numpy/typing/tests/data/pass/scalars.py | 5 +++++ numpy/typing/tests/data/reveal/scalars.py | 5 +++++ 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fe9dc5914c48..9801464bd792 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -335,7 +335,6 @@ bincount: Any bitwise_not: Any blackman: Any bmat: Any -bool8: Any broadcast: Any broadcast_arrays: Any broadcast_to: Any @@ -343,7 +342,6 @@ busday_count: Any busday_offset: Any busdaycalendar: Any byte_bounds: Any -bytes0: Any c_: Any can_cast: Any cast: Any @@ -351,7 +349,6 @@ chararray: Any column_stack: Any common_type: Any compare_chararrays: Any -complex256: Any concatenate: Any conj: Any copy: Any @@ -387,7 +384,6 @@ fix: Any flip: Any fliplr: Any flipud: Any -float128: Any format_parser: Any frombuffer: Any fromfile: Any @@ -471,7 +467,6 @@ nditer: Any nested_iters: Any newaxis: Any numarray: Any -object0: Any ogrid: Any packbits: Any pad: Any @@ -522,7 +517,6 @@ sinc: Any sort_complex: Any source: Any split: Any -string_: Any take_along_axis: Any tile: Any trapz: Any @@ -545,7 +539,6 @@ unwrap: Any vander: Any vdot: Any vectorize: Any -void0: Any vsplit: Any where: Any who: Any @@ -1667,6 +1660,8 @@ class bool_(generic): __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] +bool8 = bool_ + class object_(generic): def __init__(self, __value: object = ...) -> None: ... @property @@ -1674,6 +1669,8 @@ class object_(generic): @property def imag(self: _ArraySelf) -> _ArraySelf: ... +object0 = object_ + class datetime64(generic): @overload def __init__( @@ -1869,6 +1866,7 @@ class floating(inexact[_NBit1]): float16 = floating[_16Bit] float32 = floating[_32Bit] float64 = floating[_64Bit] +float128 = floating[_128Bit] half = floating[_NBitHalf] single = floating[_NBitSingle] @@ -1903,6 +1901,7 @@ class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): complex64 = complexfloating[_32Bit, _32Bit] complex128 = complexfloating[_64Bit, _64Bit] +complex256 = complexfloating[_128Bit, _128Bit] csingle = complexfloating[_NBitSingle, _NBitSingle] singlecomplex = complexfloating[_NBitSingle, _NBitSingle] @@ -1925,6 +1924,8 @@ class void(flexible): self, val: ArrayLike, dtype: DTypeLike, offset: int = ... ) -> None: ... 
+void0 = void + class character(flexible): # type: ignore def __int__(self) -> int: ... def __float__(self) -> float: ... @@ -1940,6 +1941,9 @@ class bytes_(character, bytes): self, __value: str, encoding: str = ..., errors: str = ... ) -> None: ... +string_ = bytes_ +bytes0 = bytes_ + class str_(character, str): @overload def __init__(self, __value: object = ...) -> None: ... diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 2f2643e8eb33..c3f4ddbccd3f 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -166,6 +166,11 @@ def __float__(self) -> float: # Aliases np.str0() +np.bool8() +np.bytes0() +np.string_() +np.object0() +np.void0(0) np.byte() np.short() diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py index faa7ac3d2883..fa94aa49b974 100644 --- a/numpy/typing/tests/data/reveal/scalars.py +++ b/numpy/typing/tests/data/reveal/scalars.py @@ -30,6 +30,11 @@ # Aliases reveal_type(np.unicode_()) # E: numpy.str_ reveal_type(np.str0()) # E: numpy.str_ +reveal_type(np.bool8()) # E: numpy.bool_ +reveal_type(np.bytes0()) # E: numpy.bytes_ +reveal_type(np.string_()) # E: numpy.bytes_ +reveal_type(np.object0()) # E: numpy.object_ +reveal_type(np.void0(0)) # E: numpy.void reveal_type(np.byte()) # E: {byte} reveal_type(np.short()) # E: {short} From 234f6d705fe0e2787c9238ca13a3ec80a474aaa5 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 1 Feb 2021 17:53:19 +0100 Subject: [PATCH 0460/1270] TST: Pin `typing_extensions` to the latest version --- test_requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 70bc2a365467..b7b51f2e22f6 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -11,5 +11,6 @@ cffi # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # - Mypy doesn't currently work on Python 3.9 +# - There is no point in installing typing_extensions without mypy mypy==0.800; platform_python_implementation != "PyPy" -typing_extensions +typing_extensions==3.7.4.3; platform_python_implementation != "PyPy" From 5639e0ec8d7b8fc96fe44b1cf35a8bf77413791f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 1 Feb 2021 18:00:18 +0100 Subject: [PATCH 0461/1270] DOC: Removed an outdated comment --- test_requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index b7b51f2e22f6..05788d7dbf62 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -10,7 +10,6 @@ pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' cffi # For testing types. 
Notes on the restrictions: # - Mypy relies on C API features not present in PyPy -# - Mypy doesn't currently work on Python 3.9 # - There is no point in installing typing_extensions without mypy mypy==0.800; platform_python_implementation != "PyPy" typing_extensions==3.7.4.3; platform_python_implementation != "PyPy" From df45cf1f6bdf64b5438a862a5e195198ea8c9710 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 1 Feb 2021 17:56:41 +0100 Subject: [PATCH 0462/1270] MAINT: Import `SupportsIndex` from typing-extensions These days `SupportsIndex` is available in typing-extensions (and recognized by mypy) --- numpy/__init__.pyi | 4 +--- numpy/core/function_base.pyi | 5 +---- numpy/core/shape_base.pyi | 4 +--- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fe9dc5914c48..95517790d505 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -153,9 +153,7 @@ from typing import ( if sys.version_info >= (3, 8): from typing import Literal, Protocol, SupportsIndex, Final else: - from typing_extensions import Literal, Protocol, Final - class SupportsIndex(Protocol): - def __index__(self) -> int: ... + from typing_extensions import Literal, Protocol, SupportsIndex, Final # Ensures that the stubs are picked up from numpy import ( diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi index d353543f6d47..d4543f28141b 100644 --- a/numpy/core/function_base.pyi +++ b/numpy/core/function_base.pyi @@ -7,10 +7,7 @@ from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co if sys.version_info >= (3, 8): from typing import SupportsIndex, Literal else: - from typing_extensions import Literal, Protocol - - class SupportsIndex(Protocol): - def __index__(self) -> int: ... + from typing_extensions import SupportsIndex, Literal # TODO: wait for support for recursive types _ArrayLikeNested = Sequence[Sequence[Any]] diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi index b20598b1ab5b..ec40a88143b1 100644 --- a/numpy/core/shape_base.pyi +++ b/numpy/core/shape_base.pyi @@ -7,9 +7,7 @@ from numpy.typing import ArrayLike if sys.version_info >= (3, 8): from typing import SupportsIndex else: - from typing_extensions import Protocol - class SupportsIndex(Protocol): - def __index__(self) -> int: ... 
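Illustrative aside, not part of any patch in this series: the `SupportsIndex` protocol handled in this hunk is satisfied by any object whose `__index__` method returns an `int` (the stub removed just above spells out exactly that), and the `printoptions` stub earlier in this section annotates `precision` with it. A minimal sketch using only the standard library; the `Width` class is invented purely for illustration:

    import operator

    class Width:
        # Any object implementing __index__ satisfies SupportsIndex.
        def __index__(self) -> int:
            return 3

    assert operator.index(Width()) == 3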
+ from typing_extensions import SupportsIndex _ArrayType = TypeVar("_ArrayType", bound=ndarray) From 12d99670040c265a615694fd3b8ae1492807c93f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 1 Feb 2021 16:21:59 +0100 Subject: [PATCH 0463/1270] MAINT: Add missing `__all__` annotations --- numpy/char.pyi | 4 +++- numpy/ctypeslib.pyi | 4 +++- numpy/emath.pyi | 4 +++- numpy/f2py/__init__.pyi | 4 +++- numpy/lib/__init__.pyi | 4 +++- numpy/ma/__init__.pyi | 4 +++- numpy/matrixlib/__init__.pyi | 4 +++- numpy/random/__init__.pyi | 4 +++- numpy/rec.pyi | 4 +++- numpy/testing/__init__.pyi | 4 +++- numpy/typing/tests/data/pass/modules.py | 13 ++++++++++++- numpy/typing/tests/data/reveal/modules.py | 13 ++++++++++++- 12 files changed, 54 insertions(+), 12 deletions(-) diff --git a/numpy/char.pyi b/numpy/char.pyi index 0e7342c0bf3b..0d5448298dd6 100644 --- a/numpy/char.pyi +++ b/numpy/char.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] equal: Any not_equal: Any diff --git a/numpy/ctypeslib.pyi b/numpy/ctypeslib.pyi index cacc97d68ea7..54089c881056 100644 --- a/numpy/ctypeslib.pyi +++ b/numpy/ctypeslib.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] load_library: Any ndpointer: Any diff --git a/numpy/emath.pyi b/numpy/emath.pyi index 032ec9505ad1..5aae84b6c1c6 100644 --- a/numpy/emath.pyi +++ b/numpy/emath.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] sqrt: Any log: Any diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi index 602517957ca3..50594c1e3667 100644 --- a/numpy/f2py/__init__.pyi +++ b/numpy/f2py/__init__.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] run_main: Any compile: Any diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 413e2ae1b5d0..0d0e4c2e12d3 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] emath: Any math: Any diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi index d1259abcce73..66dfe40de6a5 100644 --- a/numpy/ma/__init__.pyi +++ b/numpy/ma/__init__.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] core: Any extras: Any diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi index b240bb327a13..b9005c4aa4c9 100644 --- a/numpy/matrixlib/__init__.pyi +++ b/numpy/matrixlib/__init__.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] matrix: Any bmat: Any diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index f7c3cfafe2cf..bd5ece536acb 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] beta: Any binomial: Any diff --git a/numpy/rec.pyi b/numpy/rec.pyi index c70ee5374ec4..849ccd0221fc 100644 --- a/numpy/rec.pyi +++ b/numpy/rec.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] record: Any recarray: Any diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index c394a387d5df..7dad2c9db0dd 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] assert_equal: Any assert_almost_equal: Any diff --git a/numpy/typing/tests/data/pass/modules.py 
b/numpy/typing/tests/data/pass/modules.py index 013841ae74d9..2fdb69eb3eab 100644 --- a/numpy/typing/tests/data/pass/modules.py +++ b/numpy/typing/tests/data/pass/modules.py @@ -1,4 +1,5 @@ import numpy as np +from numpy import f2py np.char np.ctypeslib @@ -14,7 +15,17 @@ np.testing np.version -np.__all__ np.__path__ np.__version__ np.__git_version__ + +np.__all__ +np.char.__all__ +np.ctypeslib.__all__ +np.emath.__all__ +np.lib.__all__ +np.ma.__all__ +np.random.__all__ +np.rec.__all__ +np.testing.__all__ +f2py.__all__ diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py index 8e4eab50f3b2..75513f2b08c9 100644 --- a/numpy/typing/tests/data/reveal/modules.py +++ b/numpy/typing/tests/data/reveal/modules.py @@ -1,4 +1,5 @@ import numpy as np +from numpy import f2py reveal_type(np) # E: ModuleType @@ -19,7 +20,17 @@ # TODO: Remove when annotations have been added to `np.testing.assert_equal` reveal_type(np.testing.assert_equal) # E: Any -reveal_type(np.__all__) # E: list[builtins.str] reveal_type(np.__path__) # E: list[builtins.str] reveal_type(np.__version__) # E: str reveal_type(np.__git_version__) # E: str + +reveal_type(np.__all__) # E: list[builtins.str] +reveal_type(np.char.__all__) # E: list[builtins.str] +reveal_type(np.ctypeslib.__all__) # E: list[builtins.str] +reveal_type(np.emath.__all__) # E: list[builtins.str] +reveal_type(np.lib.__all__) # E: list[builtins.str] +reveal_type(np.ma.__all__) # E: list[builtins.str] +reveal_type(np.random.__all__) # E: list[builtins.str] +reveal_type(np.rec.__all__) # E: list[builtins.str] +reveal_type(np.testing.__all__) # E: list[builtins.str] +reveal_type(f2py.__all__) # E: list[builtins.str] From c21509a595eea2cf9d64893a58adb5abcce68bb1 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 1 Feb 2021 16:31:16 +0100 Subject: [PATCH 0464/1270] MAINT: Add previously-missing placeholder annotations --- numpy/char.pyi | 1 + numpy/ctypeslib.pyi | 1 + numpy/lib/__init__.pyi | 1 + numpy/rec.pyi | 5 +++++ 4 files changed, 8 insertions(+) diff --git a/numpy/char.pyi b/numpy/char.pyi index 0d5448298dd6..0e3596bb278b 100644 --- a/numpy/char.pyi +++ b/numpy/char.pyi @@ -53,3 +53,4 @@ isnumeric: Any isdecimal: Any array: Any asarray: Any +chararray: Any diff --git a/numpy/ctypeslib.pyi b/numpy/ctypeslib.pyi index 54089c881056..125c20f8975a 100644 --- a/numpy/ctypeslib.pyi +++ b/numpy/ctypeslib.pyi @@ -7,3 +7,4 @@ ndpointer: Any c_intp: Any as_ctypes: Any as_array: Any +as_ctypes_type: Any diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 0d0e4c2e12d3..a8eb242074b6 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -177,3 +177,4 @@ nanquantile: Any histogram: Any histogramdd: Any histogram_bin_edges: Any +NumpyVersion: Any diff --git a/numpy/rec.pyi b/numpy/rec.pyi index 849ccd0221fc..883e2dd5b7f2 100644 --- a/numpy/rec.pyi +++ b/numpy/rec.pyi @@ -5,3 +5,8 @@ __all__: List[str] record: Any recarray: Any format_parser: Any +fromarrays: Any +fromrecords: Any +fromstring: Any +fromfile: Any +array: Any From 0f3681485b3fc9761e2b9db4b79635c0e7413bf2 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 1 Feb 2021 16:31:34 +0100 Subject: [PATCH 0465/1270] MAINT: Add `as_ctypes_type` to `np.ctypeslib.__all__` --- numpy/ctypeslib.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 7d2b3c2a86ea..dbc683a6b24b 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -49,7 +49,8 @@ >>> _lib.foo_func(out, len(out)) #doctest: 
+SKIP """ -__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array'] +__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array', + 'as_ctypes_type'] import os from numpy import ( From 0659217f1aee689c2e47d531ca2e26b5dc2ba224 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 1 Feb 2021 16:35:53 +0100 Subject: [PATCH 0466/1270] MAINT: Add previously missing objects to `np.rec.__all__` * `fromarrays` * `fromrecords` * `fromstring` * `fromfile` * `array` --- numpy/core/__init__.py | 4 ++-- numpy/core/records.py | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index e8d3a381b602..f22c86f59c4a 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -75,7 +75,7 @@ from .fromnumeric import * from . import defchararray as char from . import records as rec -from .records import * +from .records import record, recarray, format_parser from .memmap import * from .defchararray import chararray from . import function_base @@ -106,7 +106,7 @@ __all__ = ['char', 'rec', 'memmap'] __all__ += numeric.__all__ __all__ += fromnumeric.__all__ -__all__ += rec.__all__ +__all__ += ['record', 'recarray', 'format_parser'] __all__ += ['chararray'] __all__ += function_base.__all__ __all__ += machar.__all__ diff --git a/numpy/core/records.py b/numpy/core/records.py index 708c11f69653..a626a05893d8 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -45,7 +45,10 @@ from .arrayprint import get_printoptions # All of the functions allow formats to be a dtype -__all__ = ['record', 'recarray', 'format_parser'] +__all__ = [ + 'record', 'recarray', 'format_parser', + 'fromarrays', 'fromrecords', 'fromstring', 'fromfile', 'array', +] ndarray = sb.ndarray From c4ce5978362443433ee95bd34b3d68d2c9c1cc8a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 1 Feb 2021 14:41:55 -0600 Subject: [PATCH 0467/1270] MAINT: Move transferdata into buffer-wise struct This hopefully slightly simplifies the NpyIter struct in order to be able to add a new "Context" more easily. 
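As a rough illustration of the layout change described above (purely a Python analogy, not the actual C code; every name below is invented for the sketch): the old design kept four parallel per-operand arrays for the read/write transfer functions and their auxdata, while the new design groups them into one record per operand.

    from dataclasses import dataclass, field
    from typing import Any, List

    nop = 3  # number of operands, arbitrary for this sketch

    # Before: four parallel arrays, indexed separately and kept in sync by hand.
    read_funcs: List[Any] = [None] * nop
    read_auxdata: List[Any] = [None] * nop
    write_funcs: List[Any] = [None] * nop
    write_auxdata: List[Any] = [None] * nop

    # After: a single per-operand record that carries the same information.
    @dataclass
    class TransferFunction:
        func: Any = None
        auxdata: Any = None

    @dataclass
    class TransferInfo:
        read: TransferFunction = field(default_factory=TransferFunction)
        write: TransferFunction = field(default_factory=TransferFunction)

    transferinfo = [TransferInfo() for _ in range(nop)]
    # transferinfo[iop].read.func now plays the role of read_funcs[iop].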
--- numpy/core/src/multiarray/nditer_api.c | 55 ++++----- numpy/core/src/multiarray/nditer_constr.c | 140 ++++++++++------------ numpy/core/src/multiarray/nditer_impl.h | 34 ++++-- 3 files changed, 108 insertions(+), 121 deletions(-) diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index 059f2c437b15..fc80c1f11fd4 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -1557,6 +1557,8 @@ NpyIter_DebugPrint(NpyIter *iter) if (itflags&NPY_ITFLAG_BUFFER) { NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); + NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); + printf("| BufferData:\n"); printf("| BufferSize: %d\n", (int)NBF_BUFFERSIZE(bufferdata)); printf("| Size: %d\n", (int)NBF_SIZE(bufferdata)); @@ -1598,19 +1600,19 @@ NpyIter_DebugPrint(NpyIter *iter) } printf("| ReadTransferFn: "); for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_READTRANSFERFN(bufferdata)[iop]); + printf("%p ", (void *)transferinfo[iop].read.func); printf("\n"); printf("| ReadTransferData: "); for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_READTRANSFERDATA(bufferdata)[iop]); + printf("%p ", (void *)transferinfo[iop].read.auxdata); printf("\n"); printf("| WriteTransferFn: "); for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_WRITETRANSFERFN(bufferdata)[iop]); + printf("%p ", (void *)transferinfo[iop].write.func); printf("\n"); printf("| WriteTransferData: "); for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_WRITETRANSFERDATA(bufferdata)[iop]); + printf("%p ", (void *)transferinfo[iop].write.auxdata); printf("\n"); printf("| Buffers: "); for (iop = 0; iop < nop; ++iop) @@ -1889,9 +1891,6 @@ npyiter_copy_from_buffers(NpyIter *iter) npy_intp reduce_outerdim = 0; npy_intp *reduce_outerstrides = NULL; - PyArray_StridedUnaryOp *stransfer = NULL; - NpyAuxData *transferdata = NULL; - npy_intp axisdata_incr = NIT_AXISDATA_SIZEOF(itflags, ndim, nop) / NPY_SIZEOF_INTP; @@ -1909,9 +1908,8 @@ npyiter_copy_from_buffers(NpyIter *iter) transfersize *= NBF_REDUCE_OUTERSIZE(bufferdata); } + NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); for (iop = 0; iop < nop; ++iop) { - stransfer = NBF_WRITETRANSFERFN(bufferdata)[iop]; - transferdata = NBF_WRITETRANSFERDATA(bufferdata)[iop]; buffer = buffers[iop]; /* * Copy the data back to the arrays. If the type has refs, @@ -1920,7 +1918,7 @@ npyiter_copy_from_buffers(NpyIter *iter) * The flag USINGBUFFER is set when the buffer was used, so * only copy back when this flag is on. 
*/ - if ((stransfer != NULL) && + if ((transferinfo[iop].write.func != NULL) && (op_itflags[iop]&(NPY_OP_ITFLAG_WRITE|NPY_OP_ITFLAG_USINGBUFFER)) == (NPY_OP_ITFLAG_WRITE|NPY_OP_ITFLAG_USINGBUFFER)) { npy_intp op_transfersize; @@ -2011,8 +2009,8 @@ npyiter_copy_from_buffers(NpyIter *iter) dst_coords, axisdata_incr, dst_shape, axisdata_incr, op_transfersize, dtypes[iop]->elsize, - (PyArray_MaskedStridedUnaryOp *)stransfer, - transferdata) < 0) { + (PyArray_MaskedStridedUnaryOp *)transferinfo[iop].write.func, + transferinfo[iop].write.auxdata) < 0) { return -1; } } @@ -2024,8 +2022,8 @@ npyiter_copy_from_buffers(NpyIter *iter) dst_coords, axisdata_incr, dst_shape, axisdata_incr, op_transfersize, dtypes[iop]->elsize, - stransfer, - transferdata) < 0) { + transferinfo[iop].write.func, + transferinfo[iop].write.auxdata) < 0) { return -1; } } @@ -2037,14 +2035,15 @@ npyiter_copy_from_buffers(NpyIter *iter) * The flag USINGBUFFER is set when the buffer was used, so * only decrement refs when this flag is on. */ - else if (stransfer != NULL && + else if (transferinfo[iop].write.func != NULL && (op_itflags[iop]&NPY_OP_ITFLAG_USINGBUFFER) != 0) { NPY_IT_DBG_PRINT1("Iterator: Freeing refs and zeroing buffer " "of operand %d\n", (int)iop); /* Decrement refs */ - if (stransfer(NULL, 0, buffer, dtypes[iop]->elsize, - transfersize, dtypes[iop]->elsize, - transferdata) < 0) { + if (transferinfo[iop].write.func( + NULL, 0, buffer, dtypes[iop]->elsize, + transfersize, dtypes[iop]->elsize, + transferinfo[iop].write.auxdata) < 0) { /* Since this should only decrement, it should never error */ assert(0); return -1; @@ -2094,9 +2093,6 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) npy_intp *reduce_outerstrides = NULL; char **reduce_outerptrs = NULL; - PyArray_StridedUnaryOp *stransfer = NULL; - NpyAuxData *transferdata = NULL; - /* * Have to get this flag before npyiter_checkreducesize sets * it for the next iteration. 
@@ -2207,13 +2203,14 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) is_onestride = 1; } + NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); for (iop = 0; iop < nop; ++iop) { /* * If the buffer is write-only, these two are NULL, and the buffer * pointers will be set up but the read copy won't be done */ - stransfer = NBF_READTRANSFERFN(bufferdata)[iop]; - transferdata = NBF_READTRANSFERDATA(bufferdata)[iop]; + PyArray_StridedUnaryOp *stransfer = transferinfo[iop].read.func; + switch (op_itflags[iop]& (NPY_OP_ITFLAG_BUFNEVER| NPY_OP_ITFLAG_CAST| @@ -2554,12 +2551,12 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) (int)iop, (int)op_transfersize); if (PyArray_TransferNDimToStrided( - ndim_transfer, ptrs[iop], dst_stride, - ad_ptrs[iop], src_strides, axisdata_incr, - src_coords, axisdata_incr, - src_shape, axisdata_incr, - op_transfersize, src_itemsize, - stransfer, transferdata) < 0) { + ndim_transfer, ptrs[iop], dst_stride, + ad_ptrs[iop], src_strides, axisdata_incr, + src_coords, axisdata_incr, + src_shape, axisdata_incr, + op_transfersize, src_itemsize, + stransfer, transferinfo[iop].read.auxdata) < 0) { return -1; } } diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c index b379a28ac3ae..dcf8f412f365 100644 --- a/numpy/core/src/multiarray/nditer_constr.c +++ b/numpy/core/src/multiarray/nditer_constr.c @@ -235,8 +235,8 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags, NBF_SIZE(bufferdata) = 0; memset(NBF_BUFFERS(bufferdata), 0, nop*NPY_SIZEOF_INTP); memset(NBF_PTRS(bufferdata), 0, nop*NPY_SIZEOF_INTP); - memset(NBF_READTRANSFERDATA(bufferdata), 0, nop*NPY_SIZEOF_INTP); - memset(NBF_WRITETRANSFERDATA(bufferdata), 0, nop*NPY_SIZEOF_INTP); + /* Ensure that the transferdata/auxdata is NULLed */ + memset(NBF_TRANSFERINFO(bufferdata), 0, nop * sizeof(NpyIter_TransferInfo)); } /* Fill in the AXISDATA arrays and set the ITERSIZE field */ @@ -577,13 +577,11 @@ NpyIter_Copy(NpyIter *iter) NpyIter_BufferData *bufferdata; npy_intp buffersize, itemsize; char **buffers; - NpyAuxData **readtransferdata, **writetransferdata; bufferdata = NIT_BUFFERDATA(newiter); buffers = NBF_BUFFERS(bufferdata); - readtransferdata = NBF_READTRANSFERDATA(bufferdata); - writetransferdata = NBF_WRITETRANSFERDATA(bufferdata); buffersize = NBF_BUFFERSIZE(bufferdata); + NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); for (iop = 0; iop < nop; ++iop) { if (buffers[iop] != NULL) { @@ -599,27 +597,27 @@ NpyIter_Copy(NpyIter *iter) } } - if (readtransferdata[iop] != NULL) { + if (transferinfo[iop].read.auxdata != NULL) { if (out_of_memory) { - readtransferdata[iop] = NULL; + transferinfo[iop].read.auxdata = NULL; } else { - readtransferdata[iop] = - NPY_AUXDATA_CLONE(readtransferdata[iop]); - if (readtransferdata[iop] == NULL) { + transferinfo[iop].read.auxdata = + NPY_AUXDATA_CLONE(transferinfo[iop].read.auxdata); + if (transferinfo[iop].read.auxdata == NULL) { out_of_memory = 1; } } } - if (writetransferdata[iop] != NULL) { + if (transferinfo[iop].write.auxdata != NULL) { if (out_of_memory) { - writetransferdata[iop] = NULL; + transferinfo[iop].write.auxdata = NULL; } else { - writetransferdata[iop] = - NPY_AUXDATA_CLONE(writetransferdata[iop]); - if (writetransferdata[iop] == NULL) { + transferinfo[iop].write.auxdata = + NPY_AUXDATA_CLONE(transferinfo[iop].write.auxdata); + if (transferinfo[iop].write.auxdata == NULL) { out_of_memory = 1; } } @@ -688,25 +686,21 @@ NpyIter_Deallocate(NpyIter 
*iter) NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); char **buffers; - NpyAuxData **transferdata; /* buffers */ buffers = NBF_BUFFERS(bufferdata); for (iop = 0; iop < nop; ++iop, ++buffers) { PyArray_free(*buffers); } + + NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); /* read bufferdata */ - transferdata = NBF_READTRANSFERDATA(bufferdata); - for(iop = 0; iop < nop; ++iop, ++transferdata) { - if (*transferdata) { - NPY_AUXDATA_FREE(*transferdata); + for (iop = 0; iop < nop; ++iop, ++transferinfo) { + if (transferinfo->read.auxdata) { + NPY_AUXDATA_FREE(transferinfo->read.auxdata); } - } - /* write bufferdata */ - transferdata = NBF_WRITETRANSFERDATA(bufferdata); - for(iop = 0; iop < nop; ++iop, ++transferdata) { - if (*transferdata) { - NPY_AUXDATA_FREE(*transferdata); + if (transferinfo->write.auxdata) { + NPY_AUXDATA_FREE(transferinfo->write.auxdata); } } } @@ -3142,13 +3136,8 @@ npyiter_allocate_transfer_functions(NpyIter *iter) PyArrayObject **op = NIT_OPERANDS(iter); PyArray_Descr **op_dtype = NIT_DTYPES(iter); npy_intp *strides = NAD_STRIDES(axisdata), op_stride; - PyArray_StridedUnaryOp **readtransferfn = NBF_READTRANSFERFN(bufferdata), - **writetransferfn = NBF_WRITETRANSFERFN(bufferdata); - NpyAuxData **readtransferdata = NBF_READTRANSFERDATA(bufferdata), - **writetransferdata = NBF_WRITETRANSFERDATA(bufferdata); + NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); - PyArray_StridedUnaryOp *stransfer = NULL; - NpyAuxData *transferdata = NULL; int needs_api = 0; for (iop = 0; iop < nop; ++iop) { @@ -3174,17 +3163,15 @@ npyiter_allocate_transfer_functions(NpyIter *iter) PyArray_DESCR(op[iop]), op_dtype[iop], move_references, - &stransfer, - &transferdata, + &transferinfo[iop].read.func, + &transferinfo[iop].read.auxdata, &needs_api) != NPY_SUCCEED) { iop -= 1; /* This one cannot be cleaned up yet. */ goto fail; } - readtransferfn[iop] = stransfer; - readtransferdata[iop] = transferdata; } else { - readtransferfn[iop] = NULL; + transferinfo[iop].read.func = NULL; } if (flags & NPY_OP_ITFLAG_WRITE) { int move_references = 1; @@ -3200,38 +3187,35 @@ npyiter_allocate_transfer_functions(NpyIter *iter) * could be inconsistent. */ if (PyArray_GetMaskedDTypeTransferFunction( - (flags & NPY_OP_ITFLAG_ALIGNED) != 0, - op_dtype[iop]->elsize, - op_stride, - (strides[maskop] == mask_dtype->elsize) ? - mask_dtype->elsize : - NPY_MAX_INTP, - op_dtype[iop], - PyArray_DESCR(op[iop]), - mask_dtype, - move_references, - (PyArray_MaskedStridedUnaryOp **)&stransfer, - &transferdata, - &needs_api) != NPY_SUCCEED) { + (flags & NPY_OP_ITFLAG_ALIGNED) != 0, + op_dtype[iop]->elsize, + op_stride, + (strides[maskop] == mask_dtype->elsize) ? 
+ mask_dtype->elsize : NPY_MAX_INTP, + op_dtype[iop], + PyArray_DESCR(op[iop]), + mask_dtype, + move_references, + (PyArray_MaskedStridedUnaryOp **)&transferinfo[iop].write.func, + &transferinfo[iop].write.auxdata, + &needs_api) != NPY_SUCCEED) { goto fail; } } else { if (PyArray_GetDTypeTransferFunction( - (flags & NPY_OP_ITFLAG_ALIGNED) != 0, - op_dtype[iop]->elsize, - op_stride, - op_dtype[iop], - PyArray_DESCR(op[iop]), - move_references, - &stransfer, - &transferdata, - &needs_api) != NPY_SUCCEED) { + (flags & NPY_OP_ITFLAG_ALIGNED) != 0, + op_dtype[iop]->elsize, + op_stride, + op_dtype[iop], + PyArray_DESCR(op[iop]), + move_references, + &transferinfo[iop].write.func, + &transferinfo[iop].write.auxdata, + &needs_api) != NPY_SUCCEED) { goto fail; } } - writetransferfn[iop] = stransfer; - writetransferdata[iop] = transferdata; } /* If no write back but there are references make a decref fn */ else if (PyDataType_REFCHK(op_dtype[iop])) { @@ -3241,25 +3225,23 @@ npyiter_allocate_transfer_functions(NpyIter *iter) * src references. */ if (PyArray_GetDTypeTransferFunction( - (flags & NPY_OP_ITFLAG_ALIGNED) != 0, - op_dtype[iop]->elsize, 0, - op_dtype[iop], NULL, - 1, - &stransfer, - &transferdata, - &needs_api) != NPY_SUCCEED) { + (flags & NPY_OP_ITFLAG_ALIGNED) != 0, + op_dtype[iop]->elsize, 0, + op_dtype[iop], NULL, + 1, + &transferinfo[iop].write.func, + &transferinfo[iop].write.auxdata, + &needs_api) != NPY_SUCCEED) { goto fail; } - writetransferfn[iop] = stransfer; - writetransferdata[iop] = transferdata; } else { - writetransferfn[iop] = NULL; + transferinfo[iop].write.func = NULL; } } else { - readtransferfn[iop] = NULL; - writetransferfn[iop] = NULL; + transferinfo[iop].read.func = NULL; + transferinfo[iop].write.func = NULL; } } @@ -3272,13 +3254,13 @@ npyiter_allocate_transfer_functions(NpyIter *iter) fail: for (i = 0; i < iop+1; ++i) { - if (readtransferdata[iop] != NULL) { - NPY_AUXDATA_FREE(readtransferdata[iop]); - readtransferdata[iop] = NULL; + if (transferinfo[iop].read.auxdata != NULL) { + NPY_AUXDATA_FREE(transferinfo[iop].read.auxdata); + transferinfo[iop].read.auxdata = NULL; } - if (writetransferdata[iop] != NULL) { - NPY_AUXDATA_FREE(writetransferdata[iop]); - writetransferdata[iop] = NULL; + if (transferinfo[iop].write.auxdata != NULL) { + NPY_AUXDATA_FREE(transferinfo[iop].write.auxdata); + transferinfo[iop].write.auxdata = NULL; } } return 0; diff --git a/numpy/core/src/multiarray/nditer_impl.h b/numpy/core/src/multiarray/nditer_impl.h index 378d6f7117b3..993eae835b13 100644 --- a/numpy/core/src/multiarray/nditer_impl.h +++ b/numpy/core/src/multiarray/nditer_impl.h @@ -148,8 +148,9 @@ struct NpyIter_InternalOnly { char iter_flexdata; }; -typedef struct NpyIter_AD NpyIter_AxisData; -typedef struct NpyIter_BD NpyIter_BufferData; +typedef struct NpyIter_AxisData_tag NpyIter_AxisData; +typedef struct NpyIter_TransferInfo_tag NpyIter_TransferInfo; +typedef struct NpyIter_BufferData_tag NpyIter_BufferData; typedef npy_int16 npyiter_opitflags; @@ -167,7 +168,8 @@ typedef npy_int16 npyiter_opitflags; #define NIT_OPITFLAGS_SIZEOF(itflags, ndim, nop) \ (NPY_INTP_ALIGNED(sizeof(npyiter_opitflags) * nop)) #define NIT_BUFFERDATA_SIZEOF(itflags, ndim, nop) \ - ((itflags&NPY_ITFLAG_BUFFER) ? ((NPY_SIZEOF_INTP)*(6 + 9*nop)) : 0) + ((itflags&NPY_ITFLAG_BUFFER) ? 
( \ + (NPY_SIZEOF_INTP)*(6 + 5*nop) + sizeof(NpyIter_TransferInfo) * nop) : 0) /* Byte offsets of the iterator members starting from iter->iter_flexdata */ #define NIT_PERM_OFFSET() \ @@ -229,11 +231,22 @@ typedef npy_int16 npyiter_opitflags; &(iter)->iter_flexdata + NIT_AXISDATA_OFFSET(itflags, ndim, nop))) /* Internal-only BUFFERDATA MEMBER ACCESS */ -struct NpyIter_BD { +struct _transferdata { + PyArray_StridedUnaryOp *func; + NpyAuxData *auxdata; +}; + +struct NpyIter_TransferInfo_tag { + struct _transferdata read; + struct _transferdata write; +}; + +struct NpyIter_BufferData_tag { npy_intp buffersize, size, bufiterend, reduce_pos, reduce_outersize, reduce_outerdim; npy_intp bd_flexdata; }; + #define NBF_BUFFERSIZE(bufferdata) ((bufferdata)->buffersize) #define NBF_SIZE(bufferdata) ((bufferdata)->size) #define NBF_BUFITEREND(bufferdata) ((bufferdata)->bufiterend) @@ -248,19 +261,14 @@ struct NpyIter_BD { (&(bufferdata)->bd_flexdata + 2*(nop))) #define NBF_REDUCE_OUTERPTRS(bufferdata) ((char **) \ (&(bufferdata)->bd_flexdata + 3*(nop))) -#define NBF_READTRANSFERFN(bufferdata) ((PyArray_StridedUnaryOp **) \ +#define NBF_BUFFERS(bufferdata) ((char **) \ (&(bufferdata)->bd_flexdata + 4*(nop))) -#define NBF_READTRANSFERDATA(bufferdata) ((NpyAuxData **) \ +/* NOTE: We assume that transferdata is a multiple of npy_intp in size. */ +#define NBF_TRANSFERINFO(bufferdata) ((NpyIter_TransferInfo *) \ (&(bufferdata)->bd_flexdata + 5*(nop))) -#define NBF_WRITETRANSFERFN(bufferdata) ((PyArray_StridedUnaryOp **) \ - (&(bufferdata)->bd_flexdata + 6*(nop))) -#define NBF_WRITETRANSFERDATA(bufferdata) ((NpyAuxData **) \ - (&(bufferdata)->bd_flexdata + 7*(nop))) -#define NBF_BUFFERS(bufferdata) ((char **) \ - (&(bufferdata)->bd_flexdata + 8*(nop))) /* Internal-only AXISDATA MEMBER ACCESS. */ -struct NpyIter_AD { +struct NpyIter_AxisData_tag { npy_intp shape, index; npy_intp ad_flexdata; }; From 0b0135251984667d0609f126eba973cbaa34cb6f Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 1 Feb 2021 15:46:40 -0600 Subject: [PATCH 0468/1270] MAINT: Do not use `stransfer` to indicate NPY_OP_ITFLAG_USINGBUFFER --- numpy/core/src/multiarray/nditer_api.c | 27 +++++++++++--------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index fc80c1f11fd4..fceb58f33218 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -2205,11 +2205,6 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); for (iop = 0; iop < nop; ++iop) { - /* - * If the buffer is write-only, these two are NULL, and the buffer - * pointers will be set up but the read copy won't be done - */ - PyArray_StridedUnaryOp *stransfer = transferinfo[iop].read.func; switch (op_itflags[iop]& (NPY_OP_ITFLAG_BUFNEVER| @@ -2228,8 +2223,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) * could be zero, but strides[iop] was initialized * to the first non-trivial stride. */ - stransfer = NULL; /* The flag NPY_OP_ITFLAG_USINGBUFFER can be ignored here */ + assert(!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER)); break; /* Never need to buffer this operand */ case NPY_OP_ITFLAG_BUFNEVER|NPY_OP_ITFLAG_REDUCE: @@ -2241,8 +2236,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) * could be zero, but strides[iop] was initialized * to the first non-trivial stride. 
*/ - stransfer = NULL; /* The flag NPY_OP_ITFLAG_USINGBUFFER can be ignored here */ + assert(!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER)); break; /* Just a copy */ case 0: @@ -2260,7 +2255,6 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) if (is_onestride) { ptrs[iop] = ad_ptrs[iop]; strides[iop] = ad_strides[iop]; - stransfer = NULL; /* Signal that the buffer is not being used */ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); } @@ -2275,7 +2269,6 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) strides[iop] = ad_strides[iop]; reduce_outerstrides[iop] = NAD_STRIDES(reduce_outeraxisdata)[iop]; - stransfer = NULL; /* Signal that the buffer is not being used */ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); } @@ -2306,7 +2299,6 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) NPY_IT_DBG_PRINT1("reduce op %d all one stride\n", (int)iop); ptrs[iop] = ad_ptrs[iop]; reduce_outerstrides[iop] = 0; - stransfer = NULL; /* Signal that the buffer is not being used */ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); } @@ -2321,7 +2313,6 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) /* Outer reduce loop advances by one item */ reduce_outerstrides[iop] = NAD_STRIDES(reduce_outeraxisdata)[iop]; - stransfer = NULL; /* Signal that the buffer is not being used */ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); } @@ -2347,7 +2338,6 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) ptrs[iop] = ad_ptrs[iop]; strides[iop] = ad_strides[iop]; reduce_outerstrides[iop] = 0; - stransfer = NULL; /* Signal that the buffer is not being used */ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); } @@ -2362,7 +2352,6 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) /* Outer reduce loop advances by one item */ reduce_outerstrides[iop] = NAD_STRIDES(reduce_outeraxisdata)[iop]; - stransfer = NULL; /* Signal that the buffer is not being used */ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); } @@ -2440,7 +2429,12 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) break; } - if (stransfer != NULL) { + /* + * If OP_ITFLAG_USINGBUFFER is enabled and the read func is not NULL, + * the buffer needs to be read. + */ + if (op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER && + transferinfo[iop].read.func != NULL) { npy_intp src_itemsize; npy_intp op_transfersize; @@ -2451,7 +2445,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) src_itemsize = PyArray_DTYPE(operands[iop])->elsize; - /* If stransfer wasn't set to NULL, buffering is required */ + /* If we reach here, buffering is required */ any_buffered = 1; /* @@ -2556,7 +2550,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) src_coords, axisdata_incr, src_shape, axisdata_incr, op_transfersize, src_itemsize, - stransfer, transferinfo[iop].read.auxdata) < 0) { + transferinfo[iop].read.func, + transferinfo[iop].read.auxdata) < 0) { return -1; } } From e1f12afd409e3b5cb9cc12ef0933f1299e6109a0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 1 Feb 2021 16:56:40 -0700 Subject: [PATCH 0469/1270] BUG: Fix typo in ``numpy.__init__.py`` Fix use of "long" when "unicode" was intended. Also fix spelling of "unicode". 
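A small sketch of the user-visible effect of this fix (assuming a NumPy version from the era of this patch, before the deprecated builtin aliases were removed entirely): after the change, the deprecated `np.unicode` alias resolves to the builtin `str` instead of the integer alias it was mistakenly mapped to.

    import warnings
    import numpy as np

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        assert np.unicode is str           # before the fix this was the "long" alias
        assert np.unicode("abc") == "abc"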
Closes #18287 --- numpy/__init__.py | 4 ++-- numpy/core/tests/test_deprecations.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 0f57adcb2bd9..6e0b60913b80 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -212,8 +212,8 @@ extended_msg=_int_extended_msg.format("long"))) __deprecated_attrs__["unicode"] = ( - getattr(compat, "long"), - _msg.format(n="unciode", n2="str", + getattr(compat, "unicode"), + _msg.format(n="unicode", n2="str", extended_msg=_specific_msg.format("str_"))) del _msg, _specific_msg, _int_extended_msg, _type_info, _builtins diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 53441d9fef62..459a89eaad94 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -687,16 +687,16 @@ class TestDeprecatedGlobals(_DeprecationTestCase): reason='module-level __getattr__ not supported') def test_type_aliases(self): # from builtins - self.assert_deprecated(lambda: np.bool) - self.assert_deprecated(lambda: np.int) - self.assert_deprecated(lambda: np.float) - self.assert_deprecated(lambda: np.complex) - self.assert_deprecated(lambda: np.object) - self.assert_deprecated(lambda: np.str) + self.assert_deprecated(lambda: np.bool(True)) + self.assert_deprecated(lambda: np.int(1)) + self.assert_deprecated(lambda: np.float(1)) + self.assert_deprecated(lambda: np.complex(1)) + self.assert_deprecated(lambda: np.object()) + self.assert_deprecated(lambda: np.str('abc')) # from np.compat - self.assert_deprecated(lambda: np.long) - self.assert_deprecated(lambda: np.unicode) + self.assert_deprecated(lambda: np.long(1)) + self.assert_deprecated(lambda: np.unicode('abc')) class TestMatrixInOuter(_DeprecationTestCase): From 389b8a75a2792da258440acea1dabcc79620ba8b Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Mon, 1 Feb 2021 18:03:04 -0700 Subject: [PATCH 0470/1270] MAINT: PR 18282 revisions * based on reviewer feedback, instead of explicitly supporting `memoryview` shuffling via `asarray()`, support the shuffling implicitly by using `Sequence` instead of `MutableSequence` --- numpy/random/_generator.pyx | 7 ++----- numpy/random/mtrand.pyx | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 2976429402a9..1c4689a7003b 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -2,7 +2,7 @@ #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 import operator import warnings -from collections.abc import MutableSequence +from collections.abc import Sequence from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer from cpython cimport (Py_INCREF, PyFloat_AsDouble) @@ -4398,9 +4398,6 @@ cdef class Generator: char* x_ptr char* buf_ptr - if isinstance(x, memoryview): - x = np.asarray(x) - axis = normalize_axis_index(axis, np.ndim(x)) if type(x) is np.ndarray and x.ndim == 1 and x.size: @@ -4443,7 +4440,7 @@ cdef class Generator: x[i] = buf else: # Untyped path. - if not isinstance(x, MutableSequence): + if not isinstance(x, Sequence): # See gh-18206. We may decide to deprecate here in the future. 
warnings.warn( "`x` isn't a recognized object; `shuffle` is not guaranteed " diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index daab2c6f10fc..df8d7e380275 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -2,7 +2,7 @@ #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 import operator import warnings -from collections.abc import MutableSequence +from collections.abc import Sequence import numpy as np @@ -4436,9 +4436,6 @@ cdef class RandomState: char* x_ptr char* buf_ptr - if isinstance(x, memoryview): - x = np.asarray(x) - if type(x) is np.ndarray and x.ndim == 1 and x.size: # Fast, statically typed path: shuffle the underlying buffer. # Only for non-empty, 1d objects of class ndarray (subclasses such @@ -4476,7 +4473,7 @@ cdef class RandomState: x[i] = buf else: # Untyped path. - if not isinstance(x, MutableSequence): + if not isinstance(x, Sequence): # See gh-18206. We may decide to deprecate here in the future. warnings.warn( "`x` isn't a recognized object; `shuffle` is not guaranteed " From 2d58affcab882a00f02a9affec6ce353d0d5c5b5 Mon Sep 17 00:00:00 2001 From: Abhay Raghuvanshi Date: Tue, 2 Feb 2021 22:30:15 +0530 Subject: [PATCH 0471/1270] Fixed exception cause in cov_template.py --- numpy/distutils/conv_template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py index e46db066302f..65efab0625e0 100644 --- a/numpy/distutils/conv_template.py +++ b/numpy/distutils/conv_template.py @@ -218,7 +218,7 @@ def replace(match): val = env[name] except KeyError: msg = 'line %d: no definition of key "%s"'%(line, name) - raise ValueError(msg) + raise ValueError(msg) from None return val code = [lineno] From 45c414cc6b63f94b3bbb966ca59d051a4579b3d9 Mon Sep 17 00:00:00 2001 From: Leo Singer Date: Tue, 2 Feb 2021 13:39:09 -0500 Subject: [PATCH 0472/1270] MAINT: Fix typo in PyArray_RegisterDataType error --- numpy/core/src/multiarray/usertypes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c index a1ed46f13a60..15d46800c471 100644 --- a/numpy/core/src/multiarray/usertypes.c +++ b/numpy/core/src/multiarray/usertypes.c @@ -235,7 +235,7 @@ PyArray_RegisterDataType(PyArray_Descr *descr) !PyDict_CheckExact(descr->fields)) { PyErr_Format(PyExc_ValueError, "Failed to register dtype for %S: Legacy user dtypes " - "using `NPY_ITEM_IS_POINTER` or `NPY_ITEM_REFCOUNT` are" + "using `NPY_ITEM_IS_POINTER` or `NPY_ITEM_REFCOUNT` are " "unsupported. It is possible to create such a dtype only " "if it is a structured dtype with names and fields " "hardcoded at registration time.\n" From b3aeb8391f3cfd0a10292780b481d0bd1539a0c4 Mon Sep 17 00:00:00 2001 From: Mary Date: Tue, 2 Feb 2021 13:55:23 -0800 Subject: [PATCH 0473/1270] DOC: Corrected numpy.power example. See #18256 --- doc/source/user/basics.types.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 64b6dcf50412..a9e210c3a323 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -270,7 +270,7 @@ Overflow Errors The fixed size of NumPy numeric types may cause overflow errors when a value requires more memory than available in the data type. 
For example, -`numpy.power` evaluates ``100 * 10 ** 8`` correctly for 64-bit integers, +`numpy.power` evaluates ``100 ** 8`` correctly for 64-bit integers, but gives 1874919424 (incorrect) for a 32-bit integer. >>> np.power(100, 8, dtype=np.int64) From 8fe747ed0911aefa12d6f6d00573953239e03a91 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 2 Feb 2021 16:50:10 -0600 Subject: [PATCH 0474/1270] MAINT: Add variable length field at end of struct. This (unused) field should make absolutely sure that what follows will be intp aligned, so it might be good (although I have no real doubts that it is already). --- numpy/core/src/multiarray/nditer_impl.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/nditer_impl.h b/numpy/core/src/multiarray/nditer_impl.h index 993eae835b13..bcc3ab7b98b3 100644 --- a/numpy/core/src/multiarray/nditer_impl.h +++ b/numpy/core/src/multiarray/nditer_impl.h @@ -239,6 +239,8 @@ struct _transferdata { struct NpyIter_TransferInfo_tag { struct _transferdata read; struct _transferdata write; + /* Probably unnecessary, but make sure what follows is intp aligned: */ + npy_intp _unused_ensure_alignment[]; }; struct NpyIter_BufferData_tag { @@ -263,7 +265,6 @@ struct NpyIter_BufferData_tag { (&(bufferdata)->bd_flexdata + 3*(nop))) #define NBF_BUFFERS(bufferdata) ((char **) \ (&(bufferdata)->bd_flexdata + 4*(nop))) -/* NOTE: We assume that transferdata is a multiple of npy_intp in size. */ #define NBF_TRANSFERINFO(bufferdata) ((NpyIter_TransferInfo *) \ (&(bufferdata)->bd_flexdata + 5*(nop))) From d80593148f56f1880b83c33eac9fe361fccab5a0 Mon Sep 17 00:00:00 2001 From: Abhay Raghuvanshi Date: Wed, 3 Feb 2021 08:02:01 +0530 Subject: [PATCH 0475/1270] using logo from branding/logo/primary --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9271a5d28872..9207d00c5e97 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# NumPy +# NumPy [![Travis CI](https://img.shields.io/travis/com/numpy/numpy/master?label=Travis%20CI)]( https://travis-ci.com/github/numpy/numpy) From dabf1faf9f5fe5c047c41d54662d828174f89dd4 Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 3 Feb 2021 17:01:30 +0200 Subject: [PATCH 0476/1270] BLD: rearrange Azure build jobs --- azure-pipelines.yml | 60 ++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b56d596a594c..71728734cdd7 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -18,6 +18,35 @@ stages: - stage: InitialTests jobs: + # Native build is based on gcc flag `-march=native` + - job: Linux_baseline_native + pool: + vmImage: 'ubuntu-20.04' + steps: + - script: | + if ! 
`gcc 2>/dev/null`; then + sudo apt install gcc + fi + sudo apt install python3 + sudo apt install python3-dev + # python3 has no setuptools, so install one to get us going + python3 -m pip install --user --upgrade pip 'setuptools<49.2.0' + python3 -m pip install --user -r test_requirements.txt + displayName: 'install python/requirements' + - script: | + python3 runtests.py --show-build-log --cpu-baseline=native --cpu-dispatch=none \ + --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml + displayName: 'Run native baseline Build / Tests' + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testResultsFiles: '**/test-*.xml' + failTaskOnFailedTests: true + testRunTitle: 'Publish test results for baseline/native' + +- stage: ComprehensiveTests + jobs: + - job: WindowsFast pool: @@ -32,11 +61,6 @@ stages: steps: - template: azure-steps-windows.yml - -- stage: ComprehensiveTests - jobs: - - - job: Linux_Python_38_32bit_full_with_asserts pool: vmImage: 'ubuntu-18.04' @@ -263,28 +287,4 @@ stages: failTaskOnFailedTests: true testRunTitle: 'Publish test results for gcc 4.8' - # Native build is based on gcc flag `-march=native` - - job: Linux_baseline_native - pool: - vmImage: 'ubuntu-20.04' - steps: - - script: | - if ! `gcc 2>/dev/null`; then - sudo apt install gcc - fi - sudo apt install python3 - sudo apt install python3-dev - # python3 has no setuptools, so install one to get us going - python3 -m pip install --user --upgrade pip 'setuptools<49.2.0' - python3 -m pip install --user -r test_requirements.txt - displayName: 'install python/requirements' - - script: | - python3 runtests.py --show-build-log --cpu-baseline=native --cpu-dispatch=none \ - --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml - displayName: 'Run native baseline Build / Tests' - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for baseline/native' + From d7c8f3dfab63852870d94c43b7b6d667700710a8 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 3 Feb 2021 14:09:52 -0600 Subject: [PATCH 0477/1270] TST: Add test for the nditer debug_print --- numpy/core/tests/test_nditer.py | 85 +++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 94f61baca12f..536f812ee616 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -1,6 +1,8 @@ import sys import pytest +import subprocess, textwrap, re + import numpy as np import numpy.core._multiarray_tests as _multiarray_tests from numpy import array, arange, nditer, all @@ -3014,3 +3016,86 @@ def test_partial_iteration_error(in_dtype, buf_dtype): it.iternext() assert count == sys.getrefcount(value) + + +def test_debug_print(): + """ + Matches the expected output of a debug print with the actual output. + Note that the iterator dump should not be considered stable API, + this test is mainly to ensure the print does not crash. + + Currently uses a subprocess to avoid dealing with the C level `printf`s. + """ + # the expected output with all addresses and sizes stripped (they vary + # and/or are platform dependend). 
+ expected = textwrap.dedent(""" + ------ BEGIN ITERATOR DUMP ------ + | Iterator Address: + | ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS + | NDim: 2 + | NOp: 2 + | IterSize: 50 + | IterStart: 0 + | IterEnd: 50 + | IterIndex: 0 + | Iterator SizeOf: + | BufferData SizeOf: + | AxisData SizeOf: + | + | Perm: 0 1 + | DTypes: + | DTypes: dtype('float64') dtype('int32') + | InitDataPtrs: + | BaseOffsets: 0 0 + | Operands: + | Operand DTypes: dtype('int64') dtype('float64') + | OpItFlags: + | Flags[0]: READ CAST ALIGNED + | Flags[1]: READ WRITE CAST ALIGNED REDUCE + | + | BufferData: + | BufferSize: 50 + | Size: 5 + | BufIterEnd: 5 + | REDUCE Pos: 0 + | REDUCE OuterSize: 10 + | REDUCE OuterDim: 1 + | Strides: 8 4 + | Ptrs: + | REDUCE Outer Strides: 40 0 + | REDUCE Outer Ptrs: + | ReadTransferFn: + | ReadTransferData: (nil) (nil) + | WriteTransferFn: (nil) + | WriteTransferData: (nil) (nil) + | Buffers: + | + | AxisData[0]: + | Shape: 5 + | Index: 0 + | Strides: 16 8 + | Ptrs: + | AxisData[1]: + | Shape: 10 + | Index: 0 + | Strides: 80 0 + | Ptrs: + ------- END ITERATOR DUMP ------- + """).strip() + + code = textwrap.dedent(""" + import numpy as np + arr1 = np.arange(100, dtype=np.int64).reshape(10, 10)[:, ::2] + arr2 = np.arange(5.) + it = np.nditer((arr1, arr2), op_dtypes=["d", "i4"], casting="unsafe", + flags=["reduce_ok", "buffered"], + op_flags=[["readonly"], ["readwrite"]]) + it.debug_print() + """) + res = subprocess.check_output([sys.executable, "-c", code], text=True) + res = res.strip() + + for res_line, expected_line in zip(res.splitlines(), expected.splitlines()): + # The actual output may have additional pointers listed that are + # stripped from the example output: + assert res_line.startswith(expected_line) From 64a2c69f234daf66eb1f7a2ae3afdaea1eff91ea Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 3 Feb 2021 16:56:46 -0600 Subject: [PATCH 0478/1270] BUG: Add further tests and fix existing npyiter cleanup bugs --- numpy/core/src/multiarray/nditer_pywrap.c | 10 ++++-- numpy/core/tests/test_nditer.py | 43 ++++++++++++++++++++--- 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index 8839d1be717d..e0ec9d5cff87 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -1134,6 +1134,9 @@ static void npyiter_dealloc(NewNpyArrayIterObject *self) { if (self->iter) { + /* Store error, so that WriteUnraisable cannot clear an existing one */ + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); if (npyiter_has_writeback(self->iter)) { if (PyErr_WarnEx(PyExc_RuntimeWarning, "Temporary data has not been written back to one of the " @@ -1152,10 +1155,13 @@ npyiter_dealloc(NewNpyArrayIterObject *self) } } } - NpyIter_Deallocate(self->iter); + if (!NpyIter_Deallocate(self->iter)) { + PyErr_WriteUnraisable(Py_None); + } self->iter = NULL; Py_XDECREF(self->nested_child); self->nested_child = NULL; + PyErr_Restore(exc, val, tb); } Py_TYPE(self)->tp_free((PyObject*)self); } @@ -2320,7 +2326,7 @@ npyiter_close(NewNpyArrayIterObject *self) self->iter = NULL; Py_XDECREF(self->nested_child); self->nested_child = NULL; - if (ret < 0) { + if (ret != NPY_SUCCEED) { return NULL; } Py_RETURN_NONE; diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 536f812ee616..600d0f98bcf8 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -1963,10 +1963,45 @@ def 
test_iter_buffered_cast_structured_type(): sdt2 = [('b', 'O'), ('a', 'f8')] a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) - assert_raises(ValueError, lambda : ( - nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2))) + for intent in ["readwrite", "readonly", "writeonly"]: + # If the following assert fails, the place where the error is raised + # within nditer may change. That is fine, but it may make sense for + # a new (hard to design) test to replace it: + assert np.can_cast(a.dtype, sdt2, casting="unsafe") + simple_arr = np.array([1, 2], dtype="i") # succeeds but needs clean up + with pytest.raises(ValueError): + nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent], + casting='unsafe', op_dtypes=["f", sdt2]) + + +def test_buffered_cast_error_paths(): + with pytest.raises(ValueError): + # The input is cast into an `S3` buffer + np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"], + casting="unsafe", flags=["buffered"]) + + # The `M8[ns]` is cast into the `S3` output + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + with pytest.raises(ValueError): + with it: + buf = next(it) + buf[...] = "a" # cannot be converted to int. + + # The following gives an unraisable error, there are probably better + # ways to test that: + code = textwrap.dedent(""" + import numpy as np + + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + buf = next(it) + buf[...] = "a" + del buf, it # Flushing only happens during deallocate right now. + """) + res = subprocess.check_output([sys.executable, "-c", code], + stderr=subprocess.STDOUT, text=True) + assert "ValueError" in res def test_iter_buffered_cast_subarray(): From 5cbe94b4c21da1e6c57a5f5ab49d02f4ff89e7ed Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 3 Feb 2021 18:20:49 -0600 Subject: [PATCH 0479/1270] TST,MAINT: Avoid subprocess for the debugprint test --- numpy/core/tests/test_nditer.py | 40 ++++++++++++++++----------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 600d0f98bcf8..012957b5d0e4 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -1,7 +1,7 @@ import sys import pytest -import subprocess, textwrap, re +import textwrap, subprocess import numpy as np import numpy.core._multiarray_tests as _multiarray_tests @@ -1988,8 +1988,8 @@ def test_buffered_cast_error_paths(): buf = next(it) buf[...] = "a" # cannot be converted to int. - # The following gives an unraisable error, there are probably better - # ways to test that: + # The following gives an unraisable error. Pytest sometimes captures that + # (depending on version). So this can probably be cleaned out in the future: code = textwrap.dedent(""" import numpy as np @@ -3053,7 +3053,7 @@ def test_partial_iteration_error(in_dtype, buf_dtype): assert count == sys.getrefcount(value) -def test_debug_print(): +def test_debug_print(capfd): """ Matches the expected output of a debug print with the actual output. Note that the iterator dump should not be considered stable API, @@ -3063,7 +3063,7 @@ def test_debug_print(): """ # the expected output with all addresses and sizes stripped (they vary # and/or are platform dependend). 
- expected = textwrap.dedent(""" + expected = """ ------ BEGIN ITERATOR DUMP ------ | Iterator Address: | ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS @@ -3116,21 +3116,19 @@ def test_debug_print(): | Strides: 80 0 | Ptrs: ------- END ITERATOR DUMP ------- - """).strip() - - code = textwrap.dedent(""" - import numpy as np - arr1 = np.arange(100, dtype=np.int64).reshape(10, 10)[:, ::2] - arr2 = np.arange(5.) - it = np.nditer((arr1, arr2), op_dtypes=["d", "i4"], casting="unsafe", - flags=["reduce_ok", "buffered"], - op_flags=[["readonly"], ["readwrite"]]) - it.debug_print() - """) - res = subprocess.check_output([sys.executable, "-c", code], text=True) - res = res.strip() - - for res_line, expected_line in zip(res.splitlines(), expected.splitlines()): + """.strip().splitlines() + + arr1 = np.arange(100, dtype=np.int64).reshape(10, 10)[:, ::2] + arr2 = np.arange(5.) + it = np.nditer((arr1, arr2), op_dtypes=["d", "i4"], casting="unsafe", + flags=["reduce_ok", "buffered"], + op_flags=[["readonly"], ["readwrite"]]) + it.debug_print() + res = capfd.readouterr().out + res = res.strip().splitlines() + + assert len(res) == len(expected) + for res_line, expected_line in zip(res, expected): # The actual output may have additional pointers listed that are # stripped from the example output: - assert res_line.startswith(expected_line) + assert res_line.startswith(expected_line.strip()) From 1053f9b6b990321ab2458f4b5c52b97eebe5355d Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 3 Feb 2021 19:14:35 -0600 Subject: [PATCH 0480/1270] TST: Clear out final (nil) pointers On windows they are printed as zeros, it would have been nice to keep it, but isn't worth the trouble --- numpy/core/tests/test_nditer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 012957b5d0e4..c884f46906d4 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -3100,9 +3100,9 @@ def test_debug_print(capfd): | REDUCE Outer Strides: 40 0 | REDUCE Outer Ptrs: | ReadTransferFn: - | ReadTransferData: (nil) (nil) - | WriteTransferFn: (nil) - | WriteTransferData: (nil) (nil) + | ReadTransferData: + | WriteTransferFn: + | WriteTransferData: | Buffers: | | AxisData[0]: From 5bc12cd3243c805b05f0bb8d4e2ccf512bca1d38 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 3 Feb 2021 19:24:08 -0600 Subject: [PATCH 0481/1270] TST: Split out unraisable test, add comment and skip on pypy --- numpy/core/tests/test_nditer.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index c884f46906d4..708241542f0a 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -1,7 +1,8 @@ import sys import pytest -import textwrap, subprocess +import textwrap +import subprocess import numpy as np import numpy.core._multiarray_tests as _multiarray_tests @@ -1988,17 +1989,21 @@ def test_buffered_cast_error_paths(): buf = next(it) buf[...] = "a" # cannot be converted to int. +@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.") +def test_buffered_cast_error_paths_unraisable(): # The following gives an unraisable error. Pytest sometimes captures that - # (depending on version). So this can probably be cleaned out in the future: + # (depending python and/or pytest version). 
So with Python>=3.8 this can + # probably be cleaned out in the future to check for + # pytest.PytestUnraisableExceptionWarning: code = textwrap.dedent(""" - import numpy as np - - it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], - op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) - buf = next(it) - buf[...] = "a" - del buf, it # Flushing only happens during deallocate right now. - """) + import numpy as np + + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + buf = next(it) + buf[...] = "a" + del buf, it # Flushing only happens during deallocate right now. + """) res = subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.STDOUT, text=True) assert "ValueError" in res From 33953d21e735f9e31558a7c848970524ea9e2137 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 3 Feb 2021 20:04:42 -0600 Subject: [PATCH 0482/1270] TST: Split out failure test and ensure it requires cleanup Adding the array is not sufficient to hit all code lines (although most), because auxdata is NULL for most inputs. --- numpy/core/tests/test_nditer.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 708241542f0a..4271d2d96f36 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -1958,6 +1958,8 @@ def test_iter_buffered_cast_structured_type(): [np.array((1, 2, 3), dtype=sdt2), np.array((4, 5, 6), dtype=sdt2)]) + +def test_iter_buffered_cast_structured_type_failure_with_cleanup(): # make sure struct type -> struct type with different # number of fields fails sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] @@ -1967,12 +1969,13 @@ def test_iter_buffered_cast_structured_type(): for intent in ["readwrite", "readonly", "writeonly"]: # If the following assert fails, the place where the error is raised # within nditer may change. That is fine, but it may make sense for - # a new (hard to design) test to replace it: + # a new (hard to design) test to replace it. The `simple_arr` is + # designed to require a multi-step cast (due to having fields). 
assert np.can_cast(a.dtype, sdt2, casting="unsafe") - simple_arr = np.array([1, 2], dtype="i") # succeeds but needs clean up + simple_arr = np.array([1, 2], dtype="i,i") # requires clean up with pytest.raises(ValueError): nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent], - casting='unsafe', op_dtypes=["f", sdt2]) + casting='unsafe', op_dtypes=["f,f", sdt2]) def test_buffered_cast_error_paths(): From d3763198673ffc1092539041b8bd23134ae22bee Mon Sep 17 00:00:00 2001 From: Nicholas McKibben Date: Thu, 4 Feb 2021 12:13:39 -0700 Subject: [PATCH 0483/1270] BUG: don't mutate list of fake libraries while iterating over it (#18295) * BUG: don't mutate list of fake libraries while iterating over it * BUG: iterate over copy of list * TST: add build test for build_ext fix (#1) * TST: add build test for build_ext fix * TST: clearer test name * STY: use triple quotes instead of lists of strings * FIX: check for f77 compiler before test is run * DOC: add comment explaining that a list copy is necessary --- numpy/distutils/command/build_ext.py | 7 ++- numpy/distutils/tests/test_build_ext.py | 72 +++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 2 deletions(-) create mode 100644 numpy/distutils/tests/test_build_ext.py diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index 448f7941cd93..99c6be873683 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -569,8 +569,11 @@ def _process_unlinkable_fobjects(self, objects, libraries, objects = list(objects) unlinkable_fobjects = list(unlinkable_fobjects) - # Expand possible fake static libraries to objects - for lib in libraries: + # Expand possible fake static libraries to objects; + # make sure to iterate over a copy of the list as + # "fake" libraries will be removed as they are + # enountered + for lib in libraries[:]: for libdir in library_dirs: fake_lib = os.path.join(libdir, lib + '.fobjects') if os.path.isfile(fake_lib): diff --git a/numpy/distutils/tests/test_build_ext.py b/numpy/distutils/tests/test_build_ext.py new file mode 100644 index 000000000000..c007159f520e --- /dev/null +++ b/numpy/distutils/tests/test_build_ext.py @@ -0,0 +1,72 @@ +'''Tests for numpy.distutils.build_ext.''' + +import os +import subprocess +import sys +from textwrap import indent, dedent +import pytest + +@pytest.mark.slow +def test_multi_fortran_libs_link(tmp_path): + ''' + Ensures multiple "fake" static libraries are correctly linked. + see gh-18295 + ''' + + # We need to make sure we actually have an f77 compiler. 
+ # This is nontrivial, so we'll borrow the utilities + # from f2py tests: + from numpy.f2py.tests.util import has_f77_compiler + if not has_f77_compiler(): + pytest.skip('No F77 compiler found') + + # make some dummy sources + with open(tmp_path / '_dummy1.f', 'w') as fid: + fid.write(indent(dedent('''\ + FUNCTION dummy_one() + RETURN + END FUNCTION'''), prefix=' '*6)) + with open(tmp_path / '_dummy2.f', 'w') as fid: + fid.write(indent(dedent('''\ + FUNCTION dummy_two() + RETURN + END FUNCTION'''), prefix=' '*6)) + with open(tmp_path / '_dummy.c', 'w') as fid: + # doesn't need to load - just needs to exist + fid.write('int PyInit_dummyext;') + + # make a setup file + with open(tmp_path / 'setup.py', 'w') as fid: + srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..') + fid.write(dedent(f'''\ + def configuration(parent_package="", top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration("", parent_package, top_path) + config.add_library("dummy1", sources=["_dummy1.f"]) + config.add_library("dummy2", sources=["_dummy2.f"]) + config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"]) + return config + + + if __name__ == "__main__": + import sys + sys.path.insert(0, r"{srctree}") + from numpy.distutils.core import setup + setup(**configuration(top_path="").todict())''')) + + # build the test extensino and "install" into a temporary directory + build_dir = tmp_path + subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', + '--prefix', str(tmp_path / 'installdir'), + '--record', str(tmp_path / 'tmp_install_log.txt'), + ], + cwd=str(build_dir), + ) + # get the path to the so + so = None + with open(tmp_path /'tmp_install_log.txt') as fid: + for line in fid: + if 'dummyext' in line: + so = line.strip() + break + assert so is not None From bcb168a56d1c47b877568ce51ce90a1ced89f007 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 4 Feb 2021 14:16:52 -0600 Subject: [PATCH 0484/1270] BUG: Allow unmodified use of isclose, allclose, etc. with timedelta Disallowing timedelta64+float promotion (to timedelta64) in all cases (previously it was assymetric and "half allowed") meant that isclose, allclose, np.ma.allclose, and assert_arrays_almost_equal (which uses isclose), would stop work for timedelta64. Hardcoding that timedelta64 is passed on unmodified retains the old behaviour. It may make sense to deprecate or change this behaviour in the future, but for the 1.20 release, the behaviour should be as much unmodified as possible. Closes gh-18286 --- numpy/core/numeric.py | 9 +++++++-- numpy/core/tests/test_numeric.py | 9 +++++++++ numpy/ma/core.py | 11 ++++++++--- numpy/ma/tests/test_core.py | 7 +++++++ numpy/testing/tests/test_utils.py | 5 +++++ 5 files changed, 36 insertions(+), 5 deletions(-) diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 086439656816..89f56fa0995f 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -2350,8 +2350,13 @@ def within_tol(x, y, atol, rtol): # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). # This will cause casting of x later. Also, make sure to allow subclasses # (e.g., for numpy.ma). - dt = multiarray.result_type(y, 1.) - y = array(y, dtype=dt, copy=False, subok=True) + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. 
+ # Although, the default tolerances are unlikely to be useful + if y.dtype.kind != "m": + dt = multiarray.result_type(y, 1.) + y = array(y, dtype=dt, copy=False, subok=True) xfin = isfinite(x) yfin = isfinite(y) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index f8b388b6f99a..a697e5faf4ba 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -2578,6 +2578,15 @@ def test_non_finite_scalar(self): assert_(np.isclose(0, np.inf) is np.False_) assert_(type(np.isclose(0, np.inf)) is np.bool_) + def test_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert np.isclose(a, a, atol=0, equal_nan=True).all() + assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all() + assert np.allclose(a, a, atol=0, equal_nan=True) + assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) + class TestStdVar: def setup(self): diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 38a0a8b50ddc..cda2eeb343f2 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -7869,9 +7869,14 @@ def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. - dtype = np.result_type(y, 1.) - if y.dtype != dtype: - y = masked_array(y, dtype=dtype, copy=False) + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. + # Although, the default tolerances are unlikely to be useful + if y.dtype.kind != "m": + dtype = np.result_type(y, 1.) + if y.dtype != dtype: + y = masked_array(y, dtype=dtype, copy=False) m = mask_or(getmask(x), getmask(y)) xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 0b2e7303cb72..f4078062521f 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -3037,6 +3037,13 @@ def test_allclose(self): a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) assert_(allclose(a, a)) + def test_allclose_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, 4]], dtype="m8[ns]") + assert allclose(a, a, atol=0) + assert allclose(a, a, atol=np.timedelta64(1, "ns")) + def test_allany(self): # Checks the any/all methods/functions. 
x = np.array([[0.13, 0.26, 0.90], diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index c3b9e04b693f..261ed9705947 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -904,6 +904,11 @@ def test_report_max_relative_error(self): msg = str(exc_info.value) assert_('Max relative difference: 0.5' in msg) + def test_timedelta(self): + # see gh-18286 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert_allclose(a, a) + class TestArrayAlmostEqualNulp: From 8f6a5d81b892165b1ca959b43a4871e9f8a14a4c Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Thu, 4 Feb 2021 14:54:19 -0600 Subject: [PATCH 0485/1270] Use platform name in openblas_support and add macosx-arm64 support instead of mixing OS name and architecture into arch variable --- tools/openblas_support.py | 114 +++++++++++++++++++++----------------- 1 file changed, 63 insertions(+), 51 deletions(-) diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 1300795bb9a1..89ecbff70ab8 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -2,6 +2,7 @@ import hashlib import os import platform +import sysconfig import sys import shutil import tarfile @@ -16,24 +17,33 @@ OPENBLAS_LONG = 'v0.3.13-62-gaf2b0d02' BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs' BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download' -ARCHITECTURES = ['', 'windows', 'darwin', 'aarch64', 'x86_64', - 'i686', 'ppc64le', 's390x'] +ARCHITECTURES = ['arm64', 'aarch64', 'x86_64', 'i686', 'ppc64le', 's390x'] +SUPPORTED_PLATFORMS = [ + 'linux-aarch64', + 'linux-x86_64', + 'linux-i686', + 'linux-ppc64le', + 'linux-s390x', + 'win-amd64', + 'win-32', + 'macosx-x86_64', + 'macosx-arm64', +] IS_32BIT = sys.maxsize < 2**32 -def get_arch(): - if platform.system() == 'Windows': - ret = 'windows' - elif platform.system() == 'Darwin': - ret = 'darwin' - else: - ret = platform.uname().machine - # What do 32 bit machines report? 
- # If they are a docker, they can report x86_64 - if 'x86' in ret and IS_32BIT: - ret = 'i686' - assert ret in ARCHITECTURES, f'invalid architecture {ret}' - return ret +def get_plat(): + plat = sysconfig.get_platform() + plat_split = plat.split("-") + arch = plat_split[-1] + if arch == "win32": + plat = "win-32" + elif arch in ["universal2", "intel"]: + plat = f"macosx-{platform.uname().machine}" + elif len(plat_split) > 2: + plat = f"{plat_split[0]}-{arch}" + assert plat in SUPPORTED_PLATFORMS, f'invalid platform {plat}' + return plat def get_ilp64(): @@ -55,30 +65,34 @@ def get_manylinux(arch): return ret -def download_openblas(target, arch, ilp64, is_32bit): - ml_ver = get_manylinux(arch) +def download_openblas(target, plat, ilp64): + osname, arch = plat.split("-") fnsuffix = {None: "", "64_": "64_"}[ilp64] filename = '' headers = {'User-Agent': ('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 ; ' '(KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3')} - if arch in ('aarch64', 'ppc64le', 's390x', 'x86_64', 'i686'): + suffix = None + if osname == "linux": + ml_ver = get_manylinux(arch) suffix = f'manylinux{ml_ver}_{arch}.tar.gz' - filename = f'{BASEURL}/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}' typ = 'tar.gz' - elif arch == 'darwin': + elif plat == 'macosx-x86_64': suffix = 'macosx_10_9_x86_64-gf_1becaaa.tar.gz' - filename = f'{BASEURL}/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}' typ = 'tar.gz' - elif arch == 'windows': - if is_32bit: + elif plat == 'macosx-arm64': + suffix = 'macosx_11_0_arm64-gf_f10e307.tar.gz' + typ = 'tar.gz' + elif osname == 'win': + if plat == "win-32": suffix = 'win32-gcc_8_1_0.zip' else: suffix = 'win_amd64-gcc_8_1_0.zip' - filename = f'{BASEURL}/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}' typ = 'zip' - if not filename: + + if not suffix: return None + filename = f'{BASEURL}/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}' req = Request(url=filename, headers=headers) try: response = urlopen(req) @@ -99,7 +113,7 @@ def download_openblas(target, arch, ilp64, is_32bit): return typ -def setup_openblas(arch=get_arch(), ilp64=get_ilp64(), is_32bit=IS_32BIT): +def setup_openblas(plat=get_plat(), ilp64=get_ilp64()): ''' Download and setup an openblas library for building. If successful, the configuration script will find it automatically. 
@@ -111,12 +125,13 @@ def setup_openblas(arch=get_arch(), ilp64=get_ilp64(), is_32bit=IS_32BIT): To determine success, do ``os.path.exists(msg)`` ''' _, tmp = mkstemp() - if not arch: - raise ValueError('unknown architecture') - typ = download_openblas(tmp, arch, ilp64, is_32bit) + if not plat: + raise ValueError('unknown platform') + typ = download_openblas(tmp, plat, ilp64) if not typ: return '' - if arch == 'windows': + osname, arch = plat.split("-") + if osname == 'win': if not typ == 'zip': return f'expecting to download zipfile on windows, not {typ}' return unpack_windows_zip(tmp) @@ -216,23 +231,22 @@ def make_init(dirname): """)) -def test_setup(arches): +def test_setup(plats): ''' Make sure all the downloadable files exist and can be opened ''' def items(): - """ yields all combinations of arch, ilp64, is_32bit + """ yields all combinations of arch, ilp64 """ - for arch in arches: - yield arch, None, False - if arch not in ('i686',): - yield arch, '64_', False - if arch in ('windows',): - yield arch, None, True - if arch in ('i686', 'x86_64'): + for plat in plats: + yield plat, None + osname, arch = plat.split("-") + if arch not in ('i686', 'arm64', '32'): + yield plat, '64_' + if osname == "linux" and arch in ('i686', 'x86_64'): oldval = os.environ.get('MB_ML_VER', None) os.environ['MB_ML_VER'] = '1' - yield arch, None, False + yield plat, None # Once we create x86_64 and i686 manylinux2014 wheels... # os.environ['MB_ML_VER'] = '2014' # yield arch, None, False @@ -242,25 +256,23 @@ def items(): os.environ.pop('MB_ML_VER') errs = [] - for arch, ilp64, is_32bit in items(): - if arch == '': - continue - if arch not in arches: + for plat, ilp64 in items(): + osname, _ = plat.split("-") + if plat not in plats: continue target = None try: try: - target = setup_openblas(arch, ilp64, is_32bit) + target = setup_openblas(plat, ilp64) except Exception as e: - print(f'Could not setup {arch} with ilp64 {ilp64}, ' - f'32bit {is_32bit}:') + print(f'Could not setup {plat} with ilp64 {ilp64}, ') print(e) errs.append(e) continue if not target: - raise RuntimeError(f'Could not setup {arch}') + raise RuntimeError(f'Could not setup {plat}') print(target) - if arch == 'windows': + if osname == 'win': if not target.endswith('.a'): raise RuntimeError("Not .a extracted!") else: @@ -311,7 +323,7 @@ def test_version(expected_version, ilp64=get_ilp64()): 'architecture') parser.add_argument('--test', nargs='*', default=None, help='Test different architectures. 
"all", or any of ' - f'{ARCHITECTURES}') + f'{SUPPORTED_PLATFORMS}') parser.add_argument('--check_version', nargs='?', default='', help='Check provided OpenBLAS version string ' 'against available OpenBLAS') @@ -322,6 +334,6 @@ def test_version(expected_version, ilp64=get_ilp64()): print(setup_openblas()) else: if len(args.test) == 0 or 'all' in args.test: - test_setup(ARCHITECTURES) + test_setup(SUPPORTED_PLATFORMS) else: test_setup(args.test) From 4dbfc815d468bbf17207083932a11a6e29579a8d Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Thu, 4 Feb 2021 15:17:29 -0600 Subject: [PATCH 0486/1270] Remove unused ARCHITECTURES variable --- tools/openblas_support.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 89ecbff70ab8..4a691aba0b46 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -17,7 +17,6 @@ OPENBLAS_LONG = 'v0.3.13-62-gaf2b0d02' BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs' BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download' -ARCHITECTURES = ['arm64', 'aarch64', 'x86_64', 'i686', 'ppc64le', 's390x'] SUPPORTED_PLATFORMS = [ 'linux-aarch64', 'linux-x86_64', From b3cb7752a456f18eb559f759cba35dd6cb82eb8e Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 4 Feb 2021 22:42:40 +0100 Subject: [PATCH 0487/1270] CI: fix when GitHub Actions builds trigger, and allow ci skips Similar to what SciPy does. Right now, it triggers even on pushes to branches on forks, which is useless and generates lots of notifications on failures and wasted resources if no failures. --- .github/workflows/build_test.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index ce0c492e6f98..1363d93276db 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -1,6 +1,14 @@ name: Build_Test -on: [push, pull_request] +on: + push: + branches: + - master + - maintenance/** + pull_request: + branches: + - master + - maintenance/** defaults: run: @@ -12,6 +20,7 @@ env: jobs: smoke_test: + if: "github.repository == 'numpy/numpy' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 From b765975ca89457fea91a63b94075c9e66465e2ea Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 4 Feb 2021 15:24:39 -0600 Subject: [PATCH 0488/1270] BUG: Allow pickling all relevant DType types/classes Introducing the metaclass without a canonical top-level name broke pickling of `type(np.dtype(...))` (which was always just `np.dtype`). While a better solution will likely be possible by making the DTypes HeapTypes and this solution may not work for all imaginable cases (i.e. it is plausible for a dtype to not have a scalar type associated), using a `copyreg` registration for the metaclass surprisingly works without any issues and seems like the simplest solution right now. 
Closes gh-16692, gh-18325 --- numpy/core/__init__.py | 22 +++++++++++++++++++++- numpy/core/tests/test_dtype.py | 16 +++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index f22c86f59c4a..dad9293e1a19 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -125,6 +125,7 @@ def _ufunc_reconstruct(module, name): mod = __import__(module, fromlist=[name]) return getattr(mod, name) + def _ufunc_reduce(func): # Report the `__name__`. pickle will try to find the module. Note that # pickle supports for this `__name__` to be a `__qualname__`. It may @@ -134,12 +135,31 @@ def _ufunc_reduce(func): return func.__name__ +def _DType_reconstruct(scalar_type): + # This is a work-around to pickle type(np.dtype(np.float64)), etc. + # and it should eventually be replaced with a better solution, e.g. when + # DTypes become HeapTypes. + return type(dtype(scalar_type)) + + +def _DType_reduce(DType): + # To pickle a DType without having to add top-level names, pickle the + # scalar type for now (and assume that reconstruction will be possible). + if DType is dtype: + return "dtype" # must pickle `np.dtype` as a singleton. + scalar_type = DType.type # pickle the scalar type for reconstruction + return _DType_reconstruct, (scalar_type,) + + import copyreg copyreg.pickle(ufunc, _ufunc_reduce) -# Unclutter namespace (must keep _ufunc_reconstruct for unpickling) +copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct) + +# Unclutter namespace (must keep _*_reconstruct for unpickling) del copyreg del _ufunc_reduce +del _DType_reduce from numpy._pytesttester import PytestTester test = PytestTester(__name__) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 03e0e172abbd..528486a05b26 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -1019,7 +1019,12 @@ class TestPickling: def check_pickling(self, dtype): for proto in range(pickle.HIGHEST_PROTOCOL + 1): - pickled = pickle.loads(pickle.dumps(dtype, proto)) + buf = pickle.dumps(dtype, proto) + # The dtype pickling itself pickles `np.dtype` if it is pickled + # as a singleton `dtype` should be stored in the buffer: + assert b"_DType_reconstruct" not in buf + assert b"dtype" in buf + pickled = pickle.loads(buf) assert_equal(pickled, dtype) assert_equal(pickled.descr, dtype.descr) if dtype.metadata is not None: @@ -1075,6 +1080,15 @@ def test_metadata(self): dt = np.dtype(int, metadata={'datum': 1}) self.check_pickling(dt) + @pytest.mark.parametrize("DType", + [type(np.dtype(t)) for t in np.typecodes['All']] + + [np.dtype(rational), np.dtype]) + def test_pickle_types(self, DType): + # Check that DTypes (the classes/types) roundtrip when pickling + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + roundtrip_DType = pickle.loads(pickle.dumps(DType, proto)) + assert roundtrip_DType is DType + def test_rational_dtype(): # test for bug gh-5719 From 87c5d51a32b406a9872428b9ca1db6c5242dded6 Mon Sep 17 00:00:00 2001 From: Touqir Sajed Date: Fri, 5 Feb 2021 16:57:56 +0600 Subject: [PATCH 0489/1270] Replaced manual sums with horizontal simd sums for count_nonzero_16/64 --- numpy/core/src/multiarray/item_selection.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 373286d2305f..f92327827cca 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ 
-2218,8 +2218,6 @@ count_nonzero_int16_simd(npy_int16 *d, npy_uintp unrollx) const npyv_u16 vzero = npyv_zero_u16(); npy_int16 *target = d; - npy_uint16 sums[npyv_nlanes_u16]; - while (d Date: Fri, 5 Feb 2021 13:11:10 +0200 Subject: [PATCH 0490/1270] TST: use setup-python action for pypy, disable win64 pypy --- .github/workflows/build_test.yml | 18 +++--------------- azure-pipelines.yml | 10 +++++----- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index ce0c492e6f98..c45563d25fcd 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -182,21 +182,9 @@ jobs: with: submodules: recursive fetch-depth: 0 - - name: get_pypy - run: | - wget -q https://downloads.python.org/pypy/pypy3.7-v7.3.3-linux64.tar.bz2 -O pypy.tar.bz2 - mkdir -p pypy3 - (cd pypy3; tar --strip-components=1 -xf ../pypy.tar.bz2) - pypy3/bin/pypy3 -mensurepip - pypy3/bin/pypy3 -m pip install --upgrade pip wheel - if [ ! -e pypy3/bin/python ] - then - pushd pypy3/bin - ln -s pypy3 python - popd - fi - echo $PWD/pypy3/bin >> $GITHUB_PATH - + - uses: actions/setup-python@v2 + with: + python-version: pypy-3.7 - uses: ./.github/actions sdist: diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 71728734cdd7..d55bb6cef481 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -230,11 +230,11 @@ stages: PYTHON_ARCH: 'x64' TEST_MODE: full BITS: 64 - PyPy37-64bit-full: - PYTHON_VERSION: 'PyPy3.7' - PYTHON_ARCH: 'x64' - TEST_MODE: fast - BITS: 64 + #PyPy37-64bit-full: + # PYTHON_VERSION: 'PyPy3.7' + # PYTHON_ARCH: 'x64' + # TEST_MODE: fast + # BITS: 64 Python38-32bit-fast: PYTHON_VERSION: '3.8' PYTHON_ARCH: 'x86' From 6426c476f940188241a51ec909306d21bc47227f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 25 Jan 2021 17:33:41 +0100 Subject: [PATCH 0491/1270] ENH: Add annotations for 16 ufunc-based `ndarray` magic methods --- numpy/__init__.pyi | 344 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 328 insertions(+), 16 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b29ba38da81f..5f731683dc4e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -11,6 +11,7 @@ from numpy.typing import ( ArrayLike, _ArrayND, _ArrayOrScalar, + _SupportsArray, _NestedSequence, _RecursiveSequence, _ArrayLikeBool_co, @@ -1839,22 +1840,333 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): other: _RecursiveSequence, ) -> Any: ... - def __floordiv__(self, other: ArrayLike) -> Any: ... - def __rfloordiv__(self, other: ArrayLike) -> Any: ... - def __pow__(self, other: ArrayLike) -> Any: ... - def __rpow__(self, other: ArrayLike) -> Any: ... - def __truediv__(self, other: ArrayLike) -> Any: ... - def __rtruediv__(self, other: ArrayLike) -> Any: ... - def __lshift__(self, other: ArrayLike) -> Any: ... - def __rlshift__(self, other: ArrayLike) -> Any: ... - def __rshift__(self, other: ArrayLike) -> Any: ... - def __rrshift__(self, other: ArrayLike) -> Any: ... - def __and__(self, other: ArrayLike) -> Any: ... - def __rand__(self, other: ArrayLike) -> Any: ... - def __xor__(self, other: ArrayLike) -> Any: ... - def __rxor__(self, other: ArrayLike) -> Any: ... - def __or__(self, other: ArrayLike) -> Any: ... - def __ror__(self, other: ArrayLike) -> Any: ... + @overload + def __floordiv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... 
+ @overload + def __floordiv__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[int64]: ... + @overload + def __floordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __floordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __floordiv__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __floordiv__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __floordiv__( + self: _ArrayND[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rfloordiv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rfloordiv__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[int64]: ... + @overload + def __rfloordiv__(self: _ArrayND[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __rfloordiv__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rfloordiv__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rfloordiv__( + self: _ArrayND[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __pow__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __pow__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... 
# type: ignore[misc] + @overload + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... + @overload + def __pow__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __pow__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __pow__( + self: _ArrayND[Union[bool_, number[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rpow__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rpow__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... + @overload + def __rpow__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rpow__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rpow__( + self: _ArrayND[Union[bool_, number[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __truediv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> _ArrayOrScalar[float64]: ... # type: ignore[misc] + @overload + def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __truediv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[float64]: ... + @overload + def __truediv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __truediv__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __truediv__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __truediv__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __truediv__( + self: _ArrayND[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rtruediv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> _ArrayOrScalar[float64]: ... # type: ignore[misc] + @overload + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + @overload + def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rtruediv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[float64]: ... + @overload + def __rtruediv__(self: _ArrayND[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... 
+ @overload + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + @overload + def __rtruediv__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rtruediv__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rtruediv__( + self: _ArrayND[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __lshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __lshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __lshift__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __lshift__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __lshift__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rlshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rlshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __rlshift__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rlshift__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rlshift__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __rshift__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rshift__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rshift__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rrshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rrshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __rrshift__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rrshift__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... 
+ @overload + def __rrshift__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __and__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __and__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __and__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __and__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __and__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rand__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rand__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __rand__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rand__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rand__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __xor__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __xor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __xor__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __xor__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __xor__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rxor__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rxor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __rxor__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __rxor__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rxor__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __or__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __or__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... 
# type: ignore[misc] + @overload + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __or__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __or__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __or__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __ror__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ror__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + @overload + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + @overload + def __ror__(self: _ArrayND[object_], other: Any) -> Any: ... + @overload + def __ror__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __ror__( + self: _ArrayND[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... # `np.generic` does not support inplace operations def __iadd__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... From 7bbe17fc2bf27a9087e9ef17f1514cfde16b0272 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 25 Jan 2021 17:34:39 +0100 Subject: [PATCH 0492/1270] MAINT: Clarify that `timdelta64 / bool` raises --- numpy/typing/_callable.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 1591ca144591..0e34cd79961e 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -143,6 +143,8 @@ class _TD64Div(Protocol[_NumberType_co]): @overload def __call__(self, __other: timedelta64) -> _NumberType_co: ... @overload + def __call__(self, __other: _BoolLike_co) -> NoReturn: ... + @overload def __call__(self, __other: _FloatLike_co) -> timedelta64: ... 
class _IntTrueDiv(Protocol[_NBit1]): From 92d6159e08d3773d52f6ea86753aef417e5fa8c3 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 25 Jan 2021 18:15:51 +0100 Subject: [PATCH 0493/1270] TST: Add typing tests for `ndarray.__floordivide__` --- numpy/typing/tests/data/fail/arithmetic.py | 37 +++++++ numpy/typing/tests/data/pass/arithmetic.py | 100 ++++++++++++++++++ numpy/typing/tests/data/reveal/arithmetic.py | 102 +++++++++++++++++++ 3 files changed, 239 insertions(+) diff --git a/numpy/typing/tests/data/fail/arithmetic.py b/numpy/typing/tests/data/fail/arithmetic.py index bad7040b945a..1ca8a7b70092 100644 --- a/numpy/typing/tests/data/fail/arithmetic.py +++ b/numpy/typing/tests/data/fail/arithmetic.py @@ -6,17 +6,25 @@ td = np.timedelta64(0, "D") AR_b: np.ndarray[Any, np.dtype[np.bool_]] +AR_u: np.ndarray[Any, np.dtype[np.uint32]] +AR_i: np.ndarray[Any, np.dtype[np.int64]] AR_f: np.ndarray[Any, np.dtype[np.float64]] AR_c: np.ndarray[Any, np.dtype[np.complex128]] AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] AR_M: np.ndarray[Any, np.dtype[np.datetime64]] +ANY: Any + AR_LIKE_b: List[bool] +AR_LIKE_u: List[np.uint32] +AR_LIKE_i: List[int] AR_LIKE_f: List[float] AR_LIKE_c: List[complex] AR_LIKE_m: List[np.timedelta64] AR_LIKE_M: List[np.datetime64] +# Array subtraction + # NOTE: mypys `NoReturn` errors are, unfortunately, not that great _1 = AR_b - AR_LIKE_b # E: Need type annotation _2 = AR_LIKE_b - AR_b # E: Need type annotation @@ -34,6 +42,35 @@ AR_m - AR_LIKE_M # E: Unsupported operand types AR_LIKE_m - AR_M # E: Unsupported operand types +# array floor division + +AR_M // AR_LIKE_b # E: Unsupported operand types +AR_M // AR_LIKE_u # E: Unsupported operand types +AR_M // AR_LIKE_i # E: Unsupported operand types +AR_M // AR_LIKE_f # E: Unsupported operand types +AR_M // AR_LIKE_c # E: Unsupported operand types +AR_M // AR_LIKE_m # E: Unsupported operand types +AR_M // AR_LIKE_M # E: Unsupported operand types + +AR_b // AR_LIKE_M # E: Unsupported operand types +AR_u // AR_LIKE_M # E: Unsupported operand types +AR_i // AR_LIKE_M # E: Unsupported operand types +AR_f // AR_LIKE_M # E: Unsupported operand types +AR_c // AR_LIKE_M # E: Unsupported operand types +AR_m // AR_LIKE_M # E: Unsupported operand types +AR_M // AR_LIKE_M # E: Unsupported operand types + +_3 = AR_m // AR_LIKE_b # E: Need type annotation +AR_m // AR_LIKE_c # E: Unsupported operand types + +AR_b // AR_LIKE_m # E: Unsupported operand types +AR_u // AR_LIKE_m # E: Unsupported operand types +AR_i // AR_LIKE_m # E: Unsupported operand types +AR_f // AR_LIKE_m # E: Unsupported operand types +AR_c // AR_LIKE_m # E: Unsupported operand types + +# Scalars + b_ - b_ # E: No overload variant dt + dt # E: Unsupported operand types diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 4840d1fab1e7..86e7a92a801d 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -36,6 +36,12 @@ def __sub__(self, value: Any) -> Object: def __rsub__(self, value: Any) -> Object: return self + def __floordiv__(self, value: Any) -> Object: + return self + + def __rfloordiv__(self, value: Any) -> Object: + return self + AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True]) AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) @@ -167,6 +173,100 @@ def __rsub__(self, value: Any) -> Object: AR_LIKE_c - AR_O AR_LIKE_O - AR_O +# Array floor division + +AR_b // AR_LIKE_b +AR_b // AR_LIKE_u +AR_b // AR_LIKE_i +AR_b // 
AR_LIKE_f +AR_b // AR_LIKE_c +AR_b // AR_LIKE_O + +AR_LIKE_b // AR_b +AR_LIKE_u // AR_b +AR_LIKE_i // AR_b +AR_LIKE_f // AR_b +AR_LIKE_c // AR_b +AR_LIKE_O // AR_b + +AR_u // AR_LIKE_b +AR_u // AR_LIKE_u +AR_u // AR_LIKE_i +AR_u // AR_LIKE_f +AR_u // AR_LIKE_c +AR_u // AR_LIKE_O + +AR_LIKE_b // AR_u +AR_LIKE_u // AR_u +AR_LIKE_i // AR_u +AR_LIKE_f // AR_u +AR_LIKE_c // AR_u +AR_LIKE_m // AR_u +AR_LIKE_O // AR_u + +AR_i // AR_LIKE_b +AR_i // AR_LIKE_u +AR_i // AR_LIKE_i +AR_i // AR_LIKE_f +AR_i // AR_LIKE_c +AR_i // AR_LIKE_O + +AR_LIKE_b // AR_i +AR_LIKE_u // AR_i +AR_LIKE_i // AR_i +AR_LIKE_f // AR_i +AR_LIKE_c // AR_i +AR_LIKE_m // AR_i +AR_LIKE_O // AR_i + +AR_f // AR_LIKE_b +AR_f // AR_LIKE_u +AR_f // AR_LIKE_i +AR_f // AR_LIKE_f +AR_f // AR_LIKE_c +AR_f // AR_LIKE_O + +AR_LIKE_b // AR_f +AR_LIKE_u // AR_f +AR_LIKE_i // AR_f +AR_LIKE_f // AR_f +AR_LIKE_c // AR_f +AR_LIKE_m // AR_f +AR_LIKE_O // AR_f + +AR_c // AR_LIKE_b +AR_c // AR_LIKE_u +AR_c // AR_LIKE_i +AR_c // AR_LIKE_f +AR_c // AR_LIKE_c + +AR_LIKE_b // AR_c +AR_LIKE_u // AR_c +AR_LIKE_i // AR_c +AR_LIKE_f // AR_c +AR_LIKE_c // AR_c +AR_LIKE_O // AR_c + +AR_m // AR_LIKE_u +AR_m // AR_LIKE_i +AR_m // AR_LIKE_f +AR_m // AR_LIKE_m + +AR_LIKE_m // AR_m + +AR_O // AR_LIKE_b +AR_O // AR_LIKE_u +AR_O // AR_LIKE_i +AR_O // AR_LIKE_f +AR_O // AR_LIKE_c +AR_O // AR_LIKE_O + +AR_LIKE_b // AR_O +AR_LIKE_u // AR_O +AR_LIKE_i // AR_O +AR_LIKE_f // AR_O +AR_LIKE_O // AR_O + # unary ops -c16 diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.py index 1a0f595c50c0..b45330910da5 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.py +++ b/numpy/typing/tests/data/reveal/arithmetic.py @@ -159,6 +159,108 @@ reveal_type(AR_LIKE_M - AR_O) # E: Any reveal_type(AR_LIKE_O - AR_O) # E: Any +# Array floor division + +reveal_type(AR_b // AR_LIKE_b) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]] +reveal_type(AR_b // AR_LIKE_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_b // AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_b // AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_b // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_b // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_b) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]] +reveal_type(AR_LIKE_u // AR_b) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_LIKE_i // AR_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_f // AR_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_c // AR_b) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_O // AR_b) # E: Any + +reveal_type(AR_u // AR_LIKE_b) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_u // AR_LIKE_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_u // AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_u 
// AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_u // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_u // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_LIKE_u // AR_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] +reveal_type(AR_LIKE_i // AR_u) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_f // AR_u) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_c // AR_u) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_m // AR_u) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_O // AR_u) # E: Any + +reveal_type(AR_i // AR_LIKE_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_i // AR_LIKE_u) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_i // AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_i // AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_i // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_i // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_u // AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_i // AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_LIKE_f // AR_i) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_c // AR_i) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_m // AR_i) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_O // AR_i) # E: Any + +reveal_type(AR_f // AR_LIKE_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_f // AR_LIKE_u) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_f // AR_LIKE_i) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_f // AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_f // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_f // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_u // AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_i // AR_f) # E: Union[numpy.floating[Any], 
numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_f // AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_LIKE_c // AR_f) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_m // AR_f) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_O // AR_f) # E: Any + +reveal_type(AR_c // AR_LIKE_b) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c // AR_LIKE_u) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c // AR_LIKE_i) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c // AR_LIKE_f) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_u // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_i // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_f // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_c // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_O // AR_c) # E: Any + +reveal_type(AR_m // AR_LIKE_u) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_m // AR_LIKE_i) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_m // AR_LIKE_f) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_m // AR_LIKE_m) # E: Union[{int64}, numpy.ndarray[Any, numpy.dtype[{int64}]]] +reveal_type(AR_m // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_m // AR_m) # E: Union[{int64}, numpy.ndarray[Any, numpy.dtype[{int64}]]] +reveal_type(AR_LIKE_O // AR_m) # E: Any + +reveal_type(AR_O // AR_LIKE_b) # E: Any +reveal_type(AR_O // AR_LIKE_u) # E: Any +reveal_type(AR_O // AR_LIKE_i) # E: Any +reveal_type(AR_O // AR_LIKE_f) # E: Any +reveal_type(AR_O // AR_LIKE_c) # E: Any +reveal_type(AR_O // AR_LIKE_m) # E: Any +reveal_type(AR_O // AR_LIKE_M) # E: Any +reveal_type(AR_O // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_O) # E: Any +reveal_type(AR_LIKE_u // AR_O) # E: Any +reveal_type(AR_LIKE_i // AR_O) # E: Any +reveal_type(AR_LIKE_f // AR_O) # E: Any +reveal_type(AR_LIKE_c // AR_O) # E: Any +reveal_type(AR_LIKE_m // AR_O) # E: Any +reveal_type(AR_LIKE_M // AR_O) # E: Any +reveal_type(AR_LIKE_O // AR_O) # E: Any + # unary ops reveal_type(-c16) # E: {complex128} From b3e44cc6e5ba721670c245bc620a0bd7e2913630 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 1 Feb 2021 23:56:27 +0100 Subject: [PATCH 0494/1270] TST: Provide a more useful exception message if the `pass` tests fail --- 
numpy/typing/tests/test_typing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 324312a92af6..fc42e87b1397 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -89,7 +89,8 @@ def get_test_cases(directory): def test_success(path): # Alias `OUTPUT_MYPY` so that it appears in the local namespace output_mypy = OUTPUT_MYPY - assert path not in output_mypy + if path in output_mypy: + raise AssertionError("\n".join(v for v in output_mypy[path].values())) @pytest.mark.slow From 8f0aaf522293f8a72c96bc38773c4cd9f0d552c8 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 2 Feb 2021 23:22:13 +0100 Subject: [PATCH 0495/1270] STY: Remove the string-encasing of certain annotations String-encasing is redunant now that we can use `from __future__ import annotations` --- numpy/typing/mypy_plugin.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index bdd5c50f3389..813ff16d29f5 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -1,5 +1,7 @@ """A module containing `numpy`-specific plugins for mypy.""" +from __future__ import annotations + import typing as t import numpy as np @@ -39,10 +41,10 @@ def _get_precision_dict() -> t.Dict[str, str]: #: A dictionary mapping type-aliases in `numpy.typing._nbit` to #: concrete `numpy.typing.NBitBase` subclasses. -_PRECISION_DICT = _get_precision_dict() +_PRECISION_DICT: t.Final = _get_precision_dict() -def _hook(ctx: "AnalyzeTypeContext") -> "Type": +def _hook(ctx: AnalyzeTypeContext) -> Type: """Replace a type-alias with a concrete ``NBitBase`` subclass.""" typ, _, api = ctx name = typ.name.split(".")[-1] @@ -50,7 +52,7 @@ def _hook(ctx: "AnalyzeTypeContext") -> "Type": return api.named_type(name_new) -if MYPY_EX is None: +if t.TYPE_CHECKING or MYPY_EX is None: class _NumpyPlugin(Plugin): """A plugin for assigning platform-specific `numpy.number` precisions.""" @@ -64,6 +66,6 @@ def plugin(version: str) -> t.Type[_NumpyPlugin]: return _NumpyPlugin else: - def plugin(version: str) -> t.Type["_NumpyPlugin"]: + def plugin(version: str) -> t.Type[_NumpyPlugin]: """An entry-point for mypy.""" raise MYPY_EX From a1640ad416c427d397695f51011000a1d7583f22 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 3 Feb 2021 00:37:19 +0100 Subject: [PATCH 0496/1270] ENH: Add support for all potential extended-precision `np.number`s --- numpy/__init__.pyi | 18 ++++++++++--- numpy/typing/_extended_precision.py | 42 +++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 4 deletions(-) create mode 100644 numpy/typing/_extended_precision.py diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b29ba38da81f..0312dfad0b1e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -127,6 +127,20 @@ from numpy.typing._callable import ( _NumberOp, _ComparisonOp, ) +from numpy.typing._extended_precision import ( + uint128 as uint128, + uint256 as uint256, + int128 as int128, + int256 as int256, + float80 as float80, + float96 as float96, + float128 as float128, + float256 as float256, + complex160 as complex160, + complex192 as complex192, + complex256 as complex256, + complex512 as complex512, +) from typing import ( Any, @@ -2195,7 +2209,6 @@ class floating(inexact[_NBit1]): float16 = floating[_16Bit] float32 = floating[_32Bit] float64 = floating[_64Bit] -float128 = floating[_128Bit] half = floating[_NBitHalf] single = 
floating[_NBitSingle] @@ -2230,7 +2243,6 @@ class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): complex64 = complexfloating[_32Bit, _32Bit] complex128 = complexfloating[_64Bit, _64Bit] -complex256 = complexfloating[_128Bit, _128Bit] csingle = complexfloating[_NBitSingle, _NBitSingle] singlecomplex = complexfloating[_NBitSingle, _NBitSingle] @@ -2284,8 +2296,6 @@ class str_(character, str): unicode_ = str_ str0 = str_ -# TODO: Platform dependent types: float128, complex256, float96 - def array( object: object, dtype: DTypeLike = ..., diff --git a/numpy/typing/_extended_precision.py b/numpy/typing/_extended_precision.py new file mode 100644 index 000000000000..3f1ce2038282 --- /dev/null +++ b/numpy/typing/_extended_precision.py @@ -0,0 +1,42 @@ +"""A module with platform-specific extended precision `numpy.number` subclasses. + +The subclasses are defined here (instead of ``__init__.pyi``) such +that they can be imported conditionally via the numpy's mypy plugin. +""" + +from typing import TYPE_CHECKING + +import numpy as np +from . import ( + _80Bit, + _96Bit, + _128Bit, + _256Bit, +) + +if TYPE_CHECKING: + uint128 = np.unsignedinteger[_128Bit] + uint256 = np.unsignedinteger[_256Bit] + int128 = np.signedinteger[_128Bit] + int256 = np.signedinteger[_256Bit] + float80 = np.floating[_80Bit] + float96 = np.floating[_96Bit] + float128 = np.floating[_128Bit] + float256 = np.floating[_256Bit] + complex160 = np.complexfloating[_80Bit, _80Bit] + complex192 = np.complexfloating[_96Bit, _96Bit] + complex256 = np.complexfloating[_128Bit, _128Bit] + complex512 = np.complexfloating[_256Bit, _256Bit] +else: + uint128 = NotImplemented + uint256 = NotImplemented + int128 = NotImplemented + int256 = NotImplemented + float80 = NotImplemented + float96 = NotImplemented + float128 = NotImplemented + float256 = NotImplemented + complex160 = NotImplemented + complex192 = NotImplemented + complex256 = NotImplemented + complex512 = NotImplemented From af7106f76e3eb2ab6ada000ced512951f378b5fc Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 3 Feb 2021 00:50:40 +0100 Subject: [PATCH 0497/1270] ENH: Add a plugin for exposing platform-specific extended-precision `np.number`s --- numpy/__init__.pyi | 3 ++ numpy/typing/__init__.py | 21 ++++++++----- numpy/typing/mypy_plugin.py | 60 +++++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 8 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0312dfad0b1e..e5d5536b83c5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -127,6 +127,9 @@ from numpy.typing._callable import ( _NumberOp, _ComparisonOp, ) + +# NOTE: Numpy's mypy plugin is used for removing the types unavailable +# to the specific platform from numpy.typing._extended_precision import ( uint128 as uint128, uint256 as uint256, diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 8147789fb7c7..8f5df483b4af 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -22,14 +22,19 @@ Mypy plugin ----------- -A mypy_ plugin is available for automatically assigning the (platform-dependent) -precisions of certain `~numpy.number` subclasses, including the likes of -`~numpy.int_`, `~numpy.intp` and `~numpy.longlong`. See the documentation on -:ref:`scalar types ` for a comprehensive overview -of the affected classes. - -Note that while usage of the plugin is completely optional, without it the -precision of above-mentioned classes will be inferred as `~typing.Any`. 
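The new ``numpy/typing/_extended_precision.py`` module only gives these aliases a real meaning for the type checker; at runtime they are placeholders, because the underlying scalar types exist only on some platforms. A small runtime check (an illustration, not part of the patch) shows why per-platform pruning is needed:

.. code-block:: python

    import numpy as np

    # Which extended-precision scalar types this build actually provides
    # depends on the platform's C ``long double``.
    for name in ("float96", "float128", "complex192", "complex256"):
        status = "available" if hasattr(np, name) else "not available"
        print(f"np.{name}: {status} on this platform")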
+A mypy_ plugin is distributed in `numpy.typing` for managing a number of +platform-specific annotations. Its function can be split into to parts: + +* Assigning the (platform-dependent) precisions of certain `~numpy.number` subclasses, + including the likes of `~numpy.int_`, `~numpy.intp` and `~numpy.longlong`. + See the documentation on :ref:`scalar types ` for a + comprehensive overview of the affected classes. without the plugin the precision + of all relevant classes will be inferred as `~typing.Any`. +* Removing all extended-precision `~numpy.number` subclasses that are unavailable + for the platform in question. Most notable this includes the likes of + `~numpy.float128` and `~numpy.complex256`. Without the plugin *all* + extended-precision types will, as far as mypy is concerned, be available + to all platforms. To enable the plugin, one must add it to their mypy `configuration file`_: diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 813ff16d29f5..901bf4fb121e 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -10,6 +10,9 @@ import mypy.types from mypy.types import Type from mypy.plugin import Plugin, AnalyzeTypeContext + from mypy.nodes import MypyFile, ImportFrom, Statement + from mypy.build import PRI_MED + _HookFunc = t.Callable[[AnalyzeTypeContext], Type] MYPY_EX: t.Optional[ModuleNotFoundError] = None except ModuleNotFoundError as ex: @@ -39,10 +42,32 @@ def _get_precision_dict() -> t.Dict[str, str]: return ret +def _get_extended_precision_list() -> t.List[str]: + extended_types = [np.ulonglong, np.longlong, np.longdouble, np.clongdouble] + extended_names = { + "uint128", + "uint256", + "int128", + "int256", + "float80", + "float96", + "float128", + "float256", + "complex160", + "complex192", + "complex256", + "complex512", + } + return [i.__name__ for i in extended_types if i.__name__ in extended_names] + + #: A dictionary mapping type-aliases in `numpy.typing._nbit` to #: concrete `numpy.typing.NBitBase` subclasses. _PRECISION_DICT: t.Final = _get_precision_dict() +#: A list with the names of all extended precision `np.number` subclasses. +_EXTENDED_PRECISION_LIST: t.Final = _get_extended_precision_list() + def _hook(ctx: AnalyzeTypeContext) -> Type: """Replace a type-alias with a concrete ``NBitBase`` subclass.""" @@ -53,14 +78,49 @@ def _hook(ctx: AnalyzeTypeContext) -> Type: if t.TYPE_CHECKING or MYPY_EX is None: + def _index(iterable: t.Iterable[Statement], id: str) -> int: + """Identify the first ``ImportFrom`` instance the specified `id`.""" + for i, value in enumerate(iterable): + if getattr(value, "id", None) == id: + return i + else: + raise ValueError("Failed to identify a `ImportFrom` instance " + f"with the following id: {id!r}") + class _NumpyPlugin(Plugin): """A plugin for assigning platform-specific `numpy.number` precisions.""" def get_type_analyze_hook(self, fullname: str) -> t.Optional[_HookFunc]: + """Set the precision of platform-specific `numpy.number` subclasses. + + For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`. + """ if fullname in _PRECISION_DICT: return _hook return None + def get_additional_deps(self, file: MypyFile) -> t.List[t.Tuple[int, str, int]]: + """Import platform-specific extended-precision `numpy.number` subclasses. + + For example: `numpy.float96`, `numpy.float128` and `numpy.complex256`. 
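For orientation, a file such as the sketch below (an illustration meant to be checked with mypy rather than executed; the revealed precisions are platform assumptions) exercises both halves of the plugin's job once ``plugins = numpy.typing.mypy_plugin`` is set in the mypy configuration file:

.. code-block:: python

    # extended_demo.py -- run through mypy, not Python.
    import numpy as np

    # 1. Platform-dependent precisions become concrete: on a typical
    #    Linux/x86-64 build this reveals ``floating[_128Bit]`` for longdouble.
    reveal_type(np.longdouble())

    # 2. Extended-precision names are pruned per platform: this annotation is
    #    accepted only where ``np.float128`` really exists; elsewhere the
    #    plugin makes mypy report a "has no attribute" error.
    x: np.float128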
+ """ + ret = [(PRI_MED, file.fullname, -1)] + if file.fullname == "numpy": + # Import ONLY the extended precision types available to the + # platform in question + imports = ImportFrom( + "numpy.typing._extended_precision", 0, + names=[(v, v) for v in _EXTENDED_PRECISION_LIST], + ) + imports.is_top_level = True + + # Replace the much broader extended-precision import + # (defined in `numpy/__init__.pyi`) with a more specific one + for lst in [file.defs, file.imports]: # type: t.List[Statement] + i = _index(lst, "numpy.typing._extended_precision") + lst[i] = imports + return ret + def plugin(version: str) -> t.Type[_NumpyPlugin]: """An entry-point for mypy.""" return _NumpyPlugin From 3164ad145d51210f669fdddceb72a5f8317b5bd6 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 3 Feb 2021 01:50:53 +0100 Subject: [PATCH 0498/1270] TST: Add tests for the extended-precision plugin --- .../tests/data/misc/extended_precision.py | 17 ++++++ numpy/typing/tests/test_typing.py | 58 ++++++++++++++++++- 2 files changed, 73 insertions(+), 2 deletions(-) create mode 100644 numpy/typing/tests/data/misc/extended_precision.py diff --git a/numpy/typing/tests/data/misc/extended_precision.py b/numpy/typing/tests/data/misc/extended_precision.py new file mode 100644 index 000000000000..1e495e4f3cc4 --- /dev/null +++ b/numpy/typing/tests/data/misc/extended_precision.py @@ -0,0 +1,17 @@ +import numpy as np + +reveal_type(np.uint128()) +reveal_type(np.uint256()) + +reveal_type(np.int128()) +reveal_type(np.int256()) + +reveal_type(np.float80()) +reveal_type(np.float96()) +reveal_type(np.float128()) +reveal_type(np.float256()) + +reveal_type(np.complex160()) +reveal_type(np.complex192()) +reveal_type(np.complex256()) +reveal_type(np.complex512()) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 324312a92af6..905423641cde 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -8,7 +8,7 @@ import pytest import numpy as np -from numpy.typing.mypy_plugin import _PRECISION_DICT +from numpy.typing.mypy_plugin import _PRECISION_DICT, _EXTENDED_PRECISION_LIST try: from mypy import api @@ -22,6 +22,7 @@ PASS_DIR = os.path.join(DATA_DIR, "pass") FAIL_DIR = os.path.join(DATA_DIR, "fail") REVEAL_DIR = os.path.join(DATA_DIR, "reveal") +MISC_DIR = os.path.join(DATA_DIR, "misc") MYPY_INI = os.path.join(DATA_DIR, "mypy.ini") CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache") @@ -51,7 +52,7 @@ def run_mypy() -> None: if os.path.isdir(CACHE_DIR): shutil.rmtree(CACHE_DIR) - for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR): + for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR): # Run mypy stdout, stderr, _ = api.run([ "--config-file", @@ -157,15 +158,27 @@ def _construct_format_dict(): "uint16": "numpy.unsignedinteger[numpy.typing._16Bit]", "uint32": "numpy.unsignedinteger[numpy.typing._32Bit]", "uint64": "numpy.unsignedinteger[numpy.typing._64Bit]", + "uint128": "numpy.unsignedinteger[numpy.typing._128Bit]", + "uint256": "numpy.unsignedinteger[numpy.typing._256Bit]", "int8": "numpy.signedinteger[numpy.typing._8Bit]", "int16": "numpy.signedinteger[numpy.typing._16Bit]", "int32": "numpy.signedinteger[numpy.typing._32Bit]", "int64": "numpy.signedinteger[numpy.typing._64Bit]", + "int128": "numpy.signedinteger[numpy.typing._128Bit]", + "int256": "numpy.signedinteger[numpy.typing._256Bit]", "float16": "numpy.floating[numpy.typing._16Bit]", "float32": "numpy.floating[numpy.typing._32Bit]", "float64": "numpy.floating[numpy.typing._64Bit]", + "float80": 
"numpy.floating[numpy.typing._80Bit]", + "float96": "numpy.floating[numpy.typing._96Bit]", + "float128": "numpy.floating[numpy.typing._128Bit]", + "float256": "numpy.floating[numpy.typing._256Bit]", "complex64": "numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit]", "complex128": "numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit]", + "complex160": "numpy.complexfloating[numpy.typing._80Bit, numpy.typing._80Bit]", + "complex192": "numpy.complexfloating[numpy.typing._96Bit, numpy.typing._96Bit]", + "complex256": "numpy.complexfloating[numpy.typing._128Bit, numpy.typing._128Bit]", + "complex512": "numpy.complexfloating[numpy.typing._256Bit, numpy.typing._256Bit]", "ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]", "ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]", @@ -267,3 +280,44 @@ def test_code_runs(path): spec = importlib.util.spec_from_file_location(f"{dirname}.{filename}", path) test_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(test_module) + + +LINENO_MAPPING = { + 3: "uint128", + 4: "uint256", + 6: "int128", + 7: "int256", + 9: "float80", + 10: "float96", + 11: "float128", + 12: "float256", + 14: "complex160", + 15: "complex192", + 16: "complex256", + 17: "complex512", +} + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +def test_extended_precision() -> None: + path = os.path.join(MISC_DIR, "extended_precision.py") + output_mypy = OUTPUT_MYPY + assert path in output_mypy + + for _msg in output_mypy[path]: + *_, _lineno, msg_typ, msg = _msg.split(":") + lineno = int(_lineno) + msg_typ = msg_typ.strip() + assert msg_typ in {"error", "note"} + + if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST: + if msg_typ == "error": + raise ValueError(f"Unexpected reveal line format: {lineno}") + else: + marker = FORMAT_DICT[LINENO_MAPPING[lineno]] + _test_reveal(path, marker, msg, lineno) + else: + if msg_typ == "error": + marker = "Module has no attribute" + _test_fail(path, marker, msg, lineno) From cff4ec0d7c4140a1906716dcb536f6a62e4eec69 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 3 Feb 2021 02:08:26 +0100 Subject: [PATCH 0499/1270] ENH: Add support for extended-precision `np.number` arithmetic --- numpy/typing/_callable.py | 46 ++++++++++++-------- numpy/typing/tests/data/reveal/arithmetic.py | 18 +++++++- 2 files changed, 45 insertions(+), 19 deletions(-) diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 1591ca144591..a196bdd75910 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -38,7 +38,7 @@ complexfloating, complex128, ) -from ._nbit import _NBitInt +from ._nbit import _NBitInt, _NBitDouble from ._scalars import ( _BoolLike_co, _IntLike_co, @@ -133,7 +133,7 @@ def __call__(self, __other: _BoolLike_co) -> _2Tuple[int8]: ... @overload # platform dependent def __call__(self, __other: int) -> _2Tuple[int_]: ... @overload - def __call__(self, __other: float) -> _2Tuple[float64]: ... + def __call__(self, __other: float) -> _2Tuple[floating[Union[_NBit1, _NBitDouble]]]: ... @overload def __call__(self, __other: _IntType) -> _2Tuple[_IntType]: ... @overload @@ -149,11 +149,13 @@ class _IntTrueDiv(Protocol[_NBit1]): @overload def __call__(self, __other: bool) -> floating[_NBit1]: ... @overload - def __call__(self, __other: int) -> floating[_NBitInt]: ... + def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ... @overload - def __call__(self, __other: float) -> float64: ... 
+ def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... @overload - def __call__(self, __other: complex) -> complex128: ... + def __call__( + self, __other: complex + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... @overload def __call__(self, __other: integer[_NBit2]) -> floating[Union[_NBit1, _NBit2]]: ... @@ -166,9 +168,11 @@ def __call__( self, __other: Union[int, signedinteger[Any]] ) -> Union[signedinteger[Any], float64]: ... @overload - def __call__(self, __other: float) -> float64: ... + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... @overload - def __call__(self, __other: complex) -> complex128: ... + def __call__( + self, __other: complex + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... @overload def __call__( self, __other: unsignedinteger[_NBit2] @@ -194,7 +198,7 @@ def __call__( self, __other: Union[int, signedinteger[Any]] ) -> Union[signedinteger[Any], float64]: ... @overload - def __call__(self, __other: float) -> float64: ... + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... @overload def __call__( self, __other: unsignedinteger[_NBit2] @@ -208,7 +212,7 @@ def __call__( self, __other: Union[int, signedinteger[Any]] ) -> Union[_2Tuple[signedinteger[Any]], _2Tuple[float64]]: ... @overload - def __call__(self, __other: float) -> _2Tuple[float64]: ... + def __call__(self, __other: float) -> _2Tuple[floating[Union[_NBit1, _NBitDouble]]]: ... @overload def __call__( self, __other: unsignedinteger[_NBit2] @@ -220,9 +224,11 @@ def __call__(self, __other: bool) -> signedinteger[_NBit1]: ... @overload def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ... @overload - def __call__(self, __other: float) -> float64: ... + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... @overload - def __call__(self, __other: complex) -> complex128: ... + def __call__( + self, __other: complex + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... @overload def __call__( self, __other: signedinteger[_NBit2] @@ -244,7 +250,7 @@ def __call__(self, __other: bool) -> signedinteger[_NBit1]: ... @overload def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ... @overload - def __call__(self, __other: float) -> float64: ... + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... @overload def __call__( self, __other: signedinteger[_NBit2] @@ -256,7 +262,7 @@ def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ... @overload def __call__(self, __other: int) -> _2Tuple[signedinteger[Union[_NBit1, _NBitInt]]]: ... @overload - def __call__(self, __other: float) -> _2Tuple[float64]: ... + def __call__(self, __other: float) -> _2Tuple[floating[Union[_NBit1, _NBitDouble]]]: ... @overload def __call__( self, __other: signedinteger[_NBit2] @@ -268,9 +274,11 @@ def __call__(self, __other: bool) -> floating[_NBit1]: ... @overload def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ... @overload - def __call__(self, __other: float) -> float64: ... + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... @overload - def __call__(self, __other: complex) -> complex128: ... + def __call__( + self, __other: complex + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... 
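The switch from plain ``float64``/``complex128`` return types to ``Union[_NBit1, _NBitDouble]`` reflects the value-based promotion rules NumPy used at the time: mixing with a Python ``float`` yields at least double precision, but the result may keep either operand's precision. A small runtime illustration (not part of the patch):

.. code-block:: python

    import numpy as np

    # Behaviour under the value-based promotion rules of NumPy 1.20/1.21;
    # the exact longdouble precision is platform-dependent.
    print((np.float32(1) + 1.0).dtype)      # float32  -- the Python float fits in 32 bits
    print((np.float32(1) + 1e300).dtype)    # float64  -- the value forces double precision
    print((np.longdouble(1) + 1.0).dtype)   # float128 on typical Linux/x86-64 builds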
@overload def __call__( self, __other: Union[integer[_NBit2], floating[_NBit2]] @@ -282,7 +290,7 @@ def __call__(self, __other: bool) -> floating[_NBit1]: ... @overload def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ... @overload - def __call__(self, __other: float) -> float64: ... + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... @overload def __call__( self, __other: Union[integer[_NBit2], floating[_NBit2]] @@ -294,7 +302,7 @@ def __call__(self, __other: bool) -> _2Tuple[floating[_NBit1]]: ... @overload def __call__(self, __other: int) -> _2Tuple[floating[Union[_NBit1, _NBitInt]]]: ... @overload - def __call__(self, __other: float) -> _2Tuple[float64]: ... + def __call__(self, __other: float) -> _2Tuple[floating[Union[_NBit1, _NBitDouble]]]: ... @overload def __call__( self, __other: Union[integer[_NBit2], floating[_NBit2]] @@ -306,7 +314,9 @@ def __call__(self, __other: bool) -> complexfloating[_NBit1, _NBit1]: ... @overload def __call__(self, __other: int) -> complexfloating[Union[_NBit1, _NBitInt], Union[_NBit1, _NBitInt]]: ... @overload - def __call__(self, __other: Union[float, complex]) -> complex128: ... + def __call__( + self, __other: Union[float, complex] + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... @overload def __call__( self, diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.py index 1a0f595c50c0..9ac96cac6a84 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.py +++ b/numpy/typing/tests/data/reveal/arithmetic.py @@ -1,5 +1,9 @@ from typing import Any, List import numpy as np +import numpy.typing as npt + +# Can't directly import `np.float128` as it is not available on all platforms +f16: np.floating[npt._128Bit] c16 = np.complex128() f8 = np.float64() @@ -161,6 +165,7 @@ # unary ops +reveal_type(-f16) # E: {float128} reveal_type(-c16) # E: {complex128} reveal_type(-c8) # E: {complex64} reveal_type(-f8) # E: {float64} @@ -172,6 +177,7 @@ reveal_type(-td) # E: numpy.timedelta64 reveal_type(-AR_f) # E: Any +reveal_type(+f16) # E: {float128} reveal_type(+c16) # E: {complex128} reveal_type(+c8) # E: {complex64} reveal_type(+f8) # E: {float64} @@ -183,6 +189,7 @@ reveal_type(+td) # E: numpy.timedelta64 reveal_type(+AR_f) # E: Any +reveal_type(abs(f16)) # E: {float128} reveal_type(abs(c16)) # E: {float64} reveal_type(abs(c8)) # E: {float32} reveal_type(abs(f8)) # E: {float64} @@ -230,6 +237,7 @@ reveal_type(b_ / u8) # E: {float64} reveal_type(b_ / u4) # E: {float64} reveal_type(b_ / f) # E: {float64} +reveal_type(b_ / f16) # E: {float128} reveal_type(b_ / f8) # E: {float64} reveal_type(b_ / f4) # E: {float32} reveal_type(b_ / c) # E: {complex128} @@ -244,6 +252,7 @@ reveal_type(u8 / b_) # E: {float64} reveal_type(u4 / b_) # E: {float64} reveal_type(f / b_) # E: {float64} +reveal_type(f16 / b_) # E: {float128} reveal_type(f8 / b_) # E: {float64} reveal_type(f4 / b_) # E: {float32} reveal_type(c / b_) # E: {complex128} @@ -252,6 +261,7 @@ # Complex +reveal_type(c16 + f16) # E: {complex256} reveal_type(c16 + c16) # E: {complex128} reveal_type(c16 + f8) # E: {complex128} reveal_type(c16 + i8) # E: {complex128} @@ -262,10 +272,10 @@ reveal_type(c16 + b) # E: {complex128} reveal_type(c16 + c) # E: {complex128} reveal_type(c16 + f) # E: {complex128} - reveal_type(c16 + i) # E: {complex128} reveal_type(c16 + AR_f) # E: Any +reveal_type(f16 + c16) # E: {complex256} reveal_type(c16 + c16) # E: {complex128} reveal_type(f8 + c16) # E: 
{complex128} reveal_type(i8 + c16) # E: {complex128} @@ -279,6 +289,7 @@ reveal_type(i + c16) # E: {complex128} reveal_type(AR_f + c16) # E: Any +reveal_type(c8 + f16) # E: {complex256} reveal_type(c8 + c16) # E: {complex128} reveal_type(c8 + f8) # E: {complex128} reveal_type(c8 + i8) # E: {complex128} @@ -292,6 +303,7 @@ reveal_type(c8 + i) # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}] reveal_type(c8 + AR_f) # E: Any +reveal_type(f16 + c8) # E: {complex256} reveal_type(c16 + c8) # E: {complex128} reveal_type(f8 + c8) # E: {complex128} reveal_type(i8 + c8) # E: {complex128} @@ -307,6 +319,7 @@ # Float +reveal_type(f8 + f16) # E: {float128} reveal_type(f8 + f8) # E: {float64} reveal_type(f8 + i8) # E: {float64} reveal_type(f8 + f4) # E: {float64} @@ -318,6 +331,7 @@ reveal_type(f8 + i) # E: {float64} reveal_type(f8 + AR_f) # E: Any +reveal_type(f16 + f8) # E: {float128} reveal_type(f8 + f8) # E: {float64} reveal_type(i8 + f8) # E: {float64} reveal_type(f4 + f8) # E: {float64} @@ -329,6 +343,7 @@ reveal_type(i + f8) # E: {float64} reveal_type(AR_f + f8) # E: Any +reveal_type(f4 + f16) # E: {float128} reveal_type(f4 + f8) # E: {float64} reveal_type(f4 + i8) # E: {float64} reveal_type(f4 + f4) # E: {float32} @@ -340,6 +355,7 @@ reveal_type(f4 + i) # E: numpy.floating[{_NBitInt}] reveal_type(f4 + AR_f) # E: Any +reveal_type(f16 + f4) # E: {float128} reveal_type(f8 + f4) # E: {float64} reveal_type(i8 + f4) # E: {float64} reveal_type(f4 + f4) # E: {float32} From 53496cdf82d6c9a3ddb40c12746d3257d9e09396 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 4 Feb 2021 16:10:30 +0100 Subject: [PATCH 0500/1270] REL: Add a release note for the mypy plugin update --- .../upcoming_changes/18322.new_feature.rst | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 doc/release/upcoming_changes/18322.new_feature.rst diff --git a/doc/release/upcoming_changes/18322.new_feature.rst b/doc/release/upcoming_changes/18322.new_feature.rst new file mode 100644 index 000000000000..5330b9a978e0 --- /dev/null +++ b/doc/release/upcoming_changes/18322.new_feature.rst @@ -0,0 +1,22 @@ +Let the mypy plugin manage extended-precision `numpy.number` subclasses +----------------------------------------------------------------------- + +The mypy_ plugin, introduced in `numpy/numpy#17843`_, has been expanded: +the plugin now removes annotations for platform-specific extended-precision +types that are not available to the platform in question. +For example, it will remove `~numpy.float128` when not available. + +Without the plugin *all* extended-precision types will, as far as mypy is concerned, +be available on all platforms. + +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + + +.. _mypy: http://mypy-lang.org/ +.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html +.. _`numpy/numpy#17843`: https://github.com/numpy/numpy/pull/17843 From 12a407cb35740bccf0f81696426398ca05ede7de Mon Sep 17 00:00:00 2001 From: Matthias Geier Date: Fri, 5 Feb 2021 18:04:11 +0100 Subject: [PATCH 0501/1270] DOC: Fix whitespace before "last updated" on overview page --- doc/source/_templates/indexcontent.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/_templates/indexcontent.html b/doc/source/_templates/indexcontent.html index 6dd6bf9b0851..64f472f3abc8 100644 --- a/doc/source/_templates/indexcontent.html +++ b/doc/source/_templates/indexcontent.html @@ -7,7 +7,7 @@

{{ docstitle|e }}

Welcome! This is the documentation for NumPy {{ release|e }}
-  {% if last_updated %}, last updated {{ last_updated|e }}{% endif %}.
+  {%- if last_updated %}, last updated {{ last_updated|e }}{% endif %}.

For users:

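The ``{%-`` form used in the fix above is Jinja's whitespace-control syntax: it strips the whitespace (including the newline) that precedes the tag, which is what removes the stray space before "last updated". A standalone sketch, assuming Jinja2 is installed (it is a dependency of any Sphinx build):

.. code-block:: python

    from jinja2 import Template

    plain = Template(
        "NumPy {{ release }}\n  {% if last_updated %}, last updated {{ last_updated }}{% endif %}."
    )
    minus = Template(
        "NumPy {{ release }}\n  {%- if last_updated %}, last updated {{ last_updated }}{% endif %}."
    )

    print(repr(plain.render(release="1.21", last_updated="Feb 2021")))
    # 'NumPy 1.21\n  , last updated Feb 2021.'  <- whitespace before the comma
    print(repr(minus.render(release="1.21", last_updated="Feb 2021")))
    # 'NumPy 1.21, last updated Feb 2021.'      <- '{%-' strips it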
From 46b5b0d428bbe3216140e799ca4aaa2507b8293e Mon Sep 17 00:00:00 2001 From: melissawm Date: Fri, 5 Feb 2021 14:05:34 -0300 Subject: [PATCH 0502/1270] DOC: Discussion on the @ operator and the matrix class --- doc/source/reference/arrays.classes.rst | 2 ++ doc/source/reference/arrays.ndarray.rst | 6 +++--- doc/source/reference/routines.linalg.rst | 16 ++++++++++++++++ numpy/core/code_generators/ufunc_docstrings.py | 2 +- 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 3a4ed21689e5..92c271f6b964 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -330,6 +330,8 @@ NumPy provides several hooks that classes can customize: returned by :func:`__array__`. This practice will return ``TypeError``. +.. _matrix-objects: + Matrix objects ============== diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 191367058dba..106ccd7cbc04 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -567,10 +567,10 @@ Matrix Multiplication: .. note:: Matrix operators ``@`` and ``@=`` were introduced in Python 3.5 - following PEP465. NumPy 1.10.0 has a preliminary implementation of ``@`` - for testing purposes. Further documentation can be found in the - :func:`matmul` documentation. + following PEP465_ and are available since NumPy 1.10.0. Further + information can be found in the :func:`matmul` documentation. +.. _PEP465: https://www.python.org/dev/peps/pep-0465/ Special methods =============== diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst index 86e168b262a2..97bdf3974916 100644 --- a/doc/source/reference/routines.linalg.rst +++ b/doc/source/reference/routines.linalg.rst @@ -30,10 +30,26 @@ flexible broadcasting options. For example, `numpy.linalg.solve` can handle "stacked" arrays, while `scipy.linalg.solve` accepts only a single square array as its first argument. +.. note:: + + The term *matrix* as it is used on this page indicates a 2d `numpy.array` + object, and *not* a `numpy.matrix` object. The latter is no longer + recommended, even for linear algebra. See + :ref:`the matrix object documentation` for + more information. + +The ``@`` operator +------------------ + +Introduced in NumPy 1.10.0, the ``@`` and ``@=`` operators are preferable to +other methods when computing the matrix product between 2d arrays. The +:func:`numpy.matmul` function implements the semantics of the ``@`` operator. + .. currentmodule:: numpy Matrix and vector products -------------------------- + .. autosummary:: :toctree: generated/ diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 04181fbc2e92..fe3f88eb2fe2 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -2808,7 +2808,7 @@ def add_newdoc(place, name, doc): (9, 5, 7, 3) >>> # n is 7, k is 4, m is 3 - The matmul function implements the semantics of the `@` operator introduced + The matmul function implements the semantics of the ``@`` operator introduced in Python 3.5 following PEP465. 
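As a short usage sketch for the documentation change above (illustration only): for 2-D arrays the ``@`` operator, ``np.matmul`` and ``ndarray.dot`` all produce the same matrix product, with ``@`` being the recommended spelling.

.. code-block:: python

    import numpy as np

    A = np.arange(6).reshape(2, 3)
    B = np.arange(12).reshape(3, 4)

    C = A @ B                        # preferred since Python 3.5 / NumPy 1.10
    assert C.shape == (2, 4)
    assert np.array_equal(C, np.matmul(A, B))
    assert np.array_equal(C, A.dot(B))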
Examples From a1a0b6acc1ea5d6fd84443e071f62c874b927f99 Mon Sep 17 00:00:00 2001 From: Matthias Geier Date: Fri, 5 Feb 2021 17:46:16 +0100 Subject: [PATCH 0503/1270] DOC: remove pygments_style from conf.py --- doc/source/conf.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index e76f60063408..e363a8d5ec8e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -138,9 +138,6 @@ class PyTypeObject(ctypes.Structure): # output. They are ignored by default. #show_authors = False -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - def setup(app): # add a config value for `ifconfig` directives app.add_config_value('python_version_major', str(sys.version_info.major), 'env') From 5248f4ba50e95bba83e3e07f3271241e25b0421c Mon Sep 17 00:00:00 2001 From: melissawm Date: Fri, 5 Feb 2021 15:06:13 -0300 Subject: [PATCH 0504/1270] Applying suggestions from PR review. --- doc/source/reference/arrays.ndarray.rst | 4 +--- doc/source/reference/routines.linalg.rst | 2 +- numpy/core/code_generators/ufunc_docstrings.py | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 106ccd7cbc04..0d7bf030411f 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -567,11 +567,9 @@ Matrix Multiplication: .. note:: Matrix operators ``@`` and ``@=`` were introduced in Python 3.5 - following PEP465_ and are available since NumPy 1.10.0. Further + following :pep:`465` and are available since NumPy 1.10.0. Further information can be found in the :func:`matmul` documentation. -.. _PEP465: https://www.python.org/dev/peps/pep-0465/ - Special methods =============== diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst index 97bdf3974916..f9d37af8875b 100644 --- a/doc/source/reference/routines.linalg.rst +++ b/doc/source/reference/routines.linalg.rst @@ -43,7 +43,7 @@ The ``@`` operator Introduced in NumPy 1.10.0, the ``@`` and ``@=`` operators are preferable to other methods when computing the matrix product between 2d arrays. The -:func:`numpy.matmul` function implements the semantics of the ``@`` operator. +:func:`numpy.matmul` function implements the ``@`` operator. .. currentmodule:: numpy diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index fe3f88eb2fe2..2f75cb41fb0b 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -2809,7 +2809,7 @@ def add_newdoc(place, name, doc): >>> # n is 7, k is 4, m is 3 The matmul function implements the semantics of the ``@`` operator introduced - in Python 3.5 following PEP465. + in Python 3.5 following :pep:`465`. Examples -------- From e5a5c7a90feeafe8596bb3e6ce9d587282a572f5 Mon Sep 17 00:00:00 2001 From: zolboo1024 Date: Fri, 5 Feb 2021 14:55:43 -0500 Subject: [PATCH 0505/1270] Specified the return type for trapz to include the possibility of ndarray being returned --- numpy/lib/function_base.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 276ffa5d4f4b..d33a0fa7de05 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -4100,9 +4100,12 @@ def trapz(y, x=None, dx=1.0, axis=-1): Returns ------- - trapz : float - Definite integral as approximated by trapezoidal rule. 
- + trapz : float or ndarray + Definite integral of 'y' = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If 'y' is a 1-dimensional array, + then the result is a float. If 'n' is greater than 1, then the result + is an 'n-1' dimensional array. + See Also -------- sum, cumsum @@ -4245,7 +4248,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): See Also -------- mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. - ogrid : Construct an open multi-dimensional "meshgrid" using indexing + ogrid : Construct an open multi-dimensional "meshgrid" using indexing notation. Examples From 4152443b1d268951401e0be1351eccbddd572e97 Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Thu, 4 Feb 2021 18:32:54 +0100 Subject: [PATCH 0506/1270] MAINT: Avoid moveaxis overhead in median. This change speeds up taking the median of 1001 floats by ~20%, as measured by `python -mtimeit -s 'import numpy as np; x = np.random.randn(1001)' -- 'np.median(x)'` --- benchmarks/benchmarks/bench_function_base.py | 8 ++++++++ numpy/lib/utils.py | 3 +-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index b1e59274986c..062843d10cc0 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -47,6 +47,8 @@ class Median(Benchmark): def setup(self): self.e = np.arange(10000, dtype=np.float32) self.o = np.arange(10001, dtype=np.float32) + self.tall = np.random.random((10000, 20)) + self.wide = np.random.random((20, 10000)) def time_even(self): np.median(self.e) @@ -66,6 +68,12 @@ def time_even_small(self): def time_odd_small(self): np.median(self.o[:500], overwrite_input=True) + def time_tall(self): + np.median(self.tall, axis=-1) + + def time_wide(self): + np.median(self.wide, axis=0) + class Percentile(Benchmark): def setup(self): diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index f7e176cf3230..8f5c6eea39ba 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -1026,8 +1026,7 @@ def _median_nancheck(data, result, axis, out): """ if data.size == 0: return result - data = np.moveaxis(data, axis, -1) - n = np.isnan(data[..., -1]) + n = np.isnan(data.take(-1, axis=axis)) # masked NaN values are ok if np.ma.isMaskedArray(n): n = n.filled(False) From 5990b67f8ca5135ceb5e5d60eebca215d54d13a9 Mon Sep 17 00:00:00 2001 From: Jamie Date: Sat, 6 Feb 2021 17:32:25 +0000 Subject: [PATCH 0507/1270] Added sentence to docstring of histogram_bin_edges --- numpy/lib/histograms.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py index 7af67a7ee69d..bd8c6cb8eab1 100644 --- a/numpy/lib/histograms.py +++ b/numpy/lib/histograms.py @@ -562,7 +562,9 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): below, :math:`h` is the binwidth and :math:`n_h` is the number of bins. All estimators that compute bin counts are recast to bin width using the `ptp` of the data. The final bin count is obtained from - ``np.round(np.ceil(range / h))``. + ``np.round(np.ceil(range / h))``. The final bin width is often less + than what is returned by the formulas below, in order to allow the + bins to fit the entire range of the data. 'auto' (maximum of the 'sturges' and 'fd' estimators) A compromise to get a good value. 
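The sentence added to the ``histogram_bin_edges`` notes can be checked numerically: the estimators yield a *target* width, and the returned edges are spaced slightly more tightly so that a whole number of bins covers the data. A small illustration (not part of the patch; the variable names are made up):

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.normal(size=1000)

    iqr = np.subtract(*np.percentile(x, [75, 25]))
    fd_width = 2 * iqr / x.size ** (1 / 3)       # width suggested by the 'fd' rule

    edges = np.histogram_bin_edges(x, bins="fd")
    actual_width = edges[1] - edges[0]

    print(fd_width, actual_width)   # the actual width is usually a bit smaller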
For small datasets the Sturges From c78e4f69a60e81e5e0e0bda6b950440afc737222 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 6 Feb 2021 22:10:36 +0100 Subject: [PATCH 0508/1270] DEP: doc-deprecate BLAS_SRC/LAPACK_SRC Building BLAS and LAPACK from sources in a NumPy build makes very little sense in 2021. Therefore remove the documentation for this from `site.cfg.example`, and doc-deprecate it in `distutils/system_info.py`. It's hard to properly deprecate and not worth doing, just de-emphasizing is enough. [ci skip] --- numpy/distutils/system_info.py | 6 ++++++ site.cfg.example | 11 +---------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 13f9da0fb684..2e355a34aa49 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -1549,6 +1549,9 @@ def calc_info(self): class lapack_src_info(system_info): + # LAPACK_SRC is deprecated, please do not use this! + # Build or install a BLAS library via your package manager or from + # source separately. section = 'lapack_src' dir_env_var = 'LAPACK_SRC' notfounderror = LapackSrcNotFoundError @@ -2468,6 +2471,9 @@ def calc_info(self): return class blas_src_info(system_info): + # BLAS_SRC is deprecated, please do not use this! + # Build or install a BLAS library via your package manager or from + # source separately. section = 'blas_src' dir_env_var = 'BLAS_SRC' notfounderror = BlasSrcNotFoundError diff --git a/site.cfg.example b/site.cfg.example index c809303a205c..1a6b36d2c6eb 100644 --- a/site.cfg.example +++ b/site.cfg.example @@ -28,7 +28,7 @@ # extensions with this dependency. Use the character given by os.pathsep # to separate the items in the list. Note that this character is known to # vary on some unix-like systems; if a colon does not work, try a comma. -# This also applies to include_dirs and src_dirs (see below). +# This also applies to include_dirs. # On UN*X-type systems (OS X, most BSD and Linux systems): # library_dirs = /usr/lib:/usr/local/lib # On Windows: @@ -40,15 +40,6 @@ # List of directories to add to the header file search path. # include_dirs = /usr/include:/usr/local/include # -# src_dirs -# List of directories that contain extracted source code for the -# dependency. For some dependencies, numpy.distutils will be able to build -# them from source if binaries cannot be found. The FORTRAN BLAS and -# LAPACK libraries are one example. However, most dependencies are more -# complicated and require actual installation that you need to do -# yourself. -# src_dirs = /home/username/src/BLAS_SRC:/home/username/src/LAPACK_SRC -# # search_static_first # Boolean (one of (0, false, no, off) for False or (1, true, yes, on) for # True) to tell numpy.distutils to prefer static libraries (.a) over From 29f0e8fa1ff5ccda3d92c949eb9d09c5d8327aa3 Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Sun, 7 Feb 2021 00:13:39 +0200 Subject: [PATCH 0509/1270] BUG: Fix missing signed_char dependency. Closes #18335. 
--- numpy/f2py/cb_rules.py | 1 + numpy/f2py/tests/test_callback.py | 40 +++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index 3068dc897b10..60bc1ad1142c 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -342,6 +342,7 @@ isarray: '#ctype# *', isstring: '#ctype#' }, + 'need': {l_or(isscalar, isarray, isstring): '#ctype#'}, # untested with multiple args 'strarglens': {isstring: ',int #varname_i#_cb_len'}, 'strarglens_td': {isstring: ',int'}, # untested with multiple args diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 37736af21285..4d4f2b443a99 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -236,3 +236,43 @@ def incr(x): y = np.array([1, 2, 3], dtype=np.int64) r = self.module.gh17797(incr, y) assert r == 123 + 1 + 2 + 3 + + +class TestGH18335(util.F2PyTest): + """The reproduction of the reported issue requires specific input that + extensions may break the issue conditions, so the reproducer is + implemented as a separate test class. Do not extend this test with + other tests! + """ + + suffix = '.f90' + + code = textwrap.dedent( + """ + ! When gh18335_workaround is defined as an extension, + ! the issue cannot be reproduced. + !subroutine gh18335_workaround(f, y) + ! implicit none + ! external f + ! integer(kind=1) :: y(1) + ! call f(y) + !end subroutine gh18335_workaround + + function gh18335(f) result (r) + implicit none + external f + integer(kind=1) :: y(1), r + y(1) = 123 + call f(y) + r = y(1) + end function gh18335 + """) + + def test_gh18335(self): + + def foo(x): + x[0] += 1 + + y = np.array([1, 2, 3], dtype=np.int8) + r = self.module.gh18335(foo) + assert r == 123 + 1 From 62b82961e12cc13379fb67f03c71062f94285cd1 Mon Sep 17 00:00:00 2001 From: Aerysv <53720019+Aerysv@users.noreply.github.com> Date: Sat, 6 Feb 2021 23:14:20 +0100 Subject: [PATCH 0510/1270] DOC: Change license date 2020 -> 2021 (gh-18346) A very small contribution to update the license date from 2020 to 2021 :) --- LICENSE.txt | 2 +- doc/source/conf.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LICENSE.txt b/LICENSE.txt index 8ce64521862e..4723d4ea009e 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2005-2020, NumPy Developers. +Copyright (c) 2005-2021, NumPy Developers. All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/doc/source/conf.py b/doc/source/conf.py index e363a8d5ec8e..eff5a90f1d4e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -98,7 +98,7 @@ class PyTypeObject(ctypes.Structure): # General substitutions. project = 'NumPy' -copyright = '2008-2020, The SciPy community' +copyright = '2008-2021, The SciPy community' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. From 9e9d3e53a7b57bbbff03c214f6563b47a361ae9b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 6 Feb 2021 15:57:13 -0600 Subject: [PATCH 0511/1270] CI: CircleCI seems to occasionally time out, increase the limit CircleCI seems to default to a 10 minute timeout on no output, simply increase it to 30 minutes to ensure the build should succeed. It seems the build does take 25+ minutes (maybe depending on the machine it runs on). Technically, 30 minutes is probably more than necessary, since there is a print in the middle, but that is just a warning that could go away at some point. 
--- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9324de943607..ea50ca84fa52 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -62,6 +62,7 @@ jobs: - run: name: build devdocs + no_output_timeout: 30m command: | . venv/bin/activate cd doc From 65892ef68f3a075578516b0630023d2cd4b832bc Mon Sep 17 00:00:00 2001 From: Touqir Sajed Date: Sun, 7 Feb 2021 12:48:39 +0600 Subject: [PATCH 0512/1270] fixed CI errors and optimized further simd_16 and simd_32 --- numpy/core/src/multiarray/item_selection.c | 34 +++++++++++----------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index f92327827cca..b0133983af34 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2159,8 +2159,8 @@ static NPY_INLINE NPY_GCC_OPT_3 npyv_u16x2 count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end, npy_uint16 max_count) { npyv_u16x2 vsum16; - vsum16.val[0] = vsum16.val[1] = npyv_zero_u16(); // Setting a vector of 0s (16 maybe) - npy_intp lane_max = 0; // scalar 0 + vsum16.val[0] = vsum16.val[1] = npyv_zero_u16(); + npy_intp lane_max = 0; while (*d < end && lane_max <= max_count - NPY_MAX_UINT8) { npyv_u8 vsum8 = count_zero_bytes_u8(d, end, NPY_MAX_UINT8); npyv_u16x2 part = npyv_expand_u16_u8(vsum8); @@ -2208,16 +2208,16 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) static NPY_INLINE NPY_GCC_OPT_3 npy_uintp -count_nonzero_int16_simd(npy_int16 *d, npy_uintp unrollx) +count_nonzero_int16_simd(npy_uint16 *d, npy_uintp unrollx) { npy_uintp zero_count = 0; - const npy_uintp innerloop_jump = NPY_MAX_UINT16; - const npy_int16 *end = d + unrollx; + npy_uintp innerloop_jump = NPY_MAX_UINT16 * npyv_nlanes_u16; + npy_uint16 *end = d + unrollx; const npyv_u16 vone = npyv_setall_u16(1); const npyv_u16 vzero = npyv_zero_u16(); - npy_int16 *target = d; + npy_uint16 *target = d; while (dtype_num >= NPY_INT16 && dtype->type_num <= NPY_UINT64) { - return count_nonzero_int(PyArray_NDIM(self), (void *) PyArray_DATA(self), + return count_nonzero_int(PyArray_NDIM(self), (char *) PyArray_DATA(self), PyArray_DIMS(self), PyArray_STRIDES(self), dtype->type_num); } From 022cc66e425b12680b252340d56c11d87d3c8765 Mon Sep 17 00:00:00 2001 From: Touqir Sajed Date: Sun, 7 Feb 2021 16:43:03 +0600 Subject: [PATCH 0513/1270] some fixes for the build problems --- numpy/core/src/multiarray/item_selection.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index b0133983af34..d793b64efac0 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -30,6 +30,8 @@ #include "array_coercion.h" #include "simd/simd.h" +#include + static NPY_GCC_OPT_3 NPY_INLINE int npy_fasttake_impl( char *dest, char *src, const npy_intp *indices, @@ -2206,12 +2208,17 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) return unrollx - zero_count; } +#define safe_ptr_addition_uint16(result, ptr, adder) \ + result = ((((uint64_t) ptr) + (((uint64_t) adder) << 1)) == ((uint64_t) (ptr + adder))) ? (ptr+adder) : (npy_uint16 *) NPY_MAX_UINTP; + +#define safe_ptr_addition_uint32(result, ptr, adder) \ + result = ((((uint64_t) ptr) + (((uint64_t) adder) << 2)) == ((uint64_t) (ptr + adder))) ? 
(ptr+adder) : (npy_uint32 *) NPY_MAX_UINTP; static NPY_INLINE NPY_GCC_OPT_3 npy_uintp count_nonzero_int16_simd(npy_uint16 *d, npy_uintp unrollx) { npy_uintp zero_count = 0; - npy_uintp innerloop_jump = NPY_MAX_UINT16 * npyv_nlanes_u16; + uint64_t innerloop_jump = NPY_MAX_UINT16 * npyv_nlanes_u16; npy_uint16 *end = d + unrollx; const npyv_u16 vone = npyv_setall_u16(1); @@ -2220,7 +2227,8 @@ count_nonzero_int16_simd(npy_uint16 *d, npy_uintp unrollx) npy_uint16 *target = d; while (d Date: Sun, 7 Feb 2021 18:04:10 +0600 Subject: [PATCH 0514/1270] another attempt to fix build issues --- numpy/core/src/multiarray/item_selection.c | 38 ++++++++++------------ 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index d793b64efac0..06513ab9a351 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2208,17 +2208,12 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx) return unrollx - zero_count; } -#define safe_ptr_addition_uint16(result, ptr, adder) \ - result = ((((uint64_t) ptr) + (((uint64_t) adder) << 1)) == ((uint64_t) (ptr + adder))) ? (ptr+adder) : (npy_uint16 *) NPY_MAX_UINTP; - -#define safe_ptr_addition_uint32(result, ptr, adder) \ - result = ((((uint64_t) ptr) + (((uint64_t) adder) << 2)) == ((uint64_t) (ptr + adder))) ? (ptr+adder) : (npy_uint32 *) NPY_MAX_UINTP; static NPY_INLINE NPY_GCC_OPT_3 npy_uintp count_nonzero_int16_simd(npy_uint16 *d, npy_uintp unrollx) { npy_uintp zero_count = 0; - uint64_t innerloop_jump = NPY_MAX_UINT16 * npyv_nlanes_u16; + uint64_t innerloop_jump = NPY_MAX_UINT16; npy_uint16 *end = d + unrollx; const npyv_u16 vone = npyv_setall_u16(1); @@ -2227,14 +2222,15 @@ count_nonzero_int16_simd(npy_uint16 *d, npy_uintp unrollx) npy_uint16 *target = d; while (d> 1); + target = (npy_uint16*) PyArray_MIN(target_tmp, (uint64_t) end); + for (; d> 2); + target = (npy_uint32*) PyArray_MIN(target_tmp, (uint64_t) end); + for (; d Date: Sun, 7 Feb 2021 18:53:34 +0600 Subject: [PATCH 0515/1270] removed the target variable and changed the loop as suggested by Sayed Adel --- numpy/core/src/multiarray/item_selection.c | 28 +++++++--------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 06513ab9a351..f2da62ae96d4 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -2213,23 +2213,17 @@ static NPY_INLINE NPY_GCC_OPT_3 npy_uintp count_nonzero_int16_simd(npy_uint16 *d, npy_uintp unrollx) { npy_uintp zero_count = 0; - uint64_t innerloop_jump = NPY_MAX_UINT16; npy_uint16 *end = d + unrollx; const npyv_u16 vone = npyv_setall_u16(1); const npyv_u16 vzero = npyv_zero_u16(); - npy_uint16 *target = d; while (d> 1); - target = (npy_uint16*) PyArray_MIN(target_tmp, (uint64_t) end); - for (; d> 2); - target = (npy_uint32*) PyArray_MIN(target_tmp, (uint64_t) end); - for (; d Date: Sun, 7 Feb 2021 21:52:15 +0200 Subject: [PATCH 0516/1270] [MAINT] Fix regression in using --capi-api flag and callbacks --- numpy/f2py/rules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 4e1cf0c7daa0..b9cbc5487278 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -809,7 +809,7 @@ """, {debugcapi: ["""\ fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is 
expected.\\n\",#varname#_cb.nofargs); - CFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""", + CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""", {l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, """\ CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\"); From 4874af99fcb9c9af869536a2bd390e9e94d07b8a Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Sun, 7 Feb 2021 21:54:48 +0200 Subject: [PATCH 0517/1270] ENH: Share memory of read-only intent(in) arrays. Closes #16347 --- numpy/f2py/src/fortranobject.c | 4 ++-- numpy/f2py/tests/test_array_from_pyobj.py | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 3275f90ad2cb..b9ef18701ce3 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -799,7 +799,7 @@ PyArrayObject* array_from_pyobj(const int type_num, && ARRAY_ISCOMPATIBLE(arr,type_num) && F2PY_CHECK_ALIGNMENT(arr, intent) ) { - if ((intent & F2PY_INTENT_C)?PyArray_ISCARRAY(arr):PyArray_ISFARRAY(arr)) { + if ((intent & F2PY_INTENT_C)?PyArray_ISCARRAY_RO(arr):PyArray_ISFARRAY_RO(arr)) { if ((intent & F2PY_INTENT_OUT)) { Py_INCREF(arr); } @@ -807,9 +807,9 @@ PyArrayObject* array_from_pyobj(const int type_num, return arr; } } - if (intent & F2PY_INTENT_INOUT) { strcpy(mess, "failed to initialize intent(inout) array"); + /* Must use PyArray_IS*ARRAY because intent(inout) requires writable input */ if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr)) strcat(mess, " -- input not contiguous"); if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr)) diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 0524da342864..77149e4e7a0b 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -233,6 +233,7 @@ def __init__(self, typ, dims, intent, obj): order=self.intent.is_intent('c') and 'C' or 'F') assert_(self.pyarr.dtype == typ, repr((self.pyarr.dtype, typ))) + self.pyarr.setflags(write=self.arr.flags['WRITEABLE']) assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) self.pyarr_attr = wrap.array_attrs(self.pyarr) @@ -326,6 +327,18 @@ def test_in_from_2casttype(self): else: assert_(not a.has_shared_memory(), repr(t.dtype)) + @pytest.mark.parametrize('write', ['w', 'ro']) + @pytest.mark.parametrize('order', ['C', 'F']) + @pytest.mark.parametrize('inp', ['2seq', '23seq']) + def test_in_nocopy(self, write, order, inp): + """Test if intent(in) array can be passed without copies + """ + seq = getattr(self, 'num' + inp) + obj = np.array(seq, dtype=self.type.dtype, order=order) + obj.setflags(write=(write == 'w')) + a = self.array(obj.shape, ((order=='C' and intent.in_.c) or intent.in_), obj) + assert a.has_shared_memory() + def test_inout_2seq(self): obj = np.array(self.num2seq, dtype=self.type.dtype) a = self.array([len(self.num2seq)], intent.inout, obj) From 22a134460738b7c3ba8770cd23d7cbfef0c34117 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 7 Feb 2021 14:36:48 -0700 Subject: [PATCH 0518/1270] REL: Update master after 1.20.1 release. 
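Referring back to the ``intent(in)`` change above (PATCH 0517): the relaxed ``PyArray_ISCARRAY_RO``/``PyArray_ISFARRAY_RO`` checks drop only the writeability requirement, so a read-only but contiguous input can now share memory instead of being copied. A pure-Python illustration of the flag combination involved:

.. code-block:: python

    import numpy as np

    a = np.asfortranarray(np.ones((2, 3)))
    a.setflags(write=False)

    print(a.flags['F_CONTIGUOUS'])   # True  -> acceptable for intent(in) without a copy
    print(a.flags['WRITEABLE'])      # False -> would have failed the old, writable-only check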
--- .mailmap | 2 ++ doc/changelog/1.20.1-changelog.rst | 36 ++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/1.20.1-notes.rst | 53 +++++++++++++++++++++++++++++ 4 files changed, 92 insertions(+) create mode 100644 doc/changelog/1.20.1-changelog.rst create mode 100644 doc/source/release/1.20.1-notes.rst diff --git a/.mailmap b/.mailmap index 77df36b08d82..4726668e8b40 100644 --- a/.mailmap +++ b/.mailmap @@ -85,6 +85,7 @@ Daniel da Silva Daniel da Silva Daniel da Silva Daniel da Silva Daniel Hrisca danielhrisca Daniel J Farrell danieljfarrell +Daniel Montes <53720019+Aerysv@users.noreply.github.com> Aerysv <53720019+Aerysv@users.noreply.github.com> Daniel Müllner Daniel Daniel Müllner dmuellner Daniel Rasmussen drasmuss @@ -216,6 +217,7 @@ Nathaniel J. Smith njsmith Naveen Arunachalam naveenarun Nicolas Scheffer Nicolas Scheffer Nicholas A. Del Grosso nickdg +Nicholas McKibben mckib2 Nick Minkyu Lee fivemok <9394929+fivemok@users.noreply.github.com> Ondřej Čertík Ondrej Certik Óscar Villellas Guillén ovillellas diff --git a/doc/changelog/1.20.1-changelog.rst b/doc/changelog/1.20.1-changelog.rst new file mode 100644 index 000000000000..215cdca3c5e0 --- /dev/null +++ b/doc/changelog/1.20.1-changelog.rst @@ -0,0 +1,36 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bas van Beek +* Charles Harris +* Nicholas McKibben + +* Pearu Peterson +* Ralf Gommers +* Sebastian Berg +* Tyler Reddy +* @Aerysv + + +Pull requests merged +==================== + +A total of 15 pull requests were merged for this release. + +* `#18306 `__: MAINT: Add missing placeholder annotations +* `#18310 `__: BUG: Fix typo in ``numpy.__init__.py`` +* `#18326 `__: BUG: don't mutate list of fake libraries while iterating over... +* `#18327 `__: MAINT: gracefully shuffle memoryviews +* `#18328 `__: BUG: Use C linkage for random distributions +* `#18336 `__: CI: fix when GitHub Actions builds trigger, and allow ci skips +* `#18337 `__: BUG: Allow unmodified use of isclose, allclose, etc. with timedelta +* `#18345 `__: BUG: Allow pickling all relevant DType types/classes +* `#18351 `__: BUG: Fix missing signed_char dependency. Closes #18335. +* `#18352 `__: DOC: Change license date 2020 -> 2021 +* `#18353 `__: CI: CircleCI seems to occasionally time out, increase the limit +* `#18354 `__: BUG: Fix f2py bugs when wrapping F90 subroutines. +* `#18356 `__: MAINT: crackfortran regex simplify +* `#18357 `__: BUG: threads.h existence test requires GLIBC > 2.12. +* `#18359 `__: REL: Prepare for the NumPy 1.20.1 release. diff --git a/doc/source/release.rst b/doc/source/release.rst index e4b3f6af64ab..1ea46e232999 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release Notes :maxdepth: 3 1.21.0 + 1.20.1 1.20.0 1.19.5 1.19.4 diff --git a/doc/source/release/1.20.1-notes.rst b/doc/source/release/1.20.1-notes.rst new file mode 100644 index 000000000000..f95b5847ddcd --- /dev/null +++ b/doc/source/release/1.20.1-notes.rst @@ -0,0 +1,53 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.20.1 Release Notes +========================== + +NumPy 1,20.1 is a rapid bugfix release fixing several bugs and regressions +reported after the 1.20.0 release. + + +Highlights +========== + +- The distutils bug that caused problems with downstream projects is fixed. +- The ``random.shuffle`` regression is fixed. 
+
+
+Contributors
+============
+
+A total of 8 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Bas van Beek
+* Charles Harris
+* Nicholas McKibben +
+* Pearu Peterson
+* Ralf Gommers
+* Sebastian Berg
+* Tyler Reddy
+* @Aerysv +
+
+
+Pull requests merged
+====================
+
+A total of 15 pull requests were merged for this release.
+
+* `#18306 `__: MAINT: Add missing placeholder annotations
+* `#18310 `__: BUG: Fix typo in ``numpy.__init__.py``
+* `#18326 `__: BUG: don't mutate list of fake libraries while iterating over...
+* `#18327 `__: MAINT: gracefully shuffle memoryviews
+* `#18328 `__: BUG: Use C linkage for random distributions
+* `#18336 `__: CI: fix when GitHub Actions builds trigger, and allow ci skips
+* `#18337 `__: BUG: Allow unmodified use of isclose, allclose, etc. with timedelta
+* `#18345 `__: BUG: Allow pickling all relevant DType types/classes
+* `#18351 `__: BUG: Fix missing signed_char dependency. Closes #18335.
+* `#18352 `__: DOC: Change license date 2020 -> 2021
+* `#18353 `__: CI: CircleCI seems to occasionally time out, increase the limit
+* `#18354 `__: BUG: Fix f2py bugs when wrapping F90 subroutines.
+* `#18356 `__: MAINT: crackfortran regex simplify
+* `#18357 `__: BUG: threads.h existence test requires GLIBC > 2.12.
+* `#18359 `__: REL: Prepare for the NumPy 1.20.1 release.

From 534132e17cb2ab071507053bb40f1261d993b4b2 Mon Sep 17 00:00:00 2001
From: Touqir Sajed
Date: Mon, 8 Feb 2021 04:13:45 +0600
Subject: [PATCH 0519/1270] Modified PyArray_CountNonzero to discriminate between types based on elsize

---
 numpy/core/src/multiarray/item_selection.c | 59 ++++++++++++++++++----
 1 file changed, 49 insertions(+), 10 deletions(-)

diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index f2da62ae96d4..c0eec4c84f66 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -2279,7 +2279,7 @@ count_nonzero_int64_simd(npy_uint64 *d, npy_uintp unrollx)
 
 static NPY_INLINE NPY_GCC_OPT_3 npy_intp
-count_nonzero_int(int ndim, char *data, const npy_intp *ashape, const npy_intp *astrides, int type_num)
+count_nonzero_int(int ndim, char *data, const npy_intp *ashape, const npy_intp *astrides, int elsize)
 {
     int idim;
     npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
@@ -2342,13 +2342,13 @@ count_nonzero_int(int ndim, char *data, const npy_intp *
 
     NPY_BEGIN_THREADS_THRESHOLDED(shape[0]);
 
-    if (type_num == NPY_INT16 || type_num == NPY_UINT16) {
+    if (elsize == 2) {
         _ITERATE_I16;
     }
-    else if (type_num == NPY_INT32 || type_num == NPY_UINT32) {
+    else if (elsize == 4) {
         _ITERATE_I32;
     }
-    else if (type_num == NPY_INT64 || type_num == NPY_UINT64) {
+    else if (elsize == 8) {
         _ITERATE_I64;
     }
 
@@ -2453,16 +2453,55 @@ PyArray_CountNonzero(PyArrayObject *self)
 
     /* Special low-overhead version specific to the boolean type */
     dtype = PyArray_DESCR(self);
 
-    if (dtype->type_num >= NPY_INT16 && dtype->type_num <= NPY_UINT64) {
-        return count_nonzero_int(PyArray_NDIM(self), (char *) PyArray_DATA(self),
-                                 PyArray_DIMS(self), PyArray_STRIDES(self), dtype->type_num);
-    }
+    // if (dtype->type_num >= NPY_INT16 && dtype->type_num <= NPY_UINT64) {
+    //     return count_nonzero_int(PyArray_NDIM(self), (char *) PyArray_DATA(self),
+    //                              PyArray_DIMS(self), PyArray_STRIDES(self), dtype->type_num);
+    // }
+
+    // if (dtype->type_num == NPY_BOOL || dtype->type_num == NPY_INT8 || dtype->type_num == NPY_UINT8) {
+    //     return count_boolean_trues(PyArray_NDIM(self), PyArray_DATA(self),
+    //                                PyArray_DIMS(self), PyArray_STRIDES(self));
+    // }
+
+    switch(dtype->kind) {
+        case 'u':
+        {
+            if (dtype->elsize == 1)
+                return count_boolean_trues(PyArray_NDIM(self), PyArray_DATA(self),
+                                   PyArray_DIMS(self), PyArray_STRIDES(self));
+
+            if (dtype->elsize >=2 && dtype->elsize <= 8)
+                return count_nonzero_int(
+                    PyArray_NDIM(self), PyArray_BYTES(self), PyArray_DIMS(self),
+                    PyArray_STRIDES(self), dtype->elsize
+                );
 
-    if (dtype->type_num == NPY_BOOL || dtype->type_num == NPY_INT8 || dtype->type_num == NPY_UINT8) {
-        return count_boolean_trues(PyArray_NDIM(self), PyArray_DATA(self),
+            break;
+        }
+        case 'i':
+        {
+            if (dtype->elsize == 1)
+                return count_boolean_trues(PyArray_NDIM(self), PyArray_DATA(self),
                                    PyArray_DIMS(self), PyArray_STRIDES(self));
+
+            if (dtype->elsize >=2 && dtype->elsize <= 8)
+                return count_nonzero_int(
+                    PyArray_NDIM(self), PyArray_BYTES(self), PyArray_DIMS(self),
+                    PyArray_STRIDES(self), dtype->elsize
+                );
+
+            break;
+        }
+        case 'b':
+        {
+            if (dtype->elsize == 1)
+                return count_boolean_trues(PyArray_NDIM(self), PyArray_DATA(self),
+                                   PyArray_DIMS(self), PyArray_STRIDES(self));
+
+        }
     }
+
     nonzero = PyArray_DESCR(self)->f->nonzero;
 
     /* If it's a trivial one-dimensional loop, don't use an iterator */

From 1eb91a33202416f582dbf389e44409290922734d Mon Sep 17 00:00:00 2001
From: Touqir Sajed
Date: Mon, 8 Feb 2021 05:46:06 +0600
Subject: [PATCH 0520/1270] Ensured overflow does not happen for 16 and 32 bit ints

---
 numpy/core/src/multiarray/item_selection.c | 34 +++++++++++++++++-----
 1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index c0eec4c84f66..9de3446352de 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -2133,10 +2133,6 @@ count_nonzero_bytes_384(const npy_uint64 * w)
 
 #if NPY_SIMD
 
-/*
-
-*/
-
 /* Count the zero bytes between `*d` and `end`, updating `*d` to point to where to keep counting from. */
 static NPY_INLINE NPY_GCC_OPT_3 npyv_u8
 count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_count)
@@ -2209,23 +2205,37 @@ count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx)
 }
 
+#define safe_ptr_addition_uint16(result, ptr, adder) \
+    result = ((((uint64_t) ptr) + (((uint64_t) adder) << 1)) == ((uint64_t) (ptr + adder))) ? (ptr+adder) : (npy_uint16 *) NPY_MAX_UINTP;
+
+#define safe_ptr_addition_uint32(result, ptr, adder) \
+    result = ((((uint64_t) ptr) + (((uint64_t) adder) << 2)) == ((uint64_t) (ptr + adder))) ? (ptr+adder) : (npy_uint32 *) NPY_MAX_UINTP;
+
 static NPY_INLINE NPY_GCC_OPT_3 npy_uintp
 count_nonzero_int16_simd(npy_uint16 *d, npy_uintp unrollx)
 {
     npy_uintp zero_count = 0;
+    uint64_t innerloop_jump = NPY_MAX_UINT16 * npyv_nlanes_u16;
     npy_uint16 *end = d + unrollx;
     const npyv_u16 vone = npyv_setall_u16(1);
     const npyv_u16 vzero = npyv_zero_u16();
+    npy_uint16 *target = d;
     while (d
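The two patches above dispatch on `dtype->kind` and `dtype->elsize` instead of an explicit list of type numbers, and bound how long the 16/32-bit SIMD lane counters run before their partial sums are flushed (that is what `innerloop_jump = NPY_MAX_UINT16 * npyv_nlanes_u16` appears to limit; the body of that loop is not preserved here). A small NumPy-level sketch of the dispatch surface follows: the same values counted through dtypes of different item sizes, which is exactly what the `elsize` branches distinguish. The dtype list and array size are illustrative choices, not taken from the patch.

    import numpy as np

    rng = np.random.default_rng(0)
    base = rng.integers(0, 3, size=100_000)      # roughly two thirds nonzero
    for name in ("bool", "int8", "uint8", "int16", "uint32", "int64"):
        a = base.astype(name)
        # Every view of the same values must report the same count, whichever
        # internal branch (1-, 2-, 4- or 8-byte counter) is used.
        print(name, a.dtype.itemsize, np.count_nonzero(a))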
Date: Sun, 7 Feb 2021 18:49:49 -0800
Subject: [PATCH 0521/1270] DOC: Update landing page to match table of contents

- Step 1
- See #17845
---
 doc/source/_templates/indexcontent.html | 31 +++++++++++--------------
 1 file changed, 14 insertions(+), 17 deletions(-)

diff --git a/doc/source/_templates/indexcontent.html b/doc/source/_templates/indexcontent.html
index 64f472f3abc8..184a3ca64e2c 100644
--- a/doc/source/_templates/indexcontent.html
+++ b/doc/source/_templates/indexcontent.html
@@ -14,23 +14,22 @@
 [hunk body not preserved: HTML markup stripped; surviving text: "{{ docstitle|e }}", "List of the most important terms"]
@@ -41,10 +40,8 @@
 [hunk body not preserved: HTML markup stripped; surviving text: "{{ docstitle|e }}", "Contributing to NumPy"]
zaD8{w1XMZb_Pmdv32SbS6=R&^PUL$H4nDhNEHj!(t9{i(6`_T)tP#*1^tpFjxZ06V z69Ng;kuRmekd}RatHl-{{2HDd=U>ul_#I`_xiDYc@xbX|$J_6l8)+ytY+^O^#{*l2?UkI(=Bhv|xVPkrYW_H}cNy@^UOZ1E_$Xhub7gn9sJ zhfznlV#{ZKEr~X@KWwno3vgN0EN#B6=3}_oE8^)wU8e*jbyzjMpG|dRKi9z#HyqSb z8engfK5(T6ZSFnLEwj^k_h5wm#Q+7Q&|gC$)MX(bP#y}5f9st)w1v)ev%>Ev5_Az~ z1!6X6F6Jv`7-~NyUrjCUku$+A}=CSiJO_s=1Ms8GssT?qlUza>8pVU?8j1o zFX>;+a*FX4{nm5RhXyvy0LjrwDLQtQKHz!ze8caN>&Jh4tDapvs(;)2S_x0i?aZN4 zNyR>{iP$TuS-Id4Zln!Qd#(~W6{h*O?(jrY37)%8mS?jpn}5C}7YaKx# z_~Z_$2w;bgn`Igu(@D*e_Bz6#C@g1ySnXXmIGPW*|qMFqU=o~GY2F7ywYoCOr z+t)weJkAQ!1$9a&xb^2vN(>j=UWN*GS*!S^2|Z@T*<`&R3521TNbXEaa>*t!!hW!L zw8@@=o@-H)Qb2Og0t%j2;5a3u7;3Uk1&xU;>@8DYc>=z(GUpEjTP{8+;9sE+PvmRT zlW%0{6LS(R%>l-MM%c-U(=}F+Z+A3x%LxrR5JBxepx12sYsg!NM{)x*eHMQ)kabqK zOrrU`-L`Tro(m3I2xq|A7*w{w2#}vx@%<%t3+VnXE!+ZX`HBu)IsC_QVA)KitF)l% zTqcg*4bR3Vi#}R;&S^HX*V$hDfczx%%+s?kCFHHaB93P&9X$e)8a4VeaDX2q@MS?L zFC~O?189<6bH|18Sw(Aq&0zWJ7%-tl`T6}3I^g9j;v=zNuL4;}oQXBmg$AP=4KGd| z%l1-7sv>3uV*cOHugfYTgo#9o>nbPO{4`)luM$=>sW_=Exzt^>A2 z7i>VMYSxKj%(_5R7C3%S_SVmTeKpgN*#Ue_ptr0G?Bpj3%v%~(5ei*9OD`VqDM#*T zAR7N?0ki%ZZJiY*o`>>& z0u1u-W0Tm(wEs39H#HRe*TWNufBc#XY#=3Okbc$e>t6@!!P3xRPUKUU0IgE?^ADLTk<&1S%Jd~lL~>v2``PX6%;u#{L9bQT#C zvS=Z$tXy>1g~lqI5Tmlpn1kuv!fX}@2dw_ zvwA_k|B6K|ZQ~EiK1$3@1rKoVKk)}XZ2c9M-6cm3t=Ljp;PNBv_bAZ)>pLmV9jBE@ zK?5KF5y}EFP{?0r1q)-Rx=e8xiVf6ggwL}cY;-a+x*P}3df%LAR;@T6Q}|ER{_CzD zWOrpEije~^HcK_@PDwIXuZCx~gz8x;*hWx{ir;cQTtFn3{EjFo@;?2yA*6OrSz=p% zX<3Hr{Gh24mVGz|x3eX2|pyVU;;-(HLF;ma}*K_|*+}>kF$4NVCwm5}_--|f{ zdZ9`G8y?&Wm<+P<9#8?aC_n$!oE3jw)cCc)o7cGggP6R-MDQQqH8eW>HUKMunBPai z9|Rq%9gVZo2bkzu24w=kFx|uJ zfUT_#&yd!mk?5y8Z0H}(xh(!dfJ04fCjb=8yJK&4=R{Ksv2u;%_6ioC3L%rF^s z61DU;phLy7ZzZ_U|I0TM}Y$aQ8z%Kw0Q4l zi^0jBsF)aLGIn#5v72d)j1%D-XODpl$U_Dz*0wDg@JA9P3_9L(Bfo+ABx;`N6jM}4 z1HJ{E>pxBn0UVYYWQxhlwHf8qDtFd5ox0WiegyNY1> zk<<*7BUU`dqh5>ZV(lOXScn$#FN%^QCPt&983fD}Da+cD%c1i%)7310branxc%Dtz zIOsO$s3ZHXLH%3nRL`Rs{|(I+94(N> zpKWz^Ni2sx;C&4^LoWmRUi6*U{CXo5ERccCdu~}L%E;*#K+WGo zJi_3Zuch3Qy=SPOPe-VnD0=0oz!gYFA zPyp4#p*DB21$5O_FROBlg+vjEVggm@Qoh+v}ZO*Hs!%>jW#z~!fT!(^n9{5MkL<{(rk zJw#bXj-|)%K;?k@&<@-IsPDT6xL@tXyJNEQjpxOV-7|)Wo!~-HLr}LqgZi$kUBgc8 zbj{WVwj`9<_ElDt`#n;2ichvVEs{GI4>qtMm+!qs2i(6E&ffY~HM4+Oq$jFWD68O@ zh@b59Xn-IHJHE1KDCEuuo&5oeYSj=CcXE8i^LF-(G5zprhBOxYo?m?zVr7ND2C>DLx4nP>`5GmZio}ah~JR)hdoz z{PH(~)RVhnE(+9 z{v9gG9rbhKxaVv}sjQo-h?pjF$sj;uyX-W#5q)Mk(!X^S#ve)vVG}6Lw8y#ge5FTKy($_K77)LWvZ$19Za&@mE z!7?jSMnWfW(F=627f1b10Q@p`xJm`NNr5R~v%5HVn{How_IB@wIn_JMyD4;QUgd#2 zr`{e3l<&7ykWqq4Z>|ZGIfOpD*}&fIk>^%-Zg`F0w(6DN7OBCKq<_wr&LYEEnEQR> zcb0DQWsc*Zh2A2>`LfGGKn5U}3yKg(?Kji(#O$5n*DN$OgXo2k;&MtaFPv|k_9Ax2 zkI<76t~0u$EFc->zC7<5+Ti(B;mXk28jfd9C}&5*an zO866t0)9B=LA}pbeI>I!N6r=Wkiojy(O$gZSPj8Ejb(SO%^$n;)~_f~gO(ma$U}A0 z|L6q=V)Rl3h{I5DK;9At{dW7f2LKz@f9O152uQ1+&})>&h)jP z*$DN3%ui$#%guG-+e^_q%4tulC9rYR7ab^&Dk;`JdoVw0pNVjZ~mgDQ_WwTUf&T_c;QnT;L z8Fy49U+E*CW4-DxJuA)qxEPI^#jt6|o--sch)Yf`tQ7qbHm9!o|MB*g zQBihb+c1qH;!sk8Fd!))Eh0Tgi*$n$k^_PQ(jZ9J&><-xjkJJ(MJOWOrGnCqNcXp| z0rkG0=YH3_-ap?DXSsCdy7oTzna3I1ctguhNFQxr=&Y}l&+w(RoKN3W=Ei0=F1EOt zq9fbDi##+RpM>J@mSR^FTWkaJLQWe^C;aJNgqRlI*UCKO^;+o_*-S#ANUi~i0jEV? 
zuW#K0Ng;Q-^rb8`K>Br1FzdH5bAbGMGFBGG>Dr8JY^W|4o<)*s?LO+q&En&t^;#K1EMckKX5I~YOOM9>+UEGwTLqikNy5QqLOx$`82ga3aZ^K= z!-HsX;FfeAuJW1I^ScI>9tN%sa<-5Jwh_bA;HfVC!^-MREd zx}QE3!p=h`j9ogHfplknlnoOmSYW{l)t1xF-x{I{DpOlbDlKKDuUGQy6Vvj>wjOOE zxu(Rf1v&g}FWJ2g*YJtQy5L6qGz&E0Fjxv-C?F~YV36nNRDs5t!TFP}Z?5_!L zus$aN#shLX4w=s_+GQ^EVsAV^hw|Y07?~aN>FZEN9g}Dzmozvq&;D%g2145%T;8O?iXAQCaxOUIpkS?!T zI4!lKm_)x;7p;}n2TzMqjSlP(?ZX0&LB!yC$87%V3KQZRj=WlFSpniTZ6!S3l|5F{ zTddYun}gLE$noaIERBN{SrjdR1@dAws}j()HM|>WK{*yBx~F@!VGoAJaj6(JnyR(i zdlNkrJ-^f3;8)tuWf;{{Ch$iI(gp#`;BTv%YzWc_SPXE5od4V~CvyP@dE zV7>e2VKkRQDB|Lm-KHk>uZ{!6M5yuStY&yTBt6cfh)~e(2+)^LNJ@AkXH6lxP;O$Q zCQS>0GB|hhn&se~PV_AL7yXW84SkaUG9dSob`OL#XabXtdzc@h*dz{-UK#Nk{>l;tYZ=G_9Nj0o71R)xzAbrELmlaLwl7(15T#)Wv7%f_XU5u}NiX5e;W7Ig8duxqaXxL^fQ39>uyQn^}Q4yTMmp}A1 zi7&1>`hrO^gz`X}`^^5cqFm)E>xo0XdXTbq4c31tZ*|A_bkwb zoe}x?U+#PN0`I!z9yUz$@Ui@)?~?iB+W5JAzQ_p0#pY!q#G>-hEQP8n@Ot^bxkpxC z;&&+QwzT(C5c*z7c(34KsP1Vua`ExTSYk#jS_o? zP3WB!5;{ECF4KRDaiRMr!f)+pct(ldklZ@{AVORRgOAN#g%x_0MQ4Uj3-6Tg>v%PH zp9w5GC|q&dY1zLymdC`)r8Qv1lfVw#UWSm%AZe&cmsL{E+v%2~a9X((6$v5rQ`joTKc0>)xw zu#|Iu;ldKf2er*@$SBlM3291@l3LeIzF$Rl^w-+hcEShji6JPJ9}m@h7Wxs35Zy8E zQNeU(V(#US%Z$#+07E&;@p-VenQjI`-^g$=`aXp(Iy4)_Ku4*a6{-n->*x;y~OgV|VK zQTtbzC0nEo5Ps-q8t()mj~#N4O`sJ8_dq_SM(1PV(cUMwOo(uQLs30ueo+7b0o$sa z&&v&hQfoB`fI|5z)ck-FFc03yW5VTmY!Z zP7c#I-KyZBS4qt`I|=D9J^{cD-(x8F7BV{g60a4yf<9uw3E#dF-e&KGijG!xNHDJ7 zUj$7R1gjvzpNXr+KrA4h0SAUdOX+353w_n*TJkgVHfoF#r(Ow7t7QUZ{Q38=@A>s+ z#A9P%s6@(;>-@&!M$^}VtZ?Cx%7(52VJOkXdPfqqo|^Fkb^?#>gSyE1p}<^t%XouxWt3f$HHa5u12W zX5X(v-0=ng8Bd^DT*PkBnX-1Cq95dr!g>ppq`fG%M&3a2v zFtaJYzl4!tfa!z4@PEj#CS*c;F&Rw=F1o?kr_}C+KIb;i_?%JLcXPA!s8 z!&>b|0Jxb0v5rB0+t}3o6uyg5pVx&loEZ6`(b5SU^g0M`pJ&1Rm<&|5{>gzRUC9Fj zmNAJRir7=%C6u+_5vxK7GWoGK0)56TYTTsf_O z9%Fm>&%o;U@^AH>*OSG+|Kdj2b#smzv{*pXJ+B-iA8)m_zrh<1;J{71HA_OY*N&-? z?^dzfI|&JpjFyEYswit|5WW<-35)Cu1vE$w{o%?R)TS$=) z%8hz6_T^2t%YEM``L^C}_P*}Ar*^ox)-vKEZe{}(8TV;BH&^)arbRNcOpW*GQ~Skz zy9&u8isIG&3%8(oT@;9o`}Nsh z56#4M12xFO6BnukI{1a?dflHojKu6As9~WoQ!N@LtaTM)s)7?Tytkyc#uP%&Ud=)go_Qp?vAc2qL zA=JqAL}H04>s;X|`^Ksun7VP{9B}xMUN0e$@^&Q{r7D(Qb%EBA!u}ykcPM<;1Q+g4 zK+NgQDHw90Pxw{|>sgS|NKykIB{NJ?EDiaQR+9_X5by0S69Y{Ic9DJUNi#kj(Tia} zg)Hm+_d^qSFCW?z1Rdiec^JNU(eeyN3NT9kc0l#`2DNXOfxPuuSz(1Bro_3Zo zN^yF|6d%@MrLxA-6}h9)FPd}jo`s1{U&>6^lXQ8u?vx#6*?e@?W+~AdttsN;0TK!J z=$qKG!cpBrF7;?~x|ZBp%jzbghuoiaqq}cjkk~jNU+N!k-yfQ-GIZGQ3UF@mLW{h! 
z3b%6bNynvEc2*{+0M{&mVHVW!ARYjzMU3|9h=BeY^aHQ>!&oB<@0XjUs5h(B$F@zM zt)FeJX$~)J&d=;j-JfsnG!@+*tW&LhrlzMN8E1ECcyegd?8tuY=XzyB*`+7w%I@#T z&Q>`J*V%Tjz1O_FQn}LyWA!|AOLV>)i!Q=mu3WDepEG^$ZeCwC&rH}$=;Mn%u=WdpKcXPTLh((5Ly&TE|WC|B&- zyc{+dU)obR3QpN^8EC)gW={(bujZq4tv2q6XOp$D**7)1EyBN{{j>d4vUGA9ATOfxnc>C6Z zdcIw3%2qjeCvsT^VZms6O;Ef+?*TK!9oU-=6ltY{YBgsZ2lz6H@jfjD|TPMh1S#QX7=C{ai|* zJ~M&^8m}NRv!!LutNvm8>4fcpfBscK@U@?EuHTL8O>V6~CeL-eZSShQz;+XWT+aSn4PrB@elS*$A-qEm8nm%Gu5}UeG zTdDwtQuX67D^q7;!)<0zDUc&no(`~=81$z!61IOlqiyT0Go+Z_L>q&Ght6kMs1;7| zV|$0{_h_`E(LuxWLi9hW_>ASg)4g(j{5=O%tjBs-;6}Rq5l(XD1w(xJ2slMKwDQb!Y&%G=ia0hunue&3<*qevEe?)W#5G$rRF-cS zhj(+bmXhQ3oNY#(@rP|Zeud(>-{wq&k^dhI^ADzrb_8~@1ik0LW#++f2}dKWPfIG5{lR>%n5Tnd;b;R-cG;?U@Q=p*_jD5OjpzfN zEgPLjM?D$DTO6yw6j1{2d|FLg9or90#w2VsWsU2gD&W+XF3$0kaaa36Q0X7y&II-r z8Vk7n!adnBR%oLH5}&r5jKU+5K|5cb?OfB?*jYke`knddZS@dud7HSUqr)$BBrU10 zDn1C@@12u4TBDN`pP`{~81&}+I@UUou|F1hGh*uBle;jgeZW}){sh`|JnAvUDxcQUx5uzf?0!Y+RSNSUz~vLQlpHt6@Q2|LuGH<#F<00-TG39x>`9oGAq>>IEBT+N;lcgTVU=+=UY8b_X>x<9w;H zCnIRC=5DWmdSVUa`OckpKt)52#2?juj?Ovr)Eo&skH3~$O<-}b#eheL+?)>d^HSI@ zE3v2n;7GL@P=rP1?g=?STmoDt;t5DdE#CAPnF=`nTL3J8}6od>z}z&sbZr59cdTkq1eabIj~IZ&@g{ zyJvTY@+jK$D2neiqxMG5^|n3xYuylY##6vXuo9nTq2=FnO&eH#Fb;~Na+x%$Qly^o zcxCPo+@}G$jpvVU%VJEg%e|`{Cafz#Y~SGxf8vhFb<+!sARLu0zKd~v&j6Kht7IvU ztYVl4s9iBoGXno@P+x&AbcH$Y0E?keVpm{OPs;{SNU;>!4c|1JHddTr zB;&TbZwA%3pIwWbX!i*bmJs^8|E%<_oHhOF2E2%MkKa46C{dSTZ?+15M7dMba;b)` zLf-DfHm43_C|Y2;!+SY+H`GOU=0r^b1|}MF@0>Ne?`=m&y+9>AD?v&C+Q9w%6rJ#9 z4nh6FXrwU+5{v9%hjpUgdn?3hBv=cT+=!1qyA$n`1<~bZ)=M_OvO(U{e(^CfGpel$Qk;%&!qiwvv(2DPL_0b0~iGQwxHVqECi$@*f!0 zOV89KBkWqwpSR&#o3p%AD_P9C#OcD8&>fFn-%(*LH}@^}-K+`Nn%JpWk`$&}GaSzJ zc3P>e%4}Rn>}ye^IDwvzPqk* z{&4rTkG$GMz%#&ZZA>blZu))PK>W~K?HMk868Xa^!RBA0BmmWd+K(WKg%G`WRWQrB z(yRd5%f-@J`{S)vMa){bgIW@r+w!%asFBCSlm5p zf}Yy4ee-QipkAltSA}VMLOdhPkVjh?UF%^ z|H1+J3jSqfcbwAb@|z5)45ho>TxW_8BhkQB<*bi{D)2z_J;?Gbd@sX-qdcMbGkL(C zAc5yoVQRH0jb?#VaO%{aE*KutSPQIpgZO*y*tvj`_=SWgnQTC^L?z650%9=!T4uVm zAfrZ}_G*w|i$8YQk7K@uB!R_DfpuB7EEJ&?q?Z;c1UEJ@916KyFaEy1R8zaoSXyK# z#G?7N5y2NtrxW==B&4oz(c|^HQ$b$uX<3k9yFB-zzCmH|$7yi;(c)VvE~#8_rxO53 z-fxfbRwhhbp8>cC-A7B91ZC1OV@Su_A;>6GhF@XHK`{X4A=eFZ=Sv+|h*zq--G$2*?zyGc-k(Fdn{Tut$5QLKyx2Ok~sxa>z^8&rX4Zx{V zp{50WRLWYW)Ew$ZWCs`Fa$;>(;Z)H;_EfUg?5FLYSbC?&rw)fOf-C%(>`d~6&!dN4nN zW(s3xK$X!3i}$_w2*iyd8Dv=&Fk|1m2?0(TpDo9d)mj6%3A1T0&J%4NFfW~}l->`^M&;#$j1RcaX0u2O(BVzaoa zcf9y^&~*`0v4`F1nGLJ`0tZ$3=*Qd3`E(>c>f+t9zDsXf*6l1C#Qo1zHYJb8kuNuhH09-3}m7)b|z8^5MM zX{wn?%6R+R&fdEK(^S(stCx~n!OxO>=Vn^ghP@{Sjdp*OvyXz^%SNNlS2Ikh- zwrr>neUh}^cfX3T4Lz&tJ~%{9A99iogxhEDh;Dy8UrB?lk_Z7Rp<0Gg#-}jr52dEg z+D8i&j%I-J#+NlAW7nJ)w`AqEFN0|8f(b~8Q%q(AhRvd^75q`Q`h=!?XKN^=Rgn0= z;?igG{S6!eL^c1ivk_w-K{(WQM!86F_rstN%}QzBU6ONp%zcwJmXQi`D~gowX}(p+ z#pX9&je6UU99|d`&UAw`!W~1FZIy7szyRTeSOVQ%obkXoRW@<|7OP$*+ zx}w})G*=<`8~FS{_oWV$B=8r$Mq{I}qpS=+4jnKHiTV3)4B`l?tAw0??C3EOJ;m5( zMv=@-eaFDhY=71FIhfle(fA~!BSZO2{M3-P#Jx)iHB@o+OGt8=h_WNoC~_GZ6rfLb)~2{r6&sOQQ0vmJ!Px=(I+}A^&S!;}eE#Yf zGu3#W-q|o0ul%uhp!Ivr=+adm;0yl6vvMWRiILH~Pv|(5z=a>!wz7U!#76LBP3#Wc zT$DQ-RKW4!`=STxi)J+_D)~7EnrC;axR8p~?Nm%5MkR#{>~dm2*Y)wFJ;5bCa*6_Q z&R2Cq<-MJL9Uk!&zC7+Hqc+@gUkP*E3Hp3||PY@vX|sg@e(Qy6$#%g6c-&JTb2 z`9N&D>2{=o{BWWWT-ib1Or>AD`7`_t@EdbcZPz5sN#e?W7XI}<6?_?V^iA_Z_J~8) z*+PRRD(WA4a5D05DzJ{*8a}<1!(=&G8`Dk@^1vO5jVjYJv#6<=10YFkcmW8bu~f}2 zWGV`>bSmd@d2urQ@ym)N$=mUtELTx9J%?BUn(|Sfr&a3-EYj`__f`y_FlyAu*Ji3>NI;XR$YO!d+L8AJbv2Da(5pd&@BgeD~&bO+Ka z61>%JT)eExE(CLTscfzdR&Nz%ECC0WG@LkKZ>T}){HqU6*+YImwJh8! 
z4s&Q%B#LX9W=fv<$1I%543q!kt2I<>l+x6PM8z+*T*ga(4Bo_AUW+QfN* zjpLC5yCm>?-gq5G>EbwWpm9a_NX5THuAyPq#HF;|YiMg!R8Vw1+_`1ld_0ooR$}_IW2g( ze(fm~Q-i?WeDwP{HWIZKqodMSRmE6HJJx%`f+5*@T|ah{+Nri?d%iCc+MPEMqW4&| z^X1HP*w^o#`!UR#*=+J7$;X~^7x~d@V{LKwV_W@)7EM#TE1RdQyXITZea>!TR}LFA z63D-15a3Xy(*8D}&1%rgJb_}_tF@sRoHQ_GOtU^&J1EW?i;BbieR|j^`%_D7vjUJ@ zzpR{III4X6F^Rp=g*4)ydn&seI`>_?-$0JdZpKBBCD8c@Bv}ZKTRW&uS=>9E$b;SR z?Ye|4yUQOgmAO-`arSDHE?YD`VvIc=j2gpct$QkM;T_Ax`0+a;NeJ#~w1z^NfG~Hu zo#30XL>ee*9Y54w7_O{cPte$P0cK!9^EcE7OwBDRj(OJFZt{o-8blE?^^HD&BW;|; zEa>ayIG-bCfzLPc2qft*4excVhZ(K$73TOUP!xj-zPi%qlZV=cBjM~F-lQcLe>f52 zh9Ew*QT+4Sx3z6V#RhK=1W&gU!F30AQ-~ilqQOWY$CWn<$_cHd0UZ%xgB?QWVWPrK zv@rc`yi_xx*WSK}J=zP1A}7<(`=ENj`HN;p)E`BVeM&2D?jg2H#iI^e)sA%U^Ke`& z6gz85uH)|I`CA5{3oG0b3!WmiB`Q4L{h|w*XjGEV`Q|G(Icw#8LPU<;7*5#WF^XU( ze<}pQPU}0d$#*4^6f;iHd^qs3P1c!^2+i7|kcmC9(4q0vl$&5`LH|P%uGm8#5cL-< zcmq$e9+;9?q)U&PknKxNTS*e|px*!@fEre<^Jt%xiEi#uOV9QTYg8&z&*Q+KrcW3U zqQa-k99NQc0`5k31l@m;s?lgpL<))@&>TACLtDmml(pk6wlD9${9Mn*NG3|B$@n*P zmO<_0F4e%9@lj&J*q6b?D#3ao9MpM&c|Y)^tNh|T=gE^#4<6=R|3-iqlOb zfSRWU9g$BWIgSI3@*Z4p;Md7cEqM5yrwh3brY8hjiv424o%GoPGsQAh`VS^>LieR- zN6dtTqb|bUO|i0+ zM5lz{RJ)KA``y11vbvL}*@G7{yCZdzweFLY#q@w}|8!F2ja*fjqOI#_m}o4p+z$KG zxd*s!oX*CK^Wp!7r-1&U$TY=))65fiO2izzFMS-+Va`el*uSZ=KEr|?#(|u$;IDr) z$CTO43Yun65#2FJZ1{}20&M34`Y7G8E5Bt+fq!#kXAK8_q~?zrxhLkaK2KLS={g>6 zbcQ&*9gpD2b@yK!N#k`D^rAQD#$$yQ#^7ml^JZyz*DsD-74!YEUoT*2)l5E9fjnq? z>zE<2Ie&4?qZZ5qQsr9dKyh=e^w#&!9da4A3$Aa=PHcgRvI0k1#M5W&guSx`p*dr! z`-2wKtXtRTDozh$J*Dq@!;>lLdl>5m6P;d=90zyqj-thXS@XLd?mr&Axc~a%{?5cy zThO?bRSmepiYLYx~b@vwNdZ(v2pQ2|(d(CuuB@ zN@JI3!9(!(oZa-wa#~b6OF}QCjRP8+se&0=)L^15^5fby_Bl)`Qf{0*mDp3>z8_t% z5d`#aL5hKY#oFc%a{G_hhpBlFKU7&ACBjxx84fu!zgN61Nj`5xA;97?|JF~T47-Tq z`z0QbX!s51|K;SKt1fc0pgg4;!8&Iqw6^CQAHTc}r3ApA&*s!XW1&tEe2~rv>*mEq z5kCkJzi7Qa3tE+!Uk5EMIsTfIXh)NM1R0+r*+D&I+e2=N{|CC^BN~E9Mg2R7B+8W5 z%_;HS7r>ao+)_C(t?J>l+o;Zn!;#-TBivb6760GhD9{M4b4h+33WNVmJ;dXh}K)vt^!b`R~-w3v@>K@vmP?mGUO zx7*0nH;GkhW**!F@v9w)9i0%}jVHx_!yE^RGhxq5cl&-CWVmIHtv4(b29ZCluEwAy zq0ujE*NvTA%8BnM-))AKr*Y8-Z z#*d(8cZ32+m>s(^n5Rkhg!%asU3XAsu^?Um{M{}V%_A7w%;3s*Z-%1JBQS%d$Kg9?*db-LU52;eF=;YR0a{)+bv|xU|IHQSd^XLMAV7t0GT;PvTV!)9MZV7S!P`efn2fqlOl0# zQnno!Qp!^78`W%G_5xdoVfu~Vxr~pj$nzmf-0oq|x^lzN5Y+KWS=gIPAjHC!{{)Z6 z?B-vqUwXQ}iua9zf@wEl&dMq=e#zb#H0pFsv5RuZ0ilvL13K-)R;(aH!3hX^Tue}K zg&Hfw{-Db?A-2ij*P4G@A(-3_QpK5YPGpW=*S%Lvc5=|vyMYH&AU9V_;Z~8l&|iBn z&?+ojeGU{6UJC-*irhAeODB-fZ}!Ko)vpO2&y$2iKzKt4uIsGSW#ab*U`3P<^cFnJ z+b&1~?DPhx5cpZ!5UtQ9+z$pKf!5oyKi^Z{2A!W`MFjz&n?sUjyw+OmDva1~+75g{5C*>p- z#D>##TXqe(C?4!cwbqJ_GbOT~s%XwJ=)MnuZ>5v&l$*q3ch@u5 zcS;enJXl8DGIaw`zsonpNV+Zb^ij6=sqFb_xOg z1u4G!GG|V7ug4U~_oZ-t6M_%+;dy5k4`@^cPZaaDgGss_2%JmW)w!x*?hM@#o6z{f zsrtxaW?gfl<2;93Fr>eEfd0ljqyEW68wr%Bzf=K>BamWInnbJSBv5ttWG|?@N7=sx zRYi11gor2x0R~k|ZiiW2zmDTz{Yekbi8jpuv_){4j*oGU*s4$LS?(MXOo;7#U+Ppr z`!=K_an9PsK}=So@;<67L_w=o)u9>?bN@Dn~?a0g1%q9|}E#0X`Et<924ahCc*0$7D-bmoA)g>r(Uap=aVMWlj8 z6=6p4x_}xk6RP}==N0!`$%#9X!$OU2I>-iebb-5#_Qqa&{wT>yDe#sFTcG@8uHm2H?KROYw=o}K2Vd~wPU-n@9HVR^wJqr=yXfC z%sjLDwY6i_z7-kr=z);UDQsrEUnAFj+8LemZ0%O7POXI7)ulHaa9 zhvcSVE@^dL<9Q=vw!>RqQv{jm_i^@6gk8->@Mos zc*3sJzY8~O#A!E1VxJ$8aO>wj&Vh_ie2Rzk+tv zR~uoiZ|L`?6FBCr!rXF%z#aIoI$5d}ePe!1BE?L|=~%}z{S^WlTjwn8_~oWVGDiNJ zm$XV)X>U1ijOa0IXvs-SbT!y^B>G}R(kc?TUG{4+^%%qT3JO`gd;r+^vI`a$Ky>{w zJTs^y4~!wy1QmI=5rGi-&y+ncGw|?n1nU1y%Vjxz9Qfq6B%o&FZ>Nmy=U)yJcsHA6 zpu6y)+5fRDP&+uMbs06X0#7evA%1Avx7rSjJ>Me^KgGn_1Ms4xaXc{c$;?q+8RJ;F zz=?rwxQ5XU0Ai4?A(^mOSqH@(Hx*yU>J2+l{JeYLSR`z4`7tf)93wtM(Sv2}mm#Mg 
z*dXtQA!`)(K*7hUR!lM6KNr+LH!GEA5smZ&^Tgg$OpFqQV#P%2Wvwa=p1&|x#IHf#+Rc}G2+J|4+4rfkIjPjO%e-JZ6()uV45YRts*=4+I4== zqR42(@46Gj*8xPW?kFxb^41e113yMzl0W=y@-pfcXb>O9V%p8Tp~#0DOyHy@JrfeI zprC(|46XPO2U++$;>-~aqG8n+yfATG{Wsc&bMEf~!~+!STkI+%E_NKwf(|sQh!A{) zk|2xu71o|%`?#V}^*wMZ+1umlf;S;lOzkt6tA=rvJ*M)@-}_TMqcO~F8(#Cjq2^#X zOi$c&ND68aWFk>>&mZv$_(!det`FU98`5cXI0BA9b}}>sXcv#+uUgwpy`aF7yC8&X z!WiFNy&1Q`M4lL)M2sM`6mJHJY(yGWDTb;3skWL~357Rt0Xu<-{o=L!npMb}%n-%; z6|4SXVKA5d)F{L6Fx;OIEZ(mOKI<^J0vuSkk1J~%-SQ4c)^9h_S2lxIw!-*n8o;f4J-Gmh_ANmBjJ-1JJgD0AVyyCCV=$E5Fnt1w z80SJa7FN=N)zURxzy2%?1~AsnNS8)_ENOR?rJ?|R~X zdS)lK*l>0{=*Yfe5t-L8&j=ttxCu}^jV_eglg)Z(Y{47{F~`?`o{Lglub?!wyB=B zuSiar86do@@cAD_)2Y1-RCBG-_XVTD8@MbD!;c;j;%_G&97#6 z>-#m`j&pHSnZoDhyL_hclvF;oZ@L;y74HbuKfPaB-Rx2_EL!5&^gZVQaYqP`mL!yx z!gP7V$VKWM$VV|OSDoE4nVyL~7`0ZG*=;U7+S=R+n%<(H{<0QQE3|xVj9mcJrTMXW zZPIGfYZLUp-j&>4O?Zq{QG~mWxtaV%>x-uW%uFdc5w$2i@bu)`5 z+-axYAMrjWT|0PyK+x7@vOme~P;_K>0Xr38Put7k$XzM7JIANaq);1 zR)bbnOEi!^g$?KShsLJfEpOQwQpuI@+)3!HYM2GJDS)f`gi=&|zVh=sHXmeeVBJwZ z!@3@r?|6CqW`e_hv+B=l57Ky|vcJ6;^1$l850*+qcq#R^w(f(t+y`k``2EwSxTGvA zRp#)PWiNZj_SU9U^j>Mkt(izZskb_Np>K+!9nTv3K0k+eBI(A`D__|is<@fuY_S{Exk-+nafNrIbVuOc_CP)9_(35@E|?`M z&cfX&B31q3T>#g#$i|W7@NI{qvX#Tjo(qackx((V&1uA9k2;(IA7S$FSK(wgbX`dm zG^25A8Z*C(Ec=c0Y_Rc-@7&60`q&}EQup;{8-qK#@qVv8k^^cX7LHnaqA2*ZYcF_N zHdK9KR9OR-B=_?&E>V7qoOW}-` z=?bXad${nor-6ODun1-zlBNtNOf@=68KB9m$HYpXGb`N|d`OQ`9L`2|=KYn(X-NxR zWAcHW+$~cPRj<7pA})D;o7iBLEd=mt1pZl7XqBB?i=|BC;DJAIXluvRC?58`WDz&K z{q1MyLor0Wb@SBWAg#XWbuha#jULNM!9Nu6>D!}5IA_x2#ZZq2Pu%@){W9kOu}F5V zbS^~!>1k`N%+v8DZxuJh4+KELIH+R}lSM9!*234u`aIin@oRFaNR+si!^o(`u3W?{ zH;1y{`bOH;IsG6|j8gzAv^QeZA86VUSxaW!-rZv&-(liNsI4_$Gucq>oSM9;?BS|w zP}>F0e``l54UExLg*lO0iZQ;a4*eO#G& zpS3qa7!5>!i2Y*dJ=-vr#gs=Qi8y0a?8%4Ct43!(wYR$3PZnpRsKJiC{ent2$scR# zdTb_x*{{MOS6NP$Sgk=Pb%E%oETpE~Cf69Ytnymt(67>9a3L{P%c>i>Ce=4s?BqVU z)!xh;jf{;4qGWLuBBZ=37{SWCW}mlFE7dP@<3e|7&A|MY>iE;_@dr-CsM(H^SL#BX63vG#rCLWaB=%rTXRM_OGZT#$7y$jCs7@s~3sSONJ7$bZ^Ll*T%nuP!ZnYyiw? 
zyR6p^nK}dJ4L)RYe(s?V6|wl`gS__nc{PAuhb=vT`ZS+0i40vduzv<58qa#fT2lR9 zrfC)S*`>bzs^EM}G=t&U*wox`G=na8>b=Xa&MoZaA5PF-$^0LyB~`s-;oICB;Bpx!ABLQdI}BiAe{ zQv?&2#TOm4zNFEz)HoS!Sih;jlFUVq|2*mvUwTO5Ss}2sk@*Wwn*DwAF>rsuItx{D z=<%gK7@&6NVslCZ6i?1v*&{b1z&4Dlw$H;373fI)1YN;NKW3oSgVCL=3ye;)EEbQe zmmd=LZd5N%hT2hzx((J6NSb=28OZDx;hA4cND`E_JrGAUj@JI_eioL6OBj(MSnpQFw_ z_EA&tYdF^g>{@Ct%gnEtL+v#x$aFP}7Bg0pvJ^GDMs>*qZ7@3LjA~&zzJ8Sb%?VFbcZ9XJ3YyI}CK zz!+i3DYKvA${ZfLu@@?%merM=&6Qb0bJnJj`!o>3;TuxpRh3_Nbeq0X|9q8pqF%!x;x#T=Y14<1k>XzF!EsyO!pP zc?KKX_(O(f@a0p}?LWN(H7KAlF&1grNrD+v85o-$WAudH{9XipO3cSh z@$0aE{fs4qVx*v)l;ifV4UuVseZ!nftplm39N=OwfEXFUesKDT{i z6SZm*%eB4-rWiV?C|qG>QBlDBH$*z-_(Z5j-vAe7ORu0Elh311rNL_iwXDM51>(I; zj`_St9fY2NK0r1C2Vua69UWVX^lbd^rvb_LQ<#G~cA=k}%e)E1`^WMD77gazGHMi= zC>PZf`EW1EerVaFh3=v`#mDp-EDWsZ%O2+A6qD4f26=M9khd1ch-|J8;eV~@DL)vI z2|P9^!T=9Qj$!n=_;!mFbGT42>+}e~L*U^|tSr>zWZ?gPdSgB?ki^{a=!QTTIa$Q% z)6LO$wk0qhGO`fXo=XXa&p|U{!N5Tf_=5+uL0h@vbrJKy1V+%Dg99iRNX(`G{6@Ao z984dy!ADGaALZA`2>zzmzupCvs#z2(Ds6(v3(IdhpdrIp@&EZvD4UQwC2-;@0Pgp5~z>kwULuySPRXJVGnR24vuuL)@vOPaJSz1!eJtn=AU1c zt1cdFfA|5wzyDY&=!nBYJ?*d5e)kl3hM64O1)LV0nMyc3|4C%6sFV4`8zJ)e@QqUn zp}?cxn^^nrb3FwUl@Ky~07f>P87atNsk@n}kEM7?aU2?tLkiCK4n|ntZmoI~zui(7 z#V}Q=UZe@1Vc+u&_V*1YKHvDELj`P1Yz}b(5Pr-DIAxekVg@&uunr%$FnA8GTnRw{ z%C-dtLhSxq|AB6nTd+cnYD=Voc2+(Ir^ZmI7CsU}HU1a(Xa${YEKLQ@dX&|eKYOaCE_ zz`9{pw*ZEoNaYH_cEIS|4Nk4KwXG@hNGL=3uPut!EaVltqJ6U4HHdKiE_FG<8+j9-b@%>ZanJOUZRbkK6*nq9lkn}@ZQQcmZ_aMLTbC&vv@CC zFX>3p@M$w!^H_U(~E)^Bly;RQ1#;!P9Al1lPYhs2}z6v z;Q8Q;k)_4dS;?J%bIn^ny2o4URwccDHu2nJLMF{ON%Erc@qkclg<=`8uFCp{^6bqt zV4;8sSAwjm0XAlAbRCLnexx=iR|-tvrC@Y+aS$lOld%lwDD$sZ`C*Qa`eh~F_qEHn zV(TTIH5N0h87%>?>}D~|3dRYR&9dy@deC8pqw&*Q3tU*bmGG3u~Hq88DB#>?s*WE%p$t z+frClm9IVR+oy!H!7R;B#av_L)c#ngL)M*VNAm%~@{A#2MnA^#Uy*Lmd7nPL)A)$p z9Gj>)S_ubbtW3u~8pf$J9^ZT|Pnc+0zM~vZ$^p~~J-IF@{$UUcK}%JP3LAm@M;b3F zfJz4vW8Z@fu@dsoTlI_@pZ!vM>inDXLnHQ0N;tmWg0svXt+QXkezYhs`iz8n7EQ#j zKoK8i8xQWZ34WyD4QH9lN4G4AEI!dl%Y|53GkKOPlge~Jv%GPrV9@_53HLbGB}TF@ zdv6nUdc9S4O;=S1scQWd2ixnK6u|gQ%gjEKc^YEd%eSVVjV0NN%!>VL^e`>keQ2rb z-86NhaHYt*v;NsY;^#efLNFE+ricLZ#;$~TtuK>@yD%-W#;BK)b4Y1O#Ldg&bI4Wd zfP4oLtX`qJ`A5Ujd=HEhhWOxTlqXeNS~VnyT6jcaOO~UCH*iQH)$^2ZFi_4Et*S;- zti}$;#Z!9c^;KE+W!Fv$j_Pe5sVKl0ov()sBt5*v%S#a$v-ylyg|ubJyYU^45O5V7 z$q|dq1v%kKmJ|g?j!OZJQn~Bp9@D(u4hPe_XKs2M?rseTF53z8LScE(y6;R#BmklT$Ab^tbK#DQp;O!(^j(tqs&}s#+xGh4BO8W24?m=l;6iBo*m_ z-KSlh&+*1x9}lP@?*56nmg`#wRyw-6M;;4|8or8IyKSt#NKTf)^L(TDa2M0WmSQU3 zN=JPBjp+q*yqfR1Pm*@i9~F4?(u?wlk5v@DNQ@8~SnU)obj{;9!%Q`ul*5>|WZ3@B z8vm9xl3~o~5*fyHbH(O}*UcCaX)%T*Ev{C5n1WqlT6&fU#-zTN^Ja9QL)2nNT)Q{G zrtT5{l^@0thsu9&6Q6A6w?3nG-Y!C7mG;|?e8+fV{Z_3n$f$kS0^rn;Z{c%SqWIt;MgauMRT zFpVzi>o!zTSUs17+JGLWH8oHP1%yHiORl?C9(~zhHPc5%?A~d#?BZF=D9|n&-e5EzR&Z= z^Ljn=hu7_OU-xxi*E#1p=Y7t7UFUczxB3E(>U2gPlm2TJOG_2jaY^}*o=@}o#B44P zz4BH?g#d)58)Xop zPd^xX*8>8OX8sl2ct*~6MMOAA?I^Q5He8$~LS{C#sLQQsFHUjiCrAbl zwL2zKrN;hoMi#AWSy<;dcG+UAqZjrsl{$2ntBScUI?8IGu) zwe`8tYrUm4V;X*^JoCm1^=Jg2|3o8o*M?N8F;U;^nS17UN?k`sq&$GM;%!?8X&RwWTWs+d|3DxDSFa>ZR8SajHzocq z{Z?_pJ+5S6^_tGqFm1dIWDK>8Dr{v1vJnE#N6y+Co}o3@2y1nbTbi`u=0})K7)AH+ z6}09suTpN*wepw8DfxU;eB8FklnM`c-b+dHVOa*26O5&;KI9z%kb$+=(oTYJHghAQ zvenm#bN|D>;sh(nhLh^{)AVB#2T3s2v=3>s{G`Xuv3^Y&`21Vx{Yk7TOgkKWE47?PjRT#I8 z4_g!$N&wA6L%Xa9Kuf6go??fyz0uWGbHcyzMeb??_yelZ9ZmV{nEZCs`3rPB+!+HB zalvB!W4J955A)7FJOhqPkY5#TN$8x{{(XpZvl((%@|OU3#Xg7p1E+wNZ#7GK(8d_m zv}Ra>HlBI(%6Hk6g(t0op@b&K8x^&PXM>kHoxzhXC6!P?(#Ddu22yj{ibAoJ^S&L| z=UVrQlbE)$V|L-^A`w^Rqrg_kfk8f5dK6#}R=;>tdgA@WrSn<|77q{EWU{Z7oT+FX zuDka#>H+$pWjN6^7i4E;79MCN(@3Fn_JtN50XE130(TbgUsbGGpl;E)N_y=mUSy({ 
zR%N6Z^udTntU-VUR|8kIm;V6iotY#KUCO47}9#qw{>oj#o2 z3wP~#*C0ZXky=-C<+efYc}nJ=K*%MdR;0Y{p!+lHKQ1)zIOx5WRcK3nc~k=nCkkEy zAcyV>7T=yokFYR-xSa5k{6S?u3Tje-tu?VwMt#SB-A^iw*ql2&(z|-#ThM8BEN)mE zCO)gn&bz}PyYs@5Uz%mU=GV*TO>#;Zf@&1y{99iK1XLGxxF^rcxqNMB9)4Tc@a~b5 zr>`->{-aS^d+ojtc83aAF##XSD6Q^4V-RrrwphYAcAV0J7K7?l zYG7V@DMv9pP=I}*8qbE~uusO>n3JOoAxu23I??0SXfYgiO)Cm!0p$>Qblg?vB_*|= zibj@PB=3G(7;A23V`GydXd4g&7v|?h<`);WLob^>3Y+UH9Y+Z(g}1&xn?#(OpUXzg zXlnIVM}!5o@)y#yDztM|@rwiaqG4s6s83Gh_@oY08pFKb@ClFpukeDk)^Y370G!y1 z$ybuKCm3G|1?PNenaLbFtJ_KBl&B3*^Wnahb1X^wqE_!s|B3_dvAD_KxANvR`n{^u zC&Q91;E|Mo+*_6Ga27C;H&J@h*JJ;l$gZbuhbQXW&Dg}j?&j}nJ~M$j4RdoHYHM+) zu;|2Rp7$JMgYlgK=edTVdKYs-7aHBOM1^uA`2Uq31?v9DTw7pBc*XzO6oY$m0cz0^yIQXACegpl$KGFp@DSX zNWK^H`iS0;qKLC9`e}d{`!XvTUs7qgrO{mLrs1ZjD`k2COJq*wj&2_guqqbJEdsP3 zCBjzjrZ1Q}gl!7_J0;^qQg{LSGqp3mp|fSWad{-8!1}IpjPe~D52K)`Fok!uXR6BF z+#I&47guH8UMCg|nj&08x~CbO)<@K$gq80G50-ao6brv%7m(MU*;*_(Qe0aZN>+_u zEm+tLP?mWhws}z=GiM47q1j3SRjt`JvQfWilQaoY0qd03P&vG`;}juhuKW3-@K2X51^X zJQecQsWVWd-XXJKbiGNcR*4iK>`JK>cFPSAiO(T!>(eJ+vtB0On$bdgdq1VEk@gVA za|(9Jvxn(%hOPOI-rxTVQdeKdn;?GG##d4SIbi)Hhi@RWq>mE`d2ya<X@x2(*_D5Pcw0liGvt8r3pc?v%OusCx8_+#f2!jHdM6by-Ia@}3BJa|nwev0 zWm14l_ro>%02Vdj&PFJio}B5zw43|rg5|1XA^d26sn|DK+1B;7VJ-?}*p56hpPZ}5# z?U}Wad$1LSZ>B-wW~gIgy=U7=(Xg6Ggo^{VWos<1VDz+5jYQhbNE(~=VLe7x31eAp zSP2tmQK>s(Z=q7iV5LTPv_o#1BBm}|PLx%txw_pXb4dgc(6l2r&w z0}>k??%B)lGh_N3f^caqR~t2S%a)hT<<+yg^V6v5OHVmWVbrjwnmO6q8#;<=L}5Y= zflyzMwRkP4f7om2V7qLh$TiA{Yi{WHL9$Bt!tTM+zyTzUnw~PTlhU?vb#3=mMB_IT zLtNWbBUQUtSr%iGCZw`%akQ#AyW%X6y~Ww!7=%lNcE{Ut2L}a=@DPK$KkZ`*$Vaah zcbdbHtrHxLpv081 zZB7X#)l*FW3e(P{2d&LZKc7x_FGH2fM|wC{A48T_9O8{0smf{W1_Sxy@k>mZ5t9qx zHb%ItXyl3rq+Jp+kD@i6gb| z@a%%(VJ1PniJukH+=iWId=L_jIxnAaom^|R80HjrT549Gr&0-9oAF4KU%*^2C#U}o zl^Lp?UExB)voECaB5fI;El4~QwH66zeF#&K5Ne2DP0Ee+$8Qq(IpAe8s`6uYx2&MH z^1RseORu?Z%&;Vn)(OoFpW3;4MNx%~>(1&;QMsZEz1UDLZoP3d^vF>s!W`bV*#(aS z=c$0DvQW_mn#XWb_jxX1}DGq|nFV zT;ex`m1Q~l-gVV_-**J*W4+VYEfV;#4=R1}d1uiA>7V(5c)FM;Dzsr?^jEYO^teTN*P5 z${zTxJL6*2MTwI6e-`K1c9cZK1}|77$KrJsPvW#m{2AOjmPfA;O&wYHY3+XP?{PGK zIyZnoO6UK@M;<d+RvN6>sDcDyo0?f(QZW_HjwGznS*_=Jy`zcC~ zJ{U3>Rb-{-p|nZQ$qM&ygJ{`W#8gY0nrN{bHQ!_hTh7k;o-TntCn`=8+F zPqT^QLO#mF_=2yCkxSoGBk?Ak=tfkF>Hn>1vP>ts&_zV|kM-Pd_#Xl@Ag1b0 zqTXMO>BwLh{m}1C2P{lHbRaei5ZR6vkw{dJA8RR!+}eU+TIOL2Cm7o^Hf?;(FU$A! zj+V0mQ}{1+NV7XJF)=C4L5qoeB|sA0RT}a9jeMi0Q`p7B)AgW@_rUYI?F~{c+nT%a zehO|E+J7olH=}m6{;cy;x_^`T^#s)@YN$y$d}snbtP6br^X~zME$Nn7M=@G?>coY; zX)^Kri>$~W=Gxi$YSoO-;advtbJZWk#FNaowrjnPV!&%bxvDWGDo|>N{-=Bm=8Y#s z$>uJrkg6}m$qgqu?}08p4r6{UD}FAQeFzs)(rI~2d|#awRykLN9d2jUblOzr?SF}B z-6h17<5uhS_cil*x`shV=1gfCrecwPetvvHxWm2Q{kB?H-vwlkOLup~L#nQCu3b*T zo_oI?X2q?i|7LV4B%Y9j)OH&pmuaPMhZhY7YAlasRsgG%;Neu8)gbQ@oAO_%YfDT| zKR6@SIqw9s@UibdjHC_AlFD>nvn$}q)PCFO!2<*7-|fiD_l=$heT6bQFo!K~du;Ic z5tla392S|l4b61GPhEZH=1Z)ooHd#a=+bI$SZbN{Y4DYb?r5Z7wrBeyZn*}{^=XMA zyB~FXjZPEy25{sNJ6M_hRJtAXl>!*&U6`aZ+PCwJO}Cp#A%4HHBEwU+&5!u*nAbAJ zaZtZUVcC=Yt8Q<*Cwp7O@WMy>v+O-nYn%|>w1Cbb{3*qfbj>v3l6q$ugO?$j*}p7v3yBCZbGm%YOdV$Re@V2SNy>EatYNRz-1umFZ~WKUxN` z&!uK6#D;Um7%`nqH4vRaT;JLzYf3y-4why~7^U7y5CUn{7}6lCZ38&OLHvZz3`fZo=;HuV=?H{*jr9yXZ@)_-KKV( z%ENkY3{D=-PZ5_PHXGLxIiG&%){ty2nl3KsFPOXwTk#w3oea@=noX)g({48D;W+OQ zLTwXQU6-l?5mP?te*?mQ>0@^gP6AuNQpZwmeTOONy|KC*kQbuJ{S=jOaq&>&4QOMx zCY(?(3{Vhy$eB6Oaw+)n%^R2R*H+(d-W8k{T7CnN>uh|7tGoJtc@e({`sZqGyR!JX z*4L9PELx2huCj9?pD5H)|Xo}v3Cz)0LCg8D-UdyzNC@cb%eM$rTUaX-B5+pFdA zjM@JAwSWqD#8B5O!jYuea5!nktKnjmCIS?R8EdXixba7i|A)qbKrSb&@7|0#6DxB! 
zIF32lR7{-XNBbj!7aStcbtWzOHkohDIrbJE>3 zhA0nY?qWM@HlDGx_To1Kiz{b85?85a$~YXSRW5pAOLc`SuzzN$6pp#3CLC{@oExsy z$}T?H_j)2^(W$e5&)^ya!htu^LtObI$i@OM{&rh;S(7aV0<&TxCq4O)XfgbCq9gXY z{?w(W>n7x7vlNkL1KYDfkH^mWXT%0tyPX?%gJ!Z8kwc^yrasnrHAv3R)I7!co?TRX z%|$V$^mVuGZf^Djhc0I+%i?k=k^2!-x)jW{4>h%tdxEC`7^_Rdk0$yl^Ut*Y~Q`Gge$sjqAefX__L%d)~`Q+(9xAd3!gZ3RK4OwmNj( zg?M#+9Qe-94>3KKN^#%zD<4vTW$sQ8?f_K~PFFzYva_O1Jl>Yj5aKZmvzp@Z&E=_q zgB`!q(%ql^*7#d#ghLQ6ai%^_D)YHvYt=BGk!(a~N~dDAOFJ%)udHo59qI7I>&wEp zIX?~Gjrrrjm2Iu6L{^&7Lk z)!s$@utFp1crgm+d3<*SGI8{X8;+WllqZA1ASZ( zhY%7vlr=fb_U^%m;OW++#@yXGewF&ec2JHg+fvujXF_@$aE4z^Xj>G&HD7n|7{4Cl zZIeatv1I9VnmwLX34CQrhDb0K-$`Z|E-hRYv-gzwP?bzG ze^;%K6%!yLkpCuHg|)mJqTRbAMDf^Ub+lpB{N<}@J|W7GS7oHU z5H43wQ%@^bdDP5V02fAw*8Qhp_uqZ8Btyn(fz`3EK1>AymeZ6=XC}4VCT%QpdbHXZ zl^uuWgV-y$+oJyN(_I`?p`scA?Ofki?mB93n*&hh^SDyjVpj+@&)%N?@2{MMgYZ>z z5G$+F{3_G6@NUH$Apx-;_?aGJ3Ed3i??ck%!H!q&tCEmovk9A6GXXJ>R7hUP-nyiL zvY!D<^m$w%Hi$IUvMkG!UF$ zqKG|N1kCyN6>Dt(WN;>2b)tb5V6D>6Fk%IKONlpDvrf<>G5KTw+ z?I_(8d#6_RCKjVa$H=^l&&%Mk@yK6|JdkVP<0zCvCx|u3EfezDXgN-!?F^HnExj*@ zi1~z6|B5%H>A0Kj`t0r_$nBK9#s(j7udlH{)V2fz$6vdSqIy{W8JfTo$eSNd9_wpK zcZS3Mc`t7a4`eCp;2ZSbgjkP3qQE_?i*!|W>iBFG4u?CZ{)Evlj+HQ+lV>`jA-jN+ z0kh@%Gut&;O!k47y+FNqBorwfHfF41jQP!vEFybd+zw@OST#_NX8NbC9`wj}humIO zWp>P{^j-6nvlX7=cZ#$ol&41)#O$^rzmK5s7pd+=J+}(JeXPcvU9Xz1R!~mh7{Nb!xKUQtIp+ITq@0P}+ zXd~#&`IZxRGZJydrq6t|F~d{~*uK3$>YECdkij<}_H#f+%m?H9me;UM)=baU>0#F> zAJJ?GM5tX}grVsIoem@VOe$Ems2F6tQxB&0G#U2JvB&Bqc6DhIC} ziCQ>Bk6>nKHmJsB@`a@8e(ImRxitcq@N9czmUzHUG}PuS%6$-@=2rSasbQg7vvEkB z)G$=woZdJTCtN0V6Dx&AR|JL8Y!^n3VafVydZJqGZ55!#9lhU?Vsn4BZ%3vr1vj3E z1`+UStZ37j3~PwZoB&Uwbe>rMFe?^jj!cN`pE4!rvFs=R-BP6ICI*JPc8wb>e( zYBoFKT1l29$1ZX2u>wpP;>JVo8eBL|?**;Fm5nN0I_Nk52oku6O5K|oxcYx?HYBJw zPW$;9EHm({ViDB}3$96n`EPybqrKP0Nn)7k-O56by2K{G-u*z3(hS?)02Yqkx?acS z4&f_~EETas9?ngWqLrmD0BsHKIL2s(ZTLymyjcvJQSU3CW){^zuX*IA(RCL6_N^o_ zZLPSgM2-QYUtN@42e7`uPv|(RD{=oRD?=<<&xw$wvb*^fONHwHnfG&?T5Q$b2zN^~(LCVvdwd>uF!&V(>A7mR*5m|?_4P#o!?pY$)LY84nibQ5m zLiTM;mQZ3C`!a(W%lDz@`g;Eb-yc4|xZRp&9@n|fb*}rlpL0Kt$7Uu*`?(KugFvAD z7mf8TKp-v-5Qr@T$_adO_qE#q2!sJ$)Yr8N8k!>#6K_(27Y4z~O_{4w&W*1v4dXsP z2v(+Hz+CJwchN&ij|1=j-q?Qr;N$v-2E5Xt&MJp}8-Cv(KX3kooa^k!E@ClWVoC9( zoH()+q`;f^$n>(}_R{2s0Vh^i#Fpf^E@J*Ih3drWFWxR$cf9AVTB-q+c_47%KYw0+ z5`+u<-_H&n`hTDOKOaC}{y!i5|HlV+OA1Z1N4Mt&->PayFg|~3b3=x$&AA~@Wo^$< zt3+MnR6U8!PEI>pJ#XD~B4*!qX@^eUiC6}42>$1~yR&-lyMy85Qca?ZVh2c|2VG8D zCCCdP-T#8ZP?k1pE;h6)3+?wsFDOn)`&>l1+0XIMpFdwdzS{zc>Eyb^>!kN(G~?vUR5u3GRZUG*l>>`N8nUD*y!PNcB2bw&T@K{adjw)_df z7~nyLRaN7Cg{F&Hjfy8R(*N-S6#u)H+yJ|)WV&nI@(lABLzv@#7Yids^Kx*y#4SIE zH*`6{Xh^&N^-%ipc`@U3vpA*S(bGNu$04f$!JDf_n&QBAl~>

-6Y65T8_3_+JC! zXIFtks=x0Cc-M4R>O#Uws>uKPttnqCKS&zbg&Qe*{H>dh_&3Ef|K}MqD}y3Z%V)%0 z;|5KarY)k_{`aWuy0WxFQ=-O!(1`euJ#KGAzs&o;XV(|Pn2WtEiFJ)rN|0-4YimnQ zoJtk_Uptu0xwnc7%S%UGG#*sn=ijhN@bn-&kszHVP0=7x@rgf(*g{@*WyxSfz=8i%Bdh%bY_ z#w#_wcYF1W6YA|T2G^Fm{_hvV4uq2&v|V=Lar~6*9kMBD#WXv-Gs8KIn+27G2skJp zzu!Obj|uDFO|`ez7*SFYTOT9VW0B$p*%3tBFtXe8*&rv%u3W8yw&o$}pv}41#GtQe z=ApF%Tg(Zm@U0a3Dss1DEQ_q^KC)Q5+BrY$Q@5QGvHM}{X1ueeGiIDKKjTN3*~h=P z+SSZvrxU)l0eKL-{_P%eV|#sUjOVHOFoDPz;*sKU=mQqMInW>MxGQ^6jM-EUYH-tG zxis8g=%Z&Nw?3q9kFRc5iCpwg!l+>4|ZuwE88}%0YDn3>5HgGiA5l-4It+r)*LCiul zfR)5g6h|8ja5sS(zOgsV&T@r0|Kl%H3SaDvhK6iyH1lYPqI7ESqbZc3Y+ne7MqD4Y zKVoHYq{NU^d?zk2AMd2`0))l6_H}ngE?Rvnc}IYW6FJpwyzU$wI!Ch$?ChuZ%o0$E zVny0W1t~Dcvi5eK&SswvM%&;cB4?2`w%efExqV+}D-XYZ+wz3*Cs_gIM3lr*@taG! z@;rCT`qsS?Lp_jRs?oNEi=xs{fvx=lVJSKZe@Fa8Uj@8nmQc4Ne2O{n!sq;V<44$- z1Ik_+A~4UrQoBO{Te)B*>q>1)-hGn$9E9sNC)l0rJR(=50+eK0G%E zqH~s?Cyhxrd$F9(oP%Gb21{Tuva2JWNYrzR9!VixnJWQjlvHhJM2ko;ByXmV@p2Vf zJi^7-HszAIhbCupNgHY8C|pLujQY=a{W^7A2}dt1MKkz0pjlavX#ZmwIb#l(>DNY~ zJDZ+lNemuU(EF_OTb=2)#Z%XlHRi6**&<6Ax~LLAgsVk1hehZK8_0OT=@oiUm*vgS z32(tt@~C0x<`seRgQTseJ5S?~BO(Qh0ARzx;$u}3p$n!?1d_bqUilZCn8a)D4v zV8|Fbag$CQHb0@K(!68G8OL@LF@{iBcWfq>(tIL6c#Ld@RDqoJ&sh^-UrUafRGzmY zkfzkDxxiRZ!*az-pJk(eFL_9(`AthID=UJp_Uti#xCOYFyZ2{6{AuZ@y8XtDuG$Md z@nWsPnI5Wh+l=7VmUq?gGe%Tcy?;w8Azn|3IJKnEbygM^ahb!RMjofdAslN^0PI3Ry0xb3qlaMWfta zr?6i03AFbFum#GVI9oDZ&)czv(mxr}_`{t^(X1i{5YqFCJ*j8Zx^7FpWxlH|h}5_$pO!d~RN*XqJ+` zs<*nwIYGK%#&9!1hZ4A|spIoVbBI{gK910O9M`)z_+|NPlNsO zdu7TW%3Y?4-#j66Arw-|Ei#dol_V-1#DFht`fo<-JpC6-{W(twJ9|cIaUJKoj(eKo z_d#AdZJiiPIWH!YCi9%;1-oC!Yh-`suE~ROV^#@U!}Qy-i&+%z zrTGI;rshwm4W6Ai%J#|5$Hj%?om#uO!*3Ql*l210 zf4$xY`|Jt{`grN>;3dl7WwCVG!FSKkR*(mTj7u@joZh3015Pos`MeUaF@il04v`cf z1Wm|q08sIkD4u~9KiyL6(P|demq)3)Cvi@vhJS^g%{If8q_75t3oH&>n%xneKF^lo z|KSA(7Pdlwz^e9hxGJn_ew-kHF%8 z;^1fWj1q$^L(5aMXtE0g)FKAOXQpSn*30iWMr5;U z-CX!ZyC7JcDkHulguIpu{!P85>NwgYQIh7OOG-u z!|f~{4&`x+Z{Y6$Mu_cv4hz22#!FkN-O!|3rJ;tTAXt(S(m|0}FfmF(QU~Z=-KwCw zl>_33%GfxFJ~a&aLQ)CdmP;6;b#4r1v7E$OE3Jr!xm@k`Imc!SZ)xa)Ce4CB2Sz$4 zU{KEF%QMeeuRDz2X;S${{XbwIh_w_imLErA+IzOSR!11l-VotXifY$w;p@?R;Hy&M zpdhsp^&j*-wB<3M{7mMu@ctCbNAFtQlG0=nK4l*gXw4}gn?`BxZEbdhc5if7@#xGH zTZ1_ITKsPqOGP$?s(Hdq+w{KI3GeFU1@6#g?h~zrZkX&>N7z)>jmraWohnFqx0aL3 z?&Gb^2K}Mu$LS8no{c`R)|QMGHf}rd8|0@D@lel0F}ZjE3g_M15SQ8xXf0 zvw5E0+q{QEg=L-(ucHc%iYN!fmLrm1z&@IGpUIx+E~`23H}7(%8to&yuJ!3=hacuj zaA)A>J>>Oa2-Y@BUHmsL?R@iM7zZ&2Ibw_6ND5dud`}e6Y@0@XKjW z&bwGunp?tGJ7FkIGm$fPT3xDYJycj)&eKVSpD(-rSx#P8bz&9PPt3ybT8B})TNbsL zfr{xuMCLHf@C0iOONx$>NXX$qkIV$8-CC|&`W#c01$XUee*``M+IXrY$A3C+elfJL^i$_~CAslwD z1XD0&>Do%4f|4@MpQBNhJw{+5Z_}&P2=sKLC>krw%XO5@g%nX&)U)LU;KO8^Mx))_ zQTvDPc74Lii}hBF#7=qYHeOX6A3WV3%IHY9HE&aF#dXWni3z8z{s4uMe9-ezP}wdnvhwPrwQdJMG>k{8I=n zexfA0PEd5XhHsOJIH4CBXeC{FZ{8-J*$5m;j?q4VV{DoK!+$1VJr1H1#C#sd5ts`Q z;}xgt&q3aB{87MobWot=Xs?yEwA|vo)X1Dx8?!q?P!ogN;Ga&Ob&Y}R0�S_024 zLvG0p#kS9ZRdlPZ2S?wphRvu~j^BW=8x15&=M~EQ3WcqTm;4*>hkuhUSD>niny0o+ z%3`FoA4TixN<<#fJ&GoGoemmc*E2UV;V-6q#2f3S^vurYKXV8?HCiWsz(+wmjsaP* zo5)Q6G|f_%YT(uO==^ZHofw#wo!C*g3#h&)ek= zq>o%D|E*=?_|IKfVlpN73@iew$1n4X5$hk2*L0K>f%J)iFIC;v+w}d{eR0cR=8qpg zegoVn3iuN4>!k(MJj>s?|1J-K!u|g06OH52lK#L)oKQ*7FSG9R+x%_T@^$87_;T#< zy)pFq$=#50wQ8mCjqg#&_{ArtI@?XNI%wt^1E^jjmYJc8#B~=FyTI)gn>yxD*bLP% z(JghpYG`GGoUzbHGhMq%*%2KmTKIq-iK0*_{2(l_$R5UZ%*npD9hu}m_4iIU&Ya#@ z8uBC%ee0t)g8Oy0HYP*@uV&uc-Psnw6hO-DCi{<9%J^1s%PlJzASEO#oBG&_!oB$= zz1u$AsUr_ehs}SYB@!|NXZu3PU#s@dtPtPJN^i&me%o`d^!sYLh8Hg&5jXxhqbL!H zw8*aWkpc1)1Q189RD2pc6X^ZvH9sOOF0ag@~S&CbWs zQ6s;$z{SP^G)we$y8pSr{Hp3^#b=c@c{I+kfGRM2f)2XCr@Rsz!`JSt_^HA=*k_Ez 
zGVRLt5eky9%O93uZ_s1>9+{JCaj#nYuGnuuXw)ju_5aJ-88pY@a@3)}n-pR1dm@Pe z(Zr`OP~+*Koa{U_%dm~79m+N(l#jB|4jF1zo1$-TEKhvtPr@ddr_PzZQ7^lD?hsjv z?F__5Wk52lMB0&neA6UEK$ypeIC_;3n1@l_Y}nCfU8!AxYq|u_E!+pys!^^?sqHmV zV*b217>mNs*p9nZs1b;~)?SV{A~Dy(nViP{oy$ zl%#ID^86iW+Tte<_?|m(+Ywqaj5gK zsz9l;bBa7-;`vaOdHomJo%8!RAX7L7F1#IrCBg)ZS79w)@0b8Bx6%-+nYzn%CK04- zdi?N2hbvPCIr&oA2P=mTC*Hr1+2=7P3(@LT zx!>e9vfGu~X0(ScO?xQy4j^G$|GHyw3?_f)2>i91ZmNe4(R^uQ8)hY)#+tmXC~=@N zw~=iCVn%mwy~?JW=w8z6S;oT=T6|H~%bjo~LEe!I%G`M*`D$?3^evguuk~~FFyW?j z+IQCy9obOR&IABa!r7&x0Y?C!I})Rypy2c)@gGkF?RM5*m&iGPSVrwl_UH+4!-&Zv zT~xCsnYveybba8R060vTmmNBzCJU(Glh0cvNAlUP4vCAoNyZWa2X%dfAKSia_j>hN z;#w-|Zz%rrmaCvQBPaGn1pRZ%3;8)pRyowq3W+^#vJ(-QXNOMJ;9y<#9qZ&5JRGP- zy9D_oU@U?)3u>SZT>&?En6~uLRaLAFG~Y3P&(JXGAiL||dD`Y#C$d>}RXT|1;4}(T zT22C5l>os8Xf2&ix8M5yPmEH-B`ewOKo%^d2{VPTkdLOMci~J2cwQgPD>%bAP z9bv=dEg4W}0~?+>c$?^dlL0jx;$4}*-ZfXa-zO+N?Q1&bxnq@xaT#4yX#E+yf8!YD zneMk5_-mDWunVSW3s#8M`gMeKn#_y)AH`wQz66NHswcKOnayevt<#X!XD+R#ioI8+ z2n5PMfKpYq#{&N2fpyO)G0WSkK*vaSujkl0i5heTD)B8yQ16Dg5HlQwzl%f9=Pn%j z+G>sgSK)|^cMuMhccYcwkv)NykEUQ{O+SmR^GL?{^gaqMf%gn?+gy5@Vjk$dF&g1d z<@l`Uz2}7nb`C6%t9+KC{`CV=W4fhS{vMb^81l?gCgJHpX|Z@-O-^Z1vobbclFm2d zY~gupvh31i(g~v)L{DmY;kW|E*@v%RmFpf5S=?zM`Yw>GC!J}5Q?OP~vhISMKnBXE z?9bxvFUWs^<`9Yf@Sj*(q@Z$>IPy7^e@dR~o5h~YQcz;l@sub;9jgv&qY;L@QyeRI zN1Iy0AE*~13T#q`=oA$3aB4zx?QKbWR_|5%wFzP4Kj_5H<#4xp(OF%p(Xd}af@wUN zTA@lIR>xBEYKQhaj|?9vhj`h$A~Nlsl%Gc@(9sA5C_|%ge#Son!g@*QDul3}420633zOf;Zw63O! zplbbQ10L5JgPH$*dEmbSz9q`>PlQ~StB8(j1Y+%eh;hwK(Z0B@p5aIQqKURiiiJZ~ z4>Cuo5_YrOQpRz(@TMYE7oi?3b?FElO zqqhP1{4Y;iIyyQM+y7a(M)0@w#jyxMr3+REoAmen=rcZoTc`7~b$!p)c}KjLQ)_!_ zfsNxAs)Ml~5)U{iC~m&~OCK3+WbfzK*-F~159+9>y^fI{t%B^jXk{_8BJnKWKKh;x zvXJpQs0C0f0KV1&2e=$q${+a0UYeY>V_i9s?sA0R4ESKea%pvPMG(HUwmpN~{>ey; z*jS#QXg1}`)uBZ(^TIQSD{XJinHy@;%NN=J4z?Lf+L<=p32a&el)EEoyp(13474V_1nV~=T`sT4K zHTR6bKa`v-_qgUn&HML3kNI!>x%|Xc+n&gn1INfRz;3s$A_1oZEI1LeBfe&Jh<8Xo z00Doc2lf>ddsS45V&gmP<1n0D&U1Ly0c;gnp!ayGeW0c{WM{T+b2zWIGp3uznCO9= z2$y`AonGQ-H2<#bASZOe5g9HW{4wh6-^YIlddy1>Sk6KJSHslAA!=(026_Nb%*nRz zFPKY|AhCj zGynK=EM&D~7|?!{P?5liec0pf4ckUBm7ojom{ImH119uPal!yBIlWuvX+pf=@5TEw zhhyZqqX)6N>I>AOY$i$MRX!!e`8V2{y49k1_e+#YnN$CbHS%n{YaX3d?RQo9l}d`q z+41Y3hUCrFYDw7nvq-<~64XScEpoK;T^UOJ1vW9<=SrrAu!I`fl$g~Q(X?UjW%^Au z#H(U=*5P=z4ZeDm$hVh4uI^23W@-MBz z*dSn~5w@%3>Sen*)YOTKWD_!%LE+^2fbdaQFAq~q14n~vi6NHAl^|K{iKAi z9W$-@90X(Qt{!QPQr-)O zv7M^jWyLC;nyso~{+*REpWP~{Z!U4B?KM?_iL*{)1FYy=cA&s<9JSoLjDI#Y8@3DN z;-XfFYvE#BYM{P#TwCJg^xL4*P|Q%(!TyBD9OM5yH!BJ)LEwR zxYCp=)2icMeG$e#{0e;|OufA=@H2C5b`d4v$_RY9!^{dI*Z39{YX6*_+wPq0W=){i zD~}|HfWHT5kqy^~G3{|Xp;{zKa@YIj0_KeCs%Kgs$$H!xy-q(w8KG(sy57GVHPz8s zh)SLnqUd>T1o6Qj^6;fb%=jM6NOc0W?Zhg5b+ThVVmo3-2eYrq?Z&s&nsxAAjkY|n z{j*bS+DuAQqq~*GMSB^hSxd2ra^SN%Yqe7G(5VR=ske{Nrh8xSEw41&qu^n`QjzOa z>ECGy6%R-RClkc9tN+rJ?;GLowhdoE>S7oO=*yCd(FOuVtOPLM_C0^xPufTfCf+Ejw&wf~L%p1J`CJ zx7$&3m=EMGO*ce-zTS|7k?}xA zON&;D)ee>{xl2T+Jg)4)cJp=Sc1Wr*!mT~4YC6GJ(5DT@5ESUKgw2&o?UaE^N|(oe z4b4uBB6ERK8RL&%c;QrWn`Cl2X^>fnL8-uf$5>E>L-#f@j{u9a3m@|~rWx!0%F4Bf zy-L?t=dQ1i`DJg@sOhD~m+*YQ_Rgdntp9>_V@8fHpdxI8%``pahFWe5m7vh3?**f+ zb<0i!omE*U`fZAeKeIx&N&nXA^D5FSvaeX>hwGY9vX=B5ri816E!-emK1uZttjhZF z1eQaiou}hl-Y??ktzY9}tk*mbvH0FKeedxjYxU3y3%|}8lv^i<;Ymh9FAJjrL!VOB zDaF{H>ETguTFx(SvecPzy=}W}W$ooajkbqWMug=IGnw~dOC%bPn65xlu z08wv=ItReaM(tK!TLk+f=Qw1o2xyNZOq|x|0tOd21V*wazK8LvtCjA<2u8*oSKxonh1)w(C_qm;GXRJ?9hOb}5r>%HRl}dMJp% zhPRw?D6x8i0-Ja`+21xT%sqtS7|oDta%vs4wsIqkiVhni3*dzVFPkIg^M;-L*SNDcto9q5OE-e77Z>#Ie_$ zXnMf_R1h9uOGc7Aq5AfkBlmpCSdPue2>uI@0Pc+mK}@wI@sM;G_BXJt=4rP_`iP;H z;btC%U6S>!q4n`)$zc+9sM+qeVW~(D$;q$xJ7M2E 
zXYDE&RkIy#n9FsYV)<9WO5db}<2rn`n5lh(2@dO{A*EBLc4NZ%@vda16|9oFJ!2qd zgSzH;oLsUwJs+w@%?A2oKx@okuMZn$yLUdotcH6-VfRbMGXL(PPk5P7i{`6YY0*%e zt46@?3N8CjtiILvP+=h>nQ!?eD#4mt;AIqDeQHV0$OH1#7cNjQ4bm0Q9;(8}RFoXO z?~+&!IwyS0H>p47l*jxw)z{183RycCb(BMbt=Oc4un3e_gULmTTuSNprwJq`U3_r8 zyWx&Wlo;w2AYIdJK4^0yZOG+LPxIKzMeQ+pI8$NwnCiCKMsUR^JkER3DCNL8_G@)c zn_IQnHysiWB@qT(J&~r!MA#2A0z$e5TOqk`mL)-ZUw9B-W?2DSS4Ys_S+MqC;e*K$ zCWikGux+5U7RR?}faTCyjXT?m5&oep#a+gWu0NPK(oSmZn7B^UW`bPv*k&Cm+(F0l z&WenY$KDh6V&iGHVrn7?%X+IPf`_4Iq@?aFj39qfIleT2<3<%=mRM(2%a*!*gJW;rq2E#kB}+jqnZmecb+D ze{N^rqSOW_+&ALTofxjg6nau!N>f+l!_H-QO)!QR&2jQaB`(3eZ@70|Uc~okYcq_^ z0s$isgo9s|&(P&*#=iQ5#DwBX;2*zm96GQd~9?C zMfT`1c59Q!!GVxFbmav>#e1-}K*!;?6VKpS+qKoaNn)wEtpcJ$ zV!}+NQ;aG3c>blQneK6ZsHxs(}>w zSyNWqXWI)g0&(S6=*m-8UNh>mnklKdd7seZ6K~U8 zayRf463{IGJ{flF&-TCa!M%Q9l(RvMVN7Roe>qSC4(!!{%+uQM_lP;DK`+4gOcOZs z@@pe1FyB>ygS!?9r%)^)qJiyTCvsE|ao2@F9JSTMM{870qHH>fX)D1kxEw(I?m)G? zyzQgyuj6_LHDs~7S{s5xfNKZBXhEE5Bp=ty!b^Q=_N)7UW!0`mpjo|V+2X*Zj+syn z>rizU09CsyZ1;xYjsO+|vd1}%8-AeQ$7Q*u9~;+VGzHD4T7mq19df_l1XCAn5liWR zq$j6X$mDowWqo)2S)`562w$aDRKgcFTWv+W`nEW|P_$PxC_+g)?k6Sv@ z*b{g9W+iVI`pww3s`v!tsu*8?X)daLk5<0_6%bD$)*;EkSbbvj`6v&u zEvxR6&bBAhP3L*^_sJQc#T*cmY!0+p`7`Gd+nT>!GNNHxo3p4D@>1J@uT%5nOZ@o` zYOQq2xDc|Pny3{qEo&_wdBY?&io+7v`n!a$tWEHlqrM^RxBHNB zsDZ}=Z3U92TmXC*JA>%);A=Wt+iKSF()MIXXC%e_Y3lmR%DNm{udN-c>nfPY+gO{8 zC+=*C`}9#O(%AyDez zSN31|FLeEhhMU~qa*xbka*vPNsn_h&A4}|JqgYT^%=qtf&Nw!tC5Qmw(7IFx;b#B+ zs_sjW9V&PC(dgw}RzOW3ty1$|HB!3}@iSArSVy9FGo!K<;cYNM&lS|NKnaP4HWn0K z$c!Pi=L}xYEz{+B{}&|tT@qn}e$i}pjzl(q!p43G!+h#WRjv9(J}Q@n2Gn~)evm$j zO21Fgvh(CyOOJVjO6q6#Bk5c}$y6M_Cfx8o!Tz>J;n9TA+8sngVWC&Y zZA0SesE1(9!l{0O55_(UjW;%odI7_Bvu0-((-@FHDL^O~3ouUrW`T{GgBnDBb+EJ> zGpn+hg{Y!)QlPtRxjD{;27sY|ryUzt5LS>zc@U^tO!wA#3K^c#ekQ0FV z1c2E_Et-1e?-rvh1lu;bA|v&qGqejXeMq~XD;_Au*s;WxH|C<0m^Ux5E_{zTFq-lC z6!K84uj4w6uEk^Tr7naTpF|DC1g$dBqei(0jMil{V#dWGSiy_aXpi|!wV^@}iCb!6 zmu~C-nLRlFQug;~5U61yX7B8qxvE#`i3+>>F%MO}@h2A)fha6_FLLcX8}F|@CtT31 zTkyEPuNCm1d8C6hcYuM}FcxFjXKZh#G{`tnX|$M5%3GHr z@`HiA9ZDgS$!%dt8pyZzq(=vwup>wyA=>z*@cAFBDFuSTd79OHGiLyM13z7pyq2ky z080)$Z8k8ZN`M@+FSZCx_QRvta<9%)iYO)Sb=q_cQjM;C3Dg~Fa= z?eS2(F`drq$O;PfLb=p+P8Th+sU^Qo$@}0|dFcVR%wTcyNW6YiW$U}6Tqus4C$&5w z({`_YxkH$>A%g=uQI?^{aPeuY$wXoFD8icXr8(`A&@bZ&nF;}Z)VRPF{%l}0*Frle zf|_~HNj!U1Afl{pcU$TXF&?d+^C)qVe`hJtNxThoXu|St{c=x)P>Rq&sf)VC2xb3>)RGPzy~D8VFCxfTD)M>R9D!T5Kt=Qq>q7dmO8T; zMLymht;K5@d%cMG6tkUXVSMGi{Zbj%wy7t+gJC3V@Xc9YM-6DS;%A-GtThxC1R>KFKq%rGb zFKGLxgK+!|S3a#vmvlTw;77wudLr%C_lA{uGZiu6o$LGHOTK$8&zuSHFBMX0YCAl8 z!Jy&hd-6~hJ|*FeaCw2cL;(Ufp4S>&;qt70Afu(cSWY!6^_sfVtMeW7cMOv|DfsZQ zo3|HmXM??6_$2ZX=FcS|wikhG&9DC6tD~Czv%@ITGhV6iIsNQu=)Pb1)HGzbUKNms zMZ`pVJdy{_TG%(S2RB@Z)G*+ePd83`b&d2>DrasE@(G+|0_%*OnpdI2Rj?5U-Wf99Je{BrT=^kqpDqD+NA z3-vKsRRNk{RXv+Mo9Xu*3@OOwNe^!qg`@P+p#j^#b{36r)!-p5kS@#o8Y2{oTB|7~ zH3eS#t>#qMSPL^IAdS{Gt!|y^CsmX=8rJwetfQ)F3LO(_pp)pJQ5R9jd`i7|6Bis4 z>e*S7xP2L+Fm<9yMa6WPRPYA1ptM(iS%`>;C@x1Pa5e6UdW~u8EVSJj<8kA%`UH?g z?I`xla>yg3UoF@K;n1<0zcTUi=H}iY4M3$0IcsE(HcPdf#2wEroDfr(rbchHbtdbQVc<@~ z-WS%Lp%R=)v6LU74Oq}&usklfrTa8Zpx!U(@IV+dgTrdSnMGw@jbR_o8r@ETuUa~? 
zOT_xwtXjq!p)Ko5fO$yuDf62+277;8sVu$3?uB`Tw$9MOG~7_uTdsm3Z~S-7i14du zbbP5RFm&?mvwHe);Xv38XEBP&Y-u2&E3*)EpVTzsQKG5??xa`R@gTD8JnPbr@A>v7 zpjuAtlcEK|_l6^M?w89sXbkE2h;MSmJ+f_T)A=f-nf`XNKS z&qtKxXUZNZ7NI|jbk+YdlVfSc_V84-Cfamu)=Si?=i4 zOLuY954ce=4?k&P9x^QGl`gXmy_4`x_Ki~Rg^Bk_WL;K>)&Qkny{zmBjb2^WOEa{3 z#r%mHz8^(@M<+*hO8zXi@H=T|d z%{|FCj*PZ2d#a~@dkvG*`54MJcHcPgih+3JhU`(xhLwQys`G@w1 z_xN?~piZFC&b}%Buz+i8IX$)K(|Yvp?fk5XmthIq9+96V#VwB@tQ`9%&osTXltivw zbS(y%KqSO1DyH&9;-{88j-|XB@-~QvS+yqHlEXOL*F48xH?{>my++#MyN6peO$#Q0j;)I@n$HfOWx8eRUz!h~By-GMJ6)qU zS5<~)pM0A1xOywci5}>Y^&@LFw7*&9jZ|@!xdG#*FRix!Bw)V)(93&xuo14M7av)<>D^@;9jZF2B%EzRXAGZW%+NqY-mAgXl z*NeNGTT_g>?Zk-c?VpRg!V_hcCKXOWK)ADW+~eXPTVwD*QmZwO{_U%pNScKFmCZ>) zIGx)Grn)@yl*rt>9bbK-tKZ{ngqIz8yc zC0`N@RAYOLb1y%UL&oL`2eqN7^fkIdCOZ=!g)uTe#sT`=KAYT13?^vj2t^1X`s7bRD9-?AuDNsS~8rbyfaLsJ2b>}4$ zM=pVX#bfE-gn;%4m}4Ex!Opm<`)PC*cIk}K#>|L=c8(pVJD&`SyVDpl#D?9mOX;+l zWH-d26AuJT7os=>?d#15)D}Y@4I?Qwj@W2S-L8!!??fd&u<=i~9g!AA3SU!U;}DMj zZCpg!Jn2gDIbz%gRmlc!qNKoai>FgeochZ0#@6?;oHJWl@ ziFZrRWvSaxzl>6>58G!J*4QVPAX>XOM(4fU+wCU89W~!{H#~2e^Zc!|jdFVZTXWGn zLlz+ zo>lO!a%!7NXS|7<{h5PTA7IDjty&r6CSd!fEoZlQUC;?3zXO@v_X!J1YS_CNB#iST zWcgxtXDsvscUdLodV)YHa~#t#yAG`z*uk8Zo~|!bxvOhvnrcTcyu_NRH|>5iq*{&> zk>0p^a$c7I;c`JK{jW1b16?S{5nM|Z?_KdzGoib%@hIAUO#Wi%`037rn4W5LB!p>* zaqbt%Ics#`)>man)7i#YCb`SHF{RLjzHGG1CJJ+34gU$d3qKeL>oz}<&XS`#R~-)R zS2@dY|HKm9<~MszJtO~hBlw|BIu`$@QBxC zA`<6QTwaUlTI&*ew0dzw5vrk&$vkAxL#I#*f0WbcsMtx{zQd{PudVZ6CMwdr4Pdv- zV34mNJ&#eq#Jc~B4~nDmH-AX>P!3giz1l6M5g64OJXVEMCl(Bl8mNYu{_u=VDc-38 z{=J+~mpJa?m&zXmYCwY;JoxLggvCU1&Wt&_UC%~)r1CWDt_a_F#XnWS?!MmGPQqESyNVl7HQMEt`>uprE?9>Rx z(*nugM3e7oGX`p_n#Nt@msuJ5NS9G%0U?2svdfs`i%(kh$7I=h8tegU%bwF>mzhHe zXob~`Df+zc`7sT?hSf@hy}UB;LWGz4nGP>^lcNX9v2!pNHK@d$>{EVx~;{_7CIFB9D_o^drY zUA&m`BHKyd#=+C)gG!lXe`&q>S}_EBHZB8OVCh_Z7{xO%73#7;3bI5A2R7BJ8xugc zK~C9qwiLWJV%=d;yY#3Ia4v=V^w4`8SSH#+x0OapLuLLNu{HShu0#@-z=(=K2G$0P z#NH(Dpk)QI!VaF9dBzQbX4k+CJxZIB4^K<0k;gI=grtpeW>;utK%a0!9Fwz{Naf#^ zD6?An{PG0_07BFQXOqX(mmWAjG6H`+t5(9SJt;kHChv@>5KO|QkW@egM$nKNlY4Pm z_tmo3lc5bBKE_cFJtm%+YQ|-~dCd39LFqA9fN4APdkIz@D5=1kW_FINYN};VjF|EH zuJiOUN&(>}Spz~12v1;i+`d?%&Neg^m`OO@G<;SLsPmj5>;?4*$UZy%C4@HI%4ArL z3#wl4X~1<$y8CihvC_Mvw6(X)bXmdB?fq(f!?Ir90cpr6-Us4vLq}p!C$}WM_v|}{ zQ|Zy25VNw>H_Aq*Oy*C{cuE6lhJP|cW#DqCUF5rH(To?5-c{& z_NWt@Ov)(%kw@(k!-M;0MvN)7b#6Q(AuOc@kNFbQ!_CuK@q0s-ub%;YPx~j3MY~-RZjgUE^J6k8P_t~={!I(F|2=TEG9quy*507VwZeT-G}?D zTD>J&9hr1xteNBiMzOgtEM%t66|zZUk6}5p3Y6;mgxZf>jGzU^#SKzbFprwVw>wwG z=$@K5ot!@s`x;z?XH+2%i1AMOW5(%Z@r%z0Z(EDB=(=8w%iJdTCrv7a?+_X|#VJK+ z%dC=W){jYT&mF}1GV?e>jiFxzlU~~Un8CDas6k+GQ=bXRL!GNXDgA`|LoZ?y_D72+ zoDZ8XK@*zu1YFAk+F1d2W`K5Bb9v&pmiF|ogHaDSc&em6i@`N=Oki!As0XB&CJq5{ z<(UgrK44G0Zgt>prpu7o=;B`2>>EgzE1-vKSKFy1cmAv#;8O5WNEC+|n_X#5l(V;T zj_sU;O)#I(@Ognd2CbDt10%QN`0{=*8|jGZt8r)vY0;irq^hh$Wz2P-x-Ya2C~uWHG+wn4f@<0btCrufuEb&UE)grEXvTF^@^$D)i&@?A=EZh^7}M$rf< zfU#h;@vv6V%^N;-Jw%964ia+HzPl%H><#Lv4S!?VAHB+-xAS@QLB>$_I^uZ(J@)(eZ}9DNF_y!YDO@xNYV*>mi@ z{TwCROX7@+F^9zCE9|+TlC6@!7gj;CcE=@`)qgCa{Wthu@ZUbY(7U^oSQ-k*e7-io zd@~#rHEn8AZu=@HuH_>Peug~-g9V?%^{1Q`=*P-gvL+?C1xgvNbL8iJ22CTyS-kfs z5-ec0l&wn;Kfrz`1P@U#!Z@#0$K3gLqr&Oreh;7Q*1#d&?&sf~PF^sZ@R^qp4FvBj zY46SPVcGaapoaQ6bF4R$+cTN~pdvuOJLgLNm@TN|B{FGf62o@Z-3!{Xa(k* zP})J!!QTb$M8X=r0F!4Q*C@LqV+WPikboFLz8Vd?*AzyP}-ZrS!zBgKjilW?witIn0~N&NJnAH z7FFoC2@5U@Thz|}5Z(KN?FKLyT$4qsTGw68b7y3(MYPMn9cyFs%l+NFQ1I7ZJ^D9Lly!I~!SH>I*Yc5{LBzUOF3lYJz8f8dF7+ zKDFsP`Z<<$P85?To|^@qQ(CJbk(86vqgwGejdR z1#hYtfkAm4ww6RKE~U|5GIc%X&*A(AO9+;*63H(e1QpbmyjZxsRk8m6G4<| z7guiS%_U`IUa4GVMIqPTvR$%8^)^Cg*0maB+#4y%DirR;MRu;yK(e{6>@u#s@0ELv 
z-%+2>=lA`Of9^Tw`5MpX>-iY3Y(ZlkxLNe4lWPIp^=hV3{bKhET$L`2Bn!vH0h3KT(F9oWn z!c#8g;$SI8AROZ{vqloYTVZ+)f(!1Rq0bz5oDZBV=)GlzFDVqw?H{a3-e27OE$RZ- zokOl@N9Kl0!0}3gVSPn-z8czV^3AO&e{Rqv-2rU}6>4%E9_v(ock}nYJ>lhdEMihr zmKxCMYVIUTvmjRvy~O8?S@C~wL)Ju~^|Y#{ov(Dv26B)ZJV-qy)RyIUT;p09zD_es z*z&Kx3r5!E-ukN2_>D>e6G>yL>;Fg#|0V9OpNsxucN0DZobwr>Tv|)|>rza$APL~~ z3qR5OF18dATu|?qQ-RfEJ8G(phZ#JxAvq51lvD|kzt{K7Wm^c4ddh_gSHBn0rukny zRaqhrMZ9(VH}g~*n^^qQK#U19gi{0p;Gloz;@ZHGxxhn)KS&Bg%!T9NcT%eB%0D%Q z)b3}T={y@URmgH6c)D{te)x#}fIkLb*-`l%1i+ePcTw zX#6Awu*L8wi0@rtP&Z*-4!Z!qTqcpWr9|{DeHhq7M;rlDe{rH#g1riu;Xfp26F!LK z%kS2|r&xeix3u4|TCc^Zm04g2rppL8>0G!{KwoB1XNjxpffvv;*klOCm@}*IwC{{> zMKpRJBF8PQWBj7&rOAzD9omAW~hp6|j}^>^G~ zoUjFz)D)CV*)E~f?)cA?=>CHvC>}~r?YwEdLd1JqD%oyTnpk27)OtXq>6=G7cVV?Z zaDLR(H>Q?9&4#@{fnGia(RtLB%EUJlyH#>knQ&K&a*q=dnq^_y!=QmIO=Ij^;!?hK z6cKh|9*$BDUWu|5vy#)xXCMiFG^`)vUD_hYhZq;+ZI{!$OK+N(!B@(bUSzR?3B@CQ zAu8-dRq@+AJhS1(1qZi4e`6s)KmMEj?;aU^=Fg!2qw>FZLBnCw{9N0)v$kX}Mu-(C zj|GfsNaic^i;6x2Ezae%*=d+uV}!KU?{MYNCe}WLJ1L~%rw9)#_{HXhWzqm~rSLq| ze8&Pk{QVA@QC+_;mUlT!E4L?E_t3rJz|m4$HbwR?`Xr=R-^()9G!2f)aXMJ09wFZe z_@&geu(s}T3;*2dBVKyR!62HA6!HBur=**kOx#uh8B(#ak+s!I-GsGbt`qh%Zdwm#<&s7gak zdXUs-;efryCgtF_GJxuM;iA-$$`N)V3?#c84FZ(d!A;6NCB#}XT zERnx;ogB8j93TAA<;WTUrZz3HNAn^o`SYmeeJ%)%iH2fQ$2?lphSW2}G)xNdLATp$ ztm3;B)o{#fdxAbCA()0Odf)eI&|P)~AX!+AOeD`wdEmc<8?*AcL`~J%ZiS!lN5Q9u z$-mjW*obL>xbje#wh5>OClAJ@B7EsfUt@OuX^?q%fM$ol`MxoH!8#W*8ZumK+-YG^ zp3ec@Kf=Vq1OM43fPP9kXDzaUzEKu*gFMjHyme&I&E}gs?EXi`b;0OA=Myln)Pe0n z5U5xWx{P>7;+6e+%fy8h7@2L4K{g{IfriYC<99VfsM&qTBAG46(9$!|}Z;KnhL<_J*XfWQMH_mbTSKH^4S zKwSy_9ZN5+aW~9RBDefnskrcPJ&PUr#>#a#YwjOrf^hrq#t6(Tjj9-1asUEv94`Q> z?d=EQYSR6drL0QWP)5IUA=^ntHF{ir0F;-0=GOrY0H+feTbA;d|7e#51CMmTLO@%W zhe?Nx9kM9WL>q4QFI|{WiQ|#Y9bxS z9+t0O8S^Yz350=lopV?54#4-R@;)s)qDtBInTJkENys~|jU5?OX&++!18*c7N35FU zsEm7L;G&X%*{ckWo=ZdqE)Gl1c?a2 zy!FNrcanBpP`J=7KW-{P?ghhS*BX=@w=|Pi|yPRNEkEE5jBwQM=Yl6?(OoRj9~^Q(;JE%Tgzcr zu$QF@0Ex!qz8VG9x!RE(-xIdlqcS^K!|B2iYPrV^+QfjfhdofjncI#2dGF;k*pK&Oj=|A%H-l~B%(*Qnb4b1r>73`ZlFtp9^8 zV|(iGGrMX7YM(-C3PKA%c?c5tqYwV)emk5Tb?doTxBfRC=tY`%aEeGT+?7aDF!B0* z^BUqo-Zamv=`)4I#=r?c%I=EWu8H5aP*kN(0IF++QhbE+V&KiRQ45C&JoZp#lHuxB z<`+Wsx2yc~Unw5r1B8Pwp_}qoM}%IK&`!A~1li68B67n)Ew$A#Pj~kg`CP{Ixa&c5 z0dD9?y`O5Ag|*Zkda!>u0YkQd*%_6=U*cmA2&svShV0c4*d6Gc^w(P{n%_X|40nG8 zAA`cD@#HO>;!{fuaW`91eolY?L2%03;#nnX|*U1@dQ+U!oGRpWD*OU-DoQQ!gzht)@b5H*PkNm$-V3_ua=6M8$6i z2)Bk??L6ywIvtenGE_#hB-L>Xy&c6FOU2=d>RcXyuKS(=G(Vk{)!S;5v8 z;?sNQ7Py)z=(Fhi+;L^}Nj1>X_Rv8XiLb?V^D_8fh7K`4zMMYH7*kSlV8U=DtE6Sc z2W{^#1DQnHj-4_aXV7oWz#_mpFZ^b6nWMQtrQjCf&GWlBVw3& z?ZJ^{RoX+Prl5@MkA1$0oxXR3G_@kX%B;1yMSfs92oYkuw19hPnUm)jM&p}x@utYn zT_4$w_?ixvt|_|ea-&=)^i61c8U@bxi@xEXn(`knq;7S8psDe8=!bR_;FNbn~aTiL^OTdvrY0 zHB7Z%A*XAnusGVYn?F@ysnH}RXx;>VJb`3z= zZOu4$zs=I6CT-VLz_x^|AJhE|kG$7;$f!3? zSDLCPym6`#mu>gmwWX+~cbe~*9P){4k&KoSdRzd~NOMFR3s^L_P2^F*VX-;8m0GEi z6>&UtSZG!khG#r)Ii*FhYR-$##I^E*G@bKac=WeCW2(U(=rQo&k-Zn94A7a0ichQg zbCIO4Ei+uoU1rXrbX|{=iw^?52d5WA5GgV{4;Di)pMDmhcCd;?QB+Hm|IUxZ18b6a zGxnj2k1O1v6eFR#^pywhY*(@n6`uSu2p(GT`lHvR>rtWBH-F?Gp#o(DZ~PXt(7FoJ z(E(2I*UWMj`yM{fhE?UJ!=JtNv#qJSv5gX>V&*A$Csa*k-Jx2_Ki{GH>G3ZA9^*2_j$ETh}t3A1$anCJ&rb|@8^vB{Zntah z5t-pSx8=&aZbVELb+X&}4smAf9k%Tu?%kscG687?V*rRALqEl8L{P(pctgw?QE8PQB4T~V2UBkBYp{` zTBc`~p4VgB4Z3|I=w3yaqFV33C9C^)PPzWm{I2}oe*#Ge!S-vl*1|%)w{2^3zr%7b zQ!821dBimWjZY^JpM9VugzkL64)Mk&S)u%+oF)9Ey1iCTD~{wOZ}NtQ*_c?Lz5ncA zhT5|Nwf2rNdqDlFlwM@+P#vYith*_LAB}csmSRC5)CLx73O;f! 
zk`=F~u>+s0S;l{pg;Xe>TSS5TfSPMAD)6t`IaCsY?Mga-bfCIjb;F zawq|HmXN7w#S$Yr8@7}7@T+2(V9r}Wfhv4dP?rooCJNprs`xPo!u5Kw*J1U!s~2%w zI@E;kf!a?U!^t~0ve#j^Ips@x^p1#RLJ zXQN@!tGM=ta0gT*p(rFf-kzWNCPPg@?mk>$+)xIfqh>IP;|OB9KWjVgsyVI@6)JRG zRqV*ED1rU4uYc>R7e~%|;nD#4HcHqT>^ihx$JBC?K9^rctXo=+zR+CbvD+v2iC&?r zPo)|vZ~KD4HKzn_uI!#(*XuK7cndFkOKMn#oEr(q$Sb8zBm_L=tvy=cnq`c^8L$5) zSs-pzv2Sz}N|?ZTO^gvLu_e=aWu-MNF4-7l^i<~6Ab=8JdbgJwZ$!LS5FT;)^Lku1 zfAqWG#XIZ`ESuL=gok6G1PB(SDTuiF&g&jg;LXJj%=;&qF)%>{H^9V@((N}g&Qj&O ztoQ=mN6u&N#wt?eik9pr>ylrw^@VzVqA&L1iTE>x_IKDjhxT+QVa)HDiagc`9_TQ- zG(5|BH0s=wz>W#GZ7J{Wtzb6&wU97@wA;xd1_yiO$K8Sh) zvSD+$s(4hDW=(g<93u+tC&j4(`_`NOCGJ1)K@a_KRw z$%4H*JUD9eQnX5}8(}aXp@tM-ea0)?Y5}+EGgL&mOFYJ6RE6-=?k&W&oC$+9mM|tO zz0kE_Ib$Q}3IMuO)>81|cIg?;UMAprb+%2Iwyu4Q&D@XL#iL8b7Hp~Pn%uGX?^MnY|aK*ZV(EfT+ZuT;XTrm zPWQMty(+r&kT+LwX+H~UPM3+IOO&j%wRcred^pW4qn`131)aIt7YF5wcyvksq_|o} zw8gf(?yICv^?keD^y}VvNs4M^#2gOc;Ux^yM=KOuNKH1)hrptZ?Qdud+Q6LrUAPY( zdyuJCR<&cT#t@wVx}tcZ=cgVsM<_jhmawjqyTo|oxN~rmU#MwS#!nq)f7@gi2G7l1 zHciwGhJ5~Ka~EspJ(**G4CG^s825N`{`9?jEmD-W9mdSpkt{J0rHdP>CB)Zm>Xm0e zr!4z3cLM>+UIxh{`&4WRTR#7=d!jGu%I{28Fv7tbXCf%@W5mwSWAe)hK`UX!IVH`3 z!h-nalKwoaqDOY5$mcuhi%+*JkII$u`Hbo0wMYWd6eD28|mPCaM%XGZn96P?Ee zXQneeEGQ8B8w~1e35)%txH4RaaaD#z02Vz#8JYP{O}URm!L8Nb*6o)JWp;lTx6TXC z{%#dkGHtQ7;-$T7RMQ71RV;Sw7xjNE9msL4zuEG+@S}mEU9?u$Tj%_d+iPmdN2VdQ zK#R-SXH9BSN|K6WjWdgW8{WyF%N*EnaPrf87!%X^c^oMEP23``Ka-Xn=GZEt^8_w< z>)obrL?S{LrFdyzcOq>_c{jYq*@CrMQ4i_VfItO;W}~#70=*Nz;%D2(bkD-M>Bkns zx{4mYklZG!g4-RmR*kgSxmjGWD40Nze-L@pxVs0jcZWEw*Zvw^OW9UE; zLZxnUa`kG4#_BLI%HA*RS3Nf6w!%Sn&i6B&5Dp>iMaks114_FF6prIgD%^$-58d>=7|vz5^{$Z@isER7+#R2$s9E_OURRbv6-HQp8+l$+BP6Y|w}aIaNf#|# zzn0fPWiUPh0+f5+G)^Z$2Q~Ku$9f-zWlclXRLO-^L*-WE{ua{J#aF>+ut9K8LB#Q> zL{L_`XqM`U%f$EHJ}bUAyYxF9j$IBhwlj5;8()NznwTeTT{A}`*AiMQRwsOYQM#kV zo8fyh7c#~^d*_?r99gBV%SQS-1gy3TCRyx!FI_FaX-KZD5ms9V|M9;R;SfG*4#(H_ zJ|vxJ^aA)pbgqylTk&hS2r^e;iEr3#Eng3Shd;VnRaNjJ`RpJ zqWR^sXMH$4JJHpcu>1gKAx-rJu2Og6<60vf#eef=fNxrJ`}xbxS$Wp%?nw;bqu6~8$8&?w-NM)G5ld2V)fY?(>&@mt34>@ zqumbU?bmC1l+Z)2U0a6)u^JH{Zr)X2Sz=)TH0!(DECP+f$?77W{Mi z_g3tC#-}Qd7*hcK_a1K>QREiBsr>;cvQ;A1rgwB+c%04Y4ttS%$i)$Sc!0`HJ+=?r zwGr+359|354;xgE{dfR*`dQSQ(j1y*sB^gGA1pEMKd=&Dk6G{!DuEphF!>l;;8|~( zPBK)XRz>FfkOx4qqaOV^D#B4Rt*h@ez=k=VyEhBQ7>AIZjk`edZ* zK6=@%NJ%yT9oWzK&~_&jVc3@2J!U1g=Cqy1GFt$!W zBu7(#L^`<;U_Z-vuaFQIVL9`>rof*WXWRRp*)6~xBrlBKF)>B1U=f6?7di~?5Opcy z&NmXjjSiKke;=!!%O?fN=Ma4lZM?qjC8@beU6;3B^syI8x`1a9!q182aC>TXgADNt z_JsG1_N81sDui05*u)2X(TQUzO27M@A9uw2q8#~~fo;Z)ey4UP4 zc4P@*j$Sx{s)99!q+r7%4*kZF(iWiOGsFx4&Z6r89G)JV@o|L3op{id1WIr#Fw&L; z$Qdl)D}~gmS5BweJDl;M>8sF@pVrLG{y)avcYgzQsZ$`(!TL^9Bh>e{aD3+p@6baD z;fv10e4c783fWF+vSZ(i!fo9)~L9C;LK8Cu3YPCem&^6PbECLhSKl zmtEN7H#&WXLd39E&hL#ms;Tc6`}^fv9aE&)v?0CO206|>l_^5__@6jaR7@&bBSs98 zlW+6YeSa4~Pf?Q`3rKbLUm zZA@pfUtdrr8MUmwIu!mTJy)_xwV|=L+kdVLue<#bE)-?r6zI6eza%Xjkti}5d0IND z9>CCtb)8|Y{^3WLAN>Dyl-c{oVMh@7wj5TTg?9}JEI%3EfnR}hxZa&Hg%`X?nxl^G&fBRac0uWI{c*Vc}sYsAoIi9)aM=Sx(=>h1KY zQ2IA@5Q}*=MC=Dq*>rmmWJRD`BH2yMC=Um4JaoqT@FyQ)wk%YlG-aYLMLdZ9FAtDB(A9AQHkP%b5 zi0{zN;MlcdXepF>(_uNP(g@fT4rPAj2NYCAm+b;)DbGZ#PtJz~b zI$cQ$u-9eGZ*@DE=gsNF&}>FDG2C)Gad&;b@#Y0$!|W3O9f3yVY~JK$kl@r1`NkbN zAw;i0kqgSGbVTFOz5a%=?h~^vb8@0RN0Y^nA)W0 zKfqfp+{eWQNDU-hqCza2Oy0vRL}^wlbi~_6%f4~~r1=aqbHueK&{!%ybY|ZIiZ{qx z-tlFohM(Ncf$xQu)ayz#<+bHesZ|f0D<_$5FhF4lw>^I7xhO3yV+u$+HE_zZg$*MI zh`Q!ZHkBexM2s-p?RNO#}pat@~J71$kNh_IA?owc1fu|an+tIbF1NPz+3ia z;Q9fegg81vwX4JcdRP^;BTyI;u^%1mlhdA!K-`Upn`Cn4jR`sASLj4ZPw?THq6HPU zSoTWFEu9B%Jzktok|$;TeERuE;2fiHaAx;{AM?0!Ny$*qF(ckpT;fpp6USYZZThFt 
zF0Z+Yh@8sYAC*=T?IodpGi9{o(atj80l*D40b5Pvj$CsiRbu!AWme5mlO_K)j6JXM%!mGfeT6v?a31jvx61HhNM)V=mot)#R_iM<)?}} z*=@nhKv5F>zud{6^w-mf6X0m5m9f{o@8{@Xr(* zEPK4r%BjV{hY?MJC5>(N_xujc06p@sMVklW_v;70SIaJao^NPG^>pwb&&)efb+Aup z{+WgMTr*@XtI3T;ct}qUC<9L8|C*AV4x_HWeJB1E^XT3L*r*}oFZ?x|f- z;?DCIp~WEN_LY(BkFishC>*FoF#QBe6pnWN1emG%#Jpl6pT6}E3GNl^UWNWw#tZ{U zU}__RNEATS&J52^nvvv_SK)7W>RNAg+WVa&~~*h9c(4JdoK z7y1o1ei*#F;MN!Ou!OcJ66RCP#@3KjRGyemU5<{Opg_6Yx+0guTI*Gl_8AL?Ug_o_ zJwGy21-f@>+mwlzapa7L#Mc#|G<0nlD-(K|If zqIn<7p3OF>p7>BiRZ#U`X$yuHe#+H~NheSCmZ6*2{ZqRA7h8i1fEM;_;lrCO+KPU~ z28y=t%C^gkpUVaWIDIt&oID%mxXLZMXE!5WSd{q;0NoPOgsky-TQ7 z0M3=9{l^4qiUa;1l6db3QFR&tbr(Y)gOL^>m(>Ifjkx zz4HXu&6_L`bmv#41!jhr*jr9vCGzGBO;KMjzey9ZVtro)vqN<$p2JKd!S`_pmgj`R znMK01Y1A7Ml7Ub}4}k}+w}%~X%EK^0j@{?X+)T}8NZb3(CGFo%rc)KSH&atZSHA9U zg!%}5IteBSzUV0vSG(oIu8wKPQ9JyjNQ}ntjPLY_U;+XpVam7i+y2W`)EO6WVdH?dv&m@Juwzjm~EHE!>4# z7M@3=tpaPn`h%C#jUXA~qU5)dAL0A>xrR2qX;Vh+*y6-QB&StOo-#Kx6iw zx?}wM!24q(At9Se=tu#P^vCI}j}73JXfmC$=;rm{xyJo`DNCo-1S3&qju`p{a9D0p z&H3^6AVFe~z$*YDs3Tn7xCWoYtx(M%RP#_;kRhI1V?EgXiH=tC?MI9Myyc96e}CcF z(PvOvd#=ydma@Bqr0b9@Pd+{0l5C-`6R%~WsF?O%R_kdssa&*VdVVCLPZ0mnAek4x zl%ArP98`tXZQ~r#9YtpwdR|uLHryDo54(0chQp!A-(s}NOG2YEz9VzA2cOpcZB(_U z(b3*IEjX{p-k-1AO}DfruwmF~8duXaX*hCq(IT?Y=~q>9*wxjR;eWTYS$J*{94EC` zM8FInkVqWVo=;4{2osX71z{8h(>dJz7xwK>EZah?DntsF&)R5~UFe?64W(*BtjJaF zslpofRoC4{lPR)c$-#L$wuZJzW7o4gu1?05%ufMV2Lx*GI6zjFMtDSm?KPy&X|%`Q zT)ULJH8=FmzsT}9x*^D^)P%0D^5}+#_BW&_$f`DA+f`c&*-#p^(=DM}pq=le<(YJ% z#r{EDgMAIG#=ceiku}o9K5y7@g-!T=zV7DyBFM4(>R&3 z4}0B94_h5p%(`3oz!KtEFo_NN$iJ*TPos)#&3!2-#o!eS*8+?C!UUa#9p?E;Vxho= zu#bQb`cU~+YF+Iq6s!iRzEF+2jI;WZf_JaJEK*n>Q0$m$S*2pL7VeN)X)YS7b=$Zp z*iOT=Eo+IjDp0FJ7|eb_=c;O1 zN-$5n*C?yHYje6T{qg5Q&`T;N#7nV5<920`~+2{MnSBBuJPL-xF`+$m(; zc&AbJKM0^zP)i+`hENy29hB#-C;nYkqKZM4vXZ_ORBP zTQPr`bV3~zg|Js$SFvRmfq*~|^+!|3W|xQS!TqACnh(FHl0R}*h^)34GT zPYnXxS=Zee!fmx1wLWmL{I}J>d4#qE)>}aJ+`nkH%u_KcyFx3(d;)sO?}{$V#Cxcv zQ}F07zCzVO?mm+j;pz~uuDRukEbtRmsP<_fC&e`3!<&~`ofDnjQaO5 zClzh0l~^#hy`NehD%X`l`JZM|+Z~QZdeXH1hDkb{Ahg>cZ^V7V%3a=|`4U>;lse0sG-$vKey+v+Tc2JBBe{5TDQ@;?iv$le$D4wujTMsnWSXQq+uivrKY;imy*4R)@y+$ z&ChSZ7A1$o2Ol+dyKmQC{wWLlM4rf5k|O>@z4Z$7(t!w4m0~0b@^|+U+|80Xzx+ve zatmNxejaBvdLNJKRl1;qDRhzVJ{Z{YzV}ntIhBDra-`cjtziv&an!1*?ok){SzhSY zeuRKX?Kh?8z^noGX#V2^+p-4#n?~{-p|7)NTL5ZyhHkdIpKJk26C%4B?b}m3nRw|r z%vM^rjhgS!e)%bU(i(p!S~=yDzZT!zRjLY|@=;W`!Jsqji(g>k#M&85RH90}5-g@# ztRD-RAAT?bX{=k4}?7nh@|*G2X;i286;I!NWczWx`8 zFqn6TF3c>}j+Lw9bh_+Y;8ttpVe=I0(Ztar2Ln6Qc9pR6BuCl6>TB&^dt0Jj-m%|) zMf$y7m zyO9{8a~(VVG&v|K^P$j(3r2RM^W_mz4D9y5NBU#UN63K)6)-c>+_9t3A@-O zKA~Jj z?GN_2n}$a=R47Fz4!(M*^-)SF4t0N^aAk2zlMiSBJiT%7l6m+hWI~ht-$XcOl?td0 zUdMJ$bH~^mGq_lnp2Fk#^wROdOTW(LR19}`9;<8Loc3(GRLQk4bSkVQRDySbK3kQw zH@3MExZH3)WpUe;Ov^J`oKzG^AI(L{m%k#VbfA34BeW7h(yI(?S$amH@e3LpF{9IA zK250mj6S0mD^>^TQ}074MC+&Lm}`#}D*v})vAZ-NN9fd?^JP_kSGo-gIh}l)Q_lvz zdpo0gYv;rE*1lMwElfgJbZ)-s(u7}zKS6S9v+*Dsjoxskhaxs7vf<;hAq-6;LA z>EQZ zc|Q z5h6Vrxwje4EU0}5_7mY+*_{8Zkp1NuHBf}8WMu-WMxS;uTw&v#ZNuzq++7I&^)Gj> z&6!Yvi3`5U9F+PuwtUgNsI;ALB+*Bu)fmrV52s3*5%MpyTr+xGdYrG_adja(C3Hgd zvzYRn>ZeQ7)1J=*Pv@CvsL)P2X~>S#VV={Nf<^h>cxOr30!o0+ z1${dbmWWZb^(__R5CtWMfu=qzzJKhj$FHBPC$M1VA)*bd?-ZRsmui3R$vmt0nYNB! 
zo@Ga`#Tk1e8{NIOyCb0`mH=wNRxg(@1N3K%^y!2#GQi&`;fkcnhSlLUUyXA&Gvk>q!rZY->c zVK$L(-qhh8xJE(?C=V=D zFqMg|v$QoSLZQCF?z!mL*9>J@c3&_9GizV;wE~63@3w260D3orF9G9SJ-Io!P2~|B zib*Kn1y-%R8W@atZ%FLpBdcqC@B#2D3+#tr2Tr>a?5geTDpc{e;(@i{8vKM&_;I(qQRn;K=&{y2(j4=@9UGJDru6M`FziQ}^++P2{s*VM*Gm7mw>6FaWYT8my+ettvsrOs!4&6^$*XIDuJH#(UfZ&aB zQ^PLins$pb^6kgRz(JidF>#0DqZI1(s3i~g&zXJ?HUOev$qt$oI3=k6=F>kj5)#5D z`UjcPwEMX|Cy+<+g|MJ4r_xDLP?huy*aDyJ8zORVe*AO!?0!U$SL&8$skU;MgXV`V zXx721v%9nDg2Y-{=2pB$V^DeXJb7~ND*Lk{B|M?z0)2edJ}8wtUe;T8sc>F@4J=jB zj&6Kr!M%sKu3t9RrA%l8kS_VNm?fb12>-sVKH>xuD*g8Q&4+anbi?xtEL-vcKwv|_ zQo*Uyv%j{O;cN%PP^Z}rF#c-)J_arIjW7DC--FG}m-Gz4#;`gPgxt~$&f;=wDLrOg zFCu>T-y7)vYQK*JC;jX^V&`{GfB*NBq5pn;9fsij=Ut2o&5M8~`|n%X@=U?jvGbhY zzcZEwHtY8fJHM0S$Dsb#t?7G#F8;4u|9^O4|1Z7&3*DnmY{QnU>IbXV%8r7=@R-j3 MarsRCX&d7I0QZdV{{R30 literal 0 HcmV?d00001 diff --git a/doc/neps/nep-0047-array-api-standard.rst b/doc/neps/nep-0047-array-api-standard.rst new file mode 100644 index 000000000000..19965c20d908 --- /dev/null +++ b/doc/neps/nep-0047-array-api-standard.rst @@ -0,0 +1,590 @@ +.. _NEP47: + +======================================== +NEP 47 — Adopting the array API standard +======================================== + +:Author: Ralf Gommers +:Author: Stephan Hoyer +:Author: Aaron Meurer +:Status: Draft +:Type: Standards Track +:Created: 2021-01-21 +:Resolution: + + +Abstract +-------- + +We propose to adopt the `Python array API standard`_, developed by the +`Consortium for Python Data API Standards`_. Implementing this as a separate +new namespace in NumPy will allow authors of libraries which depend on NumPy +as well as end users to write code that is portable between NumPy and all +other array/tensor libraries that adopt this standard. + +.. note:: + + We expect that this NEP will remain in a draft state for quite a while. + Given the large scope we don't expect to propose it for acceptance any + time soon; instead, we want to solicit feedback on both the high-level + design and implementation, and learn what needs describing better in this + NEP or changing in either the implementation or the array API standard + itself. + + +Motivation and Scope +-------------------- + +Python users have a wealth of choice for libraries and frameworks for +numerical computing, data science, machine learning, and deep learning. New +frameworks pushing forward the state of the art in these fields are appearing +every year. One unintended consequence of all this activity and creativity +has been fragmentation in multidimensional array (a.k.a. tensor) libraries - +which are the fundamental data structure for these fields. Choices include +NumPy, Tensorflow, PyTorch, Dask, JAX, CuPy, MXNet, and others. + +The APIs of each of these libraries are largely similar, but with enough +differences that it’s quite difficult to write code that works with multiple +(or all) of these libraries. The array API standard aims to address that +issue, by specifying an API for the most common ways arrays are constructed +and used. The proposed API is quite similar to NumPy's API, and deviates mainly +in places where (a) NumPy made design choices that are inherently not portable +to other implementations, and (b) where other libraries consistently deviated +from NumPy on purpose because NumPy's design turned out to have issues or +unnecessary complexity. 
+
+For a longer discussion on the purpose of the array API standard we refer to
+the `Purpose and Scope section of the array API standard `__
+and the two blog posts announcing the formation of the Consortium [1]_ and
+the release of the first draft version of the standard for community review [2]_.
+
+The scope of this NEP includes:
+
+- Adopting the 2021 version of the array API standard
+- Adding a separate namespace, tentatively named ``numpy.array_api``
+- Changes needed/desired outside of the new namespace, for example new dunder
+  methods on the ``ndarray`` object
+- Implementation choices, and differences between functions in the new
+  namespace and those in the main ``numpy`` namespace
+- A new array object conforming to the array API standard
+- Maintenance effort and testing strategy
+- Impact on NumPy's total exposed API surface and on other future and
+  under-discussion design choices
+- Relation to existing and proposed NumPy array protocols
+  (``__array_ufunc__``, ``__array_function__``, ``__array_module__``).
+- Required improvements to existing NumPy functionality
+
+Out of scope for this NEP are:
+
+- Changes in the array API standard itself. Those are likely to come up
+  during review of this NEP, but should be upstreamed as needed and this NEP
+  subsequently updated.
+
+
+Usage and Impact
+----------------
+
+*This section will be fleshed out later; for now we refer to the use cases given
+in* `the array API standard Use Cases section `__
+
+In addition to those use cases, the new namespace contains functionality that
+is widely used and supported by many array libraries. As such, it is a good
+set of functions to teach to newcomers to NumPy and recommend as "best
+practice". That contrasts with NumPy's main namespace, which contains many
+functions and objects that have been superseded or that we consider mistakes -
+but that we can't remove because of backwards compatibility reasons.
+
+The usage of the ``numpy.array_api`` namespace by downstream libraries is
+intended to enable them to consume multiple kinds of arrays, *without having
+to have a hard dependency on all of those array libraries*:
+
+.. image:: _static/nep-0047-library-dependencies.png
+
+Adoption in downstream libraries
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The prototype implementation of the ``array_api`` namespace will be used with
+SciPy, scikit-learn and other libraries of interest that depend on NumPy, in
+order to get more experience with the design and find out if any important
+parts are missing.
+
+The pattern to support multiple array libraries is intended to be something
+like::
+
+    def somefunc(x, y):
+        # Retrieves standard namespace. Raises if x and y have different
+        # namespaces. See Appendix for possible get_namespace implementation
+        xp = get_namespace(x, y)
+        out = xp.mean(x, axis=0) + 2*xp.std(y, axis=0)
+        return out
+
+The ``get_namespace`` call is effectively the library author opting in to
+using the standard API namespace, and thereby explicitly supporting
+all conforming array libraries.
+
+
+The ``asarray`` / ``asanyarray`` pattern
+````````````````````````````````````````
+
+Many existing libraries use the same ``asarray`` (or ``asanyarray``) pattern
+as NumPy itself does, accepting any object that can be coerced into a ``np.ndarray``.
+We consider this design pattern problematic - keeping in mind the Zen of +Python, *"explicit is better than implicit"*, as well as the pattern being +historically problematic in the SciPy ecosystem for ``ndarray`` subclasses +and with over-eager object creation. All other array/tensor libraries are +more strict, and that works out fine in practice. We would advise authors of +new libraries to avoid the ``asarray`` pattern. Instead they should either +accept just NumPy arrays or, if they want to support multiple kinds of +arrays, check if the incoming array object supports the array API standard +by checking for ``__array_namespace__`` as shown in the example above. + +Existing libraries can do such a check as well, and only call ``asarray`` if +the check fails. This is very similar to the ``__duckarray__`` idea in +:ref:`NEP30`. + + +.. _adoption-application-code: + +Adoption in application code +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The new namespace can be seen by end users as a cleaned up and slimmed down +version of NumPy's main namespace. Encouraging end users to use this +namespace like:: + + import numpy.array_api as xp + + x = xp.linspace(0, 2*xp.pi, num=100) + y = xp.cos(x) + +seems perfectly reasonable, and potentially beneficial - users get offered only +one function for each purpose (the one we consider best-practice), and they +then write code that is more easily portable to other libraries. + + +Backward compatibility +---------------------- + +No deprecations or removals of existing NumPy APIs or other backwards +incompatible changes are proposed. + + +High-level design +----------------- + +The array API standard consists of approximately 120 objects, all of which +have a direct NumPy equivalent. This figure shows what is included at a high level: + +.. image:: _static/nep-0047-scope-of-array-API.png + +The most important changes compared to what NumPy currently offers are: + +- A new array object which: + + - conforms to the casting rules and indexing behaviour specified by the + standard, + - does not have methods other than dunder methods, + - does not support the full range of NumPy indexing behaviour. Advanced + indexing with integers is not supported. Only boolean indexing + with a single (possibly multi-dimensional) boolean array is supported. + An indexing expression that selects a single element returns a 0-D array + rather than a scalar. + +- Functions in the ``array_api`` namespace: + + - do not accept ``array_like`` inputs, only NumPy arrays and Python scalars + - do not support ``__array_ufunc__`` and ``__array_function__``, + - use positional-only and keyword-only parameters in their signatures, + - have inline type annotations, + - may have minor changes to signatures and semantics of individual + functions compared to their equivalents already present in NumPy, + - only support dtype literals, not format strings or other ways of + specifying dtypes + +- DLPack_ support will be added to NumPy, +- New syntax for "device support" will be added, through a ``.device`` + attribute on the new array object, and ``device=`` keywords in array creation + functions in the ``array_api`` namespace, +- Casting rules that differ from those NumPy currently has. Output dtypes can + be derived from input dtypes (i.e. no value-based casting), and 0-D arrays + are treated like >=1-D arrays. +- Not all dtypes NumPy has are part of the standard. Only boolean, signed and + unsigned integers, and floating-point dtypes up to ``float64`` are supported. 
+  Complex dtypes are expected to be added in the next version of the standard.
+  Extended precision, string, void, object and datetime dtypes, as well as
+  structured dtypes, are not included.
+
+Improvements to existing NumPy functionality that are needed include:
+
+- Add support for stacks of matrices to some functions in ``numpy.linalg``
+  that are currently missing such support.
+- Add the ``keepdims`` keyword to ``np.argmin`` and ``np.argmax``.
+- Add a "never copy" mode to ``np.asarray``.
+
+
+Functions in the ``array_api`` namespace
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Let's start with an example of a function implementation that shows the most
+important differences with the equivalent function in the main namespace::
+
+    def max(x: array, /, *,
+            axis: Optional[Union[int, Tuple[int, ...]]] = None,
+            keepdims: bool = False
+    ) -> array:
+        """
+        Array API compatible wrapper for :py:func:`np.max `.
+        """
+        return np.max._implementation(x, axis=axis, keepdims=keepdims)
+
+This function does not accept ``array_like`` inputs, only ``ndarray``. There
+are multiple reasons for this. Other array libraries all work like this.
+Letting the user do coercion of lists, generators, or other foreign objects
+separately results in a cleaner design with less unexpected behaviour.
+It's higher-performance - less overhead from ``asarray`` calls. Static typing
+is easier. Subclasses will work as expected. And the slight increase in verbosity
+because users have to explicitly coerce to ``ndarray`` on rare occasions
+seems like a small price to pay.
+
+This function does not support ``__array_ufunc__`` or ``__array_function__``.
+These protocols serve a similar purpose as the array API standard module itself,
+but through a different mechanism. Because only ``ndarray`` instances are accepted,
+dispatching via one of these protocols isn't useful anymore.
+
+This function uses positional-only parameters in its signature. This makes code
+more portable - writing ``max(x=x, ...)`` is no longer valid, hence if other
+libraries call the first parameter ``input`` rather than ``x``, that is fine.
+The rationale for keyword-only parameters (not shown in the above example) is
+two-fold: clarity of end user code, and it being easier to extend the signature
+in the future with keywords in the desired order.
+
+This function has inline type annotations. Inline annotations are far easier to
+maintain than separate stub files. And because the types are simple, this will
+not result in a large amount of clutter with type aliases or unions like in the
+current stub files NumPy has.
+
+
+DLPack support for zero-copy data interchange
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ability to convert one kind of array into another kind is valuable, and
+indeed necessary when downstream libraries want to support multiple kinds of
+arrays. This requires a well-specified data exchange protocol. NumPy already
+supports two of these, namely the buffer protocol (i.e., PEP 3118), and
+the ``__array_interface__`` (Python side) / ``__array_struct__`` (C side)
+protocol. Both work similarly, letting the "producer" describe how the data
+is laid out in memory so the "consumer" can construct its own kind of array
+with a view on that data.
+
+DLPack works in a very similar way. The main reasons to prefer DLPack over
+the options already present in NumPy are:
+
+1. DLPack is the only protocol with device support (e.g., GPUs using CUDA or
+   ROCm drivers, or OpenCL devices).
NumPy is CPU-only, but other array + libraries are not. Having one protocol per device isn't tenable, hence + device support is a must. +2. Widespread support. DLPack has the widest adoption of all protocols, only + NumPy is missing support. And the experiences of other libraries with it + are positive. This contrasts with the protocols NumPy does support, which + are used very little - when other libraries want to interoperate with + NumPy, they typically use the (more limited, and NumPy-specific) + ``__array__`` protocol. + +Adding support for DLPack to NumPy entails: + +- Adding a ``ndarray.__dlpack__`` method +- Adding a ``from_dlpack`` function, which takes as input an object + supporting ``__dlpack__``, and returns an ``ndarray``. + +DLPack is currently a ~200 LoC header, and is meant to be included directly, so +no external dependency is needed. Implementation should be straightforward. + + +Syntax for device support +~~~~~~~~~~~~~~~~~~~~~~~~~ + +NumPy itself is CPU-only, so it clearly doesn't have a need for device support. +However, other libraries (e.g. TensorFlow, PyTorch, JAX, MXNet) support +multiple types of devices: CPU, GPU, TPU, and more exotic hardware. +To write portable code on systems with multiple devices, it's often necessary +to create new arrays on the same device as some other array, or check that +two arrays live on the same device. Hence syntax for that is needed. + +The array object will have a ``.device`` attribute which enables comparing +devices of different arrays (they only should compare equal if both arrays are +from the same library and it's the same hardware device). Furthermore, +``device=`` keywords in array creation functions are needed. For example:: + + def empty(shape: Union[int, Tuple[int, ...]], /, *, + dtype: Optional[dtype] = None, + device: Optional[device] = None) -> array: + """ + Array API compatible wrapper for :py:func:`np.empty `. + """ + return np.empty(shape, dtype=dtype, device=device) + +The implementation for NumPy may be as simple as setting the device attribute to +the string ``'cpu'`` and raising an exception if array creation functions +encounter any other value. + + +Dtypes and casting rules +~~~~~~~~~~~~~~~~~~~~~~~~ + +The supported dtypes in this namespace are boolean, 8/16/32/64-bit signed and +unsigned integer, and 32/64-bit floating-point dtypes. These will be added to +the namespace as dtype literals with the expected names (e.g., ``bool``, +``uint16``, ``float64``). + +The most obvious omissions are the complex dtypes. The rationale for the lack +of complex support in the first version of the array API standard is that several +libraries (PyTorch, MXNet) are still in the process of adding support for +complex dtypes. The next version of the standard is expected to include ``complex64`` +and ``complex128`` (see `this issue `__ +for more details). + +Specifying dtypes to functions, e.g. via the ``dtype=`` keyword, is expected +to only use the dtype literals. Format strings, Python builtin dtypes, or +string representations of the dtype literals are not accepted - this will +improve readability and portability of code at little cost. + +Casting rules are only defined between different dtypes of the same kind. The +rationale for this is that mixed-kind (e.g., integer to floating-point) +casting behavior differs between libraries. NumPy's mixed-kind casting +behavior doesn't need to be changed or restricted, it only needs to be +documented that if users use mixed-kind casting, their code may not be +portable. + +.. 
image:: _static/nep-0047-casting-rules-lattice.png
+
+*Type promotion diagram. Promotion between any two types is given by their
+join on this lattice. Only the types of participating arrays matter, not
+their values. Dashed lines indicate that behaviour for Python scalars is
+undefined on overflow. Boolean, integer and floating-point dtypes are not
+connected, indicating mixed-kind promotion is undefined.*
+
+The most important difference between the casting rules in NumPy and in the
+array API standard is how scalars and 0-dimensional arrays are handled. In
+the standard, array scalars do not exist and 0-dimensional arrays follow the
+same casting rules as higher-dimensional arrays.
+
+See the `Type Promotion Rules section of the array API standard `__
+for more details.
+
+.. note::
+
+    It is not clear what the best way is to support the different casting rules
+    for 0-dimensional arrays and no value-based casting. One option may be to
+    implement this second set of casting rules, keep them private, mark the
+    array API functions with a private attribute that says they adhere to
+    these different rules, and let the casting machinery check for that
+    attribute.
+
+    This needs discussion.
+
+
+Indexing
+~~~~~~~~
+
+An indexing expression that would return a scalar with ``ndarray``, e.g.
+``arr_2d[0, 0]``, will return a 0-D array with the new array object. There are
+several reasons for that: array scalars are largely considered a design mistake
+which no other array library copied; it works better for non-CPU libraries
+(typically arrays can live on the device, scalars live on the host); and it's
+simply a consistent design. To get a Python scalar out of a 0-D array, one can
+simply use the builtin for the type, e.g. ``float(arr_0d)``.
+
+The other `indexing modes in the standard `__
+do work largely the same as they do for ``numpy.ndarray``. One noteworthy
+difference is that clipping in slice indexing (e.g., ``a[:n]`` where ``n`` is
+larger than the size of the first axis) is unspecified behaviour, because
+that kind of check can be expensive on accelerators.
+
+The lack of advanced indexing, and boolean indexing being limited to a single
+n-D boolean array, is due to those indexing modes not being suitable for all
+types of arrays or JIT compilation. Their absence does not seem to be
+problematic; if a user or library author wants to use them, they can do so
+through zero-copy conversion to ``numpy.ndarray``. This will signal correctly
+to whoever reads the code that it is then NumPy-specific rather than portable
+to all conforming array types.
+
+
+
+The array object
+~~~~~~~~~~~~~~~~
+
+The array object in the standard does not have methods other than dunder
+methods. The rationale for that is that not all array libraries have methods
+on their array object (e.g., TensorFlow does not). It also provides only a
+single way of doing something, rather than having functions and methods that
+are effectively duplicates.
+
+Mixing operations that may produce views (e.g., indexing, ``nonzero``)
+in combination with mutation (e.g., item or slice assignment) is
+`explicitly documented in the standard to not be supported `__.
+This cannot easily be prohibited in the array object itself; instead this will
+be guidance to the user via documentation.
+
+The standard currently does not prescribe a name for the array object itself.
+We propose to simply name it ``ndarray``. This is the most obvious name, and
+because of the separate namespace it should not clash with ``numpy.ndarray``.
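+
+As a small, illustrative sketch of the indexing and scalar-conversion
+behaviour described above (this assumes the tentative ``numpy.array_api``
+name and an ``asarray`` creation function in the new namespace, so the exact
+spelling may still change)::
+
+    import numpy.array_api as xp
+
+    x = xp.asarray([[1.0, 2.0], [3.0, 4.0]])
+    elem = x[0, 0]        # a 0-D array, not an array scalar
+    value = float(elem)   # explicit conversion to a Python scalar
+    col = x[:, 0]         # integer-and-slice indexing works as for numpy.ndarray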
+
+
+Implementation
+--------------
+
+.. note::
+
+    This section needs a lot more detail, which will gradually be added when
+    the implementation progresses.
+
+A prototype of the ``array_api`` namespace can be found in
+https://github.com/data-apis/numpy/tree/array-api/numpy/_array_api.
+The docstring in its ``__init__.py`` has notes on completeness of the
+implementation. The code for the wrapper functions also contains ``# Note:``
+comments everywhere there is a difference with the NumPy API.
+Two important parts that are not implemented yet are the new array object and
+DLPack support. Functions may need changes to ensure the changed casting rules
+are respected.
+
+The array object
+~~~~~~~~~~~~~~~~
+
+Regarding the array object implementation, we plan to start with a regular
+Python class that wraps a ``numpy.ndarray`` instance. Attributes and methods
+can forward to that wrapped instance, applying input validation and
+implementing changed behaviour as needed.
+
+The casting rules are probably the most challenging part. The in-progress
+dtype system refactor (NEPs 40-43) should make implementing the correct casting
+behaviour easier - it is already moving away from value-based casting for
+example.
+
+
+The dtype objects
+~~~~~~~~~~~~~~~~~
+
+We must be able to compare dtypes for equality, and expressions like these must
+be possible::
+
+    np.array_api.some_func(..., dtype=x.dtype)
+
+The above implies it would be nice to have ``np.array_api.float32 ==
+np.array_api.ndarray(...).dtype``.
+
+Dtypes should not be assumed to have a class hierarchy by users; however, we are
+free to implement it with a class hierarchy if that's convenient. We considered
+the following options to implement dtype objects:
+
+1. Alias dtypes to those in the main namespace. E.g., ``np.array_api.float32 =
+   np.float32``.
+2. Make the dtypes instances of ``np.dtype``. E.g., ``np.array_api.float32 =
+   np.dtype(np.float32)``.
+3. Create new singleton classes with only the required methods/attributes
+   (currently just ``__eq__``).
+
+It seems like (2) would be easiest from the perspective of interacting with
+functions outside the main namespace. And (3) would adhere best to the
+standard.
+
+TBD: the standard does not yet have a good way to inspect properties of a
+dtype, to ask questions like "is this an integer dtype?". Perhaps this is easy
+enough to do for users, like so::
+
+    def _get_dtype(dt_or_arr):
+        return dt_or_arr.dtype if hasattr(dt_or_arr, 'dtype') else dt_or_arr
+
+    def is_floating(dtype_or_array):
+        dtype = _get_dtype(dtype_or_array)
+        return dtype in (float32, float64)
+
+    def is_integer(dtype_or_array):
+        dtype = _get_dtype(dtype_or_array)
+        return dtype in (uint8, uint16, uint32, uint64, int8, int16, int32, int64)
+
+However, it could make sense to add this to the standard. Note that NumPy itself
+currently does not have a great way to ask such questions, see
+`gh-17325 `__.
+
+
+Feedback from downstream library authors
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+TODO - this can only be done after trying out some use cases
+
+Leo Fang (CuPy): *"My impression is for CuPy we could simply take this new array object and s/numpy/cupy"*
+
+
+Related Work
+------------
+
+:ref:`NEP37` contains a similar mechanism to retrieve a NumPy-like namespace.
+In fact, NEP 37 inspired the (slightly simpler) mechanism in the array API
+standard.
+
+Other libraries have adopted large parts of NumPy's API, made changes where
+necessary, and documented deviations.
See for example +`the jax.numpy documentation `__ +and `Difference between CuPy and NumPy `__. +The array API standard was constructed with the help of such comparisons, only +between many array libraries rather than only between NumPy and one other library. + + +Alternatives +------------ + + + + +Appendix - a possible ``get_namespace`` implementation +------------------------------------------------------ + +The ``get_namespace`` function mentioned in the +:ref:`adoption-application-code` section can be implemented like:: + + def get_namespace(*xs): + # `xs` contains one or more arrays, or possibly Python scalars (accepting + # those is a matter of taste, but doesn't seem unreasonable). + namespaces = { + x.__array_namespace__() if hasattr(x, '__array_namespace__') else None for x in xs if not isinstance(x, (bool, int, float, complex)) + } + + if not namespaces: + # one could special-case np.ndarray above or use np.asarray here if + # older numpy versions need to be supported. + raise ValueError("Unrecognized array input") + + if len(namespaces) != 1: + raise ValueError(f"Multiple namespaces for array inputs: {namespaces}") + + xp, = namespaces + if xp is None: + raise ValueError("The input is not a supported array type") + + return xp + + +Discussion +---------- + +- `First discussion on the mailing list about the array API standard `__ + + +References and Footnotes +------------------------ + +.. _Python array API standard: https://data-apis.github.io/array-api/latest + +.. _Consortium for Python Data API Standards: https://data-apis.org/ + +.. _DLPack: https://github.com/dmlc/dlpack + +.. [1] https://data-apis.org/blog/announcing_the_consortium/ + +.. [2] https://data-apis.org/blog/array_api_standard_release/ + + +Copyright +--------- + +This document has been placed in the public domain. [1]_ From cfeda59146ad81c29d9f23d1e8dc572cd18236d4 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Sun, 21 Feb 2021 10:46:29 -0800 Subject: [PATCH 0610/1270] Remove LinAlg heading, mv link to further reading. * Remove reference to algebra in the intro text. * Link to linalg tutorial in Further Reading * Remove linalg heading + WIP from quickstart. --- doc/source/user/quickstart.rst | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 77569f6bb02a..b50d8a5ba888 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -20,8 +20,7 @@ in addition to NumPy. **Learner profile** -This is a quick overview of -algebra and arrays in NumPy. It demonstrates how n-dimensional +This is a quick overview of arrays in NumPy. It demonstrates how n-dimensional (:math:`n>=2`) arrays are represented and can be manipulated. In particular, if you don't know how to apply common functions to n-dimensional arrays (without using for-loops), or if you want to understand axis and shape properties for @@ -1391,11 +1390,6 @@ Indexing with strings See :ref:`structured_arrays`. 
-Linear Algebra -============== - -See :doc:`tutorial-svd` - Tricks and Tips =============== @@ -1484,3 +1478,4 @@ Further reading - `SciPy Tutorial `__ - `SciPy Lecture Notes `__ - A `matlab, R, IDL, NumPy/SciPy dictionary `__ +- :doc:`tutorial-svd` From 5d864e872060c37308c2fd3ca75508838161061e Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Sat, 20 Feb 2021 12:53:47 -0500 Subject: [PATCH 0611/1270] BUG: Segfault in nditer buffer dealloc for Object arrays --- numpy/core/src/multiarray/einsum.c.src | 4 +++- numpy/core/src/multiarray/nditer_api.c | 4 +++- numpy/core/src/umath/ufunc_object.c | 22 ++++++++++++++++------ numpy/core/tests/test_nditer.py | 13 +++++++++++++ 4 files changed, 35 insertions(+), 8 deletions(-) diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src index 6ad375f670a5..85806fab3612 100644 --- a/numpy/core/src/multiarray/einsum.c.src +++ b/numpy/core/src/multiarray/einsum.c.src @@ -1100,6 +1100,7 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, char **dataptr; npy_intp *stride; npy_intp *countptr; + int needs_api; NPY_BEGIN_THREADS_DEF; iternext = NpyIter_GetIterNext(iter, NULL); @@ -1110,12 +1111,13 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, dataptr = NpyIter_GetDataPtrArray(iter); stride = NpyIter_GetInnerStrideArray(iter); countptr = NpyIter_GetInnerLoopSizePtr(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); NPY_BEGIN_THREADS_NDITER(iter); NPY_EINSUM_DBG_PRINT("Einsum loop\n"); do { sop(nop, dataptr, stride, *countptr); - } while(iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; /* If the API was needed, it may have thrown an error */ diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index fceb58f33218..3403ce98b7d4 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -2632,6 +2632,7 @@ npyiter_clear_buffers(NpyIter *iter) /* Cleanup any buffers with references */ char **buffers = NBF_BUFFERS(bufferdata); PyArray_Descr **dtypes = NIT_DTYPES(iter); + npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter); for (int iop = 0; iop < nop; ++iop, ++buffers) { /* * We may want to find a better way to do this, on the other hand, @@ -2640,7 +2641,8 @@ npyiter_clear_buffers(NpyIter *iter) * a well defined state (either NULL or owning the reference). 
* Only we implement cleanup */ - if (!PyDataType_REFCHK(dtypes[iop])) { + if (!PyDataType_REFCHK(dtypes[iop]) || + !(op_itflags[iop]&NPY_OP_ITFLAG_USINGBUFFER)) { continue; } if (*buffers == 0) { diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 1a035eb619e2..cd6e27a35c6b 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -1431,6 +1431,7 @@ iterator_loop(PyUFuncObject *ufunc, char **dataptr; npy_intp *stride; npy_intp *count_ptr; + int needs_api; PyArrayObject **op_it; npy_uint32 iter_flags; @@ -1525,6 +1526,7 @@ iterator_loop(PyUFuncObject *ufunc, dataptr = NpyIter_GetDataPtrArray(iter); stride = NpyIter_GetInnerStrideArray(iter); count_ptr = NpyIter_GetInnerLoopSizePtr(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); NPY_BEGIN_THREADS_NDITER(iter); @@ -1532,7 +1534,7 @@ iterator_loop(PyUFuncObject *ufunc, do { NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)*count_ptr); innerloop(dataptr, count_ptr, stride, innerloopdata); - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; } @@ -1859,6 +1861,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc, dataptr = NpyIter_GetDataPtrArray(iter); strides = NpyIter_GetInnerStrideArray(iter); countptr = NpyIter_GetInnerLoopSizePtr(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); NPY_BEGIN_THREADS_NDITER(iter); @@ -1869,7 +1872,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc, innerloop(dataptr, strides, dataptr[nop], strides[nop], *countptr, innerloopdata); - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; @@ -2973,6 +2976,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, } dataptr = NpyIter_GetDataPtrArray(iter); count_ptr = NpyIter_GetInnerLoopSizePtr(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); if (!needs_api && !NpyIter_IterationNeedsAPI(iter)) { NPY_BEGIN_THREADS_THRESHOLDED(total_problem_size); @@ -2980,7 +2984,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, do { inner_dimensions[0] = *count_ptr; innerloop(dataptr, inner_dimensions, inner_strides, innerloopdata); - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); if (!needs_api && !NpyIter_IterationNeedsAPI(iter)) { NPY_END_THREADS; @@ -3520,6 +3524,10 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, innerloop(dataptrs_copy, &count, strides_copy, innerloopdata); + if (needs_api && PyErr_Occurred()) { + break; + } + /* Jump to the faster loop when skipping is done */ if (skip_first_count == 0) { if (iternext(iter)) { @@ -3569,7 +3577,7 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, n = 1; } } - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); finish_loop: NPY_END_THREADS; @@ -3882,6 +3890,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, goto fail; } dataptr = NpyIter_GetDataPtrArray(iter); + needs_api = NpyIter_IterationNeedsAPI(iter); /* Execute the loop with just the outer iterator */ @@ -3932,7 +3941,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, innerloop(dataptr_copy, &count_m1, stride_copy, innerloopdata); } - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; } @@ -4263,6 +4272,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, npy_intp stride0_ind = 
PyArray_STRIDE(op[0], axis); int itemsize = op_dtypes[0]->elsize; + int needs_api = NpyIter_IterationNeedsAPI(iter); /* Get the variables needed for the loop */ iternext = NpyIter_GetIterNext(iter, NULL); @@ -4327,7 +4337,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, stride_copy, innerloopdata); } } - } while (iternext(iter)); + } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); NPY_END_THREADS; } diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 411095199c3c..f34547f9c35c 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2869,6 +2869,19 @@ def test_0d_iter(): assert_equal(vals['c'], [[(0.5)]*3]*2) assert_equal(vals['d'], 0.5) +def test_object_iter_cleanup(): + # see gh-18450 + # object arrays can raise a python exception in ufunc inner loops using + # nditer, which should cause iteration to stop & cleanup. There were bugs + # in the nditer cleanup when decref'ing object arrays. + # This test would trigger valgrind "uninitialized read" before the bugfix. + assert_raises(TypeError, lambda: np.zeros((17000, 2), dtype='f4') * None) + + # this more explicit code also triggers the invalid access + arr = np.arange(np.BUFSIZE * 10).reshape(10, -1).astype(str) + oarr = arr.astype(object) + oarr[:, -1] = None + assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1])) def test_iter_too_large(): # The total size of the iterator must not exceed the maximum intp due From cdd9177bd6916b69e9e2ad48a1015ee0fa6a29fd Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 22 Feb 2021 07:14:06 +0000 Subject: [PATCH 0612/1270] MAINT: Bump mypy from 0.800 to 0.812 Bumps [mypy](https://github.com/python/mypy) from 0.800 to 0.812. - [Release notes](https://github.com/python/mypy/releases) - [Commits](https://github.com/python/mypy/compare/v0.800...v0.812) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 5066b37dda61..5a03bd2f6063 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -11,5 +11,5 @@ cffi # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # - There is no point in installing typing_extensions without mypy -mypy==0.800; platform_python_implementation != "PyPy" +mypy==0.812; platform_python_implementation != "PyPy" typing_extensions==3.7.4.3; platform_python_implementation != "PyPy" From f5439e7766be7e6145a0c34ad6ed212a20ff4981 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 22 Feb 2021 07:14:11 +0000 Subject: [PATCH 0613/1270] MAINT: Bump sphinx from 3.5.0 to 3.5.1 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 3.5.0 to 3.5.1. 
- [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/3.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v3.5.0...v3.5.1) Signed-off-by: dependabot-preview[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 3d8db1734445..a34a894180a0 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx==3.5.0 +sphinx==3.5.1 numpydoc==1.1.0 ipython scipy From f85814f806effd39a7b3d74d68ed988371c2000e Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 22 Feb 2021 07:14:50 +0000 Subject: [PATCH 0614/1270] MAINT: Bump cython from 0.29.21 to 0.29.22 Bumps [cython](https://github.com/cython/cython) from 0.29.21 to 0.29.22. - [Release notes](https://github.com/cython/cython/releases) - [Changelog](https://github.com/cython/cython/blob/master/CHANGES.rst) - [Commits](https://github.com/cython/cython/compare/0.29.21...0.29.22) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 5066b37dda61..19e9e7e62fdd 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,4 +1,4 @@ -cython==0.29.21 +cython==0.29.22 wheel<0.36.3 setuptools<49.2.0 hypothesis==6.2.0 From 86039e1dc03e893e48802b54e35a0f45dc326b47 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 22 Feb 2021 07:15:39 +0000 Subject: [PATCH 0615/1270] MAINT: Bump hypothesis from 6.2.0 to 6.3.0 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.2.0 to 6.3.0. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.2.0...hypothesis-python-6.3.0) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 5066b37dda61..a4fe00dd0d7b 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.2.0 +hypothesis==6.3.0 pytest==6.2.2 pytz==2021.1 pytest-cov==2.11.1 From aeae93b6c0042f6ed8f45205545985cc194f84f3 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Sun, 3 Jan 2021 11:11:15 -0500 Subject: [PATCH 0616/1270] API: make piecewise subclass safe using use zeros_like. Subclass input of piecewise was already respected, so it seems more logical to ensure the output is the same subclass (possibly just an oversight that it was not done before). 
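
For illustration, a minimal usage sketch of the new behaviour (a hypothetical
snippet that mirrors the test added below; it is not part of the patch itself):

    import numpy as np

    class MyArray(np.ndarray):
        pass

    x = np.arange(5.).view(MyArray)
    y = np.piecewise(x, [x < 2., x >= 4.], [-1., 1., 0.])
    # y is now an instance of MyArray; before this change it was a plain ndarray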
--- doc/release/upcoming_changes/18110.change.rst | 5 +++++ numpy/lib/function_base.py | 4 ++-- numpy/lib/tests/test_function_base.py | 8 ++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/18110.change.rst diff --git a/doc/release/upcoming_changes/18110.change.rst b/doc/release/upcoming_changes/18110.change.rst new file mode 100644 index 000000000000..7dbf8e5b79be --- /dev/null +++ b/doc/release/upcoming_changes/18110.change.rst @@ -0,0 +1,5 @@ +`numpy.piecewise` output class now matches the input class +---------------------------------------------------------- +When `numpy.ndarray` subclasses are used on input to `numpy.piecewise`, +they are passed on to the functions. The output will now be of the +same subclass as well. diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index d33a0fa7de05..c6db42ce4905 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -8,7 +8,7 @@ import numpy.core.numeric as _nx from numpy.core import transpose from numpy.core.numeric import ( - ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, + ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty, ndarray, around, floor, ceil, take, dot, where, intp, integer, isscalar, absolute ) @@ -606,7 +606,7 @@ def piecewise(x, condlist, funclist, *args, **kw): .format(n, n, n+1) ) - y = zeros(x.shape, x.dtype) + y = zeros_like(x) for cond, func in zip(condlist, funclist): if not isinstance(func, collections.abc.Callable): y[cond] = func diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4c7c0480c106..afcb81effd95 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2399,6 +2399,14 @@ def test_multidimensional_extrafunc(self): assert_array_equal(y, np.array([[-1., -1., -1.], [3., 3., 1.]])) + def test_subclasses(self): + class subclass(np.ndarray): + pass + x = np.arange(5.).view(subclass) + r = piecewise(x, [x<2., x>=4], [-1., 1., 0.]) + assert_equal(type(r), subclass) + assert_equal(r, [-1., -1., 0., 0., 1.]) + class TestBincount: From 2d975a771b88bcb4d049bbb3ea5f917694ec3524 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 17 Feb 2021 16:56:32 -0600 Subject: [PATCH 0617/1270] BUG: Correct shuffling of objects in 1-d array likes While introducing the buffer fixed the in-place problem years ago, running valgrind (and masked arrays) pointed out to me that without the additional `...` NumPy will unpack and repack objects leading to slightly incorrect results. MAINT: Warn about shuffle bug instead of fixing it in old random API --- numpy/random/_generator.pyx | 6 +++--- numpy/random/mtrand.pyx | 11 +++++++++++ numpy/random/tests/test_random.py | 25 +++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 3 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 8d440d4f7abc..9b55f4facd5f 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -4437,9 +4437,9 @@ cdef class Generator: if i == j: # i == j is not needed and memcpy is undefined. continue - buf[...] = x[j] - x[j] = x[i] - x[i] = buf + buf[...] = x[j, ...] + x[j, ...] = x[i, ...] + x[i, ...] = buf else: # Untyped path. 
if not isinstance(x, Sequence): diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index a7436aa39748..1195a0abbc5b 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4462,6 +4462,17 @@ cdef class RandomState: # shuffling is a no-op return + if x.ndim == 1 and x.dtype.type is np.object_: + warnings.warn( + "Shuffling a one dimensional array subclass containing " + "objects gives incorrect results for most array " + "subclasses. " + "Please us the new random number API instead: " + "https://numpy.org/doc/stable/reference/random/index.html\n" + "The new API fixes this issue. This version will not " + "be fixed due to stability guarantees of the API.", + UserWarning, stacklevel=1) # Cython adds no stacklevel + buf = np.empty_like(x[0, ...]) with self.lock: for i in reversed(range(1, n)): diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index a0c72b419e52..2b3b65c19d3c 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -522,6 +522,31 @@ def test_shuffle_untyped_warning(self, random): random.shuffle(values) assert "test_random" in rec[0].filename + @pytest.mark.parametrize("random", + [np.random, np.random.RandomState(), np.random.default_rng()]) + @pytest.mark.parametrize("use_array_like", [True, False]) + def test_shuffle_no_object_unpacking(self, random, use_array_like): + class MyArr(np.ndarray): + pass + + items = [None, np.array([3]), np.float64(3), np.array(10), np.float64(7)] + arr = np.array(items, dtype=object) + item_ids = {id(i) for i in items} + if use_array_like: + arr = arr.view(MyArr) + + # The array was created fine, and did not modify any objects: + assert all(id(i) in item_ids for i in arr) + + if use_array_like and not isinstance(random, np.random.Generator): + # The old API gives incorrect results, but warns about it. + with pytest.warns(UserWarning, + match="Shuffling a one dimensional array.*"): + random.shuffle(arr) + else: + random.shuffle(arr) + assert all(id(i) in item_ids for i in arr) + def test_shuffle_memoryview(self): # gh-18273 # allow graceful handling of memoryviews From 580891011523124356fae536ceedd7f8ceda8a26 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 21 Feb 2021 20:59:21 +0100 Subject: [PATCH 0618/1270] DOC: update NEP status for accepted/finished NEPs [ci skip] --- doc/neps/nep-0014-dropping-python2.7-proposal.rst | 2 +- doc/neps/nep-0023-backwards-compatibility.rst | 2 +- doc/neps/nep-0032-remove-financial-functions.rst | 2 +- .../nep-0035-array-creation-dispatch-with-array-function.rst | 2 ++ doc/neps/nep-0040-legacy-datatype-impl.rst | 2 +- 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/neps/nep-0014-dropping-python2.7-proposal.rst b/doc/neps/nep-0014-dropping-python2.7-proposal.rst index dfc09d2543c2..e14a173e2032 100644 --- a/doc/neps/nep-0014-dropping-python2.7-proposal.rst +++ b/doc/neps/nep-0014-dropping-python2.7-proposal.rst @@ -4,7 +4,7 @@ NEP 14 — Plan for dropping Python 2.7 support ============================================= -:Status: Accepted +:Status: Final :Resolution: https://mail.python.org/pipermail/numpy-discussion/2017-November/077419.html The Python core team plans to stop supporting Python 2 in 2020. 
The NumPy diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index 015df8964ea5..bdf78b05ae62 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -5,7 +5,7 @@ NEP 23 — Backwards compatibility and deprecation policy ======================================================= :Author: Ralf Gommers -:Status: Accepted +:Status: Final :Type: Process :Created: 2018-07-14 :Resolution: https://mail.python.org/pipermail/numpy-discussion/2021-January/081423.html diff --git a/doc/neps/nep-0032-remove-financial-functions.rst b/doc/neps/nep-0032-remove-financial-functions.rst index bf98a7467d2b..b57ae943fa96 100644 --- a/doc/neps/nep-0032-remove-financial-functions.rst +++ b/doc/neps/nep-0032-remove-financial-functions.rst @@ -5,7 +5,7 @@ NEP 32 — Remove the financial functions from NumPy ================================================== :Author: Warren Weckesser -:Status: Accepted +:Status: Final :Type: Standards Track :Created: 2019-08-30 :Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html diff --git a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst index e0ca59316547..427d91b7d4d6 100644 --- a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst +++ b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst @@ -424,6 +424,8 @@ Discussion ---------- - `Further discussion on implementation and the NEP's content `_ +- `Decision to release an experimental implementation in NumPy 1.20.0 `__ + References ---------- diff --git a/doc/neps/nep-0040-legacy-datatype-impl.rst b/doc/neps/nep-0040-legacy-datatype-impl.rst index 39889109de40..7ea7f6df35e9 100644 --- a/doc/neps/nep-0040-legacy-datatype-impl.rst +++ b/doc/neps/nep-0040-legacy-datatype-impl.rst @@ -6,7 +6,7 @@ NEP 40 — Legacy Datatype Implementation in NumPy :title: Legacy Datatype Implementation in NumPy :Author: Sebastian Berg -:Status: Draft +:Status: Final :Type: Informational :Created: 2019-07-17 From 691726475716c573863de051f17d3ed47500ad24 Mon Sep 17 00:00:00 2001 From: Matt Date: Tue, 23 Feb 2021 16:17:22 +0000 Subject: [PATCH 0619/1270] added type annotations to eye --- numpy/__init__.pyi | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 1c52c7285cd1..7eebe3a8766c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -408,7 +408,15 @@ dstack: Any ediff1d: Any expand_dims: Any extract: Any -eye: Any +def eye( + N: int, + M: int = ..., + k: int = ..., + dtype: DTypeLike = ..., + order: str = ..., + *, + like: ArrayLike = ... +) -> ndarray: ... 
fill_diagonal: Any finfo: Any flip: Any From 1f972db7df7443c2dcff50eb6a110f297f215fb4 Mon Sep 17 00:00:00 2001 From: Matt-Ord <55235095+Matt-Ord@users.noreply.github.com> Date: Tue, 23 Feb 2021 16:54:28 +0000 Subject: [PATCH 0620/1270] Apply suggestions from code review Fixed annotations of eye() Co-authored-by: Bas van Beek <43369155+BvB93@users.noreply.github.com> --- numpy/__init__.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7eebe3a8766c..148a63583211 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -410,13 +410,13 @@ expand_dims: Any extract: Any def eye( N: int, - M: int = ..., + M: Optional[int] = ..., k: int = ..., dtype: DTypeLike = ..., - order: str = ..., + order: _OrderCF = ..., *, - like: ArrayLike = ... -) -> ndarray: ... + like: Optional[ArrayLike] = ... +) -> ndarray[Any, Any]: ... fill_diagonal: Any finfo: Any flip: Any From 9af015e0de86e374ededb3917fc2dd24bfe57a2c Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 19:20:26 -0800 Subject: [PATCH 0621/1270] MAINT: Remove suspicious type casting --- numpy/core/src/multiarray/dtypemeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index b2f36d794830..6aab94c3b25c 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -162,7 +162,7 @@ void_discover_descr_from_pyobject( } if (PyBytes_Check(obj)) { PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_VOID); - Py_ssize_t itemsize = (int)PyBytes_Size(obj); + Py_ssize_t itemsize = PyBytes_Size(obj); if (itemsize > NPY_MAX_INT) { PyErr_SetString(PyExc_TypeError, "byte-like to large to store inside array."); From a343d3beaf6c423904a7af917a9530f1c830558c Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 19:37:14 -0800 Subject: [PATCH 0622/1270] MAINT: remove nonsensical comparison of pointer < 0 --- numpy/core/src/multiarray/ctors.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index c98d2751256c..52f21b61f8b7 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -552,7 +552,7 @@ PyArray_AssignFromCache_Recursive( else { PyArrayObject *view; view = (PyArrayObject *)array_item_asarray(self, i); - if (view < 0) { + if (view == NULL) { goto fail; } if (PyArray_AssignFromCache_Recursive(view, ndim, cache) < 0) { From 595824252f3d87bc38b4d9b5839eb8fdbf82eaa5 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 19:44:59 -0800 Subject: [PATCH 0623/1270] MAINT: verify pointer against NULL before using it --- numpy/core/src/multiarray/dtype_transfer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index ae3834e156dc..afa9c12a2f0c 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -1331,10 +1331,10 @@ get_datetime_to_unicode_transfer_function(int aligned, /* Get an ASCII string data type, adapted to match the UNICODE one */ str_dtype = PyArray_DescrNewFromType(NPY_STRING); - str_dtype->elsize = dst_dtype->elsize / 4; if (str_dtype == NULL) { return NPY_FAIL; } + str_dtype->elsize = dst_dtype->elsize / 4; /* Get the copy/swap operation to dst */ if (PyArray_GetDTypeCopySwapFn(aligned, From 
e97fba2e84ff2f2070c7abecbb8070a95df9f69b Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 20:08:28 -0800 Subject: [PATCH 0624/1270] BUG: check if PyArray_malloc succeeded --- numpy/core/src/umath/ufunc_object.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index cd6e27a35c6b..269b2e81ade5 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5219,7 +5219,11 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, if (cmp == 0 && current != NULL && current->arg_dtypes == NULL) { current->arg_dtypes = PyArray_malloc(ufunc->nargs * sizeof(PyArray_Descr*)); - if (arg_dtypes != NULL) { + if (current->arg_dtypes == NULL) { + PyErr_NoMemory(); + result = -1; + } + else if (arg_dtypes != NULL) { for (i = 0; i < ufunc->nargs; i++) { current->arg_dtypes[i] = arg_dtypes[i]; Py_INCREF(current->arg_dtypes[i]); From 769529430a2928551175cf4c4de15be630444116 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Tue, 23 Feb 2021 20:45:16 -0800 Subject: [PATCH 0625/1270] MAINT: cast Py_ssize_t to int --- numpy/core/src/multiarray/dtypemeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index 6aab94c3b25c..ddaf11042c62 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -167,7 +167,7 @@ void_discover_descr_from_pyobject( PyErr_SetString(PyExc_TypeError, "byte-like to large to store inside array."); } - descr->elsize = itemsize; + descr->elsize = (int)itemsize; return descr; } PyErr_Format(PyExc_TypeError, From cbf5c130ca29e9605a594fe7fb8ac0f18fd7f575 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Wed, 24 Feb 2021 08:39:08 +0000 Subject: [PATCH 0626/1270] DOC: Correct poisson docstring closes #18314 --- numpy/random/_generator.pyx | 5 +++-- numpy/random/mtrand.pyx | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 1912f8bed5d6..a84dba4e4e67 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3025,8 +3025,9 @@ cdef class Generator: Parameters ---------- lam : float or array_like of floats - Expectation of interval, must be >= 0. A sequence of expectation - intervals must be broadcastable over the requested size. + Expected number of events occurring in a fixed-time interval, + must be >= 0. A sequence must be broadcastable over the requested + size. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 1fd68111ead5..f8f0ab86808d 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -3524,8 +3524,9 @@ cdef class RandomState: Parameters ---------- lam : float or array_like of floats - Expectation of interval, must be >= 0. A sequence of expectation - intervals must be broadcastable over the requested size. + Expected number of events occurring in a fixed-time interval, + must be >= 0. A sequence must be broadcastable over the requested + size. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. 
If size is ``None`` (default), From 789fee5d507c7f21fc143cc1c0599d71176e6537 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Wed, 24 Feb 2021 08:49:37 +0000 Subject: [PATCH 0627/1270] DOC: Explaing differences in code paths in choice Add explanation for different results when using user-specified probabilities even if uniform closes #18172 --- numpy/random/_generator.pyx | 6 ++++++ numpy/random/mtrand.pyx | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index a84dba4e4e67..f25f16a8a155 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -644,6 +644,12 @@ cdef class Generator: -------- integers, shuffle, permutation + Notes + ----- + Setting user-specified probabilities through ``p`` uses a more general but less + efficient sampler than the default. The general sampler produces a different sample + than the optimized sampler even if each element of ``p`` is 1 / len(a). + Examples -------- Generate a uniform random sample from np.arange(5) of size 3: diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index f8f0ab86808d..a60f078afa6d 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -852,6 +852,10 @@ cdef class RandomState: Notes ----- + Setting user-specified probabilities through ``p`` uses a more general but less + efficient sampler than the default. The general sampler produces a different sample + than the optimized sampler even if each element of ``p`` is 1 / len(a). + Sampling random rows from a 2-D array is not possible with this function, but is possible with `Generator.choice` through its ``axis`` keyword. From c2207b9f7f956d09327606dc40dcdb28bced4801 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Wed, 17 Feb 2021 13:34:02 +0000 Subject: [PATCH 0628/1270] ENH: Add typing for RandomState Add typing for RandomState Small fixes to Generator's typing --- numpy/random/__init__.pyi | 105 ++++--- numpy/random/_generator.pyi | 10 +- numpy/random/mtrand.pyi | 551 ++++++++++++++++++++++++++++++++++++ 3 files changed, 605 insertions(+), 61 deletions(-) create mode 100644 numpy/random/mtrand.pyi diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index b99c002aef52..2dd41933c0eb 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import List from numpy.random._generator import Generator as Generator from numpy.random._generator import default_rng as default_rng @@ -8,57 +8,56 @@ from numpy.random._philox import Philox as Philox from numpy.random._sfc64 import SFC64 as SFC64 from numpy.random.bit_generator import BitGenerator as BitGenerator from numpy.random.bit_generator import SeedSequence as SeedSequence +from numpy.random.mtrand import RandomState as RandomState +from numpy.random.mtrand import beta as beta +from numpy.random.mtrand import binomial as binomial +from numpy.random.mtrand import bytes as bytes +from numpy.random.mtrand import chisquare as chisquare +from numpy.random.mtrand import choice as choice +from numpy.random.mtrand import dirichlet as dirichlet +from numpy.random.mtrand import exponential as exponential +from numpy.random.mtrand import f as f +from numpy.random.mtrand import gamma as gamma +from numpy.random.mtrand import geometric as geometric +from numpy.random.mtrand import get_state as get_state +from numpy.random.mtrand import gumbel as gumbel +from numpy.random.mtrand import hypergeometric as hypergeometric +from numpy.random.mtrand import 
laplace as laplace +from numpy.random.mtrand import logistic as logistic +from numpy.random.mtrand import lognormal as lognormal +from numpy.random.mtrand import logseries as logseries +from numpy.random.mtrand import multinomial as multinomial +from numpy.random.mtrand import multivariate_normal as multivariate_normal +from numpy.random.mtrand import negative_binomial as negative_binomial +from numpy.random.mtrand import noncentral_chisquare as noncentral_chisquare +from numpy.random.mtrand import noncentral_f as noncentral_f +from numpy.random.mtrand import normal as normal +from numpy.random.mtrand import pareto as pareto +from numpy.random.mtrand import permutation as permutation +from numpy.random.mtrand import poisson as poisson +from numpy.random.mtrand import power as power +from numpy.random.mtrand import rand as rand +from numpy.random.mtrand import randint as randint +from numpy.random.mtrand import randn as randn +from numpy.random.mtrand import random as random +from numpy.random.mtrand import random_integers as random_integers +from numpy.random.mtrand import random_sample as random_sample +from numpy.random.mtrand import ranf as ranf +from numpy.random.mtrand import rayleigh as rayleigh +from numpy.random.mtrand import sample as sample +from numpy.random.mtrand import seed as seed +from numpy.random.mtrand import set_state as set_state +from numpy.random.mtrand import shuffle as shuffle +from numpy.random.mtrand import standard_cauchy as standard_cauchy +from numpy.random.mtrand import standard_exponential as standard_exponential +from numpy.random.mtrand import standard_gamma as standard_gamma +from numpy.random.mtrand import standard_normal as standard_normal +from numpy.random.mtrand import standard_t as standard_t +from numpy.random.mtrand import triangular as triangular +from numpy.random.mtrand import uniform as uniform +from numpy.random.mtrand import vonmises as vonmises +from numpy.random.mtrand import wald as wald +from numpy.random.mtrand import weibull as weibull +from numpy.random.mtrand import zipf as zipf __all__: List[str] - -beta: Any -binomial: Any -bytes: Any -chisquare: Any -choice: Any -dirichlet: Any -exponential: Any -f: Any -gamma: Any -geometric: Any -get_state: Any -gumbel: Any -hypergeometric: Any -laplace: Any -logistic: Any -lognormal: Any -logseries: Any -multinomial: Any -multivariate_normal: Any -negative_binomial: Any -noncentral_chisquare: Any -noncentral_f: Any -normal: Any -pareto: Any -permutation: Any -poisson: Any -power: Any -rand: Any -randint: Any -randn: Any -random: Any -random_integers: Any -random_sample: Any -ranf: Any -rayleigh: Any -sample: Any -seed: Any -set_state: Any -shuffle: Any -standard_cauchy: Any -standard_exponential: Any -standard_gamma: Any -standard_normal: Any -standard_t: Any -triangular: Any -uniform: Any -vonmises: Any -wald: Any -weibull: Any -zipf: Any -RandomState: Any diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index aadc4d0f8e68..1396c5a32201 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -3,7 +3,6 @@ from typing import Any, Callable, Dict, Literal, Optional, Sequence, Tuple, Type from numpy import ( bool_, - double, dtype, float32, float64, @@ -13,7 +12,6 @@ from numpy import ( int64, int_, ndarray, - single, uint, uint8, uint16, @@ -70,11 +68,9 @@ _DTypeLikeFloat64 = Union[ ] class Generator: - # COMPLETE def __init__(self, bit_generator: BitGenerator) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... 
- # Pickling support: def __getstate__(self) -> Dict[str, Any]: ... def __setstate__(self, state: Dict[str, Any]) -> None: ... def __reduce__(self) -> Tuple[Callable[[str], BitGenerator], Tuple[str], Dict[str, Any]]: ... @@ -102,7 +98,7 @@ class Generator: @overload def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] @overload - def standard_cauchy(self, size: Optional[_ShapeLike] = ...) -> ndarray[Any, dtype[float64]]: ... + def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... @overload def standard_exponential( # type: ignore[misc] self, @@ -325,7 +321,7 @@ class Generator: size: Optional[_ShapeLike] = ..., dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., out: Optional[ndarray[Any, dtype[Union[float32, float64]]]] = ..., - ) -> ndarray[Any, dtype[float64]]: ... + ) -> ndarray[Any, dtype[Union[float32, float64]]]: ... @overload def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload @@ -455,7 +451,6 @@ class Generator: right: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ..., ) -> ndarray[Any, dtype[float64]]: ... - # Complicated, discrete distributions: @overload def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc] @overload @@ -502,7 +497,6 @@ class Generator: def logseries( self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... ) -> ndarray[Any, dtype[int64]]: ... - # Multivariate distributions: def multivariate_normal( self, mean: _ArrayLikeFloat_co, diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi new file mode 100644 index 000000000000..c668e7edfa4d --- /dev/null +++ b/numpy/random/mtrand.pyi @@ -0,0 +1,551 @@ +import sys +from typing import Any, Callable, Dict, Literal, Optional, Sequence, Tuple, Type, Union, overload + +from numpy import ( + bool_, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + int_, + ndarray, + uint, + uint8, + uint16, + uint32, + uint64, +) +from numpy.random import BitGenerator, SeedSequence +from numpy.typing import ( + ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _BoolCodes, + _DoubleCodes, + _DTypeLikeBool, + _DTypeLikeInt, + _DTypeLikeUInt, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ShapeLike, + _SingleCodes, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, +) + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +_DTypeLikeFloat32 = Union[ + dtype[float32], + _SupportsDType[dtype[float32]], + Type[float32], + _Float32Codes, + _SingleCodes, +] + +_DTypeLikeFloat64 = Union[ + dtype[float64], + _SupportsDType[dtype[float64]], + Type[float], + Type[float64], + _Float64Codes, + _DoubleCodes, +] + +class RandomState: + _bit_generator: BitGenerator + def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> Dict[str, Any]: ... + def __setstate__(self, state: Dict[str, Any]) -> None: ... + def __reduce__(self) -> Tuple[Callable[[str], BitGenerator], Tuple[str], Dict[str, Any]]: ... + def seed(self, seed: Optional[_ArrayLikeFloat_co] = ...) -> None: ... + @overload + def get_state(self, legacy: Literal[False] = ...) -> Dict[str, Any]: ... + @overload + def get_state( + self, legacy: Literal[True] = ... 
+ ) -> Union[Dict[str, Any], Tuple[str, ndarray[Any, dtype[uint32]], int, int, float]]: ... + def set_state( + self, state: Union[Dict[str, Any], Tuple[str, ndarray[Any, dtype[uint32]], int, int, float]] + ) -> None: ... + @overload + def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def random(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def beta( + self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + size: None = ..., + dtype: _DTypeLikeBool = ..., + ) -> bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + size: None = ..., + dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[bool_], Type[bool], Type[bool_], _BoolCodes, _SupportsDType[dtype[bool_]] + ] = ..., + ) -> ndarray[Any, dtype[bool_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]] + ] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ..., + ) -> ndarray[Any, dtype[uint]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[int8], Type[int8], _Int8Codes, _SupportsDType[dtype[int8]]] = ..., + ) -> ndarray[Any, dtype[int8]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[int16], Type[int16], _Int16Codes, _SupportsDType[dtype[int16]]] = ..., + ) -> ndarray[Any, dtype[int16]]: ... 
+ @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[int32], Type[int32], _Int32Codes, _SupportsDType[dtype[int32]]] = ..., + ) -> ndarray[Any, dtype[Union[int32]]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Optional[ + Union[dtype[int64], Type[int64], _Int64Codes, _SupportsDType[dtype[int64]]] + ] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[uint8], Type[uint8], _UInt8Codes, _SupportsDType[dtype[uint8]]] = ..., + ) -> ndarray[Any, dtype[uint8]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[uint16], Type[uint16], _UInt16Codes, _SupportsDType[dtype[uint16]] + ] = ..., + ) -> ndarray[Any, dtype[Union[uint16]]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[uint32], Type[uint32], _UInt32Codes, _SupportsDType[dtype[uint32]] + ] = ..., + ) -> ndarray[Any, dtype[uint32]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[uint64], Type[uint64], _UInt64Codes, _SupportsDType[dtype[uint64]] + ] = ..., + ) -> ndarray[Any, dtype[uint64]]: ... + def bytes(self, length: int) -> str: ... + def choice( + self, + a: ArrayLike, + size: Optional[_ShapeLike] = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + ) -> Any: ... + @overload + def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rand(self, *args: None) -> float: ... + @overload + def rand(self, *args: Sequence[int]) -> ndarray[Any, dtype[float64]]: ... + @overload + def randn(self, *args: None) -> float: ... + @overload + def randn(self, *args: Sequence[int]) -> ndarray[Any, dtype[float64]]: ... + @overload + def random_integers(self, low: int, high: Optional[int] = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_normal( # type: ignore[misc] + self, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: float, + size: None = ..., + ) -> float: ... 
+ @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def f( + self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def wald( + self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... 
+ def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + ) -> ndarray[Any, dtype[float64]]: ... + def multinomial( + self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + def multivariate_hypergeometric( + self, + colors: _ArrayLikeInt_co, + nsample: int, + size: Optional[_ShapeLike] = ..., + method: Literal["marginals", "count"] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + def shuffle(self, x: ArrayLike) -> Sequence[Any]: ... + @overload + def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ... + @overload + def permutation(self, x: ArrayLike) -> ndarray[Any, Any]: ... + +_rand: RandomState + +beta = _rand.beta +binomial = _rand.binomial +bytes = _rand.bytes +chisquare = _rand.chisquare +choice = _rand.choice +dirichlet = _rand.dirichlet +exponential = _rand.exponential +f = _rand.f +gamma = _rand.gamma +get_state = _rand.get_state +geometric = _rand.geometric +gumbel = _rand.gumbel +hypergeometric = _rand.hypergeometric +laplace = _rand.laplace +logistic = _rand.logistic +lognormal = _rand.lognormal +logseries = _rand.logseries +multinomial = _rand.multinomial +multivariate_normal = _rand.multivariate_normal +negative_binomial = _rand.negative_binomial +noncentral_chisquare = _rand.noncentral_chisquare +noncentral_f = _rand.noncentral_f +normal = _rand.normal +pareto = _rand.pareto +permutation = _rand.permutation +poisson = _rand.poisson +power = _rand.power +rand = _rand.rand +randint = _rand.randint +randn = _rand.randn +random = _rand.random +random_integers = _rand.random_integers +random_sample = _rand.random_sample +rayleigh = _rand.rayleigh +seed = _rand.seed +set_state = _rand.set_state +shuffle = _rand.shuffle +standard_cauchy = _rand.standard_cauchy +standard_exponential = _rand.standard_exponential +standard_gamma = _rand.standard_gamma +standard_normal = _rand.standard_normal +standard_t = _rand.standard_t +triangular = _rand.triangular +uniform = _rand.uniform +vonmises = _rand.vonmises +wald = _rand.wald +weibull = _rand.weibull +zipf = _rand.zipf +# Two legacy that are trivial wrappers around random_sample +sample = _rand.random_sample +ranf = _rand.random_sample From c21889f3c57b508c91895fa883d1e4cdc1a0193d Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Thu, 18 Feb 2021 07:21:48 +0000 Subject: [PATCH 0629/1270] MAINT: Restructure imports in pyi file --- numpy/random/__init__.pyi | 104 +++++++++++++++++++------------------- 1 file changed, 53 insertions(+), 51 deletions(-) diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index 2dd41933c0eb..1187b7d8ecb0 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -8,56 +8,58 @@ from numpy.random._philox import Philox as Philox from numpy.random._sfc64 import SFC64 as SFC64 from numpy.random.bit_generator import BitGenerator as BitGenerator from numpy.random.bit_generator import SeedSequence as SeedSequence -from numpy.random.mtrand import RandomState as RandomState -from numpy.random.mtrand import beta as beta -from numpy.random.mtrand import binomial as binomial -from numpy.random.mtrand import bytes as bytes -from numpy.random.mtrand import chisquare as chisquare -from numpy.random.mtrand import choice as choice -from 
numpy.random.mtrand import dirichlet as dirichlet -from numpy.random.mtrand import exponential as exponential -from numpy.random.mtrand import f as f -from numpy.random.mtrand import gamma as gamma -from numpy.random.mtrand import geometric as geometric -from numpy.random.mtrand import get_state as get_state -from numpy.random.mtrand import gumbel as gumbel -from numpy.random.mtrand import hypergeometric as hypergeometric -from numpy.random.mtrand import laplace as laplace -from numpy.random.mtrand import logistic as logistic -from numpy.random.mtrand import lognormal as lognormal -from numpy.random.mtrand import logseries as logseries -from numpy.random.mtrand import multinomial as multinomial -from numpy.random.mtrand import multivariate_normal as multivariate_normal -from numpy.random.mtrand import negative_binomial as negative_binomial -from numpy.random.mtrand import noncentral_chisquare as noncentral_chisquare -from numpy.random.mtrand import noncentral_f as noncentral_f -from numpy.random.mtrand import normal as normal -from numpy.random.mtrand import pareto as pareto -from numpy.random.mtrand import permutation as permutation -from numpy.random.mtrand import poisson as poisson -from numpy.random.mtrand import power as power -from numpy.random.mtrand import rand as rand -from numpy.random.mtrand import randint as randint -from numpy.random.mtrand import randn as randn -from numpy.random.mtrand import random as random -from numpy.random.mtrand import random_integers as random_integers -from numpy.random.mtrand import random_sample as random_sample -from numpy.random.mtrand import ranf as ranf -from numpy.random.mtrand import rayleigh as rayleigh -from numpy.random.mtrand import sample as sample -from numpy.random.mtrand import seed as seed -from numpy.random.mtrand import set_state as set_state -from numpy.random.mtrand import shuffle as shuffle -from numpy.random.mtrand import standard_cauchy as standard_cauchy -from numpy.random.mtrand import standard_exponential as standard_exponential -from numpy.random.mtrand import standard_gamma as standard_gamma -from numpy.random.mtrand import standard_normal as standard_normal -from numpy.random.mtrand import standard_t as standard_t -from numpy.random.mtrand import triangular as triangular -from numpy.random.mtrand import uniform as uniform -from numpy.random.mtrand import vonmises as vonmises -from numpy.random.mtrand import wald as wald -from numpy.random.mtrand import weibull as weibull -from numpy.random.mtrand import zipf as zipf +from numpy.random.mtrand import ( + RandomState as RandomState, + beta as beta, + binomial as binomial, + bytes as bytes, + chisquare as chisquare, + choice as choice, + dirichlet as dirichlet, + exponential as exponential, + f as f, + gamma as gamma, + geometric as geometric, + get_state as get_state, + gumbel as gumbel, + hypergeometric as hypergeometric, + laplace as laplace, + logistic as logistic, + lognormal as lognormal, + logseries as logseries, + multinomial as multinomial, + multivariate_normal as multivariate_normal, + negative_binomial as negative_binomial, + noncentral_chisquare as noncentral_chisquare, + noncentral_f as noncentral_f, + normal as normal, + pareto as pareto, + permutation as permutation, + poisson as poisson, + power as power, + rand as rand, + randint as randint, + randn as randn, + random as random, + random_integers as random_integers, + random_sample as random_sample, + ranf as ranf, + rayleigh as rayleigh, + sample as sample, + seed as seed, + set_state as set_state, + 
shuffle as shuffle, + standard_cauchy as standard_cauchy, + standard_exponential as standard_exponential, + standard_gamma as standard_gamma, + standard_normal as standard_normal, + standard_t as standard_t, + triangular as triangular, + uniform as uniform, + vonmises as vonmises, + wald as wald, + weibull as weibull, + zipf as zipf, +) __all__: List[str] From 99c7c04eb7bebad0cd17b66b89f303e6ca9d8554 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Thu, 18 Feb 2021 07:32:00 +0000 Subject: [PATCH 0630/1270] TST: Add tests for random typing --- numpy/typing/tests/data/fail/random.py | 0 numpy/typing/tests/data/pass/random.py | 41 ++++++++++++++++++++++++ numpy/typing/tests/data/reveal/random.py | 15 +++++++++ 3 files changed, 56 insertions(+) create mode 100644 numpy/typing/tests/data/fail/random.py create mode 100644 numpy/typing/tests/data/pass/random.py create mode 100644 numpy/typing/tests/data/reveal/random.py diff --git a/numpy/typing/tests/data/fail/random.py b/numpy/typing/tests/data/fail/random.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py new file mode 100644 index 000000000000..f5233b0e1be3 --- /dev/null +++ b/numpy/typing/tests/data/pass/random.py @@ -0,0 +1,41 @@ +import numpy as np +from typing import Any, List + +SEED_NONE = None +SEED_INT = 4579435749574957634658964293569 +SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64) +SEED_ARRLIKE: List[int] = [1, 2, 3, 4] + +# default rng +np.random.default_rng() +np.random.default_rng(SEED_NONE) +np.random.default_rng(SEED_INT) +np.random.default_rng(SEED_ARR) +np.random.default_rng(SEED_ARR) + +# Seed Sequence +np.random.SeedSequence(SEED_NONE) +np.random.SeedSequence(SEED_INT) +np.random.SeedSequence(SEED_ARR) +np.random.SeedSequence(SEED_ARRLIKE) + +# Bit Generators +np.random.MT19937(SEED_NONE) +np.random.MT19937(SEED_INT) +np.random.MT19937(SEED_ARR) +np.random.MT19937(SEED_ARRLIKE) + +np.random.PCG64(SEED_NONE) +np.random.PCG64(SEED_INT) +np.random.PCG64(SEED_ARR) +np.random.PCG64(SEED_ARRLIKE) + +np.random.Philox(SEED_NONE) +np.random.Philox(SEED_INT) +np.random.Philox(SEED_ARR) +np.random.Philox(SEED_ARRLIKE) + +np.random.SFC64(SEED_NONE) +np.random.SFC64(SEED_INT) +np.random.SFC64(SEED_ARR) +np.random.SFC64(SEED_ARRLIKE) diff --git a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.py new file mode 100644 index 000000000000..9e26f495f8d7 --- /dev/null +++ b/numpy/typing/tests/data/reveal/random.py @@ -0,0 +1,15 @@ +import numpy as np + +def_rng = np.random.default_rng() +mt19937 = np.random.MT19937() +pcg64 = np.random.MT19937() +sfc64 = np.random.SFC64() +philox = np.random.Philox() + + +reveal_type(def_rng) # E: np.random.MT19937 +reveal_type(mt19937) # E: np.random.Generator +reveal_type(pcg64) # E: np.random.MT19937 +reveal_type(sfc64) # E: np.random.SFC64 +reveal_type(philox) # E: np.random.Philox + From 95d1f23d3bbbadfe218fabc427f74353ad8aaa7e Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Thu, 18 Feb 2021 11:04:17 +0000 Subject: [PATCH 0631/1270] ENH/BUG: Add tests and correct typing issues Add tests for lower level components Correct errors found in testing Improve specificity of tests Other small fixes to docs and typing --- numpy/random/_generator.pyi | 43 +- numpy/random/bit_generator.pyi | 45 +- numpy/random/mtrand.pyi | 2 +- numpy/random/mtrand.pyx | 2 +- numpy/typing/tests/data/fail/random.py | 61 +++ 
numpy/typing/tests/data/pass/random.py | 460 ++++++++++++++++++++- numpy/typing/tests/data/reveal/random.py | 499 ++++++++++++++++++++++- numpy/typing/tests/test_typing.py | 5 +- 8 files changed, 1077 insertions(+), 40 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 1396c5a32201..904cbda3dc2f 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -76,7 +76,7 @@ class Generator: def __reduce__(self) -> Tuple[Callable[[str], BitGenerator], Tuple[str], Dict[str, Any]]: ... @property def bit_generator(self) -> BitGenerator: ... - def bytes(self, length: int) -> str: ... + def bytes(self, length: int) -> bytes: ... @overload def standard_normal( # type: ignore[misc] self, @@ -279,16 +279,47 @@ class Generator: endpoint: bool = ..., ) -> ndarray[Any, dtype[uint64]]: ... # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> Union[_T, ndarray[Any,Any]] + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload def choice( self, a: ArrayLike, - size: Optional[_ShapeLike] = ..., + size: None = ..., replace: bool = ..., p: Optional[_ArrayLikeFloat_co] = ..., - axis: Optional[int] = ..., + axis: int = ..., shuffle: bool = ..., ) -> Any: ... @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> ndarray[Any, Any]: ... + @overload def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload def uniform( @@ -523,6 +554,8 @@ class Generator: def permuted( self, x: ArrayLike, *, axis: Optional[int] = ..., out: Optional[ndarray[Any, Any]] = ... ) -> ndarray[Any, Any]: ... - def shuffle(self, x: ArrayLike, axis: int = ...) -> Sequence[Any]: ... + def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... -def default_rng(seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> Generator: ... +def default_rng( + seed: Union[None, _ArrayLikeInt_co, SeedSequence, BitGenerator, Generator] = ... +) -> Generator: ... 
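
For reference, a minimal sketch of what the widened `default_rng` annotation above accepts (illustrative only, not part of the patch; the variable names are made up). Each seed form below mirrors a case exercised by the pass/random.py tests added in this same commit, and every call is annotated to return `np.random.Generator`:

import numpy as np

# illustrative usage only -- not part of the patch
rng_none = np.random.default_rng()                                  # None: fresh OS entropy
rng_int = np.random.default_rng(4579435749574957634658964293569)    # arbitrary-size Python int
rng_seq = np.random.default_rng(np.random.SeedSequence(0))          # SeedSequence
rng_bg = np.random.default_rng(np.random.PCG64(0))                  # a BitGenerator instance
rng_gen = np.random.default_rng(rng_bg)                             # an existing Generator is returned unaltered

assert isinstance(rng_gen, np.random.Generator)
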
diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 80a2e829b9a4..7f066dbfaec0 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -18,8 +18,8 @@ from typing import ( overload, ) -from numpy import dtype, ndarray, uint32, uint64, unsignedinteger -from numpy.typing import DTypeLike, _ArrayLikeInt_co, _DTypeLikeUInt, _ShapeLike, _SupportsDType +from numpy import dtype, ndarray, uint32, uint64 +from numpy.typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt64Codes, _UInt32Codes if sys.version_info >= (3, 8): from typing import Literal @@ -28,11 +28,17 @@ else: _T = TypeVar("_T") -_UIntType = TypeVar("_UIntType", uint64, uint32) -_DTypeLike = Union[ - Type[_UIntType], - dtype[_UIntType], - _SupportsDType[dtype[_UIntType]], +_DTypeLikeUint32 = Union[ + dtype[uint32], + _SupportsDType[dtype[uint32]], + Type[uint32], + _UInt32Codes, +] +_DTypeLikeUint64 = Union[ + dtype[uint64], + _SupportsDType[dtype[uint64]], + Type[uint64], + _UInt64Codes, ] class _SeedSeqState(TypedDict): @@ -50,30 +56,19 @@ class _Interface(NamedTuple): bit_generator: Any class ISeedSequence(abc.ABC): - @overload - @abc.abstractmethod - def generate_state( - self, n_words: int, dtype: _DTypeLike[_UIntType] = ... - ) -> ndarray[Any, dtype[_UIntType]]: ... - @overload @abc.abstractmethod def generate_state( - self, n_words: int, dtype: _DTypeLikeUInt = ... - ) -> ndarray[Any, dtype[unsignedinteger[Any]]]: ... + self, n_words: int, dtype: Union[_DTypeLikeUint32, _DTypeLikeUint64] = ... + ) -> ndarray[Any, dtype[Union[uint32, uint64]]]: ... class ISpawnableSeedSequence(ISeedSequence): @abc.abstractmethod def spawn(self: _T, n_children: int) -> List[_T]: ... class SeedlessSeedSequence(ISpawnableSeedSequence): - @overload - def generate_state( - self, n_words: int, dtype: _DTypeLike[_UIntType] = ... - ) -> ndarray[Any, dtype[_UIntType]]: ... - @overload def generate_state( - self, n_words: int, dtype: _DTypeLikeUInt = ... - ) -> ndarray[Any, dtype[unsignedinteger[Any]]]: ... + self, n_words: int, dtype: Union[_DTypeLikeUint32, _DTypeLikeUint64] = ... + ) -> ndarray[Any, dtype[Union[uint32, uint64]]]: ... def spawn(self: _T, n_children: int) -> List[_T]: ... class SeedSequence(ISpawnableSeedSequence): @@ -84,7 +79,7 @@ class SeedSequence(ISpawnableSeedSequence): pool: ndarray[Any, dtype[uint32]] def __init__( self, - entropy: Union[None, int, Sequence[int]] = ..., + entropy: Union[None, int, Sequence[int], _ArrayLikeInt_co] = ..., *, spawn_key: Sequence[int] = ..., pool_size: int = ..., @@ -95,7 +90,9 @@ class SeedSequence(ISpawnableSeedSequence): def state( self, ) -> _SeedSeqState: ... - def generate_state(self, n_words: int, dtype: DTypeLike = ...) -> ndarray[Any, Any]: ... + def generate_state( + self, n_words: int, dtype: Union[_DTypeLikeUint32, _DTypeLikeUint64] = ... + ) -> ndarray[Any, dtype[Union[uint32, uint64]]]: ... def spawn(self, n_children: int) -> List[SeedSequence]: ... class BitGenerator(abc.ABC): diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index c668e7edfa4d..f05635390429 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -229,7 +229,7 @@ class RandomState: dtype[uint64], Type[uint64], _UInt64Codes, _SupportsDType[dtype[uint64]] ] = ..., ) -> ndarray[Any, dtype[uint64]]: ... - def bytes(self, length: int) -> str: ... + def bytes(self, length: int) -> bytes: ... 
def choice( self, a: ArrayLike, diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 1fd68111ead5..40b3ea1002ac 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -784,7 +784,7 @@ cdef class RandomState: Returns ------- - out : str + out : bytes String of length `length`. See Also diff --git a/numpy/typing/tests/data/fail/random.py b/numpy/typing/tests/data/fail/random.py index e69de29bb2d1..c4d1e3e3e802 100644 --- a/numpy/typing/tests/data/fail/random.py +++ b/numpy/typing/tests/data/fail/random.py @@ -0,0 +1,61 @@ +import numpy as np +from typing import Any, List + +SEED_FLOAT: float = 457.3 +SEED_ARR_FLOAT: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0, 2, 3, 4]) +SEED_ARRLIKE_FLOAT: List[float] = [1.0, 2.0, 3.0, 4.0] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_STR: str = "String seeding not allowed" +# default rng +np.random.default_rng(SEED_FLOAT) # E: incompatible type +np.random.default_rng(SEED_ARR_FLOAT) # E: incompatible type +np.random.default_rng(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.default_rng(SEED_STR) # E: incompatible type + +# Seed Sequence +np.random.SeedSequence(SEED_FLOAT) # E: incompatible type +np.random.SeedSequence(SEED_ARR_FLOAT) # E: incompatible type +np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.SeedSequence(SEED_SEED_SEQ) # E: incompatible type +np.random.SeedSequence(SEED_STR) # E: incompatible type + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence() +seed_seq.spawn(11.5) # E: incompatible type +seed_seq.generate_state(3.14) # E: incompatible type +seed_seq.generate_state(3, np.uint8) # E: incompatible type +seed_seq.generate_state(3, "uint8") # E: incompatible type +seed_seq.generate_state(3, "u1") # E: incompatible type +seed_seq.generate_state(3, np.uint16) # E: incompatible type +seed_seq.generate_state(3, "uint16") # E: incompatible type +seed_seq.generate_state(3, "u2") # E: incompatible type +seed_seq.generate_state(3, np.int32) # E: incompatible type +seed_seq.generate_state(3, "int32") # E: incompatible type +seed_seq.generate_state(3, "i4") # E: incompatible type + +# Bit Generators +np.random.MT19937(SEED_FLOAT) # E: incompatible type +np.random.MT19937(SEED_ARR_FLOAT) # E: incompatible type +np.random.MT19937(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.MT19937(SEED_STR) # E: incompatible type + +np.random.PCG64(SEED_FLOAT) # E: incompatible type +np.random.PCG64(SEED_ARR_FLOAT) # E: incompatible type +np.random.PCG64(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.PCG64(SEED_STR) # E: incompatible type + +np.random.Philox(SEED_FLOAT) # E: incompatible type +np.random.Philox(SEED_ARR_FLOAT) # E: incompatible type +np.random.Philox(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.Philox(SEED_STR) # E: incompatible type + +np.random.SFC64(SEED_FLOAT) # E: incompatible type +np.random.SFC64(SEED_ARR_FLOAT) # E: incompatible type +np.random.SFC64(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.SFC64(SEED_STR) # E: incompatible type + +# Generator +np.random.Generator(None) # E: incompatible type +np.random.Generator(12333283902830213) # E: incompatible type +np.random.Generator("OxFEEDF00D") # E: incompatible type +np.random.Generator([123, 234]) # E: incompatible type +np.random.Generator(np.array([123, 234], dtype="u4")) # E: incompatible type diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index f5233b0e1be3..fa3585a9023a 100644 --- 
a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -1,17 +1,30 @@ +from __future__ import annotations + +from typing import Any, List, Dict + import numpy as np -from typing import Any, List SEED_NONE = None SEED_INT = 4579435749574957634658964293569 SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64) SEED_ARRLIKE: List[int] = [1, 2, 3, 4] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_MT19937: np.random.MT19937 = np.random.MT19937(0) +SEED_PCG64: np.random.PCG64 = np.random.PCG64(0) +SEED_PHILOX: np.random.Philox = np.random.Philox(0) +SEED_SFC64: np.random.SFC64 = np.random.SFC64(0) # default rng np.random.default_rng() np.random.default_rng(SEED_NONE) np.random.default_rng(SEED_INT) np.random.default_rng(SEED_ARR) -np.random.default_rng(SEED_ARR) +np.random.default_rng(SEED_ARRLIKE) +np.random.default_rng(SEED_SEED_SEQ) +np.random.default_rng(SEED_MT19937) +np.random.default_rng(SEED_PCG64) +np.random.default_rng(SEED_PHILOX) +np.random.default_rng(SEED_SFC64) # Seed Sequence np.random.SeedSequence(SEED_NONE) @@ -24,18 +37,461 @@ np.random.MT19937(SEED_INT) np.random.MT19937(SEED_ARR) np.random.MT19937(SEED_ARRLIKE) +np.random.MT19937(SEED_SEED_SEQ) np.random.PCG64(SEED_NONE) np.random.PCG64(SEED_INT) np.random.PCG64(SEED_ARR) np.random.PCG64(SEED_ARRLIKE) +np.random.PCG64(SEED_SEED_SEQ) np.random.Philox(SEED_NONE) np.random.Philox(SEED_INT) np.random.Philox(SEED_ARR) np.random.Philox(SEED_ARRLIKE) +np.random.Philox(SEED_SEED_SEQ) np.random.SFC64(SEED_NONE) np.random.SFC64(SEED_INT) np.random.SFC64(SEED_ARR) np.random.SFC64(SEED_ARRLIKE) +np.random.SFC64(SEED_SEED_SEQ) + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence(SEED_NONE) +seed_seq.spawn(10) +seed_seq.generate_state(3) +seed_seq.generate_state(3, "u4") +seed_seq.generate_state(3, "uint32") +seed_seq.generate_state(3, "u8") +seed_seq.generate_state(3, "uint64") +seed_seq.generate_state(3, np.uint32) +seed_seq.generate_state(3, np.uint64) + + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1]) +D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5]) +D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) +D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5]) +I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) +I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) +D_arr_like_0p1: List[float] = [0.1] +D_arr_like_0p5: List[float] = [0.5] +D_arr_like_0p9: List[float] = [0.9] +D_arr_like_1p5: List[float] = [1.5] +I_arr_like_10: List[int] = [10] +I_arr_like_20: List[int] = [20] +D_2D_like: List[List[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) + +def_gen.standard_normal() +def_gen.standard_normal(size=None) +def_gen.standard_normal(size=1) + +def_gen.random() +def_gen.random(size=None) +def_gen.random(size=1) + +def_gen.standard_cauchy() +def_gen.standard_cauchy(size=None) +def_gen.standard_cauchy(size=1) + +def_gen.standard_exponential() +def_gen.standard_exponential(size=None) +def_gen.standard_exponential(size=1) + +def_gen.zipf(1.5) +def_gen.zipf(1.5, size=None) +def_gen.zipf(1.5, size=1) +def_gen.zipf(D_arr_1p5) +def_gen.zipf(D_arr_1p5, size=1) +def_gen.zipf(D_arr_like_1p5) +def_gen.zipf(D_arr_like_1p5, size=1) + +def_gen.weibull(0.5) +def_gen.weibull(0.5, size=None) +def_gen.weibull(0.5, 
size=1) +def_gen.weibull(D_arr_0p5) +def_gen.weibull(D_arr_0p5, size=1) +def_gen.weibull(D_arr_like_0p5) +def_gen.weibull(D_arr_like_0p5, size=1) + +def_gen.standard_t(0.5) +def_gen.standard_t(0.5, size=None) +def_gen.standard_t(0.5, size=1) +def_gen.standard_t(D_arr_0p5) +def_gen.standard_t(D_arr_0p5, size=1) +def_gen.standard_t(D_arr_like_0p5) +def_gen.standard_t(D_arr_like_0p5, size=1) + +def_gen.poisson(0.5) +def_gen.poisson(0.5, size=None) +def_gen.poisson(0.5, size=1) +def_gen.poisson(D_arr_0p5) +def_gen.poisson(D_arr_0p5, size=1) +def_gen.poisson(D_arr_like_0p5) +def_gen.poisson(D_arr_like_0p5, size=1) + +def_gen.power(0.5) +def_gen.power(0.5, size=None) +def_gen.power(0.5, size=1) +def_gen.power(D_arr_0p5) +def_gen.power(D_arr_0p5, size=1) +def_gen.power(D_arr_like_0p5) +def_gen.power(D_arr_like_0p5, size=1) + +def_gen.pareto(0.5) +def_gen.pareto(0.5, size=None) +def_gen.pareto(0.5, size=1) +def_gen.pareto(D_arr_0p5) +def_gen.pareto(D_arr_0p5, size=1) +def_gen.pareto(D_arr_like_0p5) +def_gen.pareto(D_arr_like_0p5, size=1) + +def_gen.chisquare(0.5) +def_gen.chisquare(0.5, size=None) +def_gen.chisquare(0.5, size=1) +def_gen.chisquare(D_arr_0p5) +def_gen.chisquare(D_arr_0p5, size=1) +def_gen.chisquare(D_arr_like_0p5) +def_gen.chisquare(D_arr_like_0p5, size=1) + +def_gen.exponential(0.5) +def_gen.exponential(0.5, size=None) +def_gen.exponential(0.5, size=1) +def_gen.exponential(D_arr_0p5) +def_gen.exponential(D_arr_0p5, size=1) +def_gen.exponential(D_arr_like_0p5) +def_gen.exponential(D_arr_like_0p5, size=1) + +def_gen.geometric(0.5) +def_gen.geometric(0.5, size=None) +def_gen.geometric(0.5, size=1) +def_gen.geometric(D_arr_0p5) +def_gen.geometric(D_arr_0p5, size=1) +def_gen.geometric(D_arr_like_0p5) +def_gen.geometric(D_arr_like_0p5, size=1) + +def_gen.logseries(0.5) +def_gen.logseries(0.5, size=None) +def_gen.logseries(0.5, size=1) +def_gen.logseries(D_arr_0p5) +def_gen.logseries(D_arr_0p5, size=1) +def_gen.logseries(D_arr_like_0p5) +def_gen.logseries(D_arr_like_0p5, size=1) + +def_gen.rayleigh(0.5) +def_gen.rayleigh(0.5, size=None) +def_gen.rayleigh(0.5, size=1) +def_gen.rayleigh(D_arr_0p5) +def_gen.rayleigh(D_arr_0p5, size=1) +def_gen.rayleigh(D_arr_like_0p5) +def_gen.rayleigh(D_arr_like_0p5, size=1) + +def_gen.standard_gamma(0.5) +def_gen.standard_gamma(0.5, size=None) +def_gen.standard_gamma(0.5, size=1) +def_gen.standard_gamma(D_arr_0p5) +def_gen.standard_gamma(D_arr_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5) +def_gen.standard_gamma(D_arr_like_0p5, size=1) + +def_gen.vonmises(0.5, 0.5) +def_gen.vonmises(0.5, 0.5, size=None) +def_gen.vonmises(0.5, 0.5, size=1) +def_gen.vonmises(D_arr_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_0p5) +def_gen.vonmises(D_arr_0p5, 0.5, size=1) +def_gen.vonmises(0.5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.wald(0.5, 0.5) +def_gen.wald(0.5, 0.5, size=None) +def_gen.wald(0.5, 0.5, size=1) +def_gen.wald(D_arr_0p5, 0.5) +def_gen.wald(0.5, D_arr_0p5) +def_gen.wald(D_arr_0p5, 0.5, size=1) +def_gen.wald(0.5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, 0.5) +def_gen.wald(0.5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1) + 
+def_gen.uniform(0.5, 0.5) +def_gen.uniform(0.5, 0.5, size=None) +def_gen.uniform(0.5, 0.5, size=1) +def_gen.uniform(D_arr_0p5, 0.5) +def_gen.uniform(0.5, D_arr_0p5) +def_gen.uniform(D_arr_0p5, 0.5, size=1) +def_gen.uniform(0.5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, 0.5) +def_gen.uniform(0.5, D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.beta(0.5, 0.5) +def_gen.beta(0.5, 0.5, size=None) +def_gen.beta(0.5, 0.5, size=1) +def_gen.beta(D_arr_0p5, 0.5) +def_gen.beta(0.5, D_arr_0p5) +def_gen.beta(D_arr_0p5, 0.5, size=1) +def_gen.beta(0.5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, 0.5) +def_gen.beta(0.5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.f(0.5, 0.5) +def_gen.f(0.5, 0.5, size=None) +def_gen.f(0.5, 0.5, size=1) +def_gen.f(D_arr_0p5, 0.5) +def_gen.f(0.5, D_arr_0p5) +def_gen.f(D_arr_0p5, 0.5, size=1) +def_gen.f(0.5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, 0.5) +def_gen.f(0.5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.gamma(0.5, 0.5) +def_gen.gamma(0.5, 0.5, size=None) +def_gen.gamma(0.5, 0.5, size=1) +def_gen.gamma(D_arr_0p5, 0.5) +def_gen.gamma(0.5, D_arr_0p5) +def_gen.gamma(D_arr_0p5, 0.5, size=1) +def_gen.gamma(0.5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, 0.5) +def_gen.gamma(0.5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.gumbel(0.5, 0.5) +def_gen.gumbel(0.5, 0.5, size=None) +def_gen.gumbel(0.5, 0.5, size=1) +def_gen.gumbel(D_arr_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_0p5) +def_gen.gumbel(D_arr_0p5, 0.5, size=1) +def_gen.gumbel(0.5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.laplace(0.5, 0.5) +def_gen.laplace(0.5, 0.5, size=None) +def_gen.laplace(0.5, 0.5, size=1) +def_gen.laplace(D_arr_0p5, 0.5) +def_gen.laplace(0.5, D_arr_0p5) +def_gen.laplace(D_arr_0p5, 0.5, size=1) +def_gen.laplace(0.5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, 0.5) +def_gen.laplace(0.5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.logistic(0.5, 0.5) +def_gen.logistic(0.5, 0.5, size=None) +def_gen.logistic(0.5, 0.5, size=1) +def_gen.logistic(D_arr_0p5, 0.5) +def_gen.logistic(0.5, D_arr_0p5) +def_gen.logistic(D_arr_0p5, 0.5, size=1) +def_gen.logistic(0.5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, 0.5) +def_gen.logistic(0.5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.lognormal(0.5, 0.5) +def_gen.lognormal(0.5, 0.5, size=None) 
+def_gen.lognormal(0.5, 0.5, size=1) +def_gen.lognormal(D_arr_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_0p5) +def_gen.lognormal(D_arr_0p5, 0.5, size=1) +def_gen.lognormal(0.5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, D_arr_0p5) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.noncentral_chisquare(0.5, 0.5) +def_gen.noncentral_chisquare(0.5, 0.5, size=None) +def_gen.noncentral_chisquare(0.5, 0.5, size=1) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1) +def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.normal(0.5, 0.5) +def_gen.normal(0.5, 0.5, size=None) +def_gen.normal(0.5, 0.5, size=1) +def_gen.normal(D_arr_0p5, 0.5) +def_gen.normal(0.5, D_arr_0p5) +def_gen.normal(D_arr_0p5, 0.5, size=1) +def_gen.normal(0.5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, 0.5) +def_gen.normal(0.5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.triangular(0.1, 0.5, 0.9) +def_gen.triangular(0.1, 0.5, 0.9, size=None) +def_gen.triangular(0.1, 0.5, 0.9, size=1) +def_gen.triangular(D_arr_0p1, 0.5, 0.9) +def_gen.triangular(0.1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1) +def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.triangular(0.5, D_arr_like_0p5, 0.9) +def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.noncentral_f(0.1, 0.5, 0.9) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=None) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=1) +def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.binomial(10, 0.5) +def_gen.binomial(10, 0.5, size=None) +def_gen.binomial(10, 0.5, size=1) +def_gen.binomial(I_arr_10, 0.5) +def_gen.binomial(10, D_arr_0p5) +def_gen.binomial(I_arr_10, 0.5, size=1) +def_gen.binomial(10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, 0.5) +def_gen.binomial(10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.negative_binomial(10, 
0.5) +def_gen.negative_binomial(10, 0.5, size=None) +def_gen.negative_binomial(10, 0.5, size=1) +def_gen.negative_binomial(I_arr_10, 0.5) +def_gen.negative_binomial(10, D_arr_0p5) +def_gen.negative_binomial(I_arr_10, 0.5, size=1) +def_gen.negative_binomial(10, D_arr_0p5, size=1) +def_gen.negative_binomial(I_arr_like_10, 0.5) +def_gen.negative_binomial(10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.hypergeometric(20, 20, 10) +def_gen.hypergeometric(20, 20, 10, size=None) +def_gen.hypergeometric(20, 20, 10, size=1) +def_gen.hypergeometric(I_arr_20, 20, 10) +def_gen.hypergeometric(20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1) +def_gen.hypergeometric(20, I_arr_20, 10, size=1) +def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10) +def_gen.hypergeometric(20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1) + + +def_gen.bit_generator + +def_gen.bytes(2) + +def_gen.choice(5) +def_gen.choice(5, 3) +def_gen.choice(5, 3, replace=True) +def_gen.choice(5, 3, p=[1 / 5] * 5) +def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False) + +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])) + +def_gen.dirichlet([0.5, 0.5]) +def_gen.dirichlet(np.array([0.5, 0.5])) +def_gen.dirichlet(np.array([0.5, 0.5]), size=3) + +def_gen.multinomial(20, [1 / 6.0] * 6) +def_gen.multinomial(20, np.array([0.5, 0.5])) +def_gen.multinomial(20, [1 / 6.0] * 6, size=2) +def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2)) +def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2)) + +def_gen.multivariate_hypergeometric([3, 5, 7], 2) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7)) +def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count") +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals") + +def_gen.multivariate_normal([0.0], [[1.0]]) +def_gen.multivariate_normal([0.0], np.array([[1.0]])) +def_gen.multivariate_normal(np.array([0.0]), [[1.0]]) +def_gen.multivariate_normal([0.0], np.array([[1.0]])) + +def_gen.permutation(10) +def_gen.permutation([1, 2, 3, 4]) +def_gen.permutation(np.array([1, 2, 3, 4])) +def_gen.permutation(D_2D, axis=1) +def_gen.permuted(D_2D) +def_gen.permuted(D_2D_like) +def_gen.permuted(D_2D, axis=1) +def_gen.permuted(D_2D, out=D_2D) +def_gen.permuted(D_2D_like, out=D_2D) +def_gen.permuted(D_2D_like, out=D_2D) +def_gen.permuted(D_2D, axis=1, out=D_2D) + +def_gen.shuffle(np.arange(10)) +def_gen.shuffle([1, 2, 3, 4, 5]) +def_gen.shuffle(D_2D, axis=1) + +def_gen.__str__() +def_gen.__repr__() +def_gen_state: Dict[str, Any] +def_gen_state = def_gen.__getstate__() +def_gen.__setstate__(def_gen_state) diff --git 
a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.py index 9e26f495f8d7..cd1ac014b741 100644 --- a/numpy/typing/tests/data/reveal/random.py +++ b/numpy/typing/tests/data/reveal/random.py @@ -1,15 +1,502 @@ +from __future__ import annotations + +from typing import Any, List + import numpy as np def_rng = np.random.default_rng() +seed_seq = np.random.SeedSequence() mt19937 = np.random.MT19937() -pcg64 = np.random.MT19937() +pcg64 = np.random.PCG64() sfc64 = np.random.SFC64() philox = np.random.Philox() +seedless_seq = np.random.bit_generator.SeedlessSeedSequence() + +reveal_type(def_rng) # E: numpy.random._generator.Generator +reveal_type(mt19937) # E: numpy.random._mt19937.MT19937 +reveal_type(pcg64) # E: numpy.random._pcg64.PCG64 +reveal_type(sfc64) # E: numpy.random._sfc64.SFC64 +reveal_type(philox) # E: numpy.random._philox.Philox +reveal_type(seed_seq) # E: numpy.random.bit_generator.SeedSequence +reveal_type(seedless_seq) # E: numpy.random.bit_generator.SeedlessSeedSequence + +mt19937_jumped = mt19937.jumped() +mt19937_jumped3 = mt19937.jumped(3) +mt19937_raw = mt19937.random_raw() +mt19937_raw_arr = mt19937.random_raw(5) + +reveal_type(mt19937_jumped) # E: numpy.random._mt19937.MT19937 +reveal_type(mt19937_jumped3) # E: numpy.random._mt19937.MT19937 +reveal_type(mt19937_raw) # E: int +reveal_type(mt19937_raw_arr) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(mt19937.lock) # E: threading.Lock + +pcg64_jumped = pcg64.jumped() +pcg64_jumped3 = pcg64.jumped(3) +pcg64_adv = pcg64.advance(3) +pcg64_raw = pcg64.random_raw() +pcg64_raw_arr = pcg64.random_raw(5) + +reveal_type(pcg64_jumped) # E: numpy.random._pcg64.PCG64 +reveal_type(pcg64_jumped3) # E: numpy.random._pcg64.PCG64 +reveal_type(pcg64_adv) # E: numpy.random._pcg64.PCG64 +reveal_type(pcg64_raw) # E: int +reveal_type(pcg64_raw_arr) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(pcg64.lock) # E: threading.Lock + +philox_jumped = philox.jumped() +philox_jumped3 = philox.jumped(3) +philox_adv = philox.advance(3) +philox_raw = philox.random_raw() +philox_raw_arr = philox.random_raw(5) + +reveal_type(philox_jumped) # E: numpy.random._philox.Philox +reveal_type(philox_jumped3) # E: numpy.random._philox.Philox +reveal_type(philox_adv) # E: numpy.random._philox.Philox +reveal_type(philox_raw) # E: int +reveal_type(philox_raw_arr) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(philox.lock) # E: threading.Lock + +sfc64_raw = sfc64.random_raw() +sfc64_raw_arr = sfc64.random_raw(5) + +reveal_type(sfc64_raw) # E: int +reveal_type(sfc64_raw_arr) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(sfc64.lock) # E: threading.Lock + +reveal_type(seed_seq.pool) # numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(seed_seq.entropy) # E:Union[None, int, Sequence[int]] +reveal_type(seed_seq.spawn(1)) # E: list[numpy.random.bit_generator.SeedSequence] +reveal_type(seed_seq.generate_state(8, "uint32")) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.unsignedinteger[numpy.typing._32Bit], numpy.unsignedinteger[numpy.typing._64Bit]]]] +reveal_type(seed_seq.generate_state(8, "uint64")) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.unsignedinteger[numpy.typing._32Bit], numpy.unsignedinteger[numpy.typing._64Bit]]]] + + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: np.ndarray[Any, 
np.dtype[np.float64]] = np.array([0.1]) +D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5]) +D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) +D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5]) +I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) +I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) +D_arr_like_0p1: List[float] = [0.1] +D_arr_like_0p5: List[float] = [0.5] +D_arr_like_0p9: List[float] = [0.9] +D_arr_like_1p5: List[float] = [1.5] +I_arr_like_10: List[int] = [10] +I_arr_like_20: List[int] = [20] +D_2D_like: List[List[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) + +reveal_type(def_gen.standard_normal()) # E: float +reveal_type(def_gen.standard_normal(size=None)) # E: float +reveal_type(def_gen.standard_normal(size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] + +reveal_type(def_gen.random()) # E: float +reveal_type(def_gen.random(size=None)) # E: float +reveal_type(def_gen.random(size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] + +reveal_type(def_gen.standard_cauchy()) # E: float +reveal_type(def_gen.standard_cauchy(size=None)) # E: float +reveal_type(def_gen.standard_cauchy(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.standard_exponential()) # E: float +reveal_type(def_gen.standard_exponential(size=None)) # E: float +reveal_type(def_gen.standard_exponential(size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] + +reveal_type(def_gen.zipf(1.5)) # E: int +reveal_type(def_gen.zipf(1.5, size=None)) # E: int +reveal_type(def_gen.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.weibull(0.5)) # E: float +reveal_type(def_gen.weibull(0.5, size=None)) # E: float +reveal_type(def_gen.weibull(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.weibull(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.weibull(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.weibull(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.weibull(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.standard_t(0.5)) # E: float +reveal_type(def_gen.standard_t(0.5, size=None)) # E: float +reveal_type(def_gen.standard_t(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.standard_t(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(def_gen.standard_t(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.standard_t(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.standard_t(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.poisson(0.5)) # E: int +reveal_type(def_gen.poisson(0.5, size=None)) # E: int +reveal_type(def_gen.poisson(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.poisson(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.power(0.5)) # E: float +reveal_type(def_gen.power(0.5, size=None)) # E: float +reveal_type(def_gen.power(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.power(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.power(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.power(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.power(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.pareto(0.5)) # E: float +reveal_type(def_gen.pareto(0.5, size=None)) # E: float +reveal_type(def_gen.pareto(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.pareto(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.pareto(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.pareto(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.pareto(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.chisquare(0.5)) # E: float +reveal_type(def_gen.chisquare(0.5, size=None)) # E: float +reveal_type(def_gen.chisquare(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.chisquare(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.chisquare(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.chisquare(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.chisquare(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.exponential(0.5)) # E: float +reveal_type(def_gen.exponential(0.5, size=None)) # E: float +reveal_type(def_gen.exponential(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.exponential(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(def_gen.exponential(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.exponential(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.exponential(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.geometric(0.5)) # E: int +reveal_type(def_gen.geometric(0.5, size=None)) # E: int +reveal_type(def_gen.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.logseries(0.5)) # E: int +reveal_type(def_gen.logseries(0.5, size=None)) # E: int +reveal_type(def_gen.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.rayleigh(0.5)) # E: float +reveal_type(def_gen.rayleigh(0.5, size=None)) # E: float +reveal_type(def_gen.rayleigh(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.rayleigh(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.rayleigh(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.rayleigh(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.rayleigh(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.standard_gamma(0.5)) # E: float +reveal_type(def_gen.standard_gamma(0.5, size=None)) # E: float +reveal_type(def_gen.standard_gamma(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] + 
+reveal_type(def_gen.vonmises(0.5, 0.5)) # E: float +reveal_type(def_gen.vonmises(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.vonmises(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.wald(0.5, 0.5)) # E: float +reveal_type(def_gen.wald(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.wald(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.uniform(0.5, 0.5)) # E: float +reveal_type(def_gen.uniform(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.uniform(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.beta(0.5, 0.5)) # E: float +reveal_type(def_gen.beta(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.beta(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.f(0.5, 0.5)) # E: float +reveal_type(def_gen.f(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.f(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.gamma(0.5, 0.5)) # E: float +reveal_type(def_gen.gamma(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.gamma(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.gumbel(0.5, 0.5)) # E: float +reveal_type(def_gen.gumbel(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.gumbel(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.laplace(0.5, 0.5)) # E: float +reveal_type(def_gen.laplace(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.laplace(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(def_gen.laplace(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.logistic(0.5, 0.5)) # E: float +reveal_type(def_gen.logistic(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.logistic(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.lognormal(0.5, 0.5)) # E: float +reveal_type(def_gen.lognormal(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.lognormal(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(def_gen.lognormal(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.noncentral_chisquare(0.5, 0.5)) # E: float +reveal_type(def_gen.noncentral_chisquare(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.noncentral_chisquare(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.normal(0.5, 0.5)) # E: float +reveal_type(def_gen.normal(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.normal(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.triangular(0.1, 0.5, 0.9)) # E: float +reveal_type(def_gen.triangular(0.1, 0.5, 0.9, size=None)) # E: float +reveal_type(def_gen.triangular(0.1, 0.5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_0p1, 0.5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(0.1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(0.5, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9)) # E: float +reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=None)) # E: float +reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.binomial(10, 0.5)) # E: int +reveal_type(def_gen.binomial(10, 0.5, size=None)) # E: int +reveal_type(def_gen.binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.negative_binomial(10, 0.5)) # E: int +reveal_type(def_gen.negative_binomial(10, 0.5, size=None)) # E: int +reveal_type(def_gen.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.hypergeometric(20, 20, 10)) # E: int 
+reveal_type(def_gen.hypergeometric(20, 20, 10, size=None)) # E: int +reveal_type(def_gen.hypergeometric(20, 20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + + +reveal_type(def_gen.bit_generator) # E: BitGenerator + +reveal_type(def_gen.bytes(2)) # E: bytes + +reveal_type(def_gen.choice(5)) # E: int +reveal_type(def_gen.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.choice(5, 3, replace=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))) # E: numpy.ndarray[Any, Any] + +reveal_type(def_gen.dirichlet([0.5, 0.5])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]), size=3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.multivariate_normal([0.0], [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_normal(np.array([0.0]), [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.permutation(10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.permutation([1, 2, 3, 4])) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permutation(np.array([1, 2, 3, 4]))) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permutation(D_2D, axis=1)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D_like)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D, axis=1)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D, out=D_2D)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D_like, out=D_2D)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D_like, out=D_2D)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D, axis=1, out=D_2D)) # E: numpy.ndarray[Any, Any] -reveal_type(def_rng) # E: np.random.MT19937 -reveal_type(mt19937) # E: np.random.Generator -reveal_type(pcg64) # E: np.random.MT19937 -reveal_type(sfc64) # E: np.random.SFC64 -reveal_type(philox) # E: np.random.Philox +reveal_type(def_gen.shuffle(np.arange(10))) # E: None +reveal_type(def_gen.shuffle([1, 2, 3, 4, 5])) # E: None +reveal_type(def_gen.shuffle(D_2D, axis=1)) # E: None +reveal_type(def_gen.shuffle(D_2D_like, axis=1)) # E: None +reveal_type(np.random.Generator(pcg64)) # E: Generator +reveal_type(def_gen.__str__()) # E: str +reveal_type(def_gen.__repr__()) # E: str +def_gen_state = def_gen.__getstate__() 
+reveal_type(def_gen_state) # E: builtins.dict[builtins.str, Any] +reveal_type(def_gen.__setstate__(def_gen_state)) # E: None diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index e8028242076b..70355dcd916c 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -48,8 +48,11 @@ def run_mypy() -> None: The mypy results are cached in `OUTPUT_MYPY` for further use. + The cache refresh can be skipped using + + NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests """ - if os.path.isdir(CACHE_DIR): + if os.path.isdir(CACHE_DIR) and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)): shutil.rmtree(CACHE_DIR) for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR): From 69de25ea426121295e6404f3ec4dc35c905bb14a Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 19 Feb 2021 11:53:57 +0000 Subject: [PATCH 0632/1270] ENH/BUG: Improve definitions and fix bugs Add tests Fix related bugs Improve specificity of definitions --- numpy/random/_generator.pyi | 164 +++++++-- numpy/typing/tests/data/pass/random.py | 364 ++++++++++++++++++++ numpy/typing/tests/data/reveal/random.py | 410 ++++++++++++++++++++++- 3 files changed, 896 insertions(+), 42 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 904cbda3dc2f..a744dec67a84 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -88,9 +88,27 @@ class Generator: def standard_normal( # type: ignore[misc] self, size: _ShapeLike = ..., - dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., - out: Optional[ndarray[Any, dtype[Union[float32, float64]]]] = ..., - ) -> ndarray[Any, dtype[Union[float32, float64]]]: ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: Optional[ndarray[Any, dtype[float32]]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... @overload def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ... @overload @@ -111,18 +129,37 @@ class Generator: def standard_exponential( self, size: _ShapeLike = ..., - dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + *, method: Literal["zig", "inv"] = ..., - out: Optional[ndarray[Any, dtype[Union[float32, float64]]]] = ..., - ) -> ndarray[Any, dtype[Union[float32, float64]]]: ... + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... @overload def standard_exponential( self, - size: Optional[_ShapeLike] = ..., - dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., method: Literal["zig", "inv"] = ..., - out: ndarray[Any, dtype[Union[float32, float64]]] = ..., - ) -> ndarray[Any, dtype[Union[float32, float64]]]: ... 
+ out: Optional[ndarray[Any, dtype[float32]]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... @overload def random( # type: ignore[misc] self, @@ -131,12 +168,32 @@ class Generator: out: None = ..., ) -> float: ... @overload + def random( + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload def random( self, size: _ShapeLike = ..., - dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., - out: Optional[ndarray[Any, dtype[Union[float32, float64]]]] = ..., - ) -> ndarray[Any, dtype[Union[float32, float64]]]: ... + *, + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: Optional[ndarray[Any, dtype[float32]]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... @overload def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] @overload @@ -150,6 +207,12 @@ class Generator: self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... ) -> ndarray[Any, dtype[float64]]: ... @overload + def integers( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + ) -> int: ... + @overload def integers( # type: ignore[misc] self, low: int, @@ -164,40 +227,34 @@ class Generator: low: int, high: Optional[int] = ..., size: None = ..., - dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ..., + dtype: _DTypeLikeBool = ..., endpoint: bool = ..., - ) -> int: ... + ) -> bool: ... @overload def integers( # type: ignore[misc] self, - low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[bool_], Type[bool], Type[bool_], _BoolCodes, _SupportsDType[dtype[bool_]] - ] = ..., + low: int, + high: Optional[int] = ..., + size: None = ..., + dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ..., endpoint: bool = ..., - ) -> ndarray[Any, dtype[bool_]]: ... + ) -> int: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, high: Optional[_ArrayLikeInt_co] = ..., size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]] - ] = ..., - endpoint: bool = ..., - ) -> ndarray[Any, dtype[int_]]: ... + ) -> ndarray[Any, dtype[int64]]: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, high: Optional[_ArrayLikeInt_co] = ..., size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ..., + dtype: _DTypeLikeBool = ..., endpoint: bool = ..., - ) -> ndarray[Any, dtype[uint]]: ... + ) -> ndarray[Any, dtype[bool_]]: ... @overload def integers( # type: ignore[misc] self, @@ -278,6 +335,26 @@ class Generator: ] = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[uint64]]: ... 
+ @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]] + ] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint]]: ... # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> Union[_T, ndarray[Any,Any]] @overload def choice( @@ -350,9 +427,30 @@ class Generator: self, shape: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ..., - dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., - out: Optional[ndarray[Any, dtype[Union[float32, float64]]]] = ..., - ) -> ndarray[Any, dtype[Union[float32, float64]]]: ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + dtype: _DTypeLikeFloat32 = ..., + out: Optional[ndarray[Any, dtype[float32]]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + dtype: _DTypeLikeFloat64 = ..., + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... @overload def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload @@ -536,7 +634,7 @@ class Generator: check_valid: Literal["warn", "raise", "ignore"] = ..., tol: float = ..., *, - method: Literal["svd", "eigh", "cholesky"] = ... + method: Literal["svd", "eigh", "cholesky"] = ..., ) -> ndarray[Any, dtype[float64]]: ... def multinomial( self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... 
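# --- Editorial aside, not part of the patch -----------------------------------
# A minimal sketch of how the dtype-specific overloads added above for
# standard_normal, standard_exponential, random, standard_gamma and integers
# are meant to resolve under a type checker, assuming a default Generator
# named `rng`; the exact inferred types are asserted by the pass/reveal test
# files in the hunks that follow.
import numpy as np

rng = np.random.default_rng()
scalar = rng.standard_normal()                          # inferred as float
vec64 = rng.standard_normal(size=3)                     # ndarray[Any, dtype[float64]]
vec32 = rng.standard_normal(size=3, dtype=np.float32)   # ndarray[Any, dtype[float32]]
ints = rng.integers(0, 256, size=3, dtype=np.uint8)     # ndarray[Any, dtype[uint8]]
# -------------------------------------------------------------------------------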
diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py
index fa3585a9023a..0f60344393d4 100644
--- a/numpy/typing/tests/data/pass/random.py
+++ b/numpy/typing/tests/data/pass/random.py
@@ -435,6 +435,370 @@ def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)
 def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)

+I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64)
+
+def_gen.integers(0, 100)
+def_gen.integers(100)
+def_gen.integers([100])
+def_gen.integers(0, [100])
+
+I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_)
+I_bool_low_like: List[int] = [0]
+I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
+I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
+
+def_gen.integers(2, dtype=bool)
+def_gen.integers(0, 2, dtype=bool)
+def_gen.integers(1, dtype=bool, endpoint=True)
+def_gen.integers(0, 1, dtype=bool, endpoint=True)
+def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True)
+def_gen.integers(I_bool_high_open, dtype=bool)
+def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool)
+def_gen.integers(0, I_bool_high_open, dtype=bool)
+def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True)
+def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True)
+def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True)
+
+def_gen.integers(2, dtype=np.bool_)
+def_gen.integers(0, 2, dtype=np.bool_)
+def_gen.integers(1, dtype=np.bool_, endpoint=True)
+def_gen.integers(0, 1, dtype=np.bool_, endpoint=True)
+def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True)
+def_gen.integers(I_bool_high_open, dtype=np.bool_)
+def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_)
+def_gen.integers(0, I_bool_high_open, dtype=np.bool_)
+def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True)
+def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True)
+def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True)
+
+I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8)
+I_u1_low_like: List[int] = [0]
+I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
+I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
+
+def_gen.integers(256, dtype="u1")
+def_gen.integers(0, 256, dtype="u1")
+def_gen.integers(255, dtype="u1", endpoint=True)
+def_gen.integers(0, 255, dtype="u1", endpoint=True)
+def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)
+def_gen.integers(I_u1_high_open, dtype="u1")
+def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")
+def_gen.integers(0, I_u1_high_open, dtype="u1")
+def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)
+def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)
+def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)
+
+def_gen.integers(256, dtype="uint8")
+def_gen.integers(0, 256, dtype="uint8")
+def_gen.integers(255, dtype="uint8", endpoint=True)
+def_gen.integers(0, 255, dtype="uint8", endpoint=True)
+def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)
+def_gen.integers(I_u1_high_open, dtype="uint8")
+def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")
+def_gen.integers(0, I_u1_high_open, dtype="uint8")
+def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)
+def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)
+def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)
+
+def_gen.integers(256, dtype=np.uint8)
+def_gen.integers(0, 256, dtype=np.uint8)
+def_gen.integers(255, dtype=np.uint8, endpoint=True)
+def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)
+def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)
+def_gen.integers(I_u1_high_open, dtype=np.uint8)
+def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)
+def_gen.integers(0, I_u1_high_open, dtype=np.uint8)
+def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)
+def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)
+def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)
+
+I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16)
+I_u2_low_like: List[int] = [0]
+I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
+I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
+
+def_gen.integers(65536, dtype="u2")
+def_gen.integers(0, 65536, dtype="u2")
+def_gen.integers(65535, dtype="u2", endpoint=True)
+def_gen.integers(0, 65535, dtype="u2", endpoint=True)
+def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)
+def_gen.integers(I_u2_high_open, dtype="u2")
+def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")
+def_gen.integers(0, I_u2_high_open, dtype="u2")
+def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)
+def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)
+def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)
+
+def_gen.integers(65536, dtype="uint16")
+def_gen.integers(0, 65536, dtype="uint16")
+def_gen.integers(65535, dtype="uint16", endpoint=True)
+def_gen.integers(0, 65535, dtype="uint16", endpoint=True)
+def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)
+def_gen.integers(I_u2_high_open, dtype="uint16")
+def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")
+def_gen.integers(0, I_u2_high_open, dtype="uint16")
+def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)
+def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)
+def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)
+
+def_gen.integers(65536, dtype=np.uint16)
+def_gen.integers(0, 65536, dtype=np.uint16)
+def_gen.integers(65535, dtype=np.uint16, endpoint=True)
+def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)
+def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)
+def_gen.integers(I_u2_high_open, dtype=np.uint16)
+def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)
+def_gen.integers(0, I_u2_high_open, dtype=np.uint16)
+def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)
+def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)
+def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)
+
+I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32)
+I_u4_low_like: List[int] = [0]
+I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
+I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
+
+def_gen.integers(4294967296, dtype="u4")
+def_gen.integers(0, 4294967296, dtype="u4")
+def_gen.integers(4294967295, dtype="u4", endpoint=True)
+def_gen.integers(0, 4294967295, dtype="u4", endpoint=True)
+def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)
+def_gen.integers(I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4") +def_gen.integers(0, I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True) + +def_gen.integers(4294967296, dtype="uint32") +def_gen.integers(0, 4294967296, dtype="uint32") +def_gen.integers(4294967295, dtype="uint32", endpoint=True) +def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32") +def_gen.integers(0, I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True) + +def_gen.integers(4294967296, dtype=np.uint32) +def_gen.integers(0, 4294967296, dtype=np.uint32) +def_gen.integers(4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32) +def_gen.integers(0, I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True) + +I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) +I_u8_low_like: List[int] = [0] +I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) +I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) + +def_gen.integers(18446744073709551616, dtype="u8") +def_gen.integers(0, 18446744073709551616, dtype="u8") +def_gen.integers(18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8") +def_gen.integers(0, I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True) + +def_gen.integers(18446744073709551616, dtype="uint64") +def_gen.integers(0, 18446744073709551616, dtype="uint64") +def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_high_open, dtype="uint64") +def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64") +def_gen.integers(0, I_u8_high_open, dtype="uint64") +def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True) + +def_gen.integers(18446744073709551616, dtype=np.uint64) +def_gen.integers(0, 18446744073709551616, 
dtype=np.uint64) +def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_high_open, dtype=np.uint64) +def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64) +def_gen.integers(0, I_u8_high_open, dtype=np.uint64) +def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True) + +I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8) +I_i1_low_like: List[int] = [-128] +I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) +I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) + +def_gen.integers(128, dtype="i1") +def_gen.integers(-128, 128, dtype="i1") +def_gen.integers(127, dtype="i1", endpoint=True) +def_gen.integers(-128, 127, dtype="i1", endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True) +def_gen.integers(I_i1_high_open, dtype="i1") +def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1") +def_gen.integers(-128, I_i1_high_open, dtype="i1") +def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True) + +def_gen.integers(128, dtype="int8") +def_gen.integers(-128, 128, dtype="int8") +def_gen.integers(127, dtype="int8", endpoint=True) +def_gen.integers(-128, 127, dtype="int8", endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True) +def_gen.integers(I_i1_high_open, dtype="int8") +def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8") +def_gen.integers(-128, I_i1_high_open, dtype="int8") +def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True) + +def_gen.integers(128, dtype=np.int8) +def_gen.integers(-128, 128, dtype=np.int8) +def_gen.integers(127, dtype=np.int8, endpoint=True) +def_gen.integers(-128, 127, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_high_open, dtype=np.int8) +def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8) +def_gen.integers(-128, I_i1_high_open, dtype=np.int8) +def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True) + +I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) +I_i2_low_like: List[int] = [-32768] +I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) +I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) + +def_gen.integers(32768, dtype="i2") +def_gen.integers(-32768, 32768, dtype="i2") +def_gen.integers(32767, dtype="i2", endpoint=True) +def_gen.integers(-32768, 32767, dtype="i2", endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True) +def_gen.integers(I_i2_high_open, dtype="i2") +def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2") +def_gen.integers(-32768, I_i2_high_open, dtype="i2") 
+def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True) + +def_gen.integers(32768, dtype="int16") +def_gen.integers(-32768, 32768, dtype="int16") +def_gen.integers(32767, dtype="int16", endpoint=True) +def_gen.integers(-32768, 32767, dtype="int16", endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True) +def_gen.integers(I_i2_high_open, dtype="int16") +def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16") +def_gen.integers(-32768, I_i2_high_open, dtype="int16") +def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True) + +def_gen.integers(32768, dtype=np.int16) +def_gen.integers(-32768, 32768, dtype=np.int16) +def_gen.integers(32767, dtype=np.int16, endpoint=True) +def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_high_open, dtype=np.int16) +def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16) +def_gen.integers(-32768, I_i2_high_open, dtype=np.int16) +def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True) + +I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) +I_i4_low_like: List[int] = [-2147483648] +I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) +I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) + +def_gen.integers(2147483648, dtype="i4") +def_gen.integers(-2147483648, 2147483648, dtype="i4") +def_gen.integers(2147483647, dtype="i4", endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True) +def_gen.integers(I_i4_high_open, dtype="i4") +def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4") +def_gen.integers(-2147483648, I_i4_high_open, dtype="i4") +def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True) + +def_gen.integers(2147483648, dtype="int32") +def_gen.integers(-2147483648, 2147483648, dtype="int32") +def_gen.integers(2147483647, dtype="int32", endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True) +def_gen.integers(I_i4_high_open, dtype="int32") +def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32") +def_gen.integers(-2147483648, I_i4_high_open, dtype="int32") +def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True) + +def_gen.integers(2147483648, dtype=np.int32) +def_gen.integers(-2147483648, 2147483648, dtype=np.int32) +def_gen.integers(2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True) 
+def_gen.integers(I_i4_high_open, dtype=np.int32) +def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32) +def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32) +def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True) + +I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64) +I_i8_low_like: List[int] = [-9223372036854775808] +I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) +I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) + +def_gen.integers(9223372036854775808, dtype="i8") +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8") +def_gen.integers(9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(I_i8_high_open, dtype="i8") +def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8") +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8") +def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True) + +def_gen.integers(9223372036854775808, dtype="int64") +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64") +def_gen.integers(9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(I_i8_high_open, dtype="int64") +def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64") +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64") +def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True) + +def_gen.integers(9223372036854775808, dtype=np.int64) +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64) +def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_high_open, dtype=np.int64) +def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64) +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64) +def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True) + def_gen.bit_generator diff --git a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.py index cd1ac014b741..b5a8a007b258 100644 --- a/numpy/typing/tests/data/reveal/random.py +++ b/numpy/typing/tests/data/reveal/random.py @@ -89,20 +89,49 @@ D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) reveal_type(def_gen.standard_normal()) # E: float +reveal_type(def_gen.standard_normal(dtype=np.float32)) # E: float 
+reveal_type(def_gen.standard_normal(dtype="float32")) # E: float +reveal_type(def_gen.standard_normal(dtype="double")) # E: float +reveal_type(def_gen.standard_normal(dtype=np.float64)) # E: float reveal_type(def_gen.standard_normal(size=None)) # E: float -reveal_type(def_gen.standard_normal(size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="f4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="f8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.random()) # E: float +reveal_type(def_gen.random(dtype=np.float32)) # E: float +reveal_type(def_gen.random(dtype="float32")) # E: float +reveal_type(def_gen.random(dtype="double")) # E: float +reveal_type(def_gen.random(dtype=np.float64)) # E: float reveal_type(def_gen.random(size=None)) # E: float -reveal_type(def_gen.random(size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.random(size=1, dtype="f4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.random(size=1, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype="f8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_cauchy()) # E: float reveal_type(def_gen.standard_cauchy(size=None)) # E: float reveal_type(def_gen.standard_cauchy(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(def_gen.standard_exponential()) # E: float +reveal_type(def_gen.standard_exponential(method="inv")) # E: float +reveal_type(def_gen.standard_exponential(dtype=np.float32)) # E: float +reveal_type(def_gen.standard_exponential(dtype="float32")) # E: float +reveal_type(def_gen.standard_exponential(dtype="double")) # E: float +reveal_type(def_gen.standard_exponential(dtype=np.float64)) # E: float reveal_type(def_gen.standard_exponential(size=None)) # E: float -reveal_type(def_gen.standard_exponential(size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=None, method="inv")) # E: float +reveal_type(def_gen.standard_exponential(size=1, method="inv")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] 
+reveal_type(def_gen.standard_exponential(size=1, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="f4", method="inv")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype=np.float64, method="inv")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="f8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.zipf(1.5)) # E: int reveal_type(def_gen.zipf(1.5, size=None)) # E: int @@ -194,11 +223,11 @@ reveal_type(def_gen.standard_gamma(0.5)) # E: float reveal_type(def_gen.standard_gamma(0.5, size=None)) # E: float -reveal_type(def_gen.standard_gamma(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.vonmises(0.5, 0.5)) # E: float reveal_type(def_gen.vonmises(0.5, 0.5, size=None)) # E: float @@ -438,6 +467,370 @@ reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64) + +reveal_type(def_gen.integers(0, 100)) # E: int +reveal_type(def_gen.integers(100)) # E: int +reveal_type(def_gen.integers([100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_) +I_bool_low_like: List[int] = [0] +I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) +I_bool_high_closed: 
np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) + +reveal_type(def_gen.integers(2, dtype=bool)) # E: builtins.bool +reveal_type(def_gen.integers(0, 2, dtype=bool)) # E: builtins.bool +reveal_type(def_gen.integers(1, dtype=bool, endpoint=True)) # E: builtins.bool +reveal_type(def_gen.integers(0, 1, dtype=bool, endpoint=True)) # E: builtins.bool +reveal_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(0, I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] + +reveal_type(def_gen.integers(2, dtype=np.bool_)) # E: builtins.bool +reveal_type(def_gen.integers(0, 2, dtype=np.bool_)) # E: builtins.bool +reveal_type(def_gen.integers(1, dtype=np.bool_, endpoint=True)) # E: builtins.bool +reveal_type(def_gen.integers(0, 1, dtype=np.bool_, endpoint=True)) # E: builtins.bool +reveal_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] + +I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8) +I_u1_low_like: List[int] = [0] +I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) +I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) + +reveal_type(def_gen.integers(256, dtype="u1")) # E: int +reveal_type(def_gen.integers(0, 256, dtype="u1")) # E: int +reveal_type(def_gen.integers(255, dtype="u1", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 255, dtype="u1", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(0, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] + +reveal_type(def_gen.integers(256, dtype="uint8")) # E: int +reveal_type(def_gen.integers(0, 256, dtype="uint8")) # E: int +reveal_type(def_gen.integers(255, dtype="uint8", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(0, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] + +reveal_type(def_gen.integers(256, dtype=np.uint8)) # E: int +reveal_type(def_gen.integers(0, 256, dtype=np.uint8)) # E: int +reveal_type(def_gen.integers(255, dtype=np.uint8, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] + +I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16) +I_u2_low_like: List[int] = [0] +I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) +I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], 
dtype=np.uint16) + +reveal_type(def_gen.integers(65536, dtype="u2")) # E: int +reveal_type(def_gen.integers(0, 65536, dtype="u2")) # E: int +reveal_type(def_gen.integers(65535, dtype="u2", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(0, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] + +reveal_type(def_gen.integers(65536, dtype="uint16")) # E: int +reveal_type(def_gen.integers(0, 65536, dtype="uint16")) # E: int +reveal_type(def_gen.integers(65535, dtype="uint16", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(0, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] + +reveal_type(def_gen.integers(65536, dtype=np.uint16)) # E: int +reveal_type(def_gen.integers(0, 65536, dtype=np.uint16)) # E: int +reveal_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(0, 
I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] + +I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) +I_u4_low_like: List[int] = [0] +I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) +I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) + +reveal_type(def_gen.integers(4294967296, dtype="u4")) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype="u4")) # E: int +reveal_type(def_gen.integers(4294967295, dtype="u4", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] + +reveal_type(def_gen.integers(4294967296, dtype="uint32")) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype="uint32")) # E: int +reveal_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] 
+reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] + +reveal_type(def_gen.integers(4294967296, dtype=np.uint32)) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype=np.uint32)) # E: int +reveal_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] + +I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) +I_u8_low_like: List[int] = [0] +I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) +I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) + +reveal_type(def_gen.integers(18446744073709551616, dtype="u8")) # E: int +reveal_type(def_gen.integers(0, 18446744073709551616, dtype="u8")) # E: int +reveal_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(0, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.integers(18446744073709551616, dtype="uint64")) # E: int +reveal_type(def_gen.integers(0, 18446744073709551616, dtype="uint64")) # E: int +reveal_type(def_gen.integers(18446744073709551615, 
dtype="uint64", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(0, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.integers(18446744073709551616, dtype=np.uint64)) # E: int +reveal_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64)) # E: int +reveal_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] + +I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8) +I_i1_low_like: List[int] = [-128] +I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) +I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) + +reveal_type(def_gen.integers(128, dtype="i1")) # E: int +reveal_type(def_gen.integers(-128, 128, dtype="i1")) # E: int +reveal_type(def_gen.integers(127, dtype="i1", endpoint=True)) # E: int +reveal_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] + +reveal_type(def_gen.integers(128, dtype="int8")) # E: int +reveal_type(def_gen.integers(-128, 128, dtype="int8")) # E: int +reveal_type(def_gen.integers(127, dtype="int8", endpoint=True)) # E: int +reveal_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] + +reveal_type(def_gen.integers(128, dtype=np.int8)) # E: int +reveal_type(def_gen.integers(-128, 128, dtype=np.int8)) # E: int +reveal_type(def_gen.integers(127, dtype=np.int8, endpoint=True)) # E: int +reveal_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] + +I_i2_low: np.ndarray[Any, 
np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) +I_i2_low_like: List[int] = [-32768] +I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) +I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) + +reveal_type(def_gen.integers(32768, dtype="i2")) # E: int +reveal_type(def_gen.integers(-32768, 32768, dtype="i2")) # E: int +reveal_type(def_gen.integers(32767, dtype="i2", endpoint=True)) # E: int +reveal_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] + +reveal_type(def_gen.integers(32768, dtype="int16")) # E: int +reveal_type(def_gen.integers(-32768, 32768, dtype="int16")) # E: int +reveal_type(def_gen.integers(32767, dtype="int16", endpoint=True)) # E: int +reveal_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] + +reveal_type(def_gen.integers(32768, dtype=np.int16)) # E: int +reveal_type(def_gen.integers(-32768, 32768, dtype=np.int16)) # E: int +reveal_type(def_gen.integers(32767, dtype=np.int16, endpoint=True)) # E: int +reveal_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_high_open, dtype=np.int16)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] + +I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) +I_i4_low_like: List[int] = [-2147483648] +I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) +I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) + +reveal_type(def_gen.integers(2147483648, dtype="i4")) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="i4")) # E: int +reveal_type(def_gen.integers(2147483647, dtype="i4", endpoint=True)) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] + +reveal_type(def_gen.integers(2147483648, dtype="int32")) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="int32")) # E: int +reveal_type(def_gen.integers(2147483647, dtype="int32", endpoint=True)) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] 
+reveal_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] + +reveal_type(def_gen.integers(2147483648, dtype=np.int32)) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32)) # E: int +reveal_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True)) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] + +I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64) +I_i8_low_like: List[int] = [-9223372036854775808] +I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) +I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) + +reveal_type(def_gen.integers(9223372036854775808, dtype="i8")) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int +reveal_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True)) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.integers(9223372036854775808, dtype="int64")) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int +reveal_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True)) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(def_gen.integers(9223372036854775808, dtype=np.int64)) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int +reveal_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True)) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + reveal_type(def_gen.bit_generator) # E: BitGenerator @@ -492,7 +885,6 @@ reveal_type(def_gen.shuffle(np.arange(10))) # E: None reveal_type(def_gen.shuffle([1, 2, 3, 4, 5])) # E: None reveal_type(def_gen.shuffle(D_2D, axis=1)) # E: None -reveal_type(def_gen.shuffle(D_2D_like, axis=1)) # E: 
None reveal_type(np.random.Generator(pcg64)) # E: Generator reveal_type(def_gen.__str__()) # E: str From d7bc29c1382cc31f3bc177f9ff6672d3f73e3076 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 19 Feb 2021 13:35:35 +0000 Subject: [PATCH 0633/1270] TST: Add tests for dtype and out Add further tests for functions that accept out and dtype arguments --- numpy/typing/tests/data/pass/random.py | 58 +++++++++++++++++++++++- numpy/typing/tests/data/reveal/random.py | 26 +++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index 0f60344393d4..ab385674d5eb 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -85,21 +85,68 @@ D_2D_like: List[List[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) +S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) +D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1) + def_gen.standard_normal() +def_gen.standard_normal(dtype=np.float32) +def_gen.standard_normal(dtype="float32") +def_gen.standard_normal(dtype="double") +def_gen.standard_normal(dtype=np.float64) def_gen.standard_normal(size=None) def_gen.standard_normal(size=1) +def_gen.standard_normal(size=1, dtype=np.float32) +def_gen.standard_normal(size=1, dtype="f4") +def_gen.standard_normal(size=1, dtype="float32", out=S_out) +def_gen.standard_normal(dtype=np.float32, out=S_out) +def_gen.standard_normal(size=1, dtype=np.float64) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="f8") +def_gen.standard_normal(out=D_out) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="float64", out=D_out) def_gen.random() +def_gen.random(dtype=np.float32) +def_gen.random(dtype="float32") +def_gen.random(dtype="double") +def_gen.random(dtype=np.float64) def_gen.random(size=None) def_gen.random(size=1) +def_gen.random(size=1, dtype=np.float32) +def_gen.random(size=1, dtype="f4") +def_gen.random(size=1, dtype="float32", out=S_out) +def_gen.random(dtype=np.float32, out=S_out) +def_gen.random(size=1, dtype=np.float64) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="f8") +def_gen.random(out=D_out) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="float64", out=D_out) def_gen.standard_cauchy() def_gen.standard_cauchy(size=None) def_gen.standard_cauchy(size=1) def_gen.standard_exponential() +def_gen.standard_exponential(method="inv") +def_gen.standard_exponential(dtype=np.float32) +def_gen.standard_exponential(dtype="float32") +def_gen.standard_exponential(dtype="double") +def_gen.standard_exponential(dtype=np.float64) def_gen.standard_exponential(size=None) -def_gen.standard_exponential(size=1) +def_gen.standard_exponential(size=None, method="inv") +def_gen.standard_exponential(size=1, method="inv") +def_gen.standard_exponential(size=1, dtype=np.float32) +def_gen.standard_exponential(size=1, dtype="f4", method="inv") +def_gen.standard_exponential(size=1, dtype="float32", out=S_out) +def_gen.standard_exponential(dtype=np.float32, out=S_out) +def_gen.standard_exponential(size=1, dtype=np.float64, method="inv") +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="f8") +def_gen.standard_exponential(out=D_out) +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="float64", 
out=D_out) def_gen.zipf(1.5) def_gen.zipf(1.5, size=None) @@ -191,11 +238,20 @@ def_gen.standard_gamma(0.5) def_gen.standard_gamma(0.5, size=None) +def_gen.standard_gamma(0.5, dtype="float32") +def_gen.standard_gamma(0.5, size=None, dtype="float32") def_gen.standard_gamma(0.5, size=1) def_gen.standard_gamma(D_arr_0p5) +def_gen.standard_gamma(D_arr_0p5, dtype="f4") +def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out) +def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out) def_gen.standard_gamma(D_arr_0p5, size=1) def_gen.standard_gamma(D_arr_like_0p5) def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(0.5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64) def_gen.vonmises(0.5, 0.5) def_gen.vonmises(0.5, 0.5, size=None) diff --git a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.py index b5a8a007b258..7c731b482371 100644 --- a/numpy/typing/tests/data/reveal/random.py +++ b/numpy/typing/tests/data/reveal/random.py @@ -87,6 +87,8 @@ I_arr_like_20: List[int] = [20] D_2D_like: List[List[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) +S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) +D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1) reveal_type(def_gen.standard_normal()) # E: float reveal_type(def_gen.standard_normal(dtype=np.float32)) # E: float @@ -97,9 +99,14 @@ reveal_type(def_gen.standard_normal(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_normal(size=1, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] reveal_type(def_gen.standard_normal(size=1, dtype="f4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="float32", out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_normal(dtype=np.float32, out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] reveal_type(def_gen.standard_normal(size=1, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_normal(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_normal(size=1, dtype="f8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="float64", out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.random()) # E: float reveal_type(def_gen.random(dtype=np.float32)) # E: float @@ -110,9 +117,14 @@ reveal_type(def_gen.random(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.random(size=1, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] reveal_type(def_gen.random(size=1, dtype="f4")) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.random(size=1, dtype="float32", out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.random(dtype=np.float32, out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] reveal_type(def_gen.random(size=1, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.random(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.random(size=1, dtype="f8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype="float64", out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_cauchy()) # E: float reveal_type(def_gen.standard_cauchy(size=None)) # E: float @@ -129,9 +141,14 @@ reveal_type(def_gen.standard_exponential(size=1, method="inv")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_exponential(size=1, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] reveal_type(def_gen.standard_exponential(size=1, dtype="f4", method="inv")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="float32", out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_exponential(dtype=np.float32, out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] reveal_type(def_gen.standard_exponential(size=1, dtype=np.float64, method="inv")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_exponential(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_exponential(size=1, dtype="f8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="float64", out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.zipf(1.5)) # E: int reveal_type(def_gen.zipf(1.5, size=None)) # E: int @@ -223,11 +240,20 @@ reveal_type(def_gen.standard_gamma(0.5)) # E: float reveal_type(def_gen.standard_gamma(0.5, size=None)) # E: float +reveal_type(def_gen.standard_gamma(0.5, dtype="float32")) # E: float +reveal_type(def_gen.standard_gamma(0.5, size=None, dtype="float32")) # E: float reveal_type(def_gen.standard_gamma(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_gamma(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5, dtype="f4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] 
+reveal_type(def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] reveal_type(def_gen.standard_gamma(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_gamma(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(0.5, out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5, out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] reveal_type(def_gen.vonmises(0.5, 0.5)) # E: float reveal_type(def_gen.vonmises(0.5, 0.5, size=None)) # E: float From 7f2db304655f80aa40c5f8481959561da15d160d Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 19 Feb 2021 15:17:19 +0000 Subject: [PATCH 0634/1270] BUG: Corect typing statements in mtrand Bring over changes from Generator to RandomState where appropriate Remove attributes not in RandomState --- numpy/random/_generator.pyi | 8 ++-- numpy/random/mtrand.pyi | 83 +++++++++++++++++++++++++++---------- 2 files changed, 64 insertions(+), 27 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index a744dec67a84..51a231049bc8 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -114,10 +114,6 @@ class Generator: @overload def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ... @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... - @overload def standard_exponential( # type: ignore[misc] self, size: None = ..., @@ -523,6 +519,10 @@ class Generator: self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... ) -> ndarray[Any, dtype[float64]]: ... @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload def laplace( diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index f05635390429..91c8d84a4a98 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -113,6 +113,11 @@ class RandomState: def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] @overload def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ... + def randint( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + ) -> int: ... @overload def randint( # type: ignore[misc] self, @@ -120,6 +125,7 @@ class RandomState: high: Optional[int] = ..., size: None = ..., dtype: _DTypeLikeBool = ..., + endpoint: bool = ..., ) -> bool: ... 
@overload def randint( # type: ignore[misc] @@ -127,36 +133,31 @@ class RandomState: low: int, high: Optional[int] = ..., size: None = ..., - dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ..., - ) -> int: ... + dtype: _DTypeLikeBool = ..., + ) -> bool: ... @overload def randint( # type: ignore[misc] self, - low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[bool_], Type[bool], Type[bool_], _BoolCodes, _SupportsDType[dtype[bool_]] - ] = ..., - ) -> ndarray[Any, dtype[bool_]]: ... + low: int, + high: Optional[int] = ..., + size: None = ..., + dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ..., + ) -> int: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, high: Optional[_ArrayLikeInt_co] = ..., size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]] - ] = ..., - ) -> ndarray[Any, dtype[int_]]: ... + ) -> ndarray[Any, dtype[int64]]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, high: Optional[_ArrayLikeInt_co] = ..., size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ..., - ) -> ndarray[Any, dtype[uint]]: ... + dtype: _DTypeLikeBool = ..., + ) -> ndarray[Any, dtype[bool_]]: ... @overload def randint( # type: ignore[misc] self, @@ -229,15 +230,58 @@ class RandomState: dtype[uint64], Type[uint64], _UInt64Codes, _SupportsDType[dtype[uint64]] ] = ..., ) -> ndarray[Any, dtype[uint64]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]] + ] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ..., + ) -> ndarray[Any, dtype[uint]]: ... def bytes(self, length: int) -> bytes: ... + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload def choice( self, a: ArrayLike, - size: Optional[_ShapeLike] = ..., + size: None = ..., replace: bool = ..., p: Optional[_ArrayLikeFloat_co] = ..., ) -> Any: ... @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + ) -> ndarray[Any, Any]: ... + @overload def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload def uniform( @@ -480,13 +524,6 @@ class RandomState: def multinomial( self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... ) -> ndarray[Any, dtype[int_]]: ... - def multivariate_hypergeometric( - self, - colors: _ArrayLikeInt_co, - nsample: int, - size: Optional[_ShapeLike] = ..., - method: Literal["marginals", "count"] = ..., - ) -> ndarray[Any, dtype[int_]]: ... def dirichlet( self, alpha: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... ) -> ndarray[Any, dtype[float64]]: ... 
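For context on how the overloads patched above are exercised: the typing test data appears to work in two layers, where files under numpy/typing/tests/data/pass/ only have to type-check cleanly, while files under data/reveal/ pair each expression with the type mypy is expected to reveal in the trailing `# E:` comment. The short sketch below is illustrative only and is not part of either patch; the variable name `rs` and the particular calls are chosen here as examples, and the commented return types are simply those declared by the patched RandomState stubs.

# Illustrative sketch, not part of the patches; assumes the RandomState stubs above.
import numpy as np

rs = np.random.RandomState()

# Scalar overloads: omitting size (or passing size=None) selects the float/int return type.
x = rs.standard_normal()                      # float
n = rs.randint(0, 10)                         # int

# Array overloads: a size (or array-like bounds) selects an ndarray return type instead.
a = rs.standard_normal(size=3)                # ndarray[Any, dtype[float64]]
b = rs.randint(0, 2, size=3, dtype=np.bool_)  # ndarray[Any, dtype[bool_]]
c = rs.choice(5, size=3)                      # integer ndarray per the patched choice overload

Keying the overloads on whether size and dtype are given is what lets mypy distinguish scalar from array returns instead of falling back to Any; this is the pattern the Generator stub already uses and that these patches bring RandomState in line with.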
From 2e92a52bc523cf9eec245c01fdea01ce12dfbf9f Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Sat, 20 Feb 2021 01:05:33 +0000 Subject: [PATCH 0635/1270] ENH: Add tests for RandomState --- numpy/random/mtrand.pyi | 10 +- numpy/typing/tests/data/pass/random.py | 581 ++++++++++++++++++++++ numpy/typing/tests/data/reveal/random.py | 584 +++++++++++++++++++++++ 3 files changed, 1171 insertions(+), 4 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 91c8d84a4a98..f088a3929899 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -1,3 +1,4 @@ +from numpy.random.bit_generator import BitGenerator import sys from typing import Any, Callable, Dict, Literal, Optional, Sequence, Tuple, Type, Union, overload @@ -69,7 +70,7 @@ _DTypeLikeFloat64 = Union[ class RandomState: _bit_generator: BitGenerator - def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def __init__(self, seed: Union[None, _ArrayLikeInt_co, BitGenerator] = ...) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... def __getstate__(self) -> Dict[str, Any]: ... @@ -113,6 +114,7 @@ class RandomState: def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] @overload def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ... + @overload def randint( # type: ignore[misc] self, low: int, @@ -149,7 +151,7 @@ class RandomState: low: _ArrayLikeInt_co, high: Optional[_ArrayLikeInt_co] = ..., size: Optional[_ShapeLike] = ..., - ) -> ndarray[Any, dtype[int64]]: ... + ) -> ndarray[Any, dtype[int_]]: ... @overload def randint( # type: ignore[misc] self, @@ -293,11 +295,11 @@ class RandomState: @overload def rand(self, *args: None) -> float: ... @overload - def rand(self, *args: Sequence[int]) -> ndarray[Any, dtype[float64]]: ... + def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ... @overload def randn(self, *args: None) -> float: ... @overload - def randn(self, *args: Sequence[int]) -> ndarray[Any, dtype[float64]]: ... + def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ... @overload def random_integers(self, low: int, high: Optional[int] = ..., size: None = ...) -> int: ... 
# type: ignore[misc] @overload diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index ab385674d5eb..c762f085a20a 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -915,3 +915,584 @@ def_gen_state: Dict[str, Any] def_gen_state = def_gen.__getstate__() def_gen.__setstate__(def_gen_state) + +# RandomState +random_st: np.random.RandomState = np.random.RandomState() + +random_st.standard_normal() +random_st.standard_normal(size=None) +random_st.standard_normal(size=1) + +random_st.random() +random_st.random(size=None) +random_st.random(size=1) + +random_st.standard_cauchy() +random_st.standard_cauchy(size=None) +random_st.standard_cauchy(size=1) + +random_st.standard_exponential() +random_st.standard_exponential(size=None) +random_st.standard_exponential(size=1) + +random_st.zipf(1.5) +random_st.zipf(1.5, size=None) +random_st.zipf(1.5, size=1) +random_st.zipf(D_arr_1p5) +random_st.zipf(D_arr_1p5, size=1) +random_st.zipf(D_arr_like_1p5) +random_st.zipf(D_arr_like_1p5, size=1) + +random_st.weibull(0.5) +random_st.weibull(0.5, size=None) +random_st.weibull(0.5, size=1) +random_st.weibull(D_arr_0p5) +random_st.weibull(D_arr_0p5, size=1) +random_st.weibull(D_arr_like_0p5) +random_st.weibull(D_arr_like_0p5, size=1) + +random_st.standard_t(0.5) +random_st.standard_t(0.5, size=None) +random_st.standard_t(0.5, size=1) +random_st.standard_t(D_arr_0p5) +random_st.standard_t(D_arr_0p5, size=1) +random_st.standard_t(D_arr_like_0p5) +random_st.standard_t(D_arr_like_0p5, size=1) + +random_st.poisson(0.5) +random_st.poisson(0.5, size=None) +random_st.poisson(0.5, size=1) +random_st.poisson(D_arr_0p5) +random_st.poisson(D_arr_0p5, size=1) +random_st.poisson(D_arr_like_0p5) +random_st.poisson(D_arr_like_0p5, size=1) + +random_st.power(0.5) +random_st.power(0.5, size=None) +random_st.power(0.5, size=1) +random_st.power(D_arr_0p5) +random_st.power(D_arr_0p5, size=1) +random_st.power(D_arr_like_0p5) +random_st.power(D_arr_like_0p5, size=1) + +random_st.pareto(0.5) +random_st.pareto(0.5, size=None) +random_st.pareto(0.5, size=1) +random_st.pareto(D_arr_0p5) +random_st.pareto(D_arr_0p5, size=1) +random_st.pareto(D_arr_like_0p5) +random_st.pareto(D_arr_like_0p5, size=1) + +random_st.chisquare(0.5) +random_st.chisquare(0.5, size=None) +random_st.chisquare(0.5, size=1) +random_st.chisquare(D_arr_0p5) +random_st.chisquare(D_arr_0p5, size=1) +random_st.chisquare(D_arr_like_0p5) +random_st.chisquare(D_arr_like_0p5, size=1) + +random_st.exponential(0.5) +random_st.exponential(0.5, size=None) +random_st.exponential(0.5, size=1) +random_st.exponential(D_arr_0p5) +random_st.exponential(D_arr_0p5, size=1) +random_st.exponential(D_arr_like_0p5) +random_st.exponential(D_arr_like_0p5, size=1) + +random_st.geometric(0.5) +random_st.geometric(0.5, size=None) +random_st.geometric(0.5, size=1) +random_st.geometric(D_arr_0p5) +random_st.geometric(D_arr_0p5, size=1) +random_st.geometric(D_arr_like_0p5) +random_st.geometric(D_arr_like_0p5, size=1) + +random_st.logseries(0.5) +random_st.logseries(0.5, size=None) +random_st.logseries(0.5, size=1) +random_st.logseries(D_arr_0p5) +random_st.logseries(D_arr_0p5, size=1) +random_st.logseries(D_arr_like_0p5) +random_st.logseries(D_arr_like_0p5, size=1) + +random_st.rayleigh(0.5) +random_st.rayleigh(0.5, size=None) +random_st.rayleigh(0.5, size=1) +random_st.rayleigh(D_arr_0p5) +random_st.rayleigh(D_arr_0p5, size=1) +random_st.rayleigh(D_arr_like_0p5) +random_st.rayleigh(D_arr_like_0p5, size=1) + 
+random_st.standard_gamma(0.5) +random_st.standard_gamma(0.5, size=None) +random_st.standard_gamma(0.5, size=1) +random_st.standard_gamma(D_arr_0p5) +random_st.standard_gamma(D_arr_0p5, size=1) +random_st.standard_gamma(D_arr_like_0p5) +random_st.standard_gamma(D_arr_like_0p5, size=1) +random_st.standard_gamma(D_arr_like_0p5, size=1) + +random_st.vonmises(0.5, 0.5) +random_st.vonmises(0.5, 0.5, size=None) +random_st.vonmises(0.5, 0.5, size=1) +random_st.vonmises(D_arr_0p5, 0.5) +random_st.vonmises(0.5, D_arr_0p5) +random_st.vonmises(D_arr_0p5, 0.5, size=1) +random_st.vonmises(0.5, D_arr_0p5, size=1) +random_st.vonmises(D_arr_like_0p5, 0.5) +random_st.vonmises(0.5, D_arr_like_0p5) +random_st.vonmises(D_arr_0p5, D_arr_0p5) +random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5) +random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1) +random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.wald(0.5, 0.5) +random_st.wald(0.5, 0.5, size=None) +random_st.wald(0.5, 0.5, size=1) +random_st.wald(D_arr_0p5, 0.5) +random_st.wald(0.5, D_arr_0p5) +random_st.wald(D_arr_0p5, 0.5, size=1) +random_st.wald(0.5, D_arr_0p5, size=1) +random_st.wald(D_arr_like_0p5, 0.5) +random_st.wald(0.5, D_arr_like_0p5) +random_st.wald(D_arr_0p5, D_arr_0p5) +random_st.wald(D_arr_like_0p5, D_arr_like_0p5) +random_st.wald(D_arr_0p5, D_arr_0p5, size=1) +random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.uniform(0.5, 0.5) +random_st.uniform(0.5, 0.5, size=None) +random_st.uniform(0.5, 0.5, size=1) +random_st.uniform(D_arr_0p5, 0.5) +random_st.uniform(0.5, D_arr_0p5) +random_st.uniform(D_arr_0p5, 0.5, size=1) +random_st.uniform(0.5, D_arr_0p5, size=1) +random_st.uniform(D_arr_like_0p5, 0.5) +random_st.uniform(0.5, D_arr_like_0p5) +random_st.uniform(D_arr_0p5, D_arr_0p5) +random_st.uniform(D_arr_like_0p5, D_arr_like_0p5) +random_st.uniform(D_arr_0p5, D_arr_0p5, size=1) +random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.beta(0.5, 0.5) +random_st.beta(0.5, 0.5, size=None) +random_st.beta(0.5, 0.5, size=1) +random_st.beta(D_arr_0p5, 0.5) +random_st.beta(0.5, D_arr_0p5) +random_st.beta(D_arr_0p5, 0.5, size=1) +random_st.beta(0.5, D_arr_0p5, size=1) +random_st.beta(D_arr_like_0p5, 0.5) +random_st.beta(0.5, D_arr_like_0p5) +random_st.beta(D_arr_0p5, D_arr_0p5) +random_st.beta(D_arr_like_0p5, D_arr_like_0p5) +random_st.beta(D_arr_0p5, D_arr_0p5, size=1) +random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.f(0.5, 0.5) +random_st.f(0.5, 0.5, size=None) +random_st.f(0.5, 0.5, size=1) +random_st.f(D_arr_0p5, 0.5) +random_st.f(0.5, D_arr_0p5) +random_st.f(D_arr_0p5, 0.5, size=1) +random_st.f(0.5, D_arr_0p5, size=1) +random_st.f(D_arr_like_0p5, 0.5) +random_st.f(0.5, D_arr_like_0p5) +random_st.f(D_arr_0p5, D_arr_0p5) +random_st.f(D_arr_like_0p5, D_arr_like_0p5) +random_st.f(D_arr_0p5, D_arr_0p5, size=1) +random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.gamma(0.5, 0.5) +random_st.gamma(0.5, 0.5, size=None) +random_st.gamma(0.5, 0.5, size=1) +random_st.gamma(D_arr_0p5, 0.5) +random_st.gamma(0.5, D_arr_0p5) +random_st.gamma(D_arr_0p5, 0.5, size=1) +random_st.gamma(0.5, D_arr_0p5, size=1) +random_st.gamma(D_arr_like_0p5, 0.5) +random_st.gamma(0.5, D_arr_like_0p5) +random_st.gamma(D_arr_0p5, D_arr_0p5) +random_st.gamma(D_arr_like_0p5, D_arr_like_0p5) +random_st.gamma(D_arr_0p5, D_arr_0p5, size=1) +random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.gumbel(0.5, 0.5) +random_st.gumbel(0.5, 0.5, size=None) +random_st.gumbel(0.5, 0.5, size=1) 
+random_st.gumbel(D_arr_0p5, 0.5) +random_st.gumbel(0.5, D_arr_0p5) +random_st.gumbel(D_arr_0p5, 0.5, size=1) +random_st.gumbel(0.5, D_arr_0p5, size=1) +random_st.gumbel(D_arr_like_0p5, 0.5) +random_st.gumbel(0.5, D_arr_like_0p5) +random_st.gumbel(D_arr_0p5, D_arr_0p5) +random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5) +random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1) +random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.laplace(0.5, 0.5) +random_st.laplace(0.5, 0.5, size=None) +random_st.laplace(0.5, 0.5, size=1) +random_st.laplace(D_arr_0p5, 0.5) +random_st.laplace(0.5, D_arr_0p5) +random_st.laplace(D_arr_0p5, 0.5, size=1) +random_st.laplace(0.5, D_arr_0p5, size=1) +random_st.laplace(D_arr_like_0p5, 0.5) +random_st.laplace(0.5, D_arr_like_0p5) +random_st.laplace(D_arr_0p5, D_arr_0p5) +random_st.laplace(D_arr_like_0p5, D_arr_like_0p5) +random_st.laplace(D_arr_0p5, D_arr_0p5, size=1) +random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.logistic(0.5, 0.5) +random_st.logistic(0.5, 0.5, size=None) +random_st.logistic(0.5, 0.5, size=1) +random_st.logistic(D_arr_0p5, 0.5) +random_st.logistic(0.5, D_arr_0p5) +random_st.logistic(D_arr_0p5, 0.5, size=1) +random_st.logistic(0.5, D_arr_0p5, size=1) +random_st.logistic(D_arr_like_0p5, 0.5) +random_st.logistic(0.5, D_arr_like_0p5) +random_st.logistic(D_arr_0p5, D_arr_0p5) +random_st.logistic(D_arr_like_0p5, D_arr_like_0p5) +random_st.logistic(D_arr_0p5, D_arr_0p5, size=1) +random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.lognormal(0.5, 0.5) +random_st.lognormal(0.5, 0.5, size=None) +random_st.lognormal(0.5, 0.5, size=1) +random_st.lognormal(D_arr_0p5, 0.5) +random_st.lognormal(0.5, D_arr_0p5) +random_st.lognormal(D_arr_0p5, 0.5, size=1) +random_st.lognormal(0.5, D_arr_0p5, size=1) +random_st.lognormal(D_arr_like_0p5, 0.5) +random_st.lognormal(0.5, D_arr_like_0p5) +random_st.lognormal(D_arr_0p5, D_arr_0p5) +random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5) +random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1) +random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.noncentral_chisquare(0.5, 0.5) +random_st.noncentral_chisquare(0.5, 0.5, size=None) +random_st.noncentral_chisquare(0.5, 0.5, size=1) +random_st.noncentral_chisquare(D_arr_0p5, 0.5) +random_st.noncentral_chisquare(0.5, D_arr_0p5) +random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1) +random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1) +random_st.noncentral_chisquare(D_arr_like_0p5, 0.5) +random_st.noncentral_chisquare(0.5, D_arr_like_0p5) +random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5) +random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5) +random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1) +random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.normal(0.5, 0.5) +random_st.normal(0.5, 0.5, size=None) +random_st.normal(0.5, 0.5, size=1) +random_st.normal(D_arr_0p5, 0.5) +random_st.normal(0.5, D_arr_0p5) +random_st.normal(D_arr_0p5, 0.5, size=1) +random_st.normal(0.5, D_arr_0p5, size=1) +random_st.normal(D_arr_like_0p5, 0.5) +random_st.normal(0.5, D_arr_like_0p5) +random_st.normal(D_arr_0p5, D_arr_0p5) +random_st.normal(D_arr_like_0p5, D_arr_like_0p5) +random_st.normal(D_arr_0p5, D_arr_0p5, size=1) +random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.triangular(0.1, 0.5, 0.9) +random_st.triangular(0.1, 0.5, 0.9, size=None) +random_st.triangular(0.1, 0.5, 0.9, size=1) +random_st.triangular(D_arr_0p1, 0.5, 0.9) +random_st.triangular(0.1, D_arr_0p5, 
0.9) +random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +random_st.triangular(0.1, D_arr_0p5, 0.9, size=1) +random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9) +random_st.triangular(0.5, D_arr_like_0p5, 0.9) +random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9) +random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9) +random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +random_st.noncentral_f(0.1, 0.5, 0.9) +random_st.noncentral_f(0.1, 0.5, 0.9, size=None) +random_st.noncentral_f(0.1, 0.5, 0.9, size=1) +random_st.noncentral_f(D_arr_0p1, 0.5, 0.9) +random_st.noncentral_f(0.1, D_arr_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1) +random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9) +random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9) +random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +random_st.binomial(10, 0.5) +random_st.binomial(10, 0.5, size=None) +random_st.binomial(10, 0.5, size=1) +random_st.binomial(I_arr_10, 0.5) +random_st.binomial(10, D_arr_0p5) +random_st.binomial(I_arr_10, 0.5, size=1) +random_st.binomial(10, D_arr_0p5, size=1) +random_st.binomial(I_arr_like_10, 0.5) +random_st.binomial(10, D_arr_like_0p5) +random_st.binomial(I_arr_10, D_arr_0p5) +random_st.binomial(I_arr_like_10, D_arr_like_0p5) +random_st.binomial(I_arr_10, D_arr_0p5, size=1) +random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +random_st.negative_binomial(10, 0.5) +random_st.negative_binomial(10, 0.5, size=None) +random_st.negative_binomial(10, 0.5, size=1) +random_st.negative_binomial(I_arr_10, 0.5) +random_st.negative_binomial(10, D_arr_0p5) +random_st.negative_binomial(I_arr_10, 0.5, size=1) +random_st.negative_binomial(10, D_arr_0p5, size=1) +random_st.negative_binomial(I_arr_like_10, 0.5) +random_st.negative_binomial(10, D_arr_like_0p5) +random_st.negative_binomial(I_arr_10, D_arr_0p5) +random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5) +random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1) +random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +random_st.hypergeometric(20, 20, 10) +random_st.hypergeometric(20, 20, 10, size=None) +random_st.hypergeometric(20, 20, 10, size=1) +random_st.hypergeometric(I_arr_20, 20, 10) +random_st.hypergeometric(20, I_arr_20, 10) +random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1) +random_st.hypergeometric(20, I_arr_20, 10, size=1) +random_st.hypergeometric(I_arr_like_20, 20, I_arr_10) +random_st.hypergeometric(20, I_arr_like_20, 10) +random_st.hypergeometric(I_arr_20, I_arr_20, 10) +random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10) +random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1) +random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1) + +random_st.randint(0, 100) +random_st.randint(100) +random_st.randint([100]) +random_st.randint(0, [100]) + +random_st.randint(2, dtype=bool) +random_st.randint(0, 2, dtype=bool) +random_st.randint(I_bool_high_open, dtype=bool) +random_st.randint(I_bool_low, I_bool_high_open, dtype=bool) +random_st.randint(0, I_bool_high_open, dtype=bool) + +random_st.randint(2, dtype=np.bool_) +random_st.randint(0, 2, dtype=np.bool_) +random_st.randint(I_bool_high_open, dtype=np.bool_) 
+random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_) +random_st.randint(0, I_bool_high_open, dtype=np.bool_) + +random_st.randint(256, dtype="u1") +random_st.randint(0, 256, dtype="u1") +random_st.randint(I_u1_high_open, dtype="u1") +random_st.randint(I_u1_low, I_u1_high_open, dtype="u1") +random_st.randint(0, I_u1_high_open, dtype="u1") + +random_st.randint(256, dtype="uint8") +random_st.randint(0, 256, dtype="uint8") +random_st.randint(I_u1_high_open, dtype="uint8") +random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8") +random_st.randint(0, I_u1_high_open, dtype="uint8") + +random_st.randint(256, dtype=np.uint8) +random_st.randint(0, 256, dtype=np.uint8) +random_st.randint(I_u1_high_open, dtype=np.uint8) +random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8) +random_st.randint(0, I_u1_high_open, dtype=np.uint8) + +random_st.randint(65536, dtype="u2") +random_st.randint(0, 65536, dtype="u2") +random_st.randint(I_u2_high_open, dtype="u2") +random_st.randint(I_u2_low, I_u2_high_open, dtype="u2") +random_st.randint(0, I_u2_high_open, dtype="u2") + +random_st.randint(65536, dtype="uint16") +random_st.randint(0, 65536, dtype="uint16") +random_st.randint(I_u2_high_open, dtype="uint16") +random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16") +random_st.randint(0, I_u2_high_open, dtype="uint16") + +random_st.randint(65536, dtype=np.uint16) +random_st.randint(0, 65536, dtype=np.uint16) +random_st.randint(I_u2_high_open, dtype=np.uint16) +random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16) +random_st.randint(0, I_u2_high_open, dtype=np.uint16) + +random_st.randint(4294967296, dtype="u4") +random_st.randint(0, 4294967296, dtype="u4") +random_st.randint(I_u4_high_open, dtype="u4") +random_st.randint(I_u4_low, I_u4_high_open, dtype="u4") +random_st.randint(0, I_u4_high_open, dtype="u4") + +random_st.randint(4294967296, dtype="uint32") +random_st.randint(0, 4294967296, dtype="uint32") +random_st.randint(I_u4_high_open, dtype="uint32") +random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32") +random_st.randint(0, I_u4_high_open, dtype="uint32") + +random_st.randint(4294967296, dtype=np.uint32) +random_st.randint(0, 4294967296, dtype=np.uint32) +random_st.randint(I_u4_high_open, dtype=np.uint32) +random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32) +random_st.randint(0, I_u4_high_open, dtype=np.uint32) + + +random_st.randint(18446744073709551616, dtype="u8") +random_st.randint(0, 18446744073709551616, dtype="u8") +random_st.randint(I_u8_high_open, dtype="u8") +random_st.randint(I_u8_low, I_u8_high_open, dtype="u8") +random_st.randint(0, I_u8_high_open, dtype="u8") + +random_st.randint(18446744073709551616, dtype="uint64") +random_st.randint(0, 18446744073709551616, dtype="uint64") +random_st.randint(I_u8_high_open, dtype="uint64") +random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64") +random_st.randint(0, I_u8_high_open, dtype="uint64") + +random_st.randint(18446744073709551616, dtype=np.uint64) +random_st.randint(0, 18446744073709551616, dtype=np.uint64) +random_st.randint(I_u8_high_open, dtype=np.uint64) +random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64) +random_st.randint(0, I_u8_high_open, dtype=np.uint64) + +random_st.randint(128, dtype="i1") +random_st.randint(-128, 128, dtype="i1") +random_st.randint(I_i1_high_open, dtype="i1") +random_st.randint(I_i1_low, I_i1_high_open, dtype="i1") +random_st.randint(-128, I_i1_high_open, dtype="i1") + +random_st.randint(128, dtype="int8") +random_st.randint(-128, 128, dtype="int8") 
+random_st.randint(I_i1_high_open, dtype="int8") +random_st.randint(I_i1_low, I_i1_high_open, dtype="int8") +random_st.randint(-128, I_i1_high_open, dtype="int8") + +random_st.randint(128, dtype=np.int8) +random_st.randint(-128, 128, dtype=np.int8) +random_st.randint(I_i1_high_open, dtype=np.int8) +random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8) +random_st.randint(-128, I_i1_high_open, dtype=np.int8) + +random_st.randint(32768, dtype="i2") +random_st.randint(-32768, 32768, dtype="i2") +random_st.randint(I_i2_high_open, dtype="i2") +random_st.randint(I_i2_low, I_i2_high_open, dtype="i2") +random_st.randint(-32768, I_i2_high_open, dtype="i2") +random_st.randint(32768, dtype="int16") +random_st.randint(-32768, 32768, dtype="int16") +random_st.randint(I_i2_high_open, dtype="int16") +random_st.randint(I_i2_low, I_i2_high_open, dtype="int16") +random_st.randint(-32768, I_i2_high_open, dtype="int16") +random_st.randint(32768, dtype=np.int16) +random_st.randint(-32768, 32768, dtype=np.int16) +random_st.randint(I_i2_high_open, dtype=np.int16) +random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16) +random_st.randint(-32768, I_i2_high_open, dtype=np.int16) + +random_st.randint(2147483648, dtype="i4") +random_st.randint(-2147483648, 2147483648, dtype="i4") +random_st.randint(I_i4_high_open, dtype="i4") +random_st.randint(I_i4_low, I_i4_high_open, dtype="i4") +random_st.randint(-2147483648, I_i4_high_open, dtype="i4") + +random_st.randint(2147483648, dtype="int32") +random_st.randint(-2147483648, 2147483648, dtype="int32") +random_st.randint(I_i4_high_open, dtype="int32") +random_st.randint(I_i4_low, I_i4_high_open, dtype="int32") +random_st.randint(-2147483648, I_i4_high_open, dtype="int32") + +random_st.randint(2147483648, dtype=np.int32) +random_st.randint(-2147483648, 2147483648, dtype=np.int32) +random_st.randint(I_i4_high_open, dtype=np.int32) +random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32) +random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32) + +random_st.randint(9223372036854775808, dtype="i8") +random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8") +random_st.randint(I_i8_high_open, dtype="i8") +random_st.randint(I_i8_low, I_i8_high_open, dtype="i8") +random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8") + +random_st.randint(9223372036854775808, dtype="int64") +random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64") +random_st.randint(I_i8_high_open, dtype="int64") +random_st.randint(I_i8_low, I_i8_high_open, dtype="int64") +random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64") + +random_st.randint(9223372036854775808, dtype=np.int64) +random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64) +random_st.randint(I_i8_high_open, dtype=np.int64) +random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64) +random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64) + +bg: np.random.BitGenerator = random_st._bit_generator + +random_st.bytes(2) + +random_st.choice(5) +random_st.choice(5, 3) +random_st.choice(5, 3, replace=True) +random_st.choice(5, 3, p=[1 / 5] * 5) +random_st.choice(5, 3, p=[1 / 5] * 5, replace=False) + +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"]) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True) +random_st.choice(["pooh", "rabbit", "piglet", 
"Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])) + +random_st.dirichlet([0.5, 0.5]) +random_st.dirichlet(np.array([0.5, 0.5])) +random_st.dirichlet(np.array([0.5, 0.5]), size=3) + +random_st.multinomial(20, [1 / 6.0] * 6) +random_st.multinomial(20, np.array([0.5, 0.5])) +random_st.multinomial(20, [1 / 6.0] * 6, size=2) + +random_st.multivariate_normal([0.0], [[1.0]]) +random_st.multivariate_normal([0.0], np.array([[1.0]])) +random_st.multivariate_normal(np.array([0.0]), [[1.0]]) +random_st.multivariate_normal([0.0], np.array([[1.0]])) + +random_st.permutation(10) +random_st.permutation([1, 2, 3, 4]) +random_st.permutation(np.array([1, 2, 3, 4])) +random_st.permutation(D_2D) + +random_st.shuffle(np.arange(10)) +random_st.shuffle([1, 2, 3, 4, 5]) +random_st.shuffle(D_2D) + +np.random.RandomState(SEED_PCG64) +np.random.RandomState(0) +np.random.RandomState([0,1,2]) +random_st.__str__() +random_st.__repr__() +random_st_state = random_st.__getstate__() +random_st.__setstate__(random_st_state) +random_st.seed() +random_st.seed(1) +random_st.seed([0,1]) +random_st_get_state = random_st.get_state() +random_st_get_state_legacy = random_st.get_state(legacy=True) +random_st.set_state(random_st_get_state) + +random_st.rand() +random_st.rand(1) +random_st.rand(1, 2) +random_st.randn() +random_st.randn(1) +random_st.randn(1, 2) +random_st.random_sample() +random_st.random_sample(1) +random_st.random_sample(size=(1,2)) + +random_st.tomaxint() +random_st.tomaxint(1) +random_st.tomaxint((1,)) + \ No newline at end of file diff --git a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.py index 7c731b482371..ebac7345cf72 100644 --- a/numpy/typing/tests/data/reveal/random.py +++ b/numpy/typing/tests/data/reveal/random.py @@ -918,3 +918,587 @@ def_gen_state = def_gen.__getstate__() reveal_type(def_gen_state) # E: builtins.dict[builtins.str, Any] reveal_type(def_gen.__setstate__(def_gen_state)) # E: None + +# RandomState +random_st: np.random.RandomState = np.random.RandomState() + +reveal_type(random_st.standard_normal()) # E: float +reveal_type(random_st.standard_normal(size=None)) # E: float +reveal_type(random_st.standard_normal(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(random_st.random()) # E: float +reveal_type(random_st.random(size=None)) # E: float +reveal_type(random_st.random(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(random_st.standard_cauchy()) # E: float +reveal_type(random_st.standard_cauchy(size=None)) # E: float +reveal_type(random_st.standard_cauchy(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.standard_exponential()) # E: float +reveal_type(random_st.standard_exponential(size=None)) # E: float +reveal_type(random_st.standard_exponential(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(random_st.zipf(1.5)) # E: int +reveal_type(random_st.zipf(1.5, size=None)) # E: int +reveal_type(random_st.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] 
+reveal_type(random_st.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.weibull(0.5)) # E: float +reveal_type(random_st.weibull(0.5, size=None)) # E: float +reveal_type(random_st.weibull(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.weibull(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.weibull(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.weibull(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.weibull(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.standard_t(0.5)) # E: float +reveal_type(random_st.standard_t(0.5, size=None)) # E: float +reveal_type(random_st.standard_t(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.standard_t(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.standard_t(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.standard_t(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.standard_t(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.poisson(0.5)) # E: int +reveal_type(random_st.poisson(0.5, size=None)) # E: int +reveal_type(random_st.poisson(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.poisson(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.power(0.5)) # E: float +reveal_type(random_st.power(0.5, size=None)) # E: float +reveal_type(random_st.power(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.power(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.power(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.power(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.power(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.pareto(0.5)) # E: float +reveal_type(random_st.pareto(0.5, size=None)) # E: float +reveal_type(random_st.pareto(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.pareto(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.pareto(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.pareto(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(random_st.pareto(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.chisquare(0.5)) # E: float +reveal_type(random_st.chisquare(0.5, size=None)) # E: float +reveal_type(random_st.chisquare(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.chisquare(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.chisquare(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.chisquare(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.chisquare(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.exponential(0.5)) # E: float +reveal_type(random_st.exponential(0.5, size=None)) # E: float +reveal_type(random_st.exponential(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.exponential(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.exponential(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.exponential(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.exponential(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.geometric(0.5)) # E: int +reveal_type(random_st.geometric(0.5, size=None)) # E: int +reveal_type(random_st.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.logseries(0.5)) # E: int +reveal_type(random_st.logseries(0.5, size=None)) # E: int +reveal_type(random_st.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.rayleigh(0.5)) # E: float +reveal_type(random_st.rayleigh(0.5, size=None)) # E: float +reveal_type(random_st.rayleigh(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.rayleigh(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.rayleigh(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.rayleigh(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(random_st.rayleigh(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.standard_gamma(0.5)) # E: float +reveal_type(random_st.standard_gamma(0.5, size=None)) # E: float +reveal_type(random_st.standard_gamma(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(random_st.vonmises(0.5, 0.5)) # E: float +reveal_type(random_st.vonmises(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.vonmises(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.wald(0.5, 0.5)) # E: float +reveal_type(random_st.wald(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.wald(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.uniform(0.5, 0.5)) # E: float +reveal_type(random_st.uniform(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.uniform(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.beta(0.5, 0.5)) # E: float +reveal_type(random_st.beta(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.beta(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.f(0.5, 0.5)) # E: float +reveal_type(random_st.f(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.f(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.gamma(0.5, 0.5)) # E: float +reveal_type(random_st.gamma(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.gamma(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.gumbel(0.5, 0.5)) # E: float +reveal_type(random_st.gumbel(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.gumbel(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(random_st.gumbel(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.laplace(0.5, 0.5)) # E: float +reveal_type(random_st.laplace(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.laplace(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.logistic(0.5, 0.5)) # E: float +reveal_type(random_st.logistic(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.logistic(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.lognormal(0.5, 0.5)) # E: float +reveal_type(random_st.lognormal(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.lognormal(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.noncentral_chisquare(0.5, 0.5)) # E: float +reveal_type(random_st.noncentral_chisquare(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.noncentral_chisquare(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.normal(0.5, 0.5)) # E: float +reveal_type(random_st.normal(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.normal(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.triangular(0.1, 0.5, 0.9)) # E: float +reveal_type(random_st.triangular(0.1, 0.5, 0.9, size=None)) # E: float +reveal_type(random_st.triangular(0.1, 0.5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_0p1, 0.5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(0.1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9)) # E: float +reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None)) # E: float +reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.binomial(10, 0.5)) # E: int +reveal_type(random_st.binomial(10, 0.5, size=None)) # E: int +reveal_type(random_st.binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.negative_binomial(10, 0.5)) # E: int +reveal_type(random_st.negative_binomial(10, 0.5, size=None)) # E: int +reveal_type(random_st.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] 
+reveal_type(random_st.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.hypergeometric(20, 20, 10)) # E: int +reveal_type(random_st.hypergeometric(20, 20, 10, size=None)) # E: int +reveal_type(random_st.hypergeometric(20, 20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.randint(0, 100)) # E: int +reveal_type(random_st.randint(100)) # E: int +reveal_type(random_st.randint([100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.randint(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.randint(2, dtype=bool)) # E: builtins.bool +reveal_type(random_st.randint(0, 2, dtype=bool)) # E: builtins.bool +reveal_type(random_st.randint(I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(random_st.randint(0, I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] + +reveal_type(random_st.randint(2, 
dtype=np.bool_)) # E: builtins.bool +reveal_type(random_st.randint(0, 2, dtype=np.bool_)) # E: builtins.bool +reveal_type(random_st.randint(I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(random_st.randint(0, I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] + +reveal_type(random_st.randint(256, dtype="u1")) # E: int +reveal_type(random_st.randint(0, 256, dtype="u1")) # E: int +reveal_type(random_st.randint(I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(0, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] + +reveal_type(random_st.randint(256, dtype="uint8")) # E: int +reveal_type(random_st.randint(0, 256, dtype="uint8")) # E: int +reveal_type(random_st.randint(I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(0, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] + +reveal_type(random_st.randint(256, dtype=np.uint8)) # E: int +reveal_type(random_st.randint(0, 256, dtype=np.uint8)) # E: int +reveal_type(random_st.randint(I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] + +reveal_type(random_st.randint(65536, dtype="u2")) # E: int +reveal_type(random_st.randint(0, 65536, dtype="u2")) # E: int +reveal_type(random_st.randint(I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(0, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] + +reveal_type(random_st.randint(65536, dtype="uint16")) # E: int +reveal_type(random_st.randint(0, 65536, dtype="uint16")) # E: int +reveal_type(random_st.randint(I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(0, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] + +reveal_type(random_st.randint(65536, dtype=np.uint16)) # E: int +reveal_type(random_st.randint(0, 65536, dtype=np.uint16)) # E: int +reveal_type(random_st.randint(I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] + +reveal_type(random_st.randint(4294967296, dtype="u4")) # E: int +reveal_type(random_st.randint(0, 4294967296, dtype="u4")) # E: int +reveal_type(random_st.randint(I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] + +reveal_type(random_st.randint(4294967296, dtype="uint32")) # E: int +reveal_type(random_st.randint(0, 4294967296, dtype="uint32")) # E: int +reveal_type(random_st.randint(I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] + +reveal_type(random_st.randint(4294967296, dtype=np.uint32)) # E: int +reveal_type(random_st.randint(0, 4294967296, dtype=np.uint32)) # E: int +reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] + + +reveal_type(random_st.randint(18446744073709551616, dtype="u8")) # E: int +reveal_type(random_st.randint(0, 18446744073709551616, dtype="u8")) # E: int +reveal_type(random_st.randint(I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(0, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] + +reveal_type(random_st.randint(18446744073709551616, dtype="uint64")) # E: int +reveal_type(random_st.randint(0, 18446744073709551616, dtype="uint64")) # E: int +reveal_type(random_st.randint(I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(0, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] + +reveal_type(random_st.randint(18446744073709551616, dtype=np.uint64)) # E: int +reveal_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64)) # E: int +reveal_type(random_st.randint(I_u8_high_open, dtype=np.uint64)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] + +reveal_type(random_st.randint(128, dtype="i1")) # E: int +reveal_type(random_st.randint(-128, 128, dtype="i1")) # E: int +reveal_type(random_st.randint(I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(-128, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] + +reveal_type(random_st.randint(128, dtype="int8")) # E: int +reveal_type(random_st.randint(-128, 128, dtype="int8")) # E: int +reveal_type(random_st.randint(I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(-128, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] + +reveal_type(random_st.randint(128, dtype=np.int8)) # E: int +reveal_type(random_st.randint(-128, 128, dtype=np.int8)) # E: int +reveal_type(random_st.randint(I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] + +reveal_type(random_st.randint(32768, dtype="i2")) # E: int +reveal_type(random_st.randint(-32768, 32768, dtype="i2")) # E: int +reveal_type(random_st.randint(I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(32768, dtype="int16")) # E: int +reveal_type(random_st.randint(-32768, 32768, dtype="int16")) # E: int +reveal_type(random_st.randint(I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(32768, dtype=np.int16)) # E: int +reveal_type(random_st.randint(-32768, 32768, dtype=np.int16)) # E: int +reveal_type(random_st.randint(I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] + +reveal_type(random_st.randint(2147483648, dtype="i4")) # E: int +reveal_type(random_st.randint(-2147483648, 2147483648, dtype="i4")) # E: int +reveal_type(random_st.randint(I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] + +reveal_type(random_st.randint(2147483648, dtype="int32")) # E: int +reveal_type(random_st.randint(-2147483648, 2147483648, dtype="int32")) # E: int +reveal_type(random_st.randint(I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] + +reveal_type(random_st.randint(2147483648, dtype=np.int32)) # E: int +reveal_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32)) # E: int +reveal_type(random_st.randint(I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] + +reveal_type(random_st.randint(9223372036854775808, dtype="i8")) # E: int +reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int +reveal_type(random_st.randint(I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(random_st.randint(9223372036854775808, dtype="int64")) # E: int +reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int +reveal_type(random_st.randint(I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(random_st.randint(9223372036854775808, dtype=np.int64)) # E: int +reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int +reveal_type(random_st.randint(I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] + +reveal_type(random_st._bit_generator) # E: BitGenerator + +reveal_type(random_st.bytes(2)) # E: bytes + +reveal_type(random_st.choice(5)) # E: int +reveal_type(random_st.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.choice(5, 3, replace=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: numpy.ndarray[Any, Any] +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)) # E: numpy.ndarray[Any, Any] +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)) # E: numpy.ndarray[Any, Any] +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))) # E: numpy.ndarray[Any, Any] + +reveal_type(random_st.dirichlet([0.5, 0.5])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.dirichlet(np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + +reveal_type(random_st.multivariate_normal([0.0], [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.permutation(10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.permutation([1, 2, 3, 4])) # E: numpy.ndarray[Any, Any] +reveal_type(random_st.permutation(np.array([1, 2, 3, 4]))) # E: numpy.ndarray[Any, Any] +reveal_type(random_st.permutation(D_2D)) # E: numpy.ndarray[Any, Any] + +reveal_type(random_st.shuffle(np.arange(10))) # E: None +reveal_type(random_st.shuffle([1, 2, 3, 4, 5])) # E: None +reveal_type(random_st.shuffle(D_2D)) # E: None + +reveal_type(np.random.RandomState(pcg64)) # E: RandomState +reveal_type(np.random.RandomState(0)) # E: RandomState 
+reveal_type(np.random.RandomState([0,1,2])) # E: RandomState +reveal_type(random_st.__str__()) # E: str +reveal_type(random_st.__repr__()) # E: str +random_st_state = random_st.__getstate__() +reveal_type(random_st_state) # E: builtins.dict[builtins.str, Any] +reveal_type(random_st.__setstate__(random_st_state)) # E: None +reveal_type(random_st.seed()) # E: None +reveal_type(random_st.seed(1)) # E: None +reveal_type(random_st.seed([0,1])) # E: None +random_st_get_state = random_st.get_state() +reveal_type(random_st_state) # E: builtins.dict[builtins.str, Any] +random_st_get_state_legacy = random_st.get_state(legacy=True) +reveal_type(random_st_get_state_legacy) # E: Union[builtins.dict[builtins.str, Any], Tuple[builtins.str, numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]], builtins.int, builtins.int, builtins.float]] +reveal_type(random_st.set_state(random_st_get_state)) # E: None + +reveal_type(random_st.rand()) # E: float +reveal_type(random_st.rand(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.rand(1, 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.randn()) # E: float +reveal_type(random_st.randn(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.randn(1, 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.random_sample()) # E: float +reveal_type(random_st.random_sample(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.random_sample(size=(1,2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.tomaxint()) # E: int +reveal_type(random_st.tomaxint(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.tomaxint((1,))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] + \ No newline at end of file From a05d5db2109d2d5afda40cb790b66cfb50a4708a Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Sat, 20 Feb 2021 01:20:28 +0000 Subject: [PATCH 0636/1270] BUG: Fix typing bugs --- numpy/random/mtrand.pyi | 2 +- numpy/typing/tests/data/pass/random.py | 11 +- numpy/typing/tests/data/reveal/random.py | 141 +++++++++++------------ 3 files changed, 76 insertions(+), 78 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index f088a3929899..8c5391b6e68d 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -529,7 +529,7 @@ class RandomState: def dirichlet( self, alpha: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... ) -> ndarray[Any, dtype[float64]]: ... - def shuffle(self, x: ArrayLike) -> Sequence[Any]: ... + def shuffle(self, x: ArrayLike) -> None: ... @overload def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ... 
@overload diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index c762f085a20a..05bd62112ff2 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -1470,16 +1470,16 @@ np.random.RandomState(SEED_PCG64) np.random.RandomState(0) -np.random.RandomState([0,1,2]) +np.random.RandomState([0, 1, 2]) random_st.__str__() random_st.__repr__() random_st_state = random_st.__getstate__() random_st.__setstate__(random_st_state) random_st.seed() random_st.seed(1) -random_st.seed([0,1]) -random_st_get_state = random_st.get_state() -random_st_get_state_legacy = random_st.get_state(legacy=True) +random_st.seed([0, 1]) +random_st_get_state = random_st.get_state() +random_st_get_state_legacy = random_st.get_state(legacy=True) random_st.set_state(random_st_get_state) random_st.rand() @@ -1490,9 +1490,8 @@ random_st.randn(1, 2) random_st.random_sample() random_st.random_sample(1) -random_st.random_sample(size=(1,2)) +random_st.random_sample(size=(1, 2)) random_st.tomaxint() random_st.tomaxint(1) random_st.tomaxint((1,)) - \ No newline at end of file diff --git a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.py index ebac7345cf72..6f8ce4f2d631 100644 --- a/numpy/typing/tests/data/reveal/random.py +++ b/numpy/typing/tests/data/reveal/random.py @@ -940,11 +940,11 @@ reveal_type(random_st.zipf(1.5)) # E: int reveal_type(random_st.zipf(1.5, size=None)) # E: int -reveal_type(random_st.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.weibull(0.5)) # E: float reveal_type(random_st.weibull(0.5, size=None)) # E: float @@ -964,11 +964,11 @@ reveal_type(random_st.poisson(0.5)) # E: int reveal_type(random_st.poisson(0.5, size=None)) # E: int -reveal_type(random_st.poisson(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.poisson(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.poisson(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] 
+reveal_type(random_st.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.poisson(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.power(0.5)) # E: float reveal_type(random_st.power(0.5, size=None)) # E: float @@ -1004,19 +1004,19 @@ reveal_type(random_st.geometric(0.5)) # E: int reveal_type(random_st.geometric(0.5, size=None)) # E: int -reveal_type(random_st.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.logseries(0.5)) # E: int reveal_type(random_st.logseries(0.5, size=None)) # E: int -reveal_type(random_st.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.rayleigh(0.5)) # E: float reveal_type(random_st.rayleigh(0.5, size=None)) # E: float @@ -1233,50 +1233,50 @@ reveal_type(random_st.binomial(10, 0.5)) # E: int reveal_type(random_st.binomial(10, 0.5, size=None)) # E: int -reveal_type(random_st.binomial(10, 0.5, 
size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.negative_binomial(10, 0.5)) # E: int reveal_type(random_st.negative_binomial(10, 0.5, size=None)) # E: int -reveal_type(random_st.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(I_arr_like_10, 0.5)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.hypergeometric(20, 20, 10)) # E: int reveal_type(random_st.hypergeometric(20, 20, 10, size=None)) # E: int -reveal_type(random_st.hypergeometric(20, 20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.hypergeometric(20, 20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.randint(0, 100)) # E: int reveal_type(random_st.randint(100)) # E: int -reveal_type(random_st.randint([100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.randint(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.randint([100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.randint(2, dtype=bool)) # E: builtins.bool reveal_type(random_st.randint(0, 2, dtype=bool)) # E: builtins.bool @@ -1438,10 +1438,10 @@ reveal_type(random_st.bytes(2)) # E: bytes reveal_type(random_st.choice(5)) # E: int -reveal_type(random_st.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.choice(5, 3, replace=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.choice(5, 3, replace=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: numpy.ndarray[Any, Any] @@ -1453,16 +1453,16 @@ reveal_type(random_st.dirichlet(np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(random_st.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.multivariate_normal([0.0], [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(random_st.permutation(10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] +reveal_type(random_st.permutation(10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.permutation([1, 2, 3, 4])) # E: numpy.ndarray[Any, Any] reveal_type(random_st.permutation(np.array([1, 2, 3, 4]))) # E: numpy.ndarray[Any, Any] reveal_type(random_st.permutation(D_2D)) # E: numpy.ndarray[Any, Any] @@ -1473,7 +1473,7 @@ reveal_type(np.random.RandomState(pcg64)) # E: RandomState reveal_type(np.random.RandomState(0)) # E: RandomState -reveal_type(np.random.RandomState([0,1,2])) # E: RandomState +reveal_type(np.random.RandomState([0, 1, 2])) # E: RandomState reveal_type(random_st.__str__()) # E: str reveal_type(random_st.__repr__()) # E: str random_st_state = random_st.__getstate__() @@ -1481,10 +1481,10 @@ reveal_type(random_st.__setstate__(random_st_state)) # E: None reveal_type(random_st.seed()) # E: None reveal_type(random_st.seed(1)) # E: None -reveal_type(random_st.seed([0,1])) # E: None -random_st_get_state = random_st.get_state() +reveal_type(random_st.seed([0, 1])) # E: None +random_st_get_state = random_st.get_state() reveal_type(random_st_state) # E: builtins.dict[builtins.str, Any] -random_st_get_state_legacy = random_st.get_state(legacy=True) +random_st_get_state_legacy = random_st.get_state(legacy=True) reveal_type(random_st_get_state_legacy) # E: Union[builtins.dict[builtins.str, Any], Tuple[builtins.str, numpy.ndarray[Any, 
numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]], builtins.int, builtins.int, builtins.float]] reveal_type(random_st.set_state(random_st_get_state)) # E: None @@ -1496,9 +1496,8 @@ reveal_type(random_st.randn(1, 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.random_sample()) # E: float reveal_type(random_st.random_sample(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(random_st.random_sample(size=(1,2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.random_sample(size=(1, 2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.tomaxint()) # E: int -reveal_type(random_st.tomaxint(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] -reveal_type(random_st.tomaxint((1,))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]] - \ No newline at end of file +reveal_type(random_st.tomaxint(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.tomaxint((1,))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] From 3b561dd8de08f94aae49155b03d3a1e5fefdd658 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Sat, 20 Feb 2021 23:09:13 +0000 Subject: [PATCH 0637/1270] Small fixes --- numpy/random/_generator.pyi | 3 +-- numpy/random/bit_generator.pyi | 2 +- numpy/random/mtrand.pyi | 6 ++---- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 51a231049bc8..6b1a6f4af90c 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,5 +1,5 @@ import sys -from typing import Any, Callable, Dict, Literal, Optional, Sequence, Tuple, Type, Union, overload +from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload from numpy import ( bool_, @@ -23,7 +23,6 @@ from numpy.typing import ( ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _BoolCodes, _DoubleCodes, _DTypeLikeBool, _DTypeLikeInt, diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 7f066dbfaec0..5b68dde6ccbf 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -19,7 +19,7 @@ from typing import ( ) from numpy import dtype, ndarray, uint32, uint64 -from numpy.typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt64Codes, _UInt32Codes +from numpy.typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes if sys.version_info >= (3, 8): from typing import Literal diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 8c5391b6e68d..fc61f7b5013f 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -1,6 +1,5 @@ -from numpy.random.bit_generator import BitGenerator import sys -from typing import Any, Callable, Dict, Literal, Optional, Sequence, Tuple, Type, Union, overload +from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload from numpy import ( bool_, @@ -19,12 +18,11 @@ from numpy import ( uint32, uint64, ) -from numpy.random import BitGenerator, SeedSequence +from numpy.random.bit_generator import BitGenerator from numpy.typing import ( ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _BoolCodes, _DoubleCodes, _DTypeLikeBool, _DTypeLikeInt, From 4e3d7ed9bb6df109d4b9cc9d5abcb84cc53a9ce1 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Sun, 21 Feb 2021 01:12:59 +0000 Subject: [PATCH 0638/1270] Use 
{int_} for int return types --- numpy/random/mtrand.pyi | 2 +- numpy/typing/tests/data/reveal/random.py | 130 +++++++++++------------ 2 files changed, 66 insertions(+), 66 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index fc61f7b5013f..814c294ac55d 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -264,7 +264,7 @@ class RandomState: size: _ShapeLike = ..., replace: bool = ..., p: Optional[_ArrayLikeFloat_co] = ..., - ) -> ndarray[Any, dtype[int64]]: ... + ) -> ndarray[Any, dtype[int_]]: ... @overload def choice( self, diff --git a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.py index 6f8ce4f2d631..0c4cd255878a 100644 --- a/numpy/typing/tests/data/reveal/random.py +++ b/numpy/typing/tests/data/reveal/random.py @@ -940,11 +940,11 @@ reveal_type(random_st.zipf(1.5)) # E: int reveal_type(random_st.zipf(1.5, size=None)) # E: int -reveal_type(random_st.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.weibull(0.5)) # E: float reveal_type(random_st.weibull(0.5, size=None)) # E: float @@ -964,11 +964,11 @@ reveal_type(random_st.poisson(0.5)) # E: int reveal_type(random_st.poisson(0.5, size=None)) # E: int -reveal_type(random_st.poisson(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.poisson(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.poisson(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.poisson(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.power(0.5)) # E: float reveal_type(random_st.power(0.5, size=None)) # E: float @@ -1004,19 +1004,19 @@ reveal_type(random_st.geometric(0.5)) # E: int reveal_type(random_st.geometric(0.5, size=None)) 
# E: int -reveal_type(random_st.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.logseries(0.5)) # E: int reveal_type(random_st.logseries(0.5, size=None)) # E: int -reveal_type(random_st.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.rayleigh(0.5)) # E: float reveal_type(random_st.rayleigh(0.5, size=None)) # E: float @@ -1233,50 +1233,50 @@ reveal_type(random_st.binomial(10, 0.5)) # E: int reveal_type(random_st.binomial(10, 0.5, size=None)) # E: int -reveal_type(random_st.binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] 
-reveal_type(random_st.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.negative_binomial(10, 0.5)) # E: int reveal_type(random_st.negative_binomial(10, 0.5, size=None)) # E: int -reveal_type(random_st.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_10, 0.5)) # 
E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.hypergeometric(20, 20, 10)) # E: int reveal_type(random_st.hypergeometric(20, 20, 10, size=None)) # E: int -reveal_type(random_st.hypergeometric(20, 20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.hypergeometric(20, 20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] 
+reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.randint(0, 100)) # E: int reveal_type(random_st.randint(100)) # E: int -reveal_type(random_st.randint([100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.randint(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint([100])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.randint(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.randint(2, dtype=bool)) # E: builtins.bool reveal_type(random_st.randint(0, 2, dtype=bool)) # E: builtins.bool @@ -1438,10 +1438,10 @@ reveal_type(random_st.bytes(2)) # E: bytes reveal_type(random_st.choice(5)) # E: int -reveal_type(random_st.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.choice(5, 3, replace=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.choice(5, 3, replace=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: numpy.ndarray[Any, Any] @@ -1453,16 +1453,16 @@ reveal_type(random_st.dirichlet(np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(random_st.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.multivariate_normal([0.0], [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # 
E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(random_st.permutation(10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.permutation(10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.permutation([1, 2, 3, 4])) # E: numpy.ndarray[Any, Any] reveal_type(random_st.permutation(np.array([1, 2, 3, 4]))) # E: numpy.ndarray[Any, Any] reveal_type(random_st.permutation(D_2D)) # E: numpy.ndarray[Any, Any] @@ -1499,5 +1499,5 @@ reveal_type(random_st.random_sample(size=(1, 2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(random_st.tomaxint()) # E: int -reveal_type(random_st.tomaxint(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(random_st.tomaxint((1,))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.tomaxint(1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.tomaxint((1,))) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] From c282c31af54f7b67286ef3767966bede5c51b494 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Sun, 21 Feb 2021 20:23:38 +0000 Subject: [PATCH 0639/1270] TST: Add final tests --- numpy/typing/tests/data/reveal/random.py | 658 ++++++++++++----------- 1 file changed, 347 insertions(+), 311 deletions(-) diff --git a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.py index 0c4cd255878a..6fc35aced713 100644 --- a/numpy/typing/tests/data/reveal/random.py +++ b/numpy/typing/tests/data/reveal/random.py @@ -152,11 +152,11 @@ reveal_type(def_gen.zipf(1.5)) # E: int reveal_type(def_gen.zipf(1.5, size=None)) # E: int -reveal_type(def_gen.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.weibull(0.5)) # E: float reveal_type(def_gen.weibull(0.5, size=None)) # E: float @@ -176,11 +176,11 @@ reveal_type(def_gen.poisson(0.5)) # E: int reveal_type(def_gen.poisson(0.5, size=None)) # E: int -reveal_type(def_gen.poisson(0.5, size=1)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.poisson(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.poisson(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.poisson(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.power(0.5)) # E: float reveal_type(def_gen.power(0.5, size=None)) # E: float @@ -216,19 +216,19 @@ reveal_type(def_gen.geometric(0.5)) # E: int reveal_type(def_gen.geometric(0.5, size=None)) # E: int -reveal_type(def_gen.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.logseries(0.5)) # E: int reveal_type(def_gen.logseries(0.5, size=None)) # E: int -reveal_type(def_gen.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] 
+reveal_type(def_gen.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.rayleigh(0.5)) # E: float reveal_type(def_gen.rayleigh(0.5, size=None)) # E: float @@ -453,52 +453,52 @@ reveal_type(def_gen.binomial(10, 0.5)) # E: int reveal_type(def_gen.binomial(10, 0.5, size=None)) # E: int -reveal_type(def_gen.binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] 
+reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.negative_binomial(10, 0.5)) # E: int reveal_type(def_gen.negative_binomial(10, 0.5, size=None)) # E: int -reveal_type(def_gen.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.hypergeometric(20, 20, 10)) # E: int reveal_type(def_gen.hypergeometric(20, 20, 10, size=None)) # E: int -reveal_type(def_gen.hypergeometric(20, 
20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.hypergeometric(20, 20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64) reveal_type(def_gen.integers(0, 100)) # E: int reveal_type(def_gen.integers(100)) # E: int -reveal_type(def_gen.integers([100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] 
+reveal_type(def_gen.integers([100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_) I_bool_low_like: List[int] = [0] @@ -538,37 +538,37 @@ reveal_type(def_gen.integers(0, 256, dtype="u1")) # E: int reveal_type(def_gen.integers(255, dtype="u1", endpoint=True)) # E: int reveal_type(def_gen.integers(0, 255, dtype="u1", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(0, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] reveal_type(def_gen.integers(256, dtype="uint8")) # E: int reveal_type(def_gen.integers(0, 256, dtype="uint8")) # E: int reveal_type(def_gen.integers(255, dtype="uint8", endpoint=True)) # E: int reveal_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(0, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] reveal_type(def_gen.integers(256, dtype=np.uint8)) # E: int reveal_type(def_gen.integers(0, 256, dtype=np.uint8)) # E: int reveal_type(def_gen.integers(255, dtype=np.uint8, endpoint=True)) # E: int reveal_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16) I_u2_low_like: List[int] = [0] @@ -579,78 +579,103 @@ reveal_type(def_gen.integers(0, 65536, dtype="u2")) # E: int reveal_type(def_gen.integers(65535, dtype="u2", endpoint=True)) # E: int reveal_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(0, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] reveal_type(def_gen.integers(65536, dtype="uint16")) # E: int reveal_type(def_gen.integers(0, 65536, dtype="uint16")) # E: int reveal_type(def_gen.integers(65535, dtype="uint16", endpoint=True)) # E: int reveal_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_high_open, dtype="uint16")) # 
E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(0, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] reveal_type(def_gen.integers(65536, dtype=np.uint16)) # E: int reveal_type(def_gen.integers(0, 65536, dtype=np.uint16)) # E: int reveal_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True)) # E: int reveal_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] 
+reveal_type(def_gen.integers(I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) I_u4_low_like: List[int] = [0] I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) +reveal_type(def_gen.integers(4294967296, dtype=np.int_)) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype=np.int_)) # E: int +reveal_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(I_u4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + + reveal_type(def_gen.integers(4294967296, dtype="u4")) # E: int reveal_type(def_gen.integers(0, 4294967296, dtype="u4")) # E: int reveal_type(def_gen.integers(4294967295, dtype="u4", endpoint=True)) # E: int reveal_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(0, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] reveal_type(def_gen.integers(4294967296, dtype="uint32")) # E: int reveal_type(def_gen.integers(0, 4294967296, dtype="uint32")) # E: int reveal_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True)) # E: int reveal_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(0, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, 
I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] reveal_type(def_gen.integers(4294967296, dtype=np.uint32)) # E: int reveal_type(def_gen.integers(0, 4294967296, dtype=np.uint32)) # E: int reveal_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True)) # E: int reveal_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] + +reveal_type(def_gen.integers(4294967296, dtype=np.uint)) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype=np.uint)) # E: int +reveal_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(0, I_u4_high_open, 
dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) I_u8_low_like: List[int] = [0] @@ -661,37 +686,37 @@ reveal_type(def_gen.integers(0, 18446744073709551616, dtype="u8")) # E: int reveal_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True)) # E: int reveal_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(0, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.integers(18446744073709551616, dtype="uint64")) # E: int reveal_type(def_gen.integers(0, 18446744073709551616, dtype="uint64")) # E: int reveal_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True)) # E: int reveal_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] 
-reveal_type(def_gen.integers(I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(0, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.integers(18446744073709551616, dtype=np.uint64)) # E: int reveal_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64)) # E: int reveal_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int reveal_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] 
+reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8) I_i1_low_like: List[int] = [-128] @@ -702,37 +727,37 @@ reveal_type(def_gen.integers(-128, 128, dtype="i1")) # E: int reveal_type(def_gen.integers(127, dtype="i1", endpoint=True)) # E: int reveal_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] reveal_type(def_gen.integers(128, dtype="int8")) # E: int reveal_type(def_gen.integers(-128, 128, dtype="int8")) # E: int reveal_type(def_gen.integers(127, dtype="int8", endpoint=True)) # E: int reveal_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] reveal_type(def_gen.integers(128, dtype=np.int8)) # E: int reveal_type(def_gen.integers(-128, 128, dtype=np.int8)) # E: int reveal_type(def_gen.integers(127, dtype=np.int8, endpoint=True)) # E: int reveal_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, 
dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) I_i2_low_like: List[int] = [-32768] @@ -743,37 +768,37 @@ reveal_type(def_gen.integers(-32768, 32768, dtype="i2")) # E: int reveal_type(def_gen.integers(32767, dtype="i2", endpoint=True)) # E: int reveal_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] 
+reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] reveal_type(def_gen.integers(32768, dtype="int16")) # E: int reveal_type(def_gen.integers(-32768, 32768, dtype="int16")) # E: int reveal_type(def_gen.integers(32767, dtype="int16", endpoint=True)) # E: int reveal_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] reveal_type(def_gen.integers(32768, dtype=np.int16)) # E: int reveal_type(def_gen.integers(-32768, 32768, dtype=np.int16)) # E: int reveal_type(def_gen.integers(32767, dtype=np.int16, endpoint=True)) # E: int reveal_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] 
-reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) I_i4_low_like: List[int] = [-2147483648] @@ -784,37 +809,37 @@ reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="i4")) # E: int reveal_type(def_gen.integers(2147483647, dtype="i4", endpoint=True)) # E: int reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] 
+reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] reveal_type(def_gen.integers(2147483648, dtype="int32")) # E: int reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="int32")) # E: int reveal_type(def_gen.integers(2147483647, dtype="int32", endpoint=True)) # E: int reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] reveal_type(def_gen.integers(2147483648, dtype=np.int32)) # E: int reveal_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32)) # E: int reveal_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True)) # E: int 
reveal_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64) I_i8_low_like: List[int] = [-9223372036854775808] @@ -825,37 +850,37 @@ reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int reveal_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True)) # E: int reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] 
-reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.integers(9223372036854775808, dtype="int64")) # E: int reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int reveal_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True)) # E: int reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.integers(9223372036854775808, dtype=np.int64)) # E: int reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int reveal_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True)) # E: int reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.bit_generator) # E: BitGenerator @@ -863,10 +888,10 @@ reveal_type(def_gen.bytes(2)) # E: bytes reveal_type(def_gen.choice(5)) # E: int -reveal_type(def_gen.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.choice(5, 3, replace=True)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.choice(5, 3, replace=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: numpy.ndarray[Any, Any] @@ -878,25 +903,25 @@ reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]), size=3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] -reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.multivariate_normal([0.0], [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(def_gen.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(def_gen.multivariate_normal(np.array([0.0]), [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] reveal_type(def_gen.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] -reveal_type(def_gen.permutation(10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(def_gen.permutation(10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(def_gen.permutation([1, 2, 3, 4])) # E: numpy.ndarray[Any, Any] reveal_type(def_gen.permutation(np.array([1, 2, 3, 4]))) # E: numpy.ndarray[Any, Any] reveal_type(def_gen.permutation(D_2D, axis=1)) # E: numpy.ndarray[Any, Any] @@ -1292,146 +1317,157 @@ reveal_type(random_st.randint(256, dtype="u1")) # E: int reveal_type(random_st.randint(0, 256, dtype="u1")) # E: int -reveal_type(random_st.randint(I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(0, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(0, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] reveal_type(random_st.randint(256, dtype="uint8")) # E: int reveal_type(random_st.randint(0, 256, dtype="uint8")) # E: int -reveal_type(random_st.randint(I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(0, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(0, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] reveal_type(random_st.randint(256, dtype=np.uint8)) # E: int reveal_type(random_st.randint(0, 256, dtype=np.uint8)) # E: int -reveal_type(random_st.randint(I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] reveal_type(random_st.randint(65536, dtype="u2")) # E: int reveal_type(random_st.randint(0, 65536, dtype="u2")) # E: int -reveal_type(random_st.randint(I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(0, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(0, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] reveal_type(random_st.randint(65536, dtype="uint16")) # E: int reveal_type(random_st.randint(0, 65536, dtype="uint16")) # E: int -reveal_type(random_st.randint(I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(0, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] 
+reveal_type(random_st.randint(0, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] reveal_type(random_st.randint(65536, dtype=np.uint16)) # E: int reveal_type(random_st.randint(0, 65536, dtype=np.uint16)) # E: int -reveal_type(random_st.randint(I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] reveal_type(random_st.randint(4294967296, dtype="u4")) # E: int reveal_type(random_st.randint(0, 4294967296, dtype="u4")) # E: int -reveal_type(random_st.randint(I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(0, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] reveal_type(random_st.randint(4294967296, dtype="uint32")) # E: int reveal_type(random_st.randint(0, 4294967296, dtype="uint32")) # E: int -reveal_type(random_st.randint(I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(0, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] reveal_type(random_st.randint(4294967296, dtype=np.uint32)) # E: int reveal_type(random_st.randint(0, 4294967296, dtype=np.uint32)) # E: int -reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(I_u4_low, I_u4_high_open, 
dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(4294967296, dtype=np.uint)) # E: int +reveal_type(random_st.randint(0, 4294967296, dtype=np.uint)) # E: int +reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] reveal_type(random_st.randint(18446744073709551616, dtype="u8")) # E: int reveal_type(random_st.randint(0, 18446744073709551616, dtype="u8")) # E: int -reveal_type(random_st.randint(I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(0, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(0, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] reveal_type(random_st.randint(18446744073709551616, dtype="uint64")) # E: int reveal_type(random_st.randint(0, 18446744073709551616, dtype="uint64")) # E: int -reveal_type(random_st.randint(I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(0, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(0, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] reveal_type(random_st.randint(18446744073709551616, dtype=np.uint64)) # E: int reveal_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64)) # E: int -reveal_type(random_st.randint(I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] 
-reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] reveal_type(random_st.randint(128, dtype="i1")) # E: int reveal_type(random_st.randint(-128, 128, dtype="i1")) # E: int -reveal_type(random_st.randint(I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(-128, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(-128, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] reveal_type(random_st.randint(128, dtype="int8")) # E: int reveal_type(random_st.randint(-128, 128, dtype="int8")) # E: int -reveal_type(random_st.randint(I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(-128, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(-128, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] reveal_type(random_st.randint(128, dtype=np.int8)) # E: int reveal_type(random_st.randint(-128, 128, dtype=np.int8)) # E: int -reveal_type(random_st.randint(I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] -reveal_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]] +reveal_type(random_st.randint(I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] reveal_type(random_st.randint(32768, dtype="i2")) # E: int reveal_type(random_st.randint(-32768, 32768, dtype="i2")) # E: int -reveal_type(random_st.randint(I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] reveal_type(random_st.randint(32768, dtype="int16")) # E: int reveal_type(random_st.randint(-32768, 32768, dtype="int16")) # E: int -reveal_type(random_st.randint(I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] reveal_type(random_st.randint(32768, dtype=np.int16)) # E: int reveal_type(random_st.randint(-32768, 32768, dtype=np.int16)) # E: int -reveal_type(random_st.randint(I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] -reveal_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]] +reveal_type(random_st.randint(I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] reveal_type(random_st.randint(2147483648, dtype="i4")) # E: int reveal_type(random_st.randint(-2147483648, 2147483648, dtype="i4")) # E: int -reveal_type(random_st.randint(I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] 
-reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] reveal_type(random_st.randint(2147483648, dtype="int32")) # E: int reveal_type(random_st.randint(-2147483648, 2147483648, dtype="int32")) # E: int -reveal_type(random_st.randint(I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] reveal_type(random_st.randint(2147483648, dtype=np.int32)) # E: int reveal_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32)) # E: int -reveal_type(random_st.randint(I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] -reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]] +reveal_type(random_st.randint(I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] + +reveal_type(random_st.randint(2147483648, dtype=np.int_)) # E: int +reveal_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_)) # E: int +reveal_type(random_st.randint(I_i4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] reveal_type(random_st.randint(9223372036854775808, dtype="i8")) # E: int reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int -reveal_type(random_st.randint(I_i8_high_open, 
dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.randint(9223372036854775808, dtype="int64")) # E: int reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int -reveal_type(random_st.randint(I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st.randint(9223372036854775808, dtype=np.int64)) # E: int reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int -reveal_type(random_st.randint(I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] -reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]] +reveal_type(random_st.randint(I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] reveal_type(random_st._bit_generator) # E: BitGenerator From dc8481b0d6c87c564ad4e927abf8703f73eab696 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Tue, 23 Feb 2021 17:26:37 +0000 Subject: [PATCH 0640/1270] CLN: Clean up random typing Clean unnecessary definitions Fix small typing errors --- numpy/random/_generator.pyi | 15 ++++----------- numpy/random/mtrand.pyi | 14 +++----------- 2 files changed, 7 insertions(+), 22 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 
6b1a6f4af90c..14dc5513174d 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,5 +1,5 @@ import sys -from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload +from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, TypeVar from numpy import ( bool_, @@ -49,6 +49,8 @@ if sys.version_info >= (3, 8): else: from typing_extensions import Literal +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + _DTypeLikeFloat32 = Union[ dtype[float32], _SupportsDType[dtype[float32]], @@ -72,7 +74,7 @@ class Generator: def __str__(self) -> str: ... def __getstate__(self) -> Dict[str, Any]: ... def __setstate__(self, state: Dict[str, Any]) -> None: ... - def __reduce__(self) -> Tuple[Callable[[str], BitGenerator], Tuple[str], Dict[str, Any]]: ... + def __reduce__(self) -> Tuple[Callable[[str], Generator], Tuple[str], Dict[str, Any]]: ... @property def bit_generator(self) -> BitGenerator: ... def bytes(self, length: int) -> bytes: ... @@ -217,15 +219,6 @@ class Generator: endpoint: bool = ..., ) -> bool: ... @overload - def integers( # type: ignore[misc] - self, - low: int, - high: Optional[int] = ..., - size: None = ..., - dtype: _DTypeLikeBool = ..., - endpoint: bool = ..., - ) -> bool: ... - @overload def integers( # type: ignore[misc] self, low: int, diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 814c294ac55d..fdf58c7e1046 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -73,7 +73,7 @@ class RandomState: def __str__(self) -> str: ... def __getstate__(self) -> Dict[str, Any]: ... def __setstate__(self, state: Dict[str, Any]) -> None: ... - def __reduce__(self) -> Tuple[Callable[[str], BitGenerator], Tuple[str], Dict[str, Any]]: ... + def __reduce__(self) -> Tuple[Callable[[str], RandomState], Tuple[str], Dict[str, Any]]: ... def seed(self, seed: Optional[_ArrayLikeFloat_co] = ...) -> None: ... @overload def get_state(self, legacy: Literal[False] = ...) -> Dict[str, Any]: ... @@ -128,14 +128,6 @@ class RandomState: endpoint: bool = ..., ) -> bool: ... @overload - def randint( # type: ignore[misc] - self, - low: int, - high: Optional[int] = ..., - size: None = ..., - dtype: _DTypeLikeBool = ..., - ) -> bool: ... - @overload def randint( # type: ignore[misc] self, low: int, @@ -291,11 +283,11 @@ class RandomState: size: Optional[_ShapeLike] = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload - def rand(self, *args: None) -> float: ... + def rand(self) -> float: ... @overload def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ... @overload - def randn(self, *args: None) -> float: ... + def randn(self) -> float: ... @overload def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ... @overload From d4c123657f08b43939a7b7e329f40015d38b2849 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Wed, 24 Feb 2021 10:41:07 +0000 Subject: [PATCH 0641/1270] BUG: Remove incorrect variable name Remove incorrect variable name in randint --- numpy/random/mtrand.pyi | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index fdf58c7e1046..3137b0a955ad 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -125,7 +125,6 @@ class RandomState: high: Optional[int] = ..., size: None = ..., dtype: _DTypeLikeBool = ..., - endpoint: bool = ..., ) -> bool: ... 
@overload def randint( # type: ignore[misc] From e2740334e8f205faee2e307db44bcc2c65ca586e Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Wed, 24 Feb 2021 06:57:11 -0800 Subject: [PATCH 0642/1270] Goto done when an error condition is reached --- numpy/core/src/umath/ufunc_object.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 269b2e81ade5..f30f31a2ee2f 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5222,6 +5222,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, if (current->arg_dtypes == NULL) { PyErr_NoMemory(); result = -1; + goto done; } else if (arg_dtypes != NULL) { for (i = 0; i < ufunc->nargs; i++) { From 6dba374bb17cf177b0e374814b2756a664ccddd4 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 24 Feb 2021 18:59:25 +0000 Subject: [PATCH 0643/1270] Commit textual suggestions from PR review Co-authored-by: InessaPawson Co-authored-by: Matt Haberland --- doc/neps/nep-0048-spending-project-funds.rst | 22 ++++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/neps/nep-0048-spending-project-funds.rst b/doc/neps/nep-0048-spending-project-funds.rst index 20c960a49146..b0fd6e9a308f 100644 --- a/doc/neps/nep-0048-spending-project-funds.rst +++ b/doc/neps/nep-0048-spending-project-funds.rst @@ -18,7 +18,7 @@ Abstract The NumPy project has historically never received significant **unrestricted** funding. However, that is starting to change. This NEP aims to provide -guidance about spending NumPy project unrestricted funds, by formulating a set +guidance about spending NumPy project unrestricted funds by formulating a set of principles about *what* to pay for and *who* to pay. It will also touch on how decisions regarding spending funds get made, how funds get administered, and transparency around these topics. @@ -27,10 +27,10 @@ and transparency around these topics. Motivation and Scope -------------------- -In its 16+ year history, the NumPy project has only spent on the order of +Since its inception and until 2020, the NumPy project has only spent on the order of $10,000 USD of funds that were not restricted to a particular program. Project income of this type has been relying on donations from individuals and, from -May 2019, recurring monthly contributions from Tidelift. By the end of 2020, +mid 2019, recurring monthly contributions from Tidelift. By the end of 2020, the Tidelift contributions increased to $3,000/month, and there's also a potential for an increase of donations and grants going directly to the project. Having a clear set of principles around how to use these funds will @@ -62,7 +62,7 @@ Out of scope for this NEP are: executed. In some cases, we may not even know the contributions were funded or done by an employee on work time. (Whether that's the case or not should not change how we approach a contribution). For grants though, we do expect - the PI and funded team to align their work with the project's needs and be + the research/project leader and funded team to align their work with the needs of NumPy and be receptive to feedback from other NumPy maintainers and contributors.* @@ -100,9 +100,9 @@ be giving to (a) the most boring/painful tasks that no one likes doing, and to necessary structural changes to the code base that are too large to be done by a volunteer in a reasonable amount of time. 
-There are also a large amount of tasks, activities, and projects outside of +There are also many tasks, activities, and projects outside of development work that are important and could enhance the project or community -- think for example of user surveys, translations, outreach, dedicated +- think of, for example, user surveys, translations, outreach, dedicated mentoring of newcomers, community organization, website improvements, and administrative tasks. @@ -126,7 +126,7 @@ It will likely depend on the project/tasks whether there's already a clear best candidate within the NumPy team, or whether we look for new people to get involved. Before making any decisions, the decision makers should think about whether an opportunity should be advertised to give a wider group of people a -chance to throw their hat in the ring for it. +chance to apply for it. Compensating fairly ``````````````````` @@ -143,7 +143,7 @@ We consider them both extreme: - "Same work -- same pay" is unfair to people living in locations with a higher cost of living. For example, the average rent for a single family apartment - can differ by a large factor (from a few hundred dollar to thousands of + can differ by a large factor (from a few hundred dollars to thousands of dollars per month). - "Pay local market rates" bakes in existing inequalities between countries and makes fixed-cost items like a development machine or a holiday trip @@ -293,7 +293,7 @@ all transactions is probably fine, but not necessary. effort required.* -History and current status +NumPy funding – history and current status -------------------------- The NumPy project received its first major funding in 2017. For an overview of @@ -324,7 +324,7 @@ its "managed open source" business model. From May 2019 till July 2020 this was $1,000/month, and it started steadily growing after that to about $3,000/month (as of Feb 2021). -Finally there have been other incidental project income, for example some book +Finally, there has been other incidental project income, for example, some book royalties from Packt Publishing, GSoC mentoring fees from Google, and merchandise sales revenue through the NumFOCUS web shop. All of these were small (two or three figure) amounts. @@ -336,7 +336,7 @@ website and Sphinx theme. Those spending decisions were made by the NumPy Steering Council and announced on the mailing list. That leaves about $25,000 in available funds at the time of writing, and -that amount is current growing at a rate of about $3,000/month. +that amount is currently growing at a rate of about $3,000/month. Related Work From 7eb5cfc8d190cdc6b22bf517b61d233e09a162fe Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 24 Feb 2021 19:02:26 +0000 Subject: [PATCH 0644/1270] Add intro paragraph to Motivation and Scope to explain unrestricted funding Co-authored-by: InessaPawson --- doc/neps/nep-0048-spending-project-funds.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/neps/nep-0048-spending-project-funds.rst b/doc/neps/nep-0048-spending-project-funds.rst index b0fd6e9a308f..8131c85090ba 100644 --- a/doc/neps/nep-0048-spending-project-funds.rst +++ b/doc/neps/nep-0048-spending-project-funds.rst @@ -27,6 +27,10 @@ and transparency around these topics. Motivation and Scope -------------------- +NumPy is a fiscally sponsored project of NumFOCUS, a 501(c)(3) nonprofit organization headquartered in Austin, TX. Therefore, for all legal and accounting matters the NumPy project has to follow the rules and regulations for US nonprofits. 
All nonprofit donations are classified into two categories: **unrestricted funds** which may be used for any legal purpose appropriate to the organization and **restricted funds**, monies set aside for a particular purpose (e.g., project, educational program, etc.). It’s important to note that restricted funds are **permanently restricted** to that purpose and cannot be used for any other expenses.  + +For the detailed timeline of NumPy funding refer to [NumPy funding – history and current status](#history-and-current-status). + Since its inception and until 2020, the NumPy project has only spent on the order of $10,000 USD of funds that were not restricted to a particular program. Project income of this type has been relying on donations from individuals and, from From 6727c674226f83470cedd6c32be74163e46b47d0 Mon Sep 17 00:00:00 2001 From: cisaacstern <62192187+cisaacstern@users.noreply.github.com> Date: Wed, 24 Feb 2021 22:02:42 -0800 Subject: [PATCH 0645/1270] DOC: Closes issue #7408 --- doc/source/reference/routines.array-creation.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/routines.array-creation.rst b/doc/source/reference/routines.array-creation.rst index e718f0052872..30780c286c41 100644 --- a/doc/source/reference/routines.array-creation.rst +++ b/doc/source/reference/routines.array-creation.rst @@ -7,8 +7,8 @@ Array creation routines .. currentmodule:: numpy -Ones and zeros --------------- +From shape or value +------------------- .. autosummary:: :toctree: generated/ From 129f3f1b6b0154a175d2abd2289119c85bd705d9 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 11 Feb 2021 13:21:12 +0100 Subject: [PATCH 0646/1270] MAINT: Added the `_ArrayLikeInt` type alias An invariant-ish array-like consisting of int-like objects. Note that it is not truly invariant due to `builtins.bool` inheriting from `builtins.int` --- numpy/typing/__init__.py | 2 +- numpy/typing/_array_like.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 61d780b85bcf..d71ec0719112 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -327,6 +327,7 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _SupportsArray, _ArrayND, _ArrayOrScalar, + _ArrayLikeInt, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt_co, @@ -339,7 +340,6 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _ArrayLikeVoid_co, _ArrayLikeStr_co, _ArrayLikeBytes_co, - ) if __doc__ is not None: diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index 133f38800591..ef6c061d1aa3 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -124,6 +124,11 @@ def __array__(self, dtype: None = ...) -> ndarray[Any, _DType_co]: ... 
bytes, ] +_ArrayLikeInt = _ArrayLike[ + "dtype[integer[Any]]", + int, +] + if TYPE_CHECKING: _ArrayND = ndarray[Any, dtype[_ScalarType]] _ArrayOrScalar = Union[_ScalarType, _ArrayND[_ScalarType]] From 668f1aa1082fb0316edd9e6069c4b16fb4a2d6c5 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 22 Sep 2020 01:15:35 +0200 Subject: [PATCH 0647/1270] ENH: Add annotations for `np.lib.index_tricks` --- numpy/__init__.pyi | 30 ++++++- numpy/lib/index_tricks.pyi | 179 +++++++++++++++++++++++++++++++++++++ 2 files changed, 207 insertions(+), 2 deletions(-) create mode 100644 numpy/lib/index_tricks.pyi diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 148a63583211..fa498f5086fe 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -496,9 +496,7 @@ nanstd: Any nansum: Any nanvar: Any nbytes: Any -ndenumerate: Any ndfromtxt: Any -ndindex: Any nditer: Any nested_iters: Any newaxis: Any @@ -2899,3 +2897,31 @@ class errstate(Generic[_CallType], ContextDecorator): __exc_value: Optional[BaseException], __traceback: Optional[TracebackType], ) -> None: ... + +class ndenumerate(Generic[_ScalarType]): + iter: flatiter[_ArrayND[_ScalarType]] + @overload + def __new__( + cls, arr: _NestedSequence[_SupportsArray[dtype[_ScalarType]]], + ) -> ndenumerate[_ScalarType]: ... + @overload + def __new__(cls, arr: _NestedSequence[str]) -> ndenumerate[str_]: ... + @overload + def __new__(cls, arr: _NestedSequence[bytes]) -> ndenumerate[bytes_]: ... + @overload + def __new__(cls, arr: _NestedSequence[bool]) -> ndenumerate[bool_]: ... + @overload + def __new__(cls, arr: _NestedSequence[int]) -> ndenumerate[int_]: ... + @overload + def __new__(cls, arr: _NestedSequence[float]) -> ndenumerate[float_]: ... + @overload + def __new__(cls, arr: _NestedSequence[complex]) -> ndenumerate[complex_]: ... + @overload + def __new__(cls, arr: _RecursiveSequence) -> ndenumerate[Any]: ... + def __next__(self: ndenumerate[_ScalarType]) -> Tuple[_Shape, _ScalarType]: ... + def __iter__(self: _T) -> _T: ... + +class ndindex: + def __init__(self, *shape: SupportsIndex) -> None: ... + def __iter__(self: _T) -> _T: ... + def __next__(self) -> _Shape: ... diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi new file mode 100644 index 000000000000..e602f99074bc --- /dev/null +++ b/numpy/lib/index_tricks.pyi @@ -0,0 +1,179 @@ +import sys +from typing import ( + Any, + Tuple, + TypeVar, + Generic, + overload, + List, + Union, + Sequence, +) + +from numpy import ( + # Circumvent a naming conflict with `AxisConcatenator.matrix` + matrix as _Matrix, + ndenumerate as ndenumerate, + ndindex as ndindex, + ndarray, + dtype, + str_, + bytes_, + bool_, + int_, + float_, + complex_, + intp, + _OrderCF, + _ModeKind, +) +from numpy.typing import ( + # Arrays + ArrayLike, + _NestedSequence, + _RecursiveSequence, + _ArrayND, + _ArrayOrScalar, + _ArrayLikeInt, + + # DTypes + DTypeLike, + _SupportsDType, + + # Shapes + _ShapeLike, +) + +if sys.version_info >= (3, 8): + from typing import Literal, SupportsIndex +else: + from typing_extensions import Literal, SupportsIndex + +_T = TypeVar("_T") +_DType = TypeVar("_DType", bound=dtype[Any]) +_BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) +_SliceOrTuple = TypeVar("_SliceOrTuple", bound=Union[slice, Tuple[slice, ...]]) +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + +__all__: List[str] + +def unravel_index( + indices: _ArrayLikeInt, + shape: _ShapeLike, + order: _OrderCF = ... +) -> Tuple[_ArrayOrScalar[intp], ...]: ... 
+ +def ravel_multi_index( + multi_index: ArrayLike, + dims: _ShapeLike, + mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., + order: _OrderCF = ... +) -> _ArrayOrScalar[intp]: ... + +@overload +def ix_(*args: _NestedSequence[_SupportsDType[_DType]]) -> Tuple[ndarray[Any, _DType], ...]: ... +@overload +def ix_(*args: _NestedSequence[str]) -> Tuple[_ArrayND[str_], ...]: ... +@overload +def ix_(*args: _NestedSequence[bytes]) -> Tuple[_ArrayND[bytes_], ...]: ... +@overload +def ix_(*args: _NestedSequence[bool]) -> Tuple[_ArrayND[bool_], ...]: ... +@overload +def ix_(*args: _NestedSequence[int]) -> Tuple[_ArrayND[int_], ...]: ... +@overload +def ix_(*args: _NestedSequence[float]) -> Tuple[_ArrayND[float_], ...]: ... +@overload +def ix_(*args: _NestedSequence[complex]) -> Tuple[_ArrayND[complex_], ...]: ... +@overload +def ix_(*args: _RecursiveSequence) -> Tuple[_ArrayND[Any], ...]: ... + +class nd_grid(Generic[_BoolType]): + sparse: _BoolType + def __init__(self, sparse: _BoolType = ...) -> None: ... + @overload + def __getitem__( + self: nd_grid[Literal[False]], + key: Union[slice, Sequence[slice]], + ) -> _ArrayND[Any]: ... + @overload + def __getitem__( + self: nd_grid[Literal[True]], + key: Union[slice, Sequence[slice]], + ) -> List[_ArrayND[Any]]: ... + +class MGridClass(nd_grid[Literal[False]]): + def __init__(self) -> None: ... + +mgrid: MGridClass + +class OGridClass(nd_grid[Literal[True]]): + def __init__(self) -> None: ... + +ogrid: OGridClass + +class AxisConcatenator: + axis: int + matrix: bool + ndmin: int + trans1d: int + def __init__( + self, + axis: int = ..., + matrix: bool = ..., + ndmin: int = ..., + trans1d: int = ..., + ) -> None: ... + @staticmethod + @overload + def concatenate( # type: ignore[misc] + *a: ArrayLike, axis: SupportsIndex = ..., out: None = ... + ) -> _ArrayND[Any]: ... + @staticmethod + @overload + def concatenate( + *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ... + ) -> _ArrayType: ... + @staticmethod + def makemat( + data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ... + ) -> _Matrix: ... + + # TODO: Sort out this `__getitem__` method + def __getitem__(self, key: Any) -> Any: ... + +class RClass(AxisConcatenator): + axis: Literal[0] + matrix: Literal[False] + ndmin: Literal[1] + trans1d: Literal[-1] + def __init__(self) -> None: ... + +r_: RClass + +class CClass(AxisConcatenator): + axis: Literal[-1] + matrix: Literal[False] + ndmin: Literal[2] + trans1d: Literal[0] + def __init__(self) -> None: ... + +c_: CClass + +class IndexExpression(Generic[_BoolType]): + maketuple: _BoolType + def __init__(self, maketuple: _BoolType) -> None: ... + @overload + def __getitem__( # type: ignore[misc] + self: IndexExpression[Literal[True]], item: slice + ) -> Tuple[slice]: ... + @overload + def __getitem__(self, item: _SliceOrTuple) -> _SliceOrTuple: ... + +index_exp: IndexExpression[Literal[True]] +s_: IndexExpression[Literal[False]] + +def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ... +def diag_indices(n: int, ndim: int = ...) -> Tuple[_ArrayND[int_], ...]: ... +def diag_indices_from(arr: ArrayLike) -> Tuple[_ArrayND[int_], ...]: ... 
+ +# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex` From a58784d1df7189944b15cccb8702ae85969b9b02 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 4 Feb 2021 00:54:13 +0100 Subject: [PATCH 0648/1270] ENH: Expose `index_tricks` content to `np` and `np.lib` namespace --- numpy/__init__.pyi | 27 +++++++++++++++------------ numpy/lib/__init__.pyi | 34 ++++++++++++++++++++-------------- 2 files changed, 35 insertions(+), 26 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fa498f5086fe..dba4176f23d8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -338,6 +338,21 @@ from numpy.core.shape_base import ( vstack as vstack, ) +from numpy.lib.index_tricks import ( + ravel_multi_index as ravel_multi_index, + unravel_index as unravel_index, + mgrid as mgrid, + ogrid as ogrid, + r_ as r_, + c_ as c_, + s_ as s_, + index_exp as index_exp, + ix_ as ix_, + fill_diagonal as fill_diagonal, + diag_indices as diag_indices, + diag_indices_from as diag_indices_from, +) + from numpy.lib.ufunclike import ( fix as fix, isposinf as isposinf, @@ -375,7 +390,6 @@ busday_count: Any busday_offset: Any busdaycalendar: Any byte_bounds: Any -c_: Any can_cast: Any cast: Any chararray: Any @@ -395,8 +409,6 @@ delete: Any deprecate: Any deprecate_with_doc: Any diag: Any -diag_indices: Any -diag_indices_from: Any diagflat: Any diff: Any digitize: Any @@ -417,7 +429,6 @@ def eye( *, like: Optional[ArrayLike] = ... ) -> ndarray[Any, Any]: ... -fill_diagonal: Any finfo: Any flip: Any fliplr: Any @@ -444,7 +455,6 @@ i0: Any iinfo: Any imag: Any in1d: Any -index_exp: Any info: Any inner: Any insert: Any @@ -457,7 +467,6 @@ isin: Any isreal: Any isrealobj: Any iterable: Any -ix_: Any kaiser: Any kron: Any lexsort: Any @@ -474,7 +483,6 @@ may_share_memory: Any median: Any memmap: Any meshgrid: Any -mgrid: Any min: Any min_scalar_type: Any mintypecode: Any @@ -501,7 +509,6 @@ nditer: Any nested_iters: Any newaxis: Any numarray: Any -ogrid: Any packbits: Any pad: Any percentile: Any @@ -522,8 +529,6 @@ promote_types: Any put_along_axis: Any putmask: Any quantile: Any -r_: Any -ravel_multi_index: Any real: Any real_if_close: Any recarray: Any @@ -536,7 +541,6 @@ rot90: Any round: Any round_: Any row_stack: Any -s_: Any save: Any savetxt: Any savez: Any @@ -568,7 +572,6 @@ typename: Any union1d: Any unique: Any unpackbits: Any -unravel_index: Any unwrap: Any vander: Any vdot: Any diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 4468d27e92c6..c7fab69439c6 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,5 +1,25 @@ from typing import Any, List +from numpy import ( + ndenumerate as ndenumerate, + ndindex as ndindex, +) + +from numpy.lib.index_tricks import ( + ravel_multi_index as ravel_multi_index, + unravel_index as unravel_index, + mgrid as mgrid, + ogrid as ogrid, + r_ as r_, + c_ as c_, + s_ as s_, + index_exp as index_exp, + ix_ as ix_, + fill_diagonal as fill_diagonal, + diag_indices as diag_indices, + diag_indices_from as diag_indices_from, +) + from numpy.lib.ufunclike import ( fix as fix, isposinf as isposinf, @@ -25,20 +45,6 @@ asfarray: Any mintypecode: Any asscalar: Any common_type: Any -ravel_multi_index: Any -unravel_index: Any -mgrid: Any -ogrid: Any -r_: Any -c_: Any -s_: Any -index_exp: Any -ix_: Any -ndenumerate: Any -ndindex: Any -fill_diagonal: Any -diag_indices: Any -diag_indices_from: Any select: Any piecewise: Any trim_zeros: Any From 8a18515f6b5eb97a184b92858741eb4a5aab613a Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: 
Thu, 11 Feb 2021 18:21:31 +0100 Subject: [PATCH 0649/1270] TST: Add typing tests for `np.lib.index_tricks` --- numpy/typing/tests/data/fail/index_tricks.py | 18 ++++++ numpy/typing/tests/data/pass/index_tricks.py | 62 +++++++++++++++++++ .../typing/tests/data/reveal/index_tricks.py | 61 ++++++++++++++++++ 3 files changed, 141 insertions(+) create mode 100644 numpy/typing/tests/data/fail/index_tricks.py create mode 100644 numpy/typing/tests/data/pass/index_tricks.py create mode 100644 numpy/typing/tests/data/reveal/index_tricks.py diff --git a/numpy/typing/tests/data/fail/index_tricks.py b/numpy/typing/tests/data/fail/index_tricks.py new file mode 100644 index 000000000000..706f135b2bc0 --- /dev/null +++ b/numpy/typing/tests/data/fail/index_tricks.py @@ -0,0 +1,18 @@ +from typing import List +import numpy as np + +AR_LIKE_i: List[int] +AR_LIKE_f: List[float] + +np.unravel_index(AR_LIKE_f, (1, 2, 3)) # E: incompatible type +np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # E: incompatible type +np.mgrid[1] # E: Invalid index type +np.mgrid[...] # E: Invalid index type +np.ogrid[1] # E: Invalid index type +np.ogrid[...] # E: Invalid index type +np.index_exp[1] # E: No overload variant +np.index_exp[...] # E: No overload variant +np.s_[1] # E: cannot be "int" +np.s_[...] # E: cannot be "ellipsis" +np.fill_diagonal(AR_LIKE_f, 2) # E: incompatible type +np.diag_indices(1.0) # E: incompatible type diff --git a/numpy/typing/tests/data/pass/index_tricks.py b/numpy/typing/tests/data/pass/index_tricks.py new file mode 100644 index 000000000000..ce7f415f3f2c --- /dev/null +++ b/numpy/typing/tests/data/pass/index_tricks.py @@ -0,0 +1,62 @@ +from __future__ import annotations +from typing import Any +import numpy as np + +AR_LIKE_b = [[True, True], [True, True]] +AR_LIKE_i = [[1, 2], [3, 4]] +AR_LIKE_f = [[1.0, 2.0], [3.0, 4.0]] +AR_LIKE_U = [["1", "2"], ["3", "4"]] + +AR_i8: np.ndarray[Any, np.dtype[np.int64]] = np.array(AR_LIKE_i, dtype=np.int64) + +np.ndenumerate(AR_i8) +np.ndenumerate(AR_LIKE_f) +np.ndenumerate(AR_LIKE_U) + +np.ndenumerate(AR_i8).iter +np.ndenumerate(AR_LIKE_f).iter +np.ndenumerate(AR_LIKE_U).iter + +next(np.ndenumerate(AR_i8)) +next(np.ndenumerate(AR_LIKE_f)) +next(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndenumerate(AR_i8)) +iter(np.ndenumerate(AR_LIKE_f)) +iter(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndindex(1, 2, 3)) +next(np.ndindex(1, 2, 3)) + +np.unravel_index([22, 41, 37], (7, 6)) +np.unravel_index([31, 41, 13], (7, 6), order='F') +np.unravel_index(1621, (6, 7, 8, 9)) + +np.ravel_multi_index(AR_LIKE_i, (7, 6)) +np.ravel_multi_index(AR_LIKE_i, (7, 6), order='F') +np.ravel_multi_index(AR_LIKE_i, (4, 6), mode='clip') +np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=('clip', 'wrap')) +np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)) + +np.mgrid[1:1:2] +np.mgrid[1:1:2, None:10] + +np.ogrid[1:1:2] +np.ogrid[1:1:2, None:10] + +np.index_exp[0:1] +np.index_exp[0:1, None:3] + +np.s_[0:1] +np.s_[0:1, None:3] + +np.ix_(AR_LIKE_b[0]) +np.ix_(AR_LIKE_i[0], AR_LIKE_f[0]) +np.ix_(AR_i8[0]) + +np.fill_diagonal(AR_i8, 5) + +np.diag_indices(4) +np.diag_indices(2, 3) + +np.diag_indices_from(AR_i8) diff --git a/numpy/typing/tests/data/reveal/index_tricks.py b/numpy/typing/tests/data/reveal/index_tricks.py new file mode 100644 index 000000000000..dc061d314cb3 --- /dev/null +++ b/numpy/typing/tests/data/reveal/index_tricks.py @@ -0,0 +1,61 @@ +from typing import Any, List +import numpy as np + +AR_LIKE_b: List[bool] +AR_LIKE_i: List[int] +AR_LIKE_f: List[float] +AR_LIKE_U: List[str] + +AR_i8: 
np.ndarray[Any, np.dtype[np.int64]] + +reveal_type(np.ndenumerate(AR_i8)) # E: numpy.ndenumerate[{int64}] +reveal_type(np.ndenumerate(AR_LIKE_f)) # E: numpy.ndenumerate[{double}] +reveal_type(np.ndenumerate(AR_LIKE_U)) # E: numpy.ndenumerate[numpy.str_] + +reveal_type(np.ndenumerate(AR_i8).iter) # E: numpy.flatiter[numpy.ndarray[Any, numpy.dtype[{int64}]]] +reveal_type(np.ndenumerate(AR_LIKE_f).iter) # E: numpy.flatiter[numpy.ndarray[Any, numpy.dtype[{double}]]] +reveal_type(np.ndenumerate(AR_LIKE_U).iter) # E: numpy.flatiter[numpy.ndarray[Any, numpy.dtype[numpy.str_]]] + +reveal_type(next(np.ndenumerate(AR_i8))) # E: Tuple[builtins.tuple[builtins.int], {int64}] +reveal_type(next(np.ndenumerate(AR_LIKE_f))) # E: Tuple[builtins.tuple[builtins.int], {double}] +reveal_type(next(np.ndenumerate(AR_LIKE_U))) # E: Tuple[builtins.tuple[builtins.int], numpy.str_] + +reveal_type(iter(np.ndenumerate(AR_i8))) # E: Iterator[Tuple[builtins.tuple[builtins.int], {int64}]] +reveal_type(iter(np.ndenumerate(AR_LIKE_f))) # E: Iterator[Tuple[builtins.tuple[builtins.int], {double}]] +reveal_type(iter(np.ndenumerate(AR_LIKE_U))) # E: Iterator[Tuple[builtins.tuple[builtins.int], numpy.str_]] + +reveal_type(iter(np.ndindex(1, 2, 3))) # E: Iterator[builtins.tuple[builtins.int]] +reveal_type(next(np.ndindex(1, 2, 3))) # E: builtins.tuple[builtins.int] + +reveal_type(np.unravel_index([22, 41, 37], (7, 6))) # E: tuple[Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]]] +reveal_type(np.unravel_index([31, 41, 13], (7, 6), order="F")) # E: tuple[Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]]] +reveal_type(np.unravel_index(1621, (6, 7, 8, 9))) # E: tuple[Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]]] + +reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6))) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F")) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip")) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap"))) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] + +reveal_type(np.mgrid[1:1:2]) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.mgrid[1:1:2, None:10]) # E: numpy.ndarray[Any, numpy.dtype[Any]] + +reveal_type(np.ogrid[1:1:2]) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]] +reveal_type(np.ogrid[1:1:2, None:10]) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]] + +reveal_type(np.index_exp[0:1]) # E: Tuple[builtins.slice] +reveal_type(np.index_exp[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice] + +reveal_type(np.s_[0:1]) # E: builtins.slice +reveal_type(np.s_[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice] + +reveal_type(np.ix_(AR_LIKE_b)) # E: tuple[numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(np.ix_(AR_LIKE_i, AR_LIKE_f)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{double}]]] +reveal_type(np.ix_(AR_i8)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int64}]]] + +reveal_type(np.fill_diagonal(AR_i8, 5)) # E: None + +reveal_type(np.diag_indices(4)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int_}]]] +reveal_type(np.diag_indices(2, 3)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int_}]]] + +reveal_type(np.diag_indices_from(AR_i8)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int_}]]] From 
f345d732a97b647de1fb26aeae533ca48e8229e9 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 16 Feb 2021 14:51:49 +0100 Subject: [PATCH 0650/1270] MAINT: Relax the type-constraints of `IndexExpression.__getitem__` --- numpy/lib/index_tricks.pyi | 10 +++++----- numpy/typing/tests/data/fail/index_tricks.py | 4 ---- numpy/typing/tests/data/pass/index_tricks.py | 2 ++ numpy/typing/tests/data/reveal/index_tricks.py | 2 ++ 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi index e602f99074bc..3e5bc1adb47b 100644 --- a/numpy/lib/index_tricks.pyi +++ b/numpy/lib/index_tricks.pyi @@ -52,7 +52,7 @@ else: _T = TypeVar("_T") _DType = TypeVar("_DType", bound=dtype[Any]) _BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) -_SliceOrTuple = TypeVar("_SliceOrTuple", bound=Union[slice, Tuple[slice, ...]]) +_TupType = TypeVar("_TupType", bound=Tuple[Any, ...]) _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) __all__: List[str] @@ -163,11 +163,11 @@ class IndexExpression(Generic[_BoolType]): maketuple: _BoolType def __init__(self, maketuple: _BoolType) -> None: ... @overload - def __getitem__( # type: ignore[misc] - self: IndexExpression[Literal[True]], item: slice - ) -> Tuple[slice]: ... + def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc] @overload - def __getitem__(self, item: _SliceOrTuple) -> _SliceOrTuple: ... + def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> Tuple[_T]: ... + @overload + def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ... index_exp: IndexExpression[Literal[True]] s_: IndexExpression[Literal[False]] diff --git a/numpy/typing/tests/data/fail/index_tricks.py b/numpy/typing/tests/data/fail/index_tricks.py index 706f135b2bc0..cbc43fd54b3a 100644 --- a/numpy/typing/tests/data/fail/index_tricks.py +++ b/numpy/typing/tests/data/fail/index_tricks.py @@ -10,9 +10,5 @@ np.mgrid[...] # E: Invalid index type np.ogrid[1] # E: Invalid index type np.ogrid[...] # E: Invalid index type -np.index_exp[1] # E: No overload variant -np.index_exp[...] # E: No overload variant -np.s_[1] # E: cannot be "int" -np.s_[...] 
# E: cannot be "ellipsis" np.fill_diagonal(AR_LIKE_f, 2) # E: incompatible type np.diag_indices(1.0) # E: incompatible type diff --git a/numpy/typing/tests/data/pass/index_tricks.py b/numpy/typing/tests/data/pass/index_tricks.py index ce7f415f3f2c..4c4c1195990a 100644 --- a/numpy/typing/tests/data/pass/index_tricks.py +++ b/numpy/typing/tests/data/pass/index_tricks.py @@ -46,9 +46,11 @@ np.index_exp[0:1] np.index_exp[0:1, None:3] +np.index_exp[0, 0:1, ..., [0, 1, 3]] np.s_[0:1] np.s_[0:1, None:3] +np.s_[0, 0:1, ..., [0, 1, 3]] np.ix_(AR_LIKE_b[0]) np.ix_(AR_LIKE_i[0], AR_LIKE_f[0]) diff --git a/numpy/typing/tests/data/reveal/index_tricks.py b/numpy/typing/tests/data/reveal/index_tricks.py index dc061d314cb3..ec20130251e2 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.py +++ b/numpy/typing/tests/data/reveal/index_tricks.py @@ -45,9 +45,11 @@ reveal_type(np.index_exp[0:1]) # E: Tuple[builtins.slice] reveal_type(np.index_exp[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice] +reveal_type(np.index_exp[0, 0:1, ..., [0, 1, 3]]) # E: Tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] reveal_type(np.s_[0:1]) # E: builtins.slice reveal_type(np.s_[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice] +reveal_type(np.s_[0, 0:1, ..., [0, 1, 3]]) # E: Tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] reveal_type(np.ix_(AR_LIKE_b)) # E: tuple[numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(np.ix_(AR_LIKE_i, AR_LIKE_f)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{double}]]] From 8faae9d5fad6a8342b21deed1dcbecaaa9a32711 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 25 Feb 2021 13:03:44 -0500 Subject: [PATCH 0651/1270] BUG: Fix non-versioneer uses --- numpy/distutils/misc_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 37e12007235c..e797745e12db 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -1970,7 +1970,7 @@ def get_version(self, version_file=None, version_variable=None): try: version = version_module.get_versions()['version'] except AttributeError: - version = None + pass if version is not None: break From b16dc4ad63ca02e5a3d88689fddc48159d297abd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mont=C3=B3n=20Pinillos?= Date: Sat, 26 Sep 2020 19:37:51 +0200 Subject: [PATCH 0652/1270] Fixed Von Mises distribution for big values of kappa by falling back to a normal distribution (which the von mises distribution converges to). --- numpy/random/src/distributions/distributions.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 4494f860e881..6c60ad882f02 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -853,9 +853,14 @@ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { */ s = (1. 
/ kappa + kappa); } else { - double r = 1 + sqrt(1 + 4 * kappa * kappa); - double rho = (r - sqrt(2 * r)) / (2 * kappa); - s = (1 + rho * rho) / (2 * rho); + /* Fallback to normal distribution for big values of kappa*/ + if (kappa > 1e6){ + return mu + sqrt(1/kappa) * random_standard_normal(bitgen_state); + }else{ + double r = 1 + sqrt(1 + 4 * kappa * kappa); + double rho = (r - sqrt(2 * r)) / (2 * kappa); + s = (1 + rho * rho) / (2 * rho); + } } while (1) { From f26a7d202a7136c0750356f7ec6c148d67a5927e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mont=C3=B3n=20Pinillos?= Date: Sat, 26 Sep 2020 23:27:23 +0200 Subject: [PATCH 0653/1270] Fixed style and added check for bounds in [-pi,pi] interval. --- .../random/src/distributions/distributions.c | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 6c60ad882f02..59e11ca47002 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -844,7 +844,8 @@ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { } if (kappa < 1e-8) { return M_PI * (2 * next_double(bitgen_state) - 1); - } else { + } + else { /* with double precision rho is zero until 1.4e-8 */ if (kappa < 1e-5) { /* @@ -852,11 +853,21 @@ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { * precise until relatively large kappas as second order is 0 */ s = (1. / kappa + kappa); - } else { - /* Fallback to normal distribution for big values of kappa*/ - if (kappa > 1e6){ - return mu + sqrt(1/kappa) * random_standard_normal(bitgen_state); - }else{ + } + else { + /* Fallback to normal distribution for big values of kappa */ + if (kappa > 1e6) { + result = mu + sqrt(1. 
/ kappa) * random_standard_normal(bitgen_state); + /* Check if result is within bounds */ + if (result < -M_PI) { + return result + 2*M_PI; + } + if (result > M_PI) { + return result - 2*M_PI; + } + return result; + } + else { double r = 1 + sqrt(1 + 4 * kappa * kappa); double rho = (r - sqrt(2 * r)) / (2 * kappa); s = (1 + rho * rho) / (2 * rho); From 5fa800a32341f24ecc178eb754dc91c9b5d0db2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mont=C3=B3n=20Pinillos?= Date: Fri, 9 Oct 2020 04:09:19 +0200 Subject: [PATCH 0654/1270] Added test for [-np.pi, np.pi] range for the von mises distribution using large values of kappa (>1e6) --- numpy/random/tests/test_generator_mt19937_regressions.py | 3 +++ numpy/random/tests/test_randomstate_regression.py | 3 +++ numpy/random/tests/test_regression.py | 3 +++ 3 files changed, 9 insertions(+) diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index 2ef6b0631920..314713cbee17 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -14,6 +14,9 @@ def test_VonMises_range(self): for mu in np.linspace(-7., 7., 5): r = mt19937.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + for mu in [-np.pi, np.pi]: + r = mt19937.vonmises(mu, 10**6 + 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 0bf361e5eb46..d7ca540d8ec9 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -18,6 +18,9 @@ def test_VonMises_range(self): for mu in np.linspace(-7., 7., 5): r = random.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + for mu in [-np.pi, np.pi]: + r = random.vonmises(mu, 10**6 + 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index 54d5a3efbdba..2ebd4c12f82a 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -14,6 +14,9 @@ def test_VonMises_range(self): for mu in np.linspace(-7., 7., 5): r = random.mtrand.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + for mu in [-np.pi, np.pi]: + r = random.mtrand.vonmises(mu, 10**6 + 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 From 2195375e6b0ba74f772fecfdc7ca41b5897faed6 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 26 Feb 2021 13:45:41 +0100 Subject: [PATCH 0655/1270] TST: Remove the `einsum` typing tests reliance on issuing a `ComplexWarning` --- numpy/typing/tests/data/pass/einsumfunc.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/typing/tests/data/pass/einsumfunc.py b/numpy/typing/tests/data/pass/einsumfunc.py index 914eed4ccfe5..a2a39fb1c4c1 100644 --- a/numpy/typing/tests/data/pass/einsumfunc.py +++ b/numpy/typing/tests/data/pass/einsumfunc.py @@ -2,7 +2,6 @@ from typing import List, Any -import pytest import numpy as np AR_LIKE_b = [True, True, True] @@ -12,6 +11,7 @@ AR_LIKE_c = [1j, 2j, 3j] AR_LIKE_U = ["1", "2", "3"] +OUT_f: np.ndarray[Any, np.dtype[np.float64]] = np.empty(3, dtype=np.float64) OUT_c: np.ndarray[Any, 
np.dtype[np.complex128]] = np.empty(3, dtype=np.complex128) np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b) @@ -25,8 +25,7 @@ np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16") np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe") np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, out=OUT_c) -with pytest.raises(np.ComplexWarning): - np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=float, casting="unsafe", out=OUT_c) +np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=int, casting="unsafe", out=OUT_f) np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b) np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u) From 79c30300390c267bacb5297f36c1e9605bca8f2c Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 26 Feb 2021 13:04:38 +0000 Subject: [PATCH 0656/1270] BUG: Prevent RandomState from changing Apply vonmises fix only to Generator Add tests for correctness closes #17378 closes #17275 --- .../random/src/distributions/distributions.c | 20 +++++++++++------ numpy/random/tests/test_generator_mt19937.py | 22 ++++++++++++++++++- .../test_generator_mt19937_regressions.py | 7 ++---- numpy/random/tests/test_randomstate.py | 9 ++++++++ .../tests/test_randomstate_regression.py | 3 --- numpy/random/tests/test_regression.py | 3 --- 6 files changed, 45 insertions(+), 19 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 59e11ca47002..ce909933ee65 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -1,6 +1,7 @@ #include "numpy/random/distributions.h" #include "ziggurat_constants.h" #include "logfactorial.h" +#include #if defined(_MSC_VER) && defined(_WIN64) #include @@ -844,8 +845,7 @@ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { } if (kappa < 1e-8) { return M_PI * (2 * next_double(bitgen_state) - 1); - } - else { + } else { /* with double precision rho is zero until 1.4e-8 */ if (kappa < 1e-5) { /* @@ -853,9 +853,12 @@ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { * precise until relatively large kappas as second order is 0 */ s = (1. / kappa + kappa); - } - else { - /* Fallback to normal distribution for big values of kappa */ + } else { +#ifndef NP_RANDOM_LEGACY + /* Fallback to normal distribution for big values of kappa + * Fix only applies to Generator, not RandomState + * RandomState defines NP_RANDOM_LEGACY=1 + */ if (kappa > 1e6) { result = mu + sqrt(1. 
/ kappa) * random_standard_normal(bitgen_state); /* Check if result is within bounds */ @@ -866,12 +869,15 @@ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { return result - 2*M_PI; } return result; - } - else { + } else { +#endif + /* Path for 1e-5 <= kappa <= 1e6 */ double r = 1 + sqrt(1 + 4 * kappa * kappa); double rho = (r - sqrt(2 * r)) / (2 * kappa); s = (1 + rho * rho) / (2 * rho); +#if !defined NP_RANDOM_LEGACY } +#endif } while (1) { diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index d68bcd38b539..f6bd985b4510 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -10,7 +10,7 @@ assert_warns, assert_no_warnings, assert_array_equal, assert_array_almost_equal, suppress_warnings) -from numpy.random import Generator, MT19937, SeedSequence +from numpy.random import Generator, MT19937, SeedSequence, RandomState random = Generator(MT19937()) @@ -1735,6 +1735,26 @@ def test_vonmises_nan(self): r = random.vonmises(mu=0., kappa=np.nan) assert_(np.isnan(r)) + @pytest.mark.parametrize("kappa", [1e4, 1e15]) + def test_vonmises_large_kappa(self, kappa): + random = Generator(MT19937(self.seed)) + rs = RandomState(random.bit_generator) + state = random.bit_generator.state + + random_state_vals = rs.vonmises(0, kappa, size=10) + random.bit_generator.state = state + gen_vals = random.vonmises(0, kappa, size=10) + if kappa < 1e6: + assert_allclose(random_state_vals, gen_vals) + else: + assert np.all(random_state_vals != gen_vals) + + @pytest.mark.parametrize("mu", [-7., -np.pi, -3.1, np.pi, 3.2]) + @pytest.mark.parametrize("kappa", [1e-9, 1e-6, 1, 1e3, 1e15]) + def test_vonmises_large_kappa_range(self, mu, kappa): + r = random.vonmises(mu, kappa, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + def test_wald(self): random = Generator(MT19937(self.seed)) actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index 314713cbee17..9f6dcdc6bae8 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -1,22 +1,19 @@ from numpy.testing import (assert_, assert_array_equal) import numpy as np import pytest -from numpy.random import Generator, MT19937 +from numpy.random import Generator, MT19937, RandomState mt19937 = Generator(MT19937()) class TestRegression: - def test_VonMises_range(self): + def test_vonmises_range(self): # Make sure generated random variables are in [-pi, pi]. # Regression test for ticket #986. 
for mu in np.linspace(-7., 7., 5): r = mt19937.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) - for mu in [-np.pi, np.pi]: - r = mt19937.vonmises(mu, 10**6 + 1, 50) - assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index 7f5f0805050e..b16275b700e6 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -1238,6 +1238,15 @@ def test_vonmises_small(self): r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) assert_(np.isfinite(r).all()) + def test_vonmises_large(self): + # guard against changes in RandomState when Generator is fixed + random.seed(self.seed) + actual = random.vonmises(mu=0., kappa=1e7, size=3) + desired = np.array([4.634253748521111e-04, + 3.558873596114509e-04, + -2.337119622577433e-04]) + assert_array_almost_equal(actual, desired, decimal=8) + def test_vonmises_nan(self): random.seed(self.seed) r = random.vonmises(mu=0., kappa=np.nan) diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index d7ca540d8ec9..0bf361e5eb46 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -18,9 +18,6 @@ def test_VonMises_range(self): for mu in np.linspace(-7., 7., 5): r = random.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) - for mu in [-np.pi, np.pi]: - r = random.vonmises(mu, 10**6 + 1, 50) - assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index 2ebd4c12f82a..54d5a3efbdba 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -14,9 +14,6 @@ def test_VonMises_range(self): for mu in np.linspace(-7., 7., 5): r = random.mtrand.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) - for mu in [-np.pi, np.pi]: - r = random.mtrand.vonmises(mu, 10**6 + 1, 50) - assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 From aa529592b33262bb9fdd73df3b493a3d3cf2e392 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 26 Feb 2021 18:19:43 +0000 Subject: [PATCH 0657/1270] CLN: Move to legacy function Avoid conditional compilation and move old version to legacy_vonmises Small clean up Additional comments --- numpy/random/include/legacy-distributions.h | 1 + numpy/random/mtrand.pyx | 4 +- .../random/src/distributions/distributions.c | 29 +++----- .../random/src/legacy/legacy-distributions.c | 68 +++++++++++++++++++ 4 files changed, 82 insertions(+), 20 deletions(-) diff --git a/numpy/random/include/legacy-distributions.h b/numpy/random/include/legacy-distributions.h index b8ba0841c127..f7ccd2cb5111 100644 --- a/numpy/random/include/legacy-distributions.h +++ b/numpy/random/include/legacy-distributions.h @@ -31,6 +31,7 @@ extern double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden); extern double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale); extern double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape); extern double legacy_exponential(aug_bitgen_t *aug_state, double scale); +extern double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa); extern int64_t legacy_random_binomial(bitgen_t *bitgen_state, 
double p, int64_t n, binomial_t *binomial); extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 8d349b7d8e26..6f44e271f245 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -51,7 +51,6 @@ cdef extern from "numpy/random/distributions.h": void random_standard_uniform_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil int64_t random_positive_int(bitgen_t *bitgen_state) nogil double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil - double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil @@ -100,6 +99,7 @@ cdef extern from "include/legacy-distributions.h": double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden) nogil double legacy_exponential(aug_bitgen_t *aug_state, double scale) nogil double legacy_power(aug_bitgen_t *state, double a) nogil + double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil np.import_array() @@ -2281,7 +2281,7 @@ cdef class RandomState: >>> plt.show() """ - return cont(&random_vonmises, &self._bitgen, size, self.lock, 2, + return cont(&legacy_vonmises, &self._bitgen, size, self.lock, 2, mu, 'mu', CONS_NONE, kappa, 'kappa', CONS_NON_NEGATIVE, 0.0, '', CONS_NONE, None) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index ce909933ee65..f47c54a538bc 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -1,7 +1,6 @@ #include "numpy/random/distributions.h" #include "ziggurat_constants.h" #include "logfactorial.h" -#include #if defined(_MSC_VER) && defined(_WIN64) #include @@ -844,6 +843,7 @@ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { return NPY_NAN; } if (kappa < 1e-8) { + /* Use a uniform for very small values of kappa */ return M_PI * (2 * next_double(bitgen_state) - 1); } else { /* with double precision rho is zero until 1.4e-8 */ @@ -854,30 +854,23 @@ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { */ s = (1. / kappa + kappa); } else { -#ifndef NP_RANDOM_LEGACY - /* Fallback to normal distribution for big values of kappa - * Fix only applies to Generator, not RandomState - * RandomState defines NP_RANDOM_LEGACY=1 - */ - if (kappa > 1e6) { + if (kappa <= 1e6) { + /* Path for 1e-5 <= kappa <= 1e6 */ + double r = 1 + sqrt(1 + 4 * kappa * kappa); + double rho = (r - sqrt(2 * r)) / (2 * kappa); + s = (1 + rho * rho) / (2 * rho); + } else { + /* Fallback to wrapped normal distribution for kappa > 1e6 */ result = mu + sqrt(1. 
/ kappa) * random_standard_normal(bitgen_state); - /* Check if result is within bounds */ + /* Ensure result is within bounds */ if (result < -M_PI) { - return result + 2*M_PI; + result += 2*M_PI; } if (result > M_PI) { - return result - 2*M_PI; + result -= 2*M_PI; } return result; - } else { -#endif - /* Path for 1e-5 <= kappa <= 1e6 */ - double r = 1 + sqrt(1 + 4 * kappa * kappa); - double rho = (r - sqrt(2 * r)) / (2 * kappa); - s = (1 + rho * rho) / (2 * rho); -#if !defined NP_RANDOM_LEGACY } -#endif } while (1) { diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index fd067fe8dc2b..5b17401dd984 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -1,3 +1,13 @@ +/* + * This file contains generation code for distribution that have been modified + * since Generator was introduced. These are preserved using identical code + * to what was in NumPy 1.16 so that the stream of values generated by + * RandomState is not changed when there are changes that affect Generator. + * + * These functions should not be changed except if they contain code that + * cannot be compiled. They should not be changed for bug fixes, performance + * improvements that can change the values produced, or enhancements to precision. + */ #include "include/legacy-distributions.h" @@ -390,3 +400,61 @@ int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) { binomial_t *binomial) { return random_multinomial(bitgen_state, n, mnix, pix, d, binomial); } + +double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { + double s; + double U, V, W, Y, Z; + double result, mod; + int neg; + if (npy_isnan(kappa)) { + return NPY_NAN; + } + if (kappa < 1e-8) { + return M_PI * (2 * next_double(bitgen_state) - 1); + } else { + /* with double precision rho is zero until 1.4e-8 */ + if (kappa < 1e-5) { + /* + * second order taylor expansion around kappa = 0 + * precise until relatively large kappas as second order is 0 + */ + s = (1. / kappa + kappa); + } else { + /* Path for 1e-5 <= kappa <= 1e6 */ + double r = 1 + sqrt(1 + 4 * kappa * kappa); + double rho = (r - sqrt(2 * r)) / (2 * kappa); + s = (1 + rho * rho) / (2 * rho); + } + + while (1) { + U = next_double(bitgen_state); + Z = cos(M_PI * U); + W = (1 + s * Z) / (s + Z); + Y = kappa * (s - W); + V = next_double(bitgen_state); + /* + * V==0.0 is ok here since Y >= 0 always leads + * to accept, while Y < 0 always rejects + */ + if ((Y * (2 - Y) - V >= 0) || (log(Y / V) + 1 - Y >= 0)) { + break; + } + } + + U = next_double(bitgen_state); + + result = acos(W); + if (U < 0.5) { + result = -result; + } + result += mu; + neg = (result < 0); + mod = fabs(result); + mod = (fmod(mod + M_PI, 2 * M_PI) - M_PI); + if (neg) { + mod *= -1; + } + + return mod; + } +} \ No newline at end of file From 678919107f29a1da946d2c2ca00d712b830a5753 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Fri, 26 Feb 2021 13:00:14 -0800 Subject: [PATCH 0658/1270] DOC: mv meta landing page to index.rst. The current master_doc is really only used to specify the toctree to set up the navbar in the pydata-sphinx-theme. By renaming to index.rst the navbar config is preserved and the links back to the main page point to the original landing page rather than the contents page, which was mostly empty. 
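The von Mises changes above (patches 0652-0657) only affect ``Generator``: for ``kappa`` larger than ``1e6`` it now draws from a wrapped normal with scale ``1/sqrt(kappa)``, while ``RandomState`` keeps its historical stream through ``legacy_vonmises``. A minimal sketch of the resulting behaviour, assuming a NumPy build that includes these patches (the seed and ``kappa`` value below are arbitrary)::

    import numpy as np

    gen = np.random.Generator(np.random.MT19937(12345))
    rs = np.random.RandomState(12345)

    # Generator: new fallback path for very large kappa; draws are tightly
    # concentrated around mu and wrapped back into (-pi, pi].
    new_draws = gen.vonmises(mu=np.pi, kappa=1e7, size=5)
    assert np.all((new_draws > -np.pi) & (new_draws <= np.pi))

    # RandomState: unchanged legacy algorithm, so previously reproducible
    # streams keep producing the same values.
    legacy_draws = rs.vonmises(mu=np.pi, kappa=1e7, size=5)
    assert np.all((legacy_draws > -np.pi) & (legacy_draws <= np.pi))
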
--- doc/source/conf.py | 2 +- doc/source/{contents.rst => index.rst} | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) rename doc/source/{contents.rst => index.rst} (96%) diff --git a/doc/source/conf.py b/doc/source/conf.py index 238f8588a6e2..7eb96eb22a28 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -94,7 +94,7 @@ class PyTypeObject(ctypes.Structure): # The suffix of source filenames. source_suffix = '.rst' -master_doc = 'contents' +master_doc = 'index' # General substitutions. project = 'NumPy' diff --git a/doc/source/contents.rst b/doc/source/index.rst similarity index 96% rename from doc/source/contents.rst rename to doc/source/index.rst index 5d4e1209772a..21dec00fe5a9 100644 --- a/doc/source/contents.rst +++ b/doc/source/index.rst @@ -6,6 +6,7 @@ NumPy Documentation .. toctree:: :maxdepth: 1 + :hidden: User Guide API reference From 9e068f4032dab58cf310cfb663fa4930371cecf3 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Thu, 2 Jul 2020 18:41:11 +0100 Subject: [PATCH 0659/1270] ENH: Improve error message in multinomial Improve error message when the sum of pvals is larger than 1 when the input data is an ndarray closes #8317 xref #16732 --- numpy/random/_generator.pyx | 14 +++++++++++++- numpy/random/tests/test_generator_mt19937.py | 8 ++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index f25f16a8a155..856b1cc219ea 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3756,7 +3756,19 @@ cdef class Generator: pix = np.PyArray_DATA(parr) check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1) if kahan_sum(pix, d-1) > (1.0 + 1e-12): - raise ValueError("sum(pvals[:-1]) > 1.0") + msg = "sum(pvals[:-1]) > 1.0" + # When floating, but not float dtype, and close, improve the error + # 1.0001 works for float16 and float32 + if (isinstance(pvals, np.ndarray) and + pvals.dtype != float and + np.issubdtype(pvals.dtype, np.floating) and + pvals.sum() < 1.0001): + msg = ("sum(pvals[:-1].astype(np.float64)) > 1.0. pvals are " + "cast to 64-bit floating point values prior to " + "checking the constraint. Changes in precision when " + "casting may produce violations even if " + "pvals[:-1].sum() <= 1.") + raise ValueError(msg) if np.PyArray_NDIM(on) != 0: # vector check_array_constraint(on, 'n', CONS_NON_NEGATIVE) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index f6bd985b4510..9de04477496c 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -142,6 +142,14 @@ def test_multidimensional_pvals(self): assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]]) assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]])) + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + random = Generator(MT19937(1432985819)) + match = r"[\w\s]*pvals are cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) class TestMultivariateHypergeometric: From 2ee92e20151f5c7b0c5602a550b91b9171898210 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Wed, 24 Feb 2021 15:19:27 +0000 Subject: [PATCH 0660/1270] Accept suggestion Use if else. 
Co-authored-by: Eric Wieser --- numpy/random/_generator.pyx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 856b1cc219ea..b0bfd860ccd6 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3756,7 +3756,6 @@ cdef class Generator: pix = np.PyArray_DATA(parr) check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1) if kahan_sum(pix, d-1) > (1.0 + 1e-12): - msg = "sum(pvals[:-1]) > 1.0" # When floating, but not float dtype, and close, improve the error # 1.0001 works for float16 and float32 if (isinstance(pvals, np.ndarray) and @@ -3768,6 +3767,8 @@ cdef class Generator: "checking the constraint. Changes in precision when " "casting may produce violations even if " "pvals[:-1].sum() <= 1.") + else: + msg = "sum(pvals[:-1]) > 1.0" raise ValueError(msg) if np.PyArray_NDIM(on) != 0: # vector From e900be2bf367d2a90af922e56c27aadcd0581bdf Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 26 Feb 2021 23:01:15 +0000 Subject: [PATCH 0661/1270] Address comments --- numpy/random/_generator.pyx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index b0bfd860ccd6..cd950b3fa8d5 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3758,15 +3758,15 @@ cdef class Generator: if kahan_sum(pix, d-1) > (1.0 + 1e-12): # When floating, but not float dtype, and close, improve the error # 1.0001 works for float16 and float32 - if (isinstance(pvals, np.ndarray) and - pvals.dtype != float and - np.issubdtype(pvals.dtype, np.floating) and - pvals.sum() < 1.0001): - msg = ("sum(pvals[:-1].astype(np.float64)) > 1.0. pvals are " - "cast to 64-bit floating point values prior to " - "checking the constraint. Changes in precision when " - "casting may produce violations even if " - "pvals[:-1].sum() <= 1.") + if (isinstance(pvals, np.ndarray) + and np.issubdtype(pvals.dtype, np.floating) + and pvals.dtype != float + and pvals.sum() < 1.0001): + msg = ("sum(pvals[:-1].astype(np.float64)) > 1.0. The pvals " + "array is cast to 64-bit floating point prior to " + "checking the sum. Precision changes when casting may " + "cause problems even if the sum of the original pvals " + "is valid.") else: msg = "sum(pvals[:-1]) > 1.0" raise ValueError(msg) From b1015adfdbce55b7ee9211baca2f51284d67694a Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 26 Feb 2021 23:06:56 +0000 Subject: [PATCH 0662/1270] Port error to RandomState --- numpy/random/mtrand.pyx | 15 ++++++++++++++- numpy/random/tests/test_generator_mt19937.py | 2 +- numpy/random/tests/test_randomstate.py | 8 ++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 6f44e271f245..4e12f8e59264 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4232,7 +4232,20 @@ cdef class RandomState: pix = np.PyArray_DATA(parr) check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1) if kahan_sum(pix, d-1) > (1.0 + 1e-12): - raise ValueError("sum(pvals[:-1]) > 1.0") + # When floating, but not float dtype, and close, improve the error + # 1.0001 works for float16 and float32 + if (isinstance(pvals, np.ndarray) + and np.issubdtype(pvals.dtype, np.floating) + and pvals.dtype != float + and pvals.sum() < 1.0001): + msg = ("sum(pvals[:-1].astype(np.float64)) > 1.0. The pvals " + "array is cast to 64-bit floating point prior to " + "checking the sum. 
Precision changes when casting may " + "cause problems even if the sum of the original pvals " + "is valid.") + else: + msg = "sum(pvals[:-1]) > 1.0" + raise ValueError(msg) if size is None: shape = (d,) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 9de04477496c..446b350dd8de 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -147,7 +147,7 @@ def test_multinomial_pvals_float32(self): 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) pvals = x / x.sum() random = Generator(MT19937(1432985819)) - match = r"[\w\s]*pvals are cast to 64-bit floating" + match = r"[\w\s]*pvals array is cast to 64-bit floating" with pytest.raises(ValueError, match=match): random.multinomial(1, pvals) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index b16275b700e6..861813a95d1f 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -167,6 +167,14 @@ def test_p_non_contiguous(self): contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) assert_array_equal(non_contig, contig) + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + class TestSetState: def setup(self): From ed28f858ac7aba108a95df9900f998f75d0c2d10 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 27 Feb 2021 21:04:09 +0100 Subject: [PATCH 0663/1270] DOC: add guide for downstream package authors --- doc/source/user/depending_on_numpy.rst | 143 +++++++++++++++++++++++++ doc/source/user/index.rst | 1 + 2 files changed, 144 insertions(+) create mode 100644 doc/source/user/depending_on_numpy.rst diff --git a/doc/source/user/depending_on_numpy.rst b/doc/source/user/depending_on_numpy.rst new file mode 100644 index 000000000000..a29429eeb92d --- /dev/null +++ b/doc/source/user/depending_on_numpy.rst @@ -0,0 +1,143 @@ +.. _for-downstream-package-authors: + +For downstream package authors +============================== + +This document aims to explain some best practices for authoring a package that +depends on NumPy. + + +Understanding NumPy's versioning and API/ABI stability +------------------------------------------------------ + +NumPy uses a standard, :pep:`440` compliant, versioning scheme: +``major.minor.bugfix``. A *major* release is highly unusual (NumPy is still at +version ``1.xx``) and if it happens it will likely indicate an ABI break. +*Minor* versions are released regularly, typically every 6 months. Minor +versions contain new features, deprecations, and removals of previously +deprecated code. *Bugfix* releases are made even more frequently; they do not +contain any new features or deprecations. + +It is important to know that NumPy, like Python itself and most other +well-known scientific Python projects, does **not** use semantic versioning. +Instead, backwards-incompatible API changes require deprecation warnings for at +least two releases. For more details, see :ref:`NEP23`. + +NumPy has both a Python API and a C API. The C API can be used directly or via +Cython, f2py, or other such tools. If your package uses the C API, then ABI +(application binary interface) stability of NumPy is important. 
NumPy's ABI is +backwards but not forwards compatible. + + +Testing against the NumPy main branch or pre-releases +----------------------------------------------------- + +For large, actively maintained packages that depend on NumPy, we recommend +testing against the development version of NumPy in CI. To make this easy, +nightly builds are provided as wheels at +https://anaconda.org/scipy-wheels-nightly/. +This helps detect regressions in NumPy that need fixing before the next NumPy +release. + +Furthermore, we recommend to raise errors on warnings in CI for this job, +either all warnings or otherwise at least ``DeprecationWarning`` and +``FutureWarning``. This gives you an early warning about changes in NumPy to +adapt your code. + + +Adding a dependency on NumPy +---------------------------- + +Build-time dependency +````````````````````` + +If a package either uses the NumPy C API directly or it uses some other tool +that depends on it like Cython or Pythran, NumPy is a *build-time* dependency +of the package. Because the NumPy ABI is only forward-compatible, you must +build your own binaries (wheels or other package formats) against the lowest +NumPy version that you support (or an even older version). + +Picking the correct NumPy version to build against for each Python version and +platform can get complicated. There are a couple of ways to do this. +Build-time dependencies are specified in ``pyproject.toml`` (see PEP 517), +which is the file used to build wheels by PEP 517 compliant tools (e.g., +when using ``pip wheel``). + +You can specify everything manually in ``pyproject.toml``, or you can instead +rely on the `oldest-supported-numpy `__ +metapackage. ``oldest-supported-numpy`` will specify the correct NumPy version +at build time for wheels, taking into account Python version, Python +implementation (CPython or PyPy), operating system and hardware platform. It +will specify the oldest NumPy version that supports that combination of +characteristics. Note: for platforms for which NumPy provides wheels on PyPI, +it will be the first version with wheels (even if some older NumPy version +happens to build). + +For conda-forge it's a little less complicated: there's dedicated handling for +NumPy in build-time and runtime dependencies, so typically this is enough +(see `here `__ for docs):: + + host: + - numpy + run: + - {{ pin_compatible('numpy') }} + +.. note:: + + ``pip`` has ``--no-use-pep517`` and ``--no-build-isolation`` flags that may + ignore ``pyproject.toml`` or treat it differently - if users use those + flags, they are responsible for installing the correct build dependencies + themselves. + + Please do not use ``setup_requires`` (it is deprecated and may invoke + ``easy_install``). + +Because for NumPy you have to care about ABI compatibility, you +specify the version with ``==`` to the lowest supported version. For your other +build dependencies you can probably be looser, however it's still important to +set lower and upper bounds for each dependency. It's fine to specify either a +range or a specific version for a dependency like ``wheel`` or ``setuptools``. +It's recommended to set the upper bound of the range to the latest already +released version of ``wheel`` and ``setuptools`` - this prevents future +releases from breaking your packages on PyPI. 
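For illustration only, a minimal ``pyproject.toml`` along the lines described above might look like the sketch below. The non-NumPy entries and their bounds are placeholders rather than recommendations, and an explicit ``numpy==`` pin to the lowest supported version can be used instead of the ``oldest-supported-numpy`` metapackage discussed in the text::

    [build-system]
    requires = [
        "setuptools",                # pin to a released version range in practice
        "wheel",                     # placeholder; set an upper bound as advised above
        "Cython>=0.29.21,<3.0",      # placeholder bounds for a compiled extension
        "oldest-supported-numpy",    # resolves to the oldest ABI-compatible NumPy pin
    ]
    build-backend = "setuptools.build_meta"
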
+ + +Runtime dependency & version ranges +``````````````````````````````````` + +NumPy itself and many core scientific Python packages have agreed on a schedule +for dropping support for old Python and NumPy versions: :ref:`NEP29`. We +recommend all packages depending on NumPy to follow the recommendations in NEP +29. + +For *run-time dependencies*, you specify the range of versions in +``install_requires`` in ``setup.py`` (assuming you use ``numpy.distutils`` or +``setuptools`` to build). Getting the upper bound right for NumPy is slightly +tricky. If we don't set any bound, a too-new version will be pulled in a few +years down the line, and NumPy may have deprecated and removed some API that +your package depended on by then. On the other hand if you set the upper bound +to the newest already-released version, then as soon as a new NumPy version is +released there will be no matching version of your package that works with it. + +What to do here depends on your release frequency. Given that NumPy releases +come in a 6-monthly cadence and that features that get deprecated in NumPy +should stay around for another two releases, a good upper bound is +``<1.xx+3.0`` - where ``xx`` is the minor version of the latest +already-released NumPy. This is safe to do if you release at least once a year. +If your own releases are much less frequent, you may set the upper bound a +little further into the future - this is a trade-off between a future NumPy +version _maybe_ removing something you rely on, and the upper bound being +exceeded which _may_ lead to your package being hard to install in combination +with other packages relying on the latest NumPy. + + +.. note:: + + + SciPy has more documentation on how it builds wheels and deals with its + build-time and runtime dependencies + `here `__. + + NumPy and SciPy wheel build CI may also be useful as a reference, it can be + found `here for NumPy `__ and + `here for SciPy `__. diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index c5a48ccab922..b47d6634eb01 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -23,6 +23,7 @@ details are found in :ref:`reference`. c-info tutorials_index howtos_index + depending_on_numpy .. Links to these files are placed directly in the top-level html From a760dfc0f1d520ff7e1e6e90117fb841e3b62951 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 27 Feb 2021 21:40:32 +0100 Subject: [PATCH 0664/1270] DOC: address review comments on NEP 48 --- doc/neps/nep-0048-spending-project-funds.rst | 69 ++++++++++++++------ 1 file changed, 49 insertions(+), 20 deletions(-) diff --git a/doc/neps/nep-0048-spending-project-funds.rst b/doc/neps/nep-0048-spending-project-funds.rst index 8131c85090ba..b97d456b5f7b 100644 --- a/doc/neps/nep-0048-spending-project-funds.rst +++ b/doc/neps/nep-0048-spending-project-funds.rst @@ -27,9 +27,16 @@ and transparency around these topics. Motivation and Scope -------------------- -NumPy is a fiscally sponsored project of NumFOCUS, a 501(c)(3) nonprofit organization headquartered in Austin, TX. Therefore, for all legal and accounting matters the NumPy project has to follow the rules and regulations for US nonprofits. All nonprofit donations are classified into two categories: **unrestricted funds** which may be used for any legal purpose appropriate to the organization and **restricted funds**, monies set aside for a particular purpose (e.g., project, educational program, etc.). 
It’s important to note that restricted funds are **permanently restricted** to that purpose and cannot be used for any other expenses.  +NumPy is a fiscally sponsored project of NumFOCUS, a 501(c)(3) nonprofit +organization headquartered in Austin, TX. Therefore, for all legal and +accounting matters the NumPy project has to follow the rules and regulations +for US nonprofits. All nonprofit donations are classified into two categories: +**unrestricted funds** which may be used for any legal purpose appropriate +to the organization and **restricted funds**, monies set aside for a +particular purpose (e.g., project, educational program, etc.). -For the detailed timeline of NumPy funding refer to [NumPy funding – history and current status](#history-and-current-status). +For the detailed timeline of NumPy funding refer to +:ref:`numpy-funding-history`. Since its inception and until 2020, the NumPy project has only spent on the order of $10,000 USD of funds that were not restricted to a particular program. Project @@ -63,10 +70,11 @@ Out of scope for this NEP are: directly to the NumPy project, and companies or institutions funding specific features. *Rationale: As a project, we have no direct control over how this work gets - executed. In some cases, we may not even know the contributions were funded - or done by an employee on work time. (Whether that's the case or not should - not change how we approach a contribution). For grants though, we do expect - the research/project leader and funded team to align their work with the needs of NumPy and be + executed (at least formally, until issues or PRs show up). In some cases, we + may not even know the contributions were funded or done by an employee on + work time. (Whether that's the case or not should not change how we approach + a contribution). For grants though, we do expect the research/project leader + and funded team to align their work with the needs of NumPy and be receptive to feedback from other NumPy maintainers and contributors.* @@ -79,6 +87,14 @@ in ways that attract more volunteers and enhance their participation experience is critical. That key principle motivates many of the more detailed principles given below for what to pay for and whom to pay. +The approach for spending funds will be: + +- first figure out what we want to fund, +- then look for a great candidate, +- after that's settled, determine a fair compensation level. + +The next sections go into detail on each of these three points. + What to pay for ``````````````` @@ -119,7 +135,8 @@ Whom to pay ``````````` 1. All else being equal, give preference to existing maintainers/contributors. -2. Consider this an opportunity to make the project more diverse. +2. When looking outside of the current team, consider this an opportunity to + make the project more diverse. 3. Pay attention to the following when considering paying someone: - the necessary technical or domain-specific skills to execute the tasks, @@ -135,13 +152,21 @@ chance to apply for it. Compensating fairly ``````````````````` -Paying people fairly is a difficult topic. Therefore, we will only offer some -guidance here. Final decisions will always have to be considered and approved -by the group of people that bears this responsibility (according to the -current NumPy governance structure, this would be the NumPy Steering Council). +.. note:: -Discussions on employee compensation tend to be dominated by two narratives: -"pay local market rates" and "same work -- same pay". 
+ This section on compensating fairly will be considered *Draft* even if this + NEP as a whole is accepted. Once we have applied the approach outlined here + at least 2-3 times and we are happy with it, will we remove this note and + consider this section *Accepted*. + +Paying people fairly is a difficult topic, especially when it comes to +distributed teams. Therefore, we will only offer some guidance here. Final +decisions will always have to be considered and approved by the group of people +that bears this responsibility (according to the current NumPy governance +structure, this would be the NumPy Steering Council). + +Discussions on remote employee compensation tend to be dominated by two +narratives: "pay local market rates" and "same work -- same pay". We consider them both extreme: @@ -193,9 +218,11 @@ Some other considerations: than someone who is employed since they have to take care of their benefits and accounting on their own. - Some people may be happy with one-off payments towards a particular - deliverable (e.g., hiring a cleaner or some other service to use the saved - time for work on open source). This should be compensated at a lower rate - compared to an individual contractor. + deliverable (e.g., "triage all open issues for label X for $x,xxx"). + This should be compensated at a lower rate compared to an individual + contractor. Or they may motivate lower amounts for another reason (e.g., "I + want to receive $x,xxx to hire a cleaner or pay for childcare, to free up + time for work on open source). - When funding someone's time through their employer, that employer may want to set the compensation level based on its internal rules (e.g., overhead rates). Small deviations from the guidelines in this NEP may be needed in such cases, @@ -297,8 +324,10 @@ all transactions is probably fine, but not necessary. effort required.* +.. _numpy-funding-history: + NumPy funding – history and current status --------------------------- +------------------------------------------ The NumPy project received its first major funding in 2017. For an overview of the early history of NumPy (and SciPy), including some institutions sponsoring @@ -346,9 +375,9 @@ that amount is currently growing at a rate of about $3,000/month. Related Work ------------ -See references. We assume that there are other open source projects having -published guidelines on spending project funds, however we don't have concrete -examples at this time. +See references. We assume that other open source projects have also developed +guidelines on spending project funds. However, we were unable to find any +examples at the time of writing. 
Alternatives From 592b71c8cd10ef3cf9bccebb6c3ed0198f105484 Mon Sep 17 00:00:00 2001 From: jbCodeHub Date: Sun, 28 Feb 2021 09:58:54 +0100 Subject: [PATCH 0665/1270] TST: Branch coverage improvement for `np.polynomial` (#18499) * added tests for vander_nd in test_polyutils to cover some of the missed branches * added tests to polyline and vander to improve branch coverage * added tests to test_legendre.py to improve branch coverage --- numpy/polynomial/tests/test_legendre.py | 12 ++++++++++++ numpy/polynomial/tests/test_polynomial.py | 7 +++++++ numpy/polynomial/tests/test_polyutils.py | 15 +++++++++++++++ 3 files changed, 34 insertions(+) diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index a2a212c242e3..92399c160ecb 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -305,6 +305,9 @@ def test_legint_axis(self): res = leg.legint(c2d, k=3, axis=1) assert_almost_equal(res, tgt) + def test_legint_zerointord(self): + assert_equal(leg.legint((1, 2, 3), 0), (1, 2, 3)) + class TestDerivative: @@ -345,6 +348,9 @@ def test_legder_axis(self): res = leg.legder(c2d, axis=1) assert_almost_equal(res, tgt) + def test_legder_orderhigherthancoeff(self): + c = (1, 2, 3, 4) + assert_equal(leg.legder(c, 4), [0]) class TestVander: # some random values in [-1, 1) @@ -393,6 +399,9 @@ def test_legvander3d(self): van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) + def test_legvander_negdeg(self): + assert_raises(ValueError, leg.legvander, (1, 2, 3), -1) + class TestFitting: @@ -541,6 +550,9 @@ def test_legtrim(self): def test_legline(self): assert_equal(leg.legline(3, 4), [3, 4]) + def test_legline_zeroscl(self): + assert_equal(leg.legline(3, 0), [3]) + def test_leg2poly(self): for i in range(10): assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 5fd1a82a2faf..a0a09fcf4a93 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -473,6 +473,10 @@ def test_polyvander3d(self): van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) + def test_polyvandernegdeg(self): + x = np.arange(3) + assert_raises(ValueError, poly.polyvander, x, -1) + class TestCompanion: @@ -591,3 +595,6 @@ def test_polytrim(self): def test_polyline(self): assert_equal(poly.polyline(3, 4), [3, 4]) + + def test_polyline_zero(self): + assert_equal(poly.polyline(3, 0), [3]) diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index 1b27f53b5988..cc630790da1c 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -40,6 +40,21 @@ def test_trimcoef(self): assert_equal(pu.trimcoef(coef, 1), coef[:-3]) assert_equal(pu.trimcoef(coef, 2), [0]) + def test_vander_nd_exception(self): + # n_dims != len(points) + assert_raises(ValueError, pu._vander_nd, (), (1, 2, 3), [90]) + # n_dims != len(degrees) + assert_raises(ValueError, pu._vander_nd, (), (), [90.65]) + # n_dims == 0 + assert_raises(ValueError, pu._vander_nd, (), (), []) + + def test_div_zerodiv(self): + # c2[-1] == 0 + assert_raises(ZeroDivisionError, pu._div, pu._div, (1, 2, 3), [0]) + + def test_pow_too_large(self): + # power > maxpower + assert_raises(ValueError, pu._pow, (), [1, 2, 3], 5, 4) class TestDomain: From 48ad82dd5cbcfa963e00d22c9cf21edeef623e66 Mon Sep 17 00:00:00 2001 From: Ralf 
Gommers Date: Sun, 28 Feb 2021 13:39:51 +0100 Subject: [PATCH 0666/1270] DOC: address review comments on guide for downstream package authors --- doc/source/user/depending_on_numpy.rst | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/doc/source/user/depending_on_numpy.rst b/doc/source/user/depending_on_numpy.rst index a29429eeb92d..a892e8b6d031 100644 --- a/doc/source/user/depending_on_numpy.rst +++ b/doc/source/user/depending_on_numpy.rst @@ -26,7 +26,9 @@ least two releases. For more details, see :ref:`NEP23`. NumPy has both a Python API and a C API. The C API can be used directly or via Cython, f2py, or other such tools. If your package uses the C API, then ABI (application binary interface) stability of NumPy is important. NumPy's ABI is -backwards but not forwards compatible. +forward but not backward compatible. This means: binaries compiled against a +given version of NumPy will still run correctly with newer NumPy versions, but +not with older versions. Testing against the NumPy main branch or pre-releases @@ -84,13 +86,17 @@ NumPy in build-time and runtime dependencies, so typically this is enough .. note:: - ``pip`` has ``--no-use-pep517`` and ``--no-build-isolation`` flags that may - ignore ``pyproject.toml`` or treat it differently - if users use those - flags, they are responsible for installing the correct build dependencies - themselves. + ``pip`` has ``--no-use-pep517`` and ``--no-build-isolation`` flags that may + ignore ``pyproject.toml`` or treat it differently - if users use those + flags, they are responsible for installing the correct build dependencies + themselves. - Please do not use ``setup_requires`` (it is deprecated and may invoke - ``easy_install``). + ``conda`` will always use ``-no-build-isolation``; dependencies for conda + builds are given in the conda recipe (``meta.yaml``), the ones in + ``pyproject.toml`` have no effect. + + Please do not use ``setup_requires`` (it is deprecated and may invoke + ``easy_install``). Because for NumPy you have to care about ABI compatibility, you specify the version with ``==`` to the lowest supported version. For your other From 6e4e8b789e8d0fe4591d80a578ff21b388b1956d Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 28 Feb 2021 18:06:06 +0200 Subject: [PATCH 0667/1270] BLD: add _2_24 to valid manylinux names --- tools/openblas_support.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 4a691aba0b46..75f3e81b8815 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -60,7 +60,7 @@ def get_manylinux(arch): default = '2014' ret = os.environ.get("MB_ML_VER", default) # XXX For PEP 600 this can be a glibc version - assert ret in ('1', '2010', '2014'), f'invalid MB_ML_VER {ret}' + assert ret in ('1', '2010', '2014', '_2_24'), f'invalid MB_ML_VER {ret}' return ret From 5fcf88f933503296735a42c12b17f595aed3d07e Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 28 Feb 2021 22:41:33 +0100 Subject: [PATCH 0668/1270] DOC: address a few more review comments on downstream packaging guide [ci skip] --- doc/source/user/depending_on_numpy.rst | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/doc/source/user/depending_on_numpy.rst b/doc/source/user/depending_on_numpy.rst index a892e8b6d031..4cf272d09bb9 100644 --- a/doc/source/user/depending_on_numpy.rst +++ b/doc/source/user/depending_on_numpy.rst @@ -19,8 +19,8 @@ deprecated code. 
*Bugfix* releases are made even more frequently; they do not contain any new features or deprecations. It is important to know that NumPy, like Python itself and most other -well-known scientific Python projects, does **not** use semantic versioning. -Instead, backwards-incompatible API changes require deprecation warnings for at +well known scientific Python projects, does **not** use semantic versioning. +Instead, backwards incompatible API changes require deprecation warnings for at least two releases. For more details, see :ref:`NEP23`. NumPy has both a Python API and a C API. The C API can be used directly or via @@ -39,10 +39,8 @@ testing against the development version of NumPy in CI. To make this easy, nightly builds are provided as wheels at https://anaconda.org/scipy-wheels-nightly/. This helps detect regressions in NumPy that need fixing before the next NumPy -release. - -Furthermore, we recommend to raise errors on warnings in CI for this job, -either all warnings or otherwise at least ``DeprecationWarning`` and +release. Furthermore, we recommend to raise errors on warnings in CI for this +job, either all warnings or otherwise at least ``DeprecationWarning`` and ``FutureWarning``. This gives you an early warning about changes in NumPy to adapt your code. @@ -55,7 +53,7 @@ Build-time dependency If a package either uses the NumPy C API directly or it uses some other tool that depends on it like Cython or Pythran, NumPy is a *build-time* dependency -of the package. Because the NumPy ABI is only forward-compatible, you must +of the package. Because the NumPy ABI is only forward compatible, you must build your own binaries (wheels or other package formats) against the lowest NumPy version that you support (or an even older version). From bf72d67a8b02fe06ba1efa634cff1c9f6f26a39c Mon Sep 17 00:00:00 2001 From: Filip Ter Date: Sun, 28 Feb 2021 15:07:39 -0800 Subject: [PATCH 0669/1270] ENH: A more helpful error message, when types don't match type of default kwarg --- numpy/lib/function_base.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index c6db42ce4905..ddc919e4f5dc 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -671,11 +671,23 @@ def select(condlist, choicelist, default=0): raise ValueError("select with an empty condition list is not possible") choicelist = [np.asarray(choice) for choice in choicelist] - choicelist.append(np.asarray(default)) + + try: + intermediate_dtype = np.result_type(*choicelist) + except TypeError as e: + raise TypeError('Choicelist elements do not have a common dtype: {}' + .format(e)) + default_array = np.asarray(default) + choicelist.append(default_array) # need to get the result type before broadcasting for correct scalar # behaviour - dtype = np.result_type(*choicelist) + try: + dtype = np.result_type(intermediate_dtype, default_array) + except TypeError as e: + raise TypeError( + 'Choicelists and default do not have a common dtype: {}' + .format(e)) # Convert conditions to arrays and broadcast conditions and choices # as the shape is needed for the result. 
Doing it separately optimizes From 8996557425a5a6150ff33135e03feae75b0923b5 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 1 Mar 2021 09:39:33 +0000 Subject: [PATCH 0670/1270] MAINT: Bump hypothesis from 6.3.0 to 6.3.4 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.3.0 to 6.3.4. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.3.0...hypothesis-python-6.3.4) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 55742e617412..02caa8748620 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.22 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.3.0 +hypothesis==6.3.4 pytest==6.2.2 pytz==2021.1 pytest-cov==2.11.1 From 8c671cb7a2e99dbd4146a77c0e88f4d24efc911f Mon Sep 17 00:00:00 2001 From: Roman Yurchak Date: Mon, 1 Mar 2021 13:47:02 +0100 Subject: [PATCH 0671/1270] DOC Improve formatting in the depending_on_numpy documentation (#18518) --- doc/source/user/depending_on_numpy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/depending_on_numpy.rst b/doc/source/user/depending_on_numpy.rst index 4cf272d09bb9..d8e97ef1f967 100644 --- a/doc/source/user/depending_on_numpy.rst +++ b/doc/source/user/depending_on_numpy.rst @@ -126,7 +126,7 @@ released there will be no matching version of your package that works with it. What to do here depends on your release frequency. Given that NumPy releases come in a 6-monthly cadence and that features that get deprecated in NumPy should stay around for another two releases, a good upper bound is -``<1.xx+3.0`` - where ``xx`` is the minor version of the latest +``<1.(xx+3).0`` - where ``xx`` is the minor version of the latest already-released NumPy. This is safe to do if you release at least once a year. If your own releases are much less frequent, you may set the upper bound a little further into the future - this is a trade-off between a future NumPy From 10b540ad0d82d7c1d0124c7cf9a8370490dcb8a4 Mon Sep 17 00:00:00 2001 From: "Patrick T. Komiske III" Date: Mon, 1 Mar 2021 16:52:56 -0500 Subject: [PATCH 0672/1270] removed extraneous ARGOUTVIEWM dim. 
4 typemaps https://github.com/numpy/numpy/issues/18521 --- tools/swig/numpy.i | 152 --------------------------------------------- 1 file changed, 152 deletions(-) diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i index 60930c098330..99ed073abe11 100644 --- a/tools/swig/numpy.i +++ b/tools/swig/numpy.i @@ -2872,158 +2872,6 @@ $result = SWIG_Python_AppendOutput($result,obj); } -/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, - DIM_TYPE* DIM3, DIM_TYPE* DIM4) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 ) - (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; - $4 = &dim3_temp; - $5 = &dim4_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements,NumPy_Utilities") - (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) -{ - npy_intp dims[4] = { *$2, *$3, *$4 , *$5 }; - PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1)); - PyArrayObject* array = (PyArrayObject*) obj; - - if (!array || !require_fortran(array)) SWIG_fail; - -%#ifdef SWIGPY_USE_CAPSULE - PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); -%#else - PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free); -%#endif - -%#if NPY_API_VERSION < 0x00000007 - PyArray_BASE(array) = cap; -%#else - PyArray_SetBaseObject(array,cap); -%#endif - - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, - DATA_TYPE** ARGOUTVIEWM_FARRAY4) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 , DATA_TYPE** ARGOUTVIEWM_FARRAY4) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL ) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &dim3_temp; - $4 = &dim4_temp; - $5 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements,NumPy_Utilities") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_FARRAY4) -{ - npy_intp dims[4] = { *$1, *$2, *$3 , *$4 }; - PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5)); - PyArrayObject* array = (PyArrayObject*) obj; - - if (!array || !require_fortran(array)) SWIG_fail; - -%#ifdef SWIGPY_USE_CAPSULE - PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap); -%#else - PyObject* cap = PyCObject_FromVoidPtr((void*)(*$5), free); -%#endif - -%#if NPY_API_VERSION < 0x00000007 - PyArray_BASE(array) = cap; -%#else - PyArray_SetBaseObject(array,cap); -%#endif - - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, - DIM_TYPE* DIM3, DIM_TYPE* DIM4) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 ) - (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; - $4 = &dim3_temp; - $5 = &dim4_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Utilities") - (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4) -{ - 
npy_intp dims[4] = { *$2, *$3, *$4 , *$5 }; - PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1)); - PyArrayObject* array = (PyArrayObject*) obj; - - if (!array) SWIG_fail; - -%#ifdef SWIGPY_USE_CAPSULE - PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); -%#else - PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free); -%#endif - -%#if NPY_API_VERSION < 0x00000007 - PyArray_BASE(array) = cap; -%#else - PyArray_SetBaseObject(array,cap); -%#endif - - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, - DATA_TYPE** ARGOUTVIEWM_ARRAY4) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 , DATA_TYPE** ARGOUTVIEWM_ARRAY4) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL ) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &dim3_temp; - $4 = &dim4_temp; - $5 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Utilities") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_ARRAY4) -{ - npy_intp dims[4] = { *$1, *$2, *$3 , *$4 }; - PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5)); - PyArrayObject* array = (PyArrayObject*) obj; - - if (!array) SWIG_fail; - -%#ifdef SWIGPY_USE_CAPSULE - PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap); -%#else - PyObject* cap = PyCObject_FromVoidPtr((void*)(*$5), free); -%#endif - -%#if NPY_API_VERSION < 0x00000007 - PyArray_BASE(array) = cap; -%#else - PyArray_SetBaseObject(array,cap); -%#endif - - $result = SWIG_Python_AppendOutput($result,obj); -} - /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4) */ From 4f3aa0018b2a9adceddc5eb5361a2f09af759b06 Mon Sep 17 00:00:00 2001 From: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Date: Sun, 28 Feb 2021 03:31:51 +0100 Subject: [PATCH 0673/1270] DOC: trunc, floor, ceil, rint, fix should all link to each other in see also --- numpy/core/code_generators/ufunc_docstrings.py | 15 ++++++++------- numpy/lib/ufunclike.py | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 2f75cb41fb0b..bb95407557fb 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -781,7 +781,7 @@ def add_newdoc(place, name, doc): See Also -------- - floor, trunc, rint + floor, trunc, rint, fix Examples -------- @@ -813,7 +813,7 @@ def add_newdoc(place, name, doc): See Also -------- - ceil, floor, rint + ceil, floor, rint, fix Notes ----- @@ -1374,13 +1374,14 @@ def add_newdoc(place, name, doc): See Also -------- - ceil, trunc, rint + ceil, trunc, rint, fix Notes ----- - Some spreadsheet programs calculate the "floor-towards-zero", in other - words ``floor(-2.5) == -2``. NumPy instead uses the definition of - `floor` where `floor(-2.5) == -3`. + Other than the NumPy ``floor`` function, which always rounds down, the + FLOOR function in common spreadsheet programs calculates the + "floor-towards-zero", i.e. ``FLOOR(-2.5) == -2``. This "floor-towards-zero" + function is called ``fix`` in NumPy. 
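For illustration, the behavior contrasted in the note above (``floor`` always rounds down, while the "floor-towards-zero" of ``fix`` and ``trunc`` rounds towards zero) can be seen directly::

    >>> import numpy as np
    >>> x = np.array([-2.5, -1.7, 1.7, 2.5])
    >>> np.floor(x)
    array([-3., -2.,  1.,  2.])
    >>> np.fix(x)
    array([-2., -1.,  1.,  2.])
    >>> np.trunc(x)
    array([-2., -1.,  1.,  2.])
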
Examples -------- @@ -3486,7 +3487,7 @@ def add_newdoc(place, name, doc): See Also -------- - ceil, floor, trunc + fix, ceil, floor, trunc Examples -------- diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py index 0956de82b14b..a93c4773bc4e 100644 --- a/numpy/lib/ufunclike.py +++ b/numpy/lib/ufunclike.py @@ -100,7 +100,7 @@ def fix(x, out=None): See Also -------- - trunc, floor, ceil + rint, trunc, floor, ceil around : Round to given number of decimals Examples From d78a1c6b3874956248e9cb96d4e32f39bec5956b Mon Sep 17 00:00:00 2001 From: aerikpawson <45904740+aerikpawson@users.noreply.github.com> Date: Tue, 2 Mar 2021 19:52:00 +1000 Subject: [PATCH 0674/1270] Specify color in RGB in the docs about the new NumPy logo Specifying color in RGB in the guidelines for the new NumPy logo. Fixes #gh-398 https://github.com/numpy/numpy.org/issues/398 --- branding/logo/logoguidelines.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/branding/logo/logoguidelines.md b/branding/logo/logoguidelines.md index c674a9b37763..44bd51ece6c7 100644 --- a/branding/logo/logoguidelines.md +++ b/branding/logo/logoguidelines.md @@ -4,7 +4,7 @@ These guidelines are meant to help keep the NumPy logo consistent and recognizab The primary logo is the horizontal option (logomark and text next to each other) and the secondary logo is the stacked version (logomark over text). I’ve also provided the logomark on its own (meaning it doesn’t have text). When in doubt, it’s preferable to use primary or secondary options over the logomark alone. ## Color -The full color options are a combo of Maximum Blue (#4DABCF) and Han Blue (#4D77CF), while light options are white (#FFFFFF) and dark options Warm Black (#013243). +The full color options are a combo of Maximum Blue/rgb(77, 171, 207) and Han Blue/rgb(77, 119, 207), while light options are White/rgb(255, 255, 255) and dark options are Gunmetal/rgb(1, 50, 67). Whenever possible, use the full color logos. One color logos (light or dark) are to be used when full color will not have enough contrast, usually when logos must be on colored backgrounds. @@ -15,4 +15,4 @@ Please do not make the primary logo smaller than 50px wide, secondary logo small A few other notes to keep in mind when using the logo: - Make sure to scale the logo proportionally. - Maintain a good amount of space around the logo. Don’t let it overlap with text, images, or other elements. -- Do not try and recreate or modify the logo. For example, do not use the logomark and then try to write NumPy in another font. \ No newline at end of file +- Do not try and recreate or modify the logo. For example, do not use the logomark and then try to write NumPy in another font. From 3867832a404f8eaaca0345c210d4f1d3a7b9d2bd Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 2 Mar 2021 11:56:16 +0200 Subject: [PATCH 0675/1270] revert changes to note, add a sentence about fix --- numpy/core/code_generators/ufunc_docstrings.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index bb95407557fb..a9dbbf92e809 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -1378,9 +1378,9 @@ def add_newdoc(place, name, doc): Notes ----- - Other than the NumPy ``floor`` function, which always rounds down, the - FLOOR function in common spreadsheet programs calculates the - "floor-towards-zero", i.e. ``FLOOR(-2.5) == -2``. 
This "floor-towards-zero" + Some spreadsheet programs calculate the "floor-towards-zero", where + ``floor(-2.5) == -2``. NumPy instead uses the definition of + `floor` where `floor(-2.5) == -3`. The "floor-towards-zero" function is called ``fix`` in NumPy. Examples From 940f7d865d26b0bf5f35683cfc7aa1195d1c7078 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Tue, 2 Mar 2021 13:28:24 -0500 Subject: [PATCH 0676/1270] BUG: incorrect error fallthrough in nditer Fixup to gh-18450 --- numpy/core/src/umath/ufunc_object.c | 7 ++++++- numpy/core/tests/test_nditer.py | 8 ++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index f30f31a2ee2f..653e0b5befd7 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -3525,7 +3525,7 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, strides_copy, innerloopdata); if (needs_api && PyErr_Occurred()) { - break; + goto finish_loop; } /* Jump to the faster loop when skipping is done */ @@ -3539,6 +3539,11 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, } } while (iternext(iter)); } + + if (needs_api && PyErr_Occurred()) { + goto finish_loop; + } + do { /* Turn the two items into three for the inner loop */ dataptrs_copy[0] = dataptrs[0]; diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index f34547f9c35c..b2341fe4ef36 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2883,6 +2883,14 @@ def test_object_iter_cleanup(): oarr[:, -1] = None assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1])) + # followup: this tests for a bug introduced in the first pass of gh-18450, + # caused by an incorrect fallthrough of the TypeError + class T: + def __bool__(self): + raise TypeError("Ambiguous") + assert_raises(TypeError, np.logical_or.reduce, + np.array([T(), T()], dtype='O')) + def test_iter_too_large(): # The total size of the iterator must not exceed the maximum intp due # to broadcasting. Dividing by 1024 will keep it small enough to From f9fe1432548688b4f4c2cd2e35b8dd972d8ec51f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 2 Mar 2021 15:51:24 -0700 Subject: [PATCH 0677/1270] CI: Use Ubuntu 18.04 to run "full" test. NumPy does not build using the `--coverage` flag on Ubuntu 20.04, the problem seems to be gcc 9.3.0-17. Work around that by running on Ubuntu 18.04 instead. 
--- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 91a4a72607f5..5366fe75e76f 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -80,7 +80,7 @@ jobs: full: needs: smoke_test - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 env: USE_WHEEL: 1 RUN_FULL_TESTS: 1 From 7ef28b2f1c383828bebcbb87b8e487e27c6e3ff9 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 3 Mar 2021 15:37:11 -0600 Subject: [PATCH 0678/1270] use the new openblas lib --- tools/openblas_support.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 75f3e81b8815..d11ad173befa 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -80,7 +80,7 @@ def download_openblas(target, plat, ilp64): suffix = 'macosx_10_9_x86_64-gf_1becaaa.tar.gz' typ = 'tar.gz' elif plat == 'macosx-arm64': - suffix = 'macosx_11_0_arm64-gf_f10e307.tar.gz' + suffix = 'macosx_11_0_arm64-gf_f26990f.tar.gz' typ = 'tar.gz' elif osname == 'win': if plat == "win-32": From 0a8dca0632185058a4438f792cebf04321212da2 Mon Sep 17 00:00:00 2001 From: Kulin Seth Date: Wed, 3 Mar 2021 14:57:05 -0800 Subject: [PATCH 0679/1270] 18532: Fix the numpy Apple M1 build. There was a build issue on numpy with M1: AssertionError: would build wheel with unsupported tag ('cp39', 'cp39', 'macosx_11_0_universal2') The issue was fixed in the packaging 20.5 and onwards, but that was not getting picked by setuptools. So explicitly adding this dependency in the toml file. The change was tested with: ``` python3 -m pip install --force-reinstall . ``` --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index b1ffd8c7651b..cd46aaf521df 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,7 @@ [build-system] # Minimum requirements for the build system to execute. requires = [ + "packaging==20.5", "setuptools<49.2.0", "wheel==0.36.2", "Cython>=0.29.21,<3.0", # Note: keep in sync with tools/cythonize.py From 4b914538a2fbabe1715493f325ad930735e025c9 Mon Sep 17 00:00:00 2001 From: Michael Lamparski Date: Wed, 3 Mar 2021 18:46:17 -0500 Subject: [PATCH 0680/1270] BUG: NameError in numpy.distutils.fcompiler.compaq Fix a simple mistake in commit da0497fdf35 which can produce a NameError when installing numpy in MinGW/MSYS2. 
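For context, a self-contained sketch of the corrected handler pattern (not the distutils code itself) is::

    try:
        raise AttributeError("_MSVCCompiler__root")
    except AttributeError as e:
        # The caught exception is bound to ``e``; the old code referenced the
        # undefined name ``msg`` here, so any AttributeError surfaced as a
        # NameError instead of being handled.
        if '_MSVCCompiler__root' in str(e):
            print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e,))
        else:
            raise
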
--- numpy/distutils/fcompiler/compaq.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py index 6ce590c7c821..1a356866a283 100644 --- a/numpy/distutils/fcompiler/compaq.py +++ b/numpy/distutils/fcompiler/compaq.py @@ -80,8 +80,8 @@ class CompaqVisualFCompiler(FCompiler): except DistutilsPlatformError: pass except AttributeError as e: - if '_MSVCCompiler__root' in str(msg): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg)) + if '_MSVCCompiler__root' in str(e): + print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) else: raise except IOError as e: From 0148bf8cdd7577730af8f1418c3a92dfad82595a Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 12 Feb 2021 17:34:02 +0100 Subject: [PATCH 0681/1270] TST: Validate the mypy exit-code Any value other than 0 and 1 is indicative of some sort of crash --- numpy/typing/tests/test_typing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 70355dcd916c..d6db68c83d40 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -57,7 +57,7 @@ def run_mypy() -> None: for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR): # Run mypy - stdout, stderr, _ = api.run([ + stdout, stderr, exit_code = api.run([ "--config-file", MYPY_INI, "--cache-dir", @@ -65,6 +65,7 @@ def run_mypy() -> None: directory, ]) assert not stderr, directory + assert exit_code in {0, 1}, stdout stdout = stdout.replace('*', '') # Parse the output From 7b432403396af7034628d18492d37b483757951b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 12 Feb 2021 17:53:23 +0100 Subject: [PATCH 0682/1270] TST: Improve a number of mypy-related exception messages --- numpy/typing/tests/test_typing.py | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index d6db68c83d40..be08c1359daa 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -40,6 +40,12 @@ def _key_func(key: str) -> str: return os.path.join(drive, tail.split(":", 1)[0]) +def _strip_filename(msg: str) -> str: + """Strip the filename from a mypy message.""" + _, tail = os.path.splitdrive(msg) + return tail.split(":", 1)[-1] + + @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.fixture(scope="module", autouse=True) @@ -64,8 +70,10 @@ def run_mypy() -> None: CACHE_DIR, directory, ]) - assert not stderr, directory - assert exit_code in {0, 1}, stdout + if stderr: + pytest.fail(f"Unexpected mypy standard error\n\n{stderr}") + elif exit_code not in {0, 1}: + pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}") stdout = stdout.replace('*', '') # Parse the output @@ -95,7 +103,9 @@ def test_success(path): # Alias `OUTPUT_MYPY` so that it appears in the local namespace output_mypy = OUTPUT_MYPY if path in output_mypy: - raise AssertionError("\n".join(v for v in output_mypy[path])) + msg = "Unexpected mypy output\n\n" + msg += "\n".join(_strip_filename(v) for v in output_mypy[path]) + raise AssertionError(msg) @pytest.mark.slow @@ -112,14 +122,15 @@ def test_fail(path): output_mypy = OUTPUT_MYPY assert path in output_mypy for error_line in output_mypy[path]: + error_line = _strip_filename(error_line) match = re.match( - r"^.+\.py:(?P\d+): (error|note): .+$", + r"(?P\d+): (error|note): .+$", 
error_line, ) if match is None: raise ValueError(f"Unexpected error line format: {error_line}") lineno = int(match.group('lineno')) - errors[lineno] += error_line + errors[lineno] += f'{error_line}\n' for i, line in enumerate(lines): lineno = i + 1 @@ -132,7 +143,7 @@ def test_fail(path): expected_error = errors.get(lineno) _test_fail(path, marker, expected_error, lineno) else: - pytest.fail(f"Error {repr(errors[lineno])} not found") + pytest.fail(f"Unexpected mypy output\n\n{errors[lineno]}") _FAIL_MSG1 = """Extra error at line {} @@ -251,8 +262,9 @@ def test_reveal(path): output_mypy = OUTPUT_MYPY assert path in output_mypy for error_line in output_mypy[path]: + error_line = _strip_filename(error_line) match = re.match( - r"^.+\.py:(?P\d+): note: .+$", + r"(?P\d+): note: .+$", error_line, ) if match is None: @@ -312,6 +324,8 @@ def test_extended_precision() -> None: for _msg in output_mypy[path]: *_, _lineno, msg_typ, msg = _msg.split(":") + + msg = _strip_filename(msg) lineno = int(_lineno) msg_typ = msg_typ.strip() assert msg_typ in {"error", "note"} From 2cc9e2994f14ccfb3cc276db08a9d632621fcb55 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 26 Jan 2021 13:26:15 +0100 Subject: [PATCH 0683/1270] ENH: Add dtype-support to the (ufunc-based) `ndarray` inplace magic methods --- numpy/__init__.pyi | 198 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 186 insertions(+), 12 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index dba4176f23d8..4964dfce22a8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -15,6 +15,7 @@ from numpy.typing import ( _SupportsArray, _NestedSequence, _RecursiveSequence, + _SupportsArray, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt_co, @@ -2202,18 +2203,191 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): ) -> Any: ... # `np.generic` does not support inplace operations - def __iadd__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __isub__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __imul__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __itruediv__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __ifloordiv__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __ipow__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __imod__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __ilshift__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __irshift__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __iand__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __ixor__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... - def __ior__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... + @overload # type: ignore[misc] + def __iadd__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __iadd__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + @overload + def __iadd__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __iadd__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __iadd__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + @overload + def __iadd__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... 
+ @overload + def __iadd__(self: _ArrayND[timedelta64], other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... + @overload + def __iadd__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... + @overload + def __iadd__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __iadd__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + + @overload # type: ignore[misc] + def __isub__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __isub__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __isub__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __isub__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __isub__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + @overload + def __isub__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __isub__(self: _ArrayND[timedelta64], other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... + @overload + def __isub__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... + @overload + def __isub__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __isub__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + + @overload # type: ignore[misc] + def __imul__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __imul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + @overload + def __imul__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __imul__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __imul__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + @overload + def __imul__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __imul__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... + @overload + def __imul__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __imul__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + + @overload # type: ignore[misc] + def __itruediv__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __itruediv__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + @overload + def __itruediv__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __itruediv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __itruediv__(self: _ArrayND[timedelta64], other: _ArrayLikeInt_co) -> _ArrayND[timedelta64]: ... + @overload + def __itruediv__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __itruediv__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... 
+ + @overload # type: ignore[misc] + def __ifloordiv__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __ifloordiv__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __ifloordiv__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __ifloordiv__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + @overload + def __ifloordiv__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __ifloordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __ifloordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeInt_co) -> _ArrayND[timedelta64]: ... + @overload + def __ifloordiv__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __ifloordiv__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + + @overload # type: ignore[misc] + def __ipow__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __ipow__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __ipow__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __ipow__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + @overload + def __ipow__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __ipow__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __ipow__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + + @overload # type: ignore[misc] + def __imod__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __imod__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __imod__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __imod__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + @overload + def __imod__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[timedelta64]: ... + @overload + def __imod__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __imod__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + + @overload # type: ignore[misc] + def __ilshift__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __ilshift__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __ilshift__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __ilshift__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __ilshift__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... 
+ + @overload # type: ignore[misc] + def __irshift__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __irshift__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __irshift__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __irshift__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __irshift__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + + @overload # type: ignore[misc] + def __iand__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __iand__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + @overload + def __iand__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __iand__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __iand__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __iand__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + + @overload # type: ignore[misc] + def __ixor__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __ixor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + @overload + def __ixor__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __ixor__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __ixor__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __ixor__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + + @overload # type: ignore[misc] + def __ior__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + @overload + def __ior__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + @overload + def __ior__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + @overload + def __ior__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + @overload + def __ior__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + @overload + def __ior__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... 
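# Taken together, the in-place overloads above all follow one pattern: the
# operand must be safely castable to the array's existing dtype, and the
# annotated return type is the array itself with that dtype unchanged.  A
# small sketch in the style of the typing test files (names are illustrative;
# the commented-out line is what a type checker such as mypy would flag):
from __future__ import annotations

from typing import Any
import numpy as np

AR_i: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2])
AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0, 2.0])

AR_f += AR_i    # accepted: float64 arrays take integer operands, dtype unchanged
AR_i |= AR_i    # accepted: bitwise in-place operators are defined for integer arrays
# AR_i += AR_f  # flagged: float64 values cannot be cast back into an int64 array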
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property From 982468e33aea84f05fd329baaceebac62245bfbc Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 12 Feb 2021 18:34:04 +0100 Subject: [PATCH 0684/1270] TST: Add typing tests for the inplace `ndarray` magic methods --- numpy/typing/tests/data/fail/arithmetic.py | 49 +++++++++++++-- numpy/typing/tests/data/pass/arithmetic.py | 73 ++++++++++++++++++++++ 2 files changed, 116 insertions(+), 6 deletions(-) diff --git a/numpy/typing/tests/data/fail/arithmetic.py b/numpy/typing/tests/data/fail/arithmetic.py index 1ca8a7b70092..02bbffa53ba3 100644 --- a/numpy/typing/tests/data/fail/arithmetic.py +++ b/numpy/typing/tests/data/fail/arithmetic.py @@ -61,13 +61,50 @@ AR_M // AR_LIKE_M # E: Unsupported operand types _3 = AR_m // AR_LIKE_b # E: Need type annotation -AR_m // AR_LIKE_c # E: Unsupported operand types +AR_m // AR_LIKE_c # E: Unsupported operand types -AR_b // AR_LIKE_m # E: Unsupported operand types -AR_u // AR_LIKE_m # E: Unsupported operand types -AR_i // AR_LIKE_m # E: Unsupported operand types -AR_f // AR_LIKE_m # E: Unsupported operand types -AR_c // AR_LIKE_m # E: Unsupported operand types +AR_b // AR_LIKE_m # E: Unsupported operand types +AR_u // AR_LIKE_m # E: Unsupported operand types +AR_i // AR_LIKE_m # E: Unsupported operand types +AR_f // AR_LIKE_m # E: Unsupported operand types +AR_c // AR_LIKE_m # E: Unsupported operand types + +# Array multiplication + +AR_b *= AR_LIKE_u # E: incompatible type +AR_b *= AR_LIKE_i # E: incompatible type +AR_b *= AR_LIKE_f # E: incompatible type +AR_b *= AR_LIKE_c # E: incompatible type +AR_b *= AR_LIKE_m # E: incompatible type + +AR_u *= AR_LIKE_i # E: incompatible type +AR_u *= AR_LIKE_f # E: incompatible type +AR_u *= AR_LIKE_c # E: incompatible type +AR_u *= AR_LIKE_m # E: incompatible type + +AR_i *= AR_LIKE_f # E: incompatible type +AR_i *= AR_LIKE_c # E: incompatible type +AR_i *= AR_LIKE_m # E: incompatible type + +AR_f *= AR_LIKE_c # E: incompatible type +AR_f *= AR_LIKE_m # E: incompatible type + +# Array power + +AR_b **= AR_LIKE_b # E: incompatible type +AR_b **= AR_LIKE_u # E: incompatible type +AR_b **= AR_LIKE_i # E: incompatible type +AR_b **= AR_LIKE_f # E: incompatible type +AR_b **= AR_LIKE_c # E: incompatible type + +AR_u **= AR_LIKE_i # E: incompatible type +AR_u **= AR_LIKE_f # E: incompatible type +AR_u **= AR_LIKE_c # E: incompatible type + +AR_i **= AR_LIKE_f # E: incompatible type +AR_i **= AR_LIKE_c # E: incompatible type + +AR_f **= AR_LIKE_c # E: incompatible type # Scalars diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 86e7a92a801d..7a297cfc5df6 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -42,6 +42,18 @@ def __floordiv__(self, value: Any) -> Object: def __rfloordiv__(self, value: Any) -> Object: return self + def __mul__(self, value: Any) -> Object: + return self + + def __rmul__(self, value: Any) -> Object: + return self + + def __pow__(self, value: Any) -> Object: + return self + + def __rpow__(self, value: Any) -> Object: + return self + AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True]) AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) @@ -267,6 +279,67 @@ def __rfloordiv__(self, value: Any) -> Object: AR_LIKE_f // AR_O AR_LIKE_O // AR_O +# Inplace multiplication + +AR_b *= AR_LIKE_b + +AR_u *= AR_LIKE_b +AR_u *= AR_LIKE_u + +AR_i *= AR_LIKE_b +AR_i *= AR_LIKE_u +AR_i *= 
AR_LIKE_i + +AR_f *= AR_LIKE_b +AR_f *= AR_LIKE_u +AR_f *= AR_LIKE_i +AR_f *= AR_LIKE_f + +AR_c *= AR_LIKE_b +AR_c *= AR_LIKE_u +AR_c *= AR_LIKE_i +AR_c *= AR_LIKE_f +AR_c *= AR_LIKE_c + +AR_m *= AR_LIKE_b +AR_m *= AR_LIKE_u +AR_m *= AR_LIKE_i +AR_m *= AR_LIKE_f + +AR_O *= AR_LIKE_b +AR_O *= AR_LIKE_u +AR_O *= AR_LIKE_i +AR_O *= AR_LIKE_f +AR_O *= AR_LIKE_c +AR_O *= AR_LIKE_O + +# Inplace power + +AR_u **= AR_LIKE_b +AR_u **= AR_LIKE_u + +AR_i **= AR_LIKE_b +AR_i **= AR_LIKE_u +AR_i **= AR_LIKE_i + +AR_f **= AR_LIKE_b +AR_f **= AR_LIKE_u +AR_f **= AR_LIKE_i +AR_f **= AR_LIKE_f + +AR_c **= AR_LIKE_b +AR_c **= AR_LIKE_u +AR_c **= AR_LIKE_i +AR_c **= AR_LIKE_f +AR_c **= AR_LIKE_c + +AR_O **= AR_LIKE_b +AR_O **= AR_LIKE_u +AR_O **= AR_LIKE_i +AR_O **= AR_LIKE_f +AR_O **= AR_LIKE_c +AR_O **= AR_LIKE_O + # unary ops -c16 From 376ba3a9f50397dc94b98253980de0bf0ebc0b68 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 15 Feb 2021 01:32:39 +0100 Subject: [PATCH 0685/1270] MAINT: Disallow `timedelta64 % integer`-based operations `timedelta64` does not support the `__mod__` and `__divmod__` operations with integers and booleans --- numpy/__init__.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 4964dfce22a8..29e93526b591 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1641,7 +1641,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + def __mod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[timedelta64]: ... @overload def __mod__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -1663,7 +1663,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + def __rmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[timedelta64]: ... @overload def __rmod__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -1685,7 +1685,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayOrScalar[floating[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> Union[Tuple[int64, timedelta64], Tuple[_ArrayND[int64], _ArrayND[timedelta64]]]: ... + def __divmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Union[Tuple[int64, timedelta64], Tuple[_ArrayND[int64], _ArrayND[timedelta64]]]: ... @overload def __divmod__( self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], @@ -1703,7 +1703,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayOrScalar[floating[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> Union[Tuple[int64, timedelta64], Tuple[_ArrayND[int64], _ArrayND[timedelta64]]]: ... 
+ def __rdivmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Union[Tuple[int64, timedelta64], Tuple[_ArrayND[int64], _ArrayND[timedelta64]]]: ... @overload def __rdivmod__( self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], From c6bdbad5458c3b48ee92350fa0b723ee3b98743f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 4 Mar 2021 12:48:39 +0100 Subject: [PATCH 0686/1270] MAINT: Removed a redundant overload --- numpy/__init__.pyi | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 29e93526b591..9cdc4f2e1b72 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2227,8 +2227,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload # type: ignore[misc] def __isub__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... @overload - def __isub__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... - @overload def __isub__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... @overload def __isub__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... From 7134dcfe5f43146ffd1ba8d9c58504ec7146ea3f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 4 Mar 2021 08:10:12 -0700 Subject: [PATCH 0687/1270] Update azure-pipelines.yml for Azure Pipelines --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index d55bb6cef481..af6799e442e2 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -3,7 +3,7 @@ trigger: batch: False branches: include: - - master + - main - maintenance/* From 954605ec08ad833899ffb956395f2d70f8eea2df Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Thu, 4 Mar 2021 07:11:41 -0800 Subject: [PATCH 0688/1270] MAINT: Update master to main after branch rename (gh-18544) * Upload docs to main branch of neps/devdocs * Update `master` to `main` to be consistent with current state of renaming * Remove badges from README * Replace remaining instances of `master` with `main` * Add back logo * Fix CircleCI branch checks * Fix GA branch name * master->main in issue templates. * master->main for 2 links to numpy archive. 
* Change master to main in NEP 23 Co-authored-by: Ross Barnowski --- .circleci/config.yml | 8 +-- .github/CONTRIBUTING.md | 2 +- .github/ISSUE_TEMPLATE/bug-report.md | 2 +- .github/ISSUE_TEMPLATE/feature-request.md | 2 +- .github/ISSUE_TEMPLATE/post-install.md | 2 +- .github/workflows/build_test.yml | 4 +- README.md | 7 --- benchmarks/README.rst | 2 +- doc/DISTUTILS.rst.txt | 2 +- doc/HOWTO_RELEASE.rst.txt | 12 ++--- doc/RELEASE_WALKTHROUGH.rst.txt | 6 +-- doc/TESTS.rst.txt | 2 +- doc/neps/nep-0000.rst | 2 +- doc/neps/nep-0001-npy-format.rst | 2 +- doc/neps/nep-0016-abstract-array.rst | 2 +- doc/neps/nep-0023-backwards-compatibility.rst | 12 ++--- doc/neps/nep-0024-missing-data-2.rst | 2 +- doc/neps/nep-0025-missing-data-3.rst | 2 +- doc/neps/nep-0026-missing-data-summary.rst | 8 +-- doc/neps/nep-0036-fair-play.rst | 2 +- doc/neps/nep-0046-sponsorship-guidelines.rst | 2 +- doc/source/conf.py | 3 +- doc/source/dev/development_workflow.rst | 50 +++++++++---------- doc/source/dev/gitwash/development_setup.rst | 20 ++++---- doc/source/dev/gitwash/dot2_dot3.rst | 10 ++-- doc/source/dev/gitwash/git_links.inc | 2 +- doc/source/dev/governance/governance.rst | 2 +- doc/source/dev/index.rst | 10 ++-- doc/source/dev/reviewer_guidelines.rst | 4 +- doc/source/docs/howto_build_docs.rst | 2 +- doc/source/user/how-to-how-to.rst | 2 +- numpy/_version.py | 3 +- numpy/core/src/multiarray/ctors.c | 2 +- shippable.yml | 2 +- tools/ci/push_docs_to_repo.py | 2 +- tools/travis-test.sh | 2 +- 36 files changed, 97 insertions(+), 104 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index ea50ca84fa52..6f4a32513e4a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -91,7 +91,7 @@ jobs: - run: name: deploy devdocs command: | - if [ "${CIRCLE_BRANCH}" == "master" ]; then + if [ "${CIRCLE_BRANCH}" == "main" ]; then touch doc/build/html/.nojekyll ./tools/ci/push_docs_to_repo.py doc/build/html \ @@ -101,7 +101,7 @@ jobs: --message "Docs build of $CIRCLE_SHA1" \ --force else - echo "Not on the master branch; skipping deployment" + echo "Not on the main branch; skipping deployment" fi - add_ssh_keys: @@ -120,7 +120,7 @@ jobs: - run: name: deploy neps command: | - if [ "${CIRCLE_BRANCH}" == "master" ]; then + if [ "${CIRCLE_BRANCH}" == "main" ]; then touch doc/neps/_build/html/.nojekyll ./tools/ci/push_docs_to_repo.py doc/neps/_build/html \ @@ -130,5 +130,5 @@ jobs: --message "Docs build of $CIRCLE_SHA1" \ --force else - echo "Not on the master branch; skipping deployment" + echo "Not on the main branch; skipping deployment" fi diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index da599095647a..8f16950f765e 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -8,7 +8,7 @@ also include a brief, self-contained code example that demonstrates the problem. 
If you are reporting a segfault please include a GDB traceback, which you can generate by following -[these instructions.](https://github.com/numpy/numpy/blob/master/doc/source/dev/development_environment.rst#debugging) +[these instructions.](https://github.com/numpy/numpy/blob/main/doc/source/dev/development_environment.rst#debugging) ## Contributing code diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 78ffc1063eaf..6da1f7370d00 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -20,7 +20,7 @@ import numpy as np +https://github.com/numpy/numpy/blob/main/doc/source/dev/development_environment.rst#debugging --> diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 00c6f59c5faf..68872ec06caf 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -12,5 +12,5 @@ post your idea on the [numpy-discussion mailing list] (https://mail.python.org/mailman/listinfo/numpy-discussion) to explain your reasoning in addition to opening an issue or pull request. You can also check out our [Contributor Guide] -(https://github.com/numpy/numpy/blob/master/doc/source/dev/index.rst) if you +(https://github.com/numpy/numpy/blob/main/doc/source/dev/index.rst) if you need more information. --> diff --git a/.github/ISSUE_TEMPLATE/post-install.md b/.github/ISSUE_TEMPLATE/post-install.md index c0ec7896a40d..11b91384c1fd 100644 --- a/.github/ISSUE_TEMPLATE/post-install.md +++ b/.github/ISSUE_TEMPLATE/post-install.md @@ -15,7 +15,7 @@ labels: 32 - Installation +https://github.com/numpy/numpy/blob/main/doc/source/dev/development_environment.rst#debugging --> diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 5366fe75e76f..2f792339b1b6 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -3,11 +3,11 @@ name: Build_Test on: push: branches: - - master + - main - maintenance/** pull_request: branches: - - master + - main - maintenance/** defaults: diff --git a/README.md b/README.md index 9207d00c5e97..a87563aec22f 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,5 @@ # NumPy -[![Travis CI](https://img.shields.io/travis/com/numpy/numpy/master?label=Travis%20CI)]( - https://travis-ci.com/github/numpy/numpy) -[![Azure](https://dev.azure.com/numpy/numpy/_apis/build/status/azure-pipeline%20numpy.numpy)]( - https://dev.azure.com/numpy/numpy/_build/latest?definitionId=5) -[![codecov](https://codecov.io/gh/numpy/numpy/branch/master/graph/badge.svg)]( - https://codecov.io/gh/numpy/numpy) - NumPy is the fundamental package needed for scientific computing with Python. 
- **Website:** https://www.numpy.org diff --git a/benchmarks/README.rst b/benchmarks/README.rst index f56c253399ad..2700e95e7ab2 100644 --- a/benchmarks/README.rst +++ b/benchmarks/README.rst @@ -50,7 +50,7 @@ Compare change in benchmark results to another version/commit/branch:: python runtests.py --bench-compare v1.6.2 bench_core python runtests.py --bench-compare 8bf4e9b bench_core - python runtests.py --bench-compare master bench_core + python runtests.py --bench-compare main bench_core All of the commands above display the results in plain text in the console, and the results are not saved for comparison with diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt index c58a423c0bfb..539a3b9c121a 100644 --- a/doc/DISTUTILS.rst.txt +++ b/doc/DISTUTILS.rst.txt @@ -472,7 +472,7 @@ by these modules during the build process are ready to be compiled. This form of generic typing is also supported for C header files (preprocessed to produce ``.h`` files). -.. _conv_template.py: https://github.com/numpy/numpy/blob/master/numpy/distutils/conv_template.py +.. _conv_template.py: https://github.com/numpy/numpy/blob/main/numpy/distutils/conv_template.py Useful functions in ``numpy.distutils.misc_util`` ------------------------------------------------- diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt index 9dbee320c97a..c64a36691705 100644 --- a/doc/HOWTO_RELEASE.rst.txt +++ b/doc/HOWTO_RELEASE.rst.txt @@ -18,7 +18,7 @@ Source tree NumPy Docs ---------- -- https://github.com/numpy/numpy/blob/master/doc/HOWTO_RELEASE.rst.txt +- https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst.txt SciPy.org wiki @@ -35,7 +35,7 @@ Supported platforms and versions ================================ :ref:`NEP 29 ` outlines which Python versions are supported; For the first half of 2020, this will be Python >= 3.6. We test -NumPy against all these versions every time we merge code to master. Binary +NumPy against all these versions every time we merge code to main. Binary installers may be available for a subset of these versions (see below). OS X @@ -185,7 +185,7 @@ A typical release schedule is one beta, two release candidates and a final release. It's best to discuss the timing on the mailing list first, in order for people to get their commits in on time, get doc wiki edits merged, etc. After a date is set, create a new maintenance/x.y.z branch, add new empty -release notes for the next version in the master branch and update the Trac +release notes for the next version in the main branch and update the Trac Milestones. @@ -336,8 +336,8 @@ to public keyservers, with a command such as:: gpg --send-keys -Update the version of the master branch ---------------------------------------- +Update the version of the main branch +------------------------------------- Increment the release number in setup.py. Release candidates should have "rc1" (or "rc2", "rcN") appended to the X.Y.Z format. @@ -492,5 +492,5 @@ After the final release is announced, a few administrative tasks are left to be done: - Forward port changes in the release branch to release notes and release - scripts, if any, to master branch. + scripts, if any, to main branch. - Update the Milestones in Trac. 
diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt index 1d3f54edf16b..4fbc7af1c6f4 100644 --- a/doc/RELEASE_WALKTHROUGH.rst.txt +++ b/doc/RELEASE_WALKTHROUGH.rst.txt @@ -29,7 +29,7 @@ appended to ``doc/release/1.19.0-notes.rst`` for patch release, though not for new releases like ``1.19.0``, as the changelogs for ``*.0`` releases tend to be excessively long. The ``doc/source/release.rst`` file should also be updated with a link to the new release notes. These changes should be committed to the -maintenance branch, and later will be forward ported to master. The changelog +maintenance branch, and later will be forward ported to main. The changelog should be reviewed for name duplicates or short names and the ``.mailmap`` file updated if needed. @@ -292,7 +292,7 @@ python-announce-list is BCC so that replies will not be sent to that list. Post-Release Tasks ------------------ -Checkout master and forward port the documentation changes:: +Checkout main and forward port the documentation changes:: $ git checkout -b post-1.19.0-release-update $ git checkout maintenance/1.19.x doc/source/release/1.19.0-notes.rst @@ -301,7 +301,7 @@ Checkout master and forward port the documentation changes:: $ gvim doc/source/release.rst # Add link to new notes $ git add doc/changelog/1.19.0-changelog.rst doc/source/release/1.19.0-notes.rst $ git status # check status before commit - $ git commit -a -m"REL: Update master after 1.19.0 release." + $ git commit -a -m"REL: Update main after 1.19.0 release." $ git push origin HEAD Go to github and make a PR. diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index 0cb24684fc7f..21cc0867333f 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -278,7 +278,7 @@ minor variations, it can be helpful to create a base class containing all the common tests, and then create a subclass for each variation. Several examples of this technique exist in NumPy; below are excerpts from one in `numpy/linalg/tests/test_linalg.py -`__:: +`__:: class LinalgTestCase: def test_single(self): diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst index 590976081723..7f841b7e28fb 100644 --- a/doc/neps/nep-0000.rst +++ b/doc/neps/nep-0000.rst @@ -278,7 +278,7 @@ References and Footnotes .. [1] This historical record is available by the normal git commands for retrieving older revisions, and can also be browsed on - `GitHub `_. + `GitHub `_. .. [2] The URL for viewing NEPs on the web is https://www.numpy.org/neps/. diff --git a/doc/neps/nep-0001-npy-format.rst b/doc/neps/nep-0001-npy-format.rst index 3a28247ab9ed..fdf4ae47ae26 100644 --- a/doc/neps/nep-0001-npy-format.rst +++ b/doc/neps/nep-0001-npy-format.rst @@ -292,7 +292,7 @@ included in the 1.9.0 release of numpy. Specifically, the file format.py in this directory implements the format as described here. - https://github.com/numpy/numpy/blob/master/numpy/lib/format.py + https://github.com/numpy/numpy/blob/main/numpy/lib/format.py References diff --git a/doc/neps/nep-0016-abstract-array.rst b/doc/neps/nep-0016-abstract-array.rst index 63ad600e9b3e..9d21abe6f4bb 100644 --- a/doc/neps/nep-0016-abstract-array.rst +++ b/doc/neps/nep-0016-abstract-array.rst @@ -141,7 +141,7 @@ Notes: In practice, either way we'd only do the full test after first checking for well-known types like ``ndarray``, ``list``, etc. `This is how NumPy currently checks for other double-underscore attributes -`__ +`__ and the same idea applies here to either approach. 
So these numbers won't affect the common case, just the case where we actually have an ``AbstractArray``, or else another third-party object that will end up diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index bdf78b05ae62..8b6f4cd1186a 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -95,10 +95,10 @@ that can be used to assess such impact include: analysis tools to determine where and how the functionality is used. - Test prominent downstream libraries against a development build of NumPy containing the proposed change to get real-world data on its impact. -- Make a change in master and revert it before release if it causes problems. - We encourage other packages to test against NumPy's master branch and if - that's too burdensome, then at least to test pre-releases. This often - turns up issues quickly. +- Make a change on the main branch and revert it before release if it + causes problems. We encourage other packages to test against + NumPy's main branch and if that's too burdensome, then at least to + test pre-releases. This often turns up issues quickly. Alternatives to deprecations ```````````````````````````` @@ -279,8 +279,8 @@ introduced for v1.11.0 and turned into a hard error for v1.12.0. This change was disruptive, however it did catch real bugs in, e.g., SciPy and scikit-learn. Overall the change was worth the cost, and introducing it in -master first to allow testing, then removing it again before a release, is a -useful strategy. +the main branch first to allow testing, then removing it again before +a release, is a useful strategy. Similar deprecations that also look like good examples of cleanups/improvements: diff --git a/doc/neps/nep-0024-missing-data-2.rst b/doc/neps/nep-0024-missing-data-2.rst index 8e63629f32be..903ece1ba8e0 100644 --- a/doc/neps/nep-0024-missing-data-2.rst +++ b/doc/neps/nep-0024-missing-data-2.rst @@ -14,7 +14,7 @@ Abstract -------- *Context: this NEP was written as an alternative to NEP 12, which at the time of writing -had an implementation that was merged into the NumPy master branch.* +had an implementation that was merged into the NumPy main branch.* The principle of this NEP is to separate the APIs for masking and for missing values, according to diff --git a/doc/neps/nep-0025-missing-data-3.rst b/doc/neps/nep-0025-missing-data-3.rst index 81045652e17d..1756ce491188 100644 --- a/doc/neps/nep-0025-missing-data-3.rst +++ b/doc/neps/nep-0025-missing-data-3.rst @@ -14,7 +14,7 @@ Abstract *Context: this NEP was written as an additional alternative to NEP 12 (NEP 24 is another alternative), which at the time of writing had an implementation -that was merged into the NumPy master branch.* +that was merged into the NumPy main branch.* To try and make more progress on the whole missing values/masked arrays/... debate, it seems useful to have a more technical discussion of the pieces diff --git a/doc/neps/nep-0026-missing-data-summary.rst b/doc/neps/nep-0026-missing-data-summary.rst index f25ce9b91274..49d89d828449 100644 --- a/doc/neps/nep-0026-missing-data-summary.rst +++ b/doc/neps/nep-0026-missing-data-summary.rst @@ -17,7 +17,7 @@ The debate about how NumPy should handle missing data, a subject with many preexisting approaches, requirements, and conventions, has been long and contentious. 
There has been more than one proposal for how to implement support into NumPy, and there is a testable implementation which is -merged into NumPy's current master. The vast number of emails and differing +merged into NumPy's current main. The vast number of emails and differing points of view has made it difficult for interested parties to understand the issues and be comfortable with the direction NumPy is going. @@ -565,7 +565,7 @@ Recommendations for Moving Forward we're going to have to figure out how to experiment with such changes out-of-core if NumPy is to continue to evolve without forking -- might as well do it now. The existing code can live in - master, disabled, or it can live in a branch -- it'll still be there + the main branch, be disabled, or live its own branch -- it'll still be there once we know what we're doing. **Mark** thinks we should: @@ -576,8 +576,8 @@ Recommendations for Moving Forward A more detailed rationale for this recommendation is: * A solid preliminary NA-mask implementation is currently in NumPy - master. This implementation has been extensively tested - against scipy and other third-party packages, and has been in master + main. This implementation has been extensively tested + against scipy and other third-party packages, and has been in main in a stable state for a significant amount of time. * This implementation integrates deeply with the core, providing an interface which is usable in the same way R's NA support is. It diff --git a/doc/neps/nep-0036-fair-play.rst b/doc/neps/nep-0036-fair-play.rst index 4c8195b71607..34c8f6eb8286 100644 --- a/doc/neps/nep-0036-fair-play.rst +++ b/doc/neps/nep-0036-fair-play.rst @@ -48,7 +48,7 @@ identifying the problematic release, locating alterations, and determining an appropriate course of action. During a community call on `October 16th, 2019 -`__ +`__ the community resolved to draft guidelines on the distribution of modified NumPy versions. diff --git a/doc/neps/nep-0046-sponsorship-guidelines.rst b/doc/neps/nep-0046-sponsorship-guidelines.rst index bce125212fa3..b8b312aa52d9 100644 --- a/doc/neps/nep-0046-sponsorship-guidelines.rst +++ b/doc/neps/nep-0046-sponsorship-guidelines.rst @@ -240,7 +240,7 @@ Discussion References and Footnotes ------------------------ -- `Inside NumPy: preparing for the next decade `__ presentation at SciPy'19 discussing the impact of the first NumPy grant. +- `Inside NumPy: preparing for the next decade `__ presentation at SciPy'19 discussing the impact of the first NumPy grant. - `Issue `__ and `email `__ where IBM offered a $5,000 bounty for VSX SIMD support diff --git a/doc/source/conf.py b/doc/source/conf.py index 7eb96eb22a28..dcf60ada9297 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -94,6 +94,7 @@ class PyTypeObject(ctypes.Structure): # The suffix of source filenames. source_suffix = '.rst' +# Will change to `root_doc` in Sphinx 4 master_doc = 'index' # General substitutions. 
@@ -442,7 +443,7 @@ def linkcode_resolve(domain, info): linespec = "" if 'dev' in numpy.__version__: - return "https://github.com/numpy/numpy/blob/master/numpy/%s%s" % ( + return "https://github.com/numpy/numpy/blob/main/numpy/%s%s" % ( fn, linespec) else: return "https://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % ( diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst index 34535b2f54b6..8c56f6fb2cbc 100644 --- a/doc/source/dev/development_workflow.rst +++ b/doc/source/dev/development_workflow.rst @@ -49,10 +49,10 @@ First, fetch new commits from the ``upstream`` repository: git fetch upstream -Then, create a new branch based on the master branch of the upstream +Then, create a new branch based on the main branch of the upstream repository:: - git checkout -b my-new-feature upstream/master + git checkout -b my-new-feature upstream/main .. _editing-workflow: @@ -147,7 +147,7 @@ In more detail It may be the case that while you were working on your edits, new commits have been added to ``upstream`` that affect your work. In this case, follow the -:ref:`rebasing-on-master` section of this document to apply those changes to +:ref:`rebasing-on-main` section of this document to apply those changes to your branch. .. _writing-the-commit-message: @@ -226,10 +226,10 @@ mailing list may also be useful. -.. _rebasing-on-master: +.. _rebasing-on-main: -Rebasing on master -================== +Rebasing on main +================ This updates your feature branch with changes from the upstream `NumPy github`_ repo. If you do not absolutely need to do this, try to avoid doing @@ -244,8 +244,8 @@ Next, you need to update the feature branch:: git checkout my-new-feature # make a backup in case you mess up git branch tmp my-new-feature - # rebase on upstream master branch - git rebase upstream/master + # rebase on upstream main branch + git rebase upstream/main If you have made changes to files that have changed also upstream, this may generate merge conflicts that you need to resolve. See @@ -258,7 +258,7 @@ Finally, remove the backup branch upon a successful rebase:: .. note:: - Rebasing on master is preferred over merging upstream back to your + Rebasing on main is preferred over merging upstream back to your branch. Using ``git merge`` and ``git pull`` is discouraged when working on feature branches. @@ -325,7 +325,7 @@ Suppose that the commit history looks like this:: 29001ed Add pre-nep for a couple of structured_array_extensions. ... -and ``6ad92e5`` is the last commit in the ``master`` branch. Suppose we +and ``6ad92e5`` is the last commit in the ``main`` branch. Suppose we want to make the following changes: * Rewrite the commit message for ``13d7934`` to something more sensible. @@ -392,7 +392,7 @@ Deleting a branch on github_ :: - git checkout master + git checkout main # delete branch locally git branch -D my-unwanted-branch # delete branch on github @@ -451,25 +451,25 @@ Backporting =========== Backporting is the process of copying new feature/fixes committed in -`numpy/master`_ back to stable release branches. To do this you make a branch +`numpy/main`_ back to stable release branches. To do this you make a branch off the branch you are backporting to, cherry pick the commits you want from -``numpy/master``, and then submit a pull request for the branch containing the +``numpy/main``, and then submit a pull request for the branch containing the backport. 1. First, you need to make the branch you will work on. 
This needs to be - based on the older version of NumPy (not master):: + based on the older version of NumPy (not main):: # Make a new branch based on numpy/maintenance/1.8.x, # backport-3324 is our new name for the branch. git checkout -b backport-3324 upstream/maintenance/1.8.x -2. Now you need to apply the changes from master to this branch using +2. Now you need to apply the changes from main to this branch using `git cherry-pick`_:: # Update remote git fetch upstream # Check the commit log for commits to cherry pick - git log upstream/master + git log upstream/main # This pull request included commits aa7a047 to c098283 (inclusive) # so you use the .. syntax (for a range of commits), the ^ makes the # range inclusive. @@ -480,7 +480,7 @@ backport. 3. You might run into some conflicts cherry picking here. These are resolved the same way as merge/rebase conflicts. Except here you can - use `git blame`_ to see the difference between master and the + use `git blame`_ to see the difference between main and the backported branch to make sure nothing gets screwed up. 4. Push the new branch to your Github repository:: @@ -488,8 +488,8 @@ backport. git push -u origin backport-3324 5. Finally make a pull request using Github. Make sure it is against the - maintenance branch and not master, Github will usually suggest you - make the pull request against master. + maintenance branch and not main, Github will usually suggest you + make the pull request against main. .. _pushing-to-main: @@ -499,7 +499,7 @@ Pushing changes to the main repo *Requires commit rights to the main NumPy repo.* When you have a set of "ready" changes in a feature branch ready for -NumPy's ``master`` or ``maintenance`` branches, you can push +NumPy's ``main`` or ``maintenance`` branches, you can push them to ``upstream`` as follows: 1. First, merge or rebase on the target branch. @@ -507,23 +507,23 @@ them to ``upstream`` as follows: a) Only a few, unrelated commits then prefer rebasing:: git fetch upstream - git rebase upstream/master + git rebase upstream/main - See :ref:`rebasing-on-master`. + See :ref:`rebasing-on-main`. b) If all of the commits are related, create a merge commit:: git fetch upstream - git merge --no-ff upstream/master + git merge --no-ff upstream/main 2. Check that what you are going to push looks sensible:: - git log -p upstream/master.. + git log -p upstream/main.. git log --oneline --graph 3. Push to upstream:: - git push upstream my-feature-branch:master + git push upstream my-feature-branch:main .. note:: diff --git a/doc/source/dev/gitwash/development_setup.rst b/doc/source/dev/gitwash/development_setup.rst index a7e9c28b9611..badb73ca0c70 100644 --- a/doc/source/dev/gitwash/development_setup.rst +++ b/doc/source/dev/gitwash/development_setup.rst @@ -100,8 +100,8 @@ Make the local copy #. Set up your repository so ``git pull`` pulls from ``upstream`` by default: :: - git config branch.master.remote upstream - git config branch.master.merge refs/heads/master + git config branch.main.remote upstream + git config branch.main.merge refs/heads/main ****************************************************************************** Look it over @@ -109,17 +109,17 @@ Look it over #. 
The branches shown by ``git branch -a`` will include - - the ``master`` branch you just cloned on your own machine - - the ``master`` branch from your fork on GitHub, which git named + - the ``main`` branch you just cloned on your own machine + - the ``main`` branch from your fork on GitHub, which git named ``origin`` by default - - the ``master`` branch on the the main NumPy repo, which you named + - the ``main`` branch on the the main NumPy repo, which you named ``upstream``. :: - master - remotes/origin/master - remotes/upstream/master + main + remotes/origin/main + remotes/upstream/main If ``upstream`` isn't there, it will be added after you access the NumPy repo with a command like ``git fetch`` or ``git pull``. @@ -139,8 +139,8 @@ Look it over user.name=Your Name remote.origin.url=git@github.com:your-github-id/numpy.git remote.origin.fetch=+refs/heads/*:refs/remotes/origin/* - branch.master.remote=upstream - branch.master.merge=refs/heads/master + branch.main.remote=upstream + branch.main.merge=refs/heads/main remote.upstream.url=https://github.com/numpy/numpy.git remote.upstream.fetch=+refs/heads/*:refs/remotes/upstream/* diff --git a/doc/source/dev/gitwash/dot2_dot3.rst b/doc/source/dev/gitwash/dot2_dot3.rst index 7759e2e60d68..30852b5ad387 100644 --- a/doc/source/dev/gitwash/dot2_dot3.rst +++ b/doc/source/dev/gitwash/dot2_dot3.rst @@ -7,22 +7,22 @@ Thanks to Yarik Halchenko for this explanation. Imagine a series of commits A, B, C, D... Imagine that there are two -branches, *topic* and *master*. You branched *topic* off *master* when -*master* was at commit 'E'. The graph of the commits looks like this:: +branches, *topic* and *main*. You branched *topic* off *main* when +*main* was at commit 'E'. The graph of the commits looks like this:: A---B---C topic / - D---E---F---G master + D---E---F---G main Then:: - git diff master..topic + git diff main..topic will output the difference from G to C (i.e. with effects of F and G), while:: - git diff master...topic + git diff main...topic would output just differences in the topic branch (i.e. only A, B, and C). diff --git a/doc/source/dev/gitwash/git_links.inc b/doc/source/dev/gitwash/git_links.inc index 8032dca41e97..8126cf9ac6cf 100644 --- a/doc/source/dev/gitwash/git_links.inc +++ b/doc/source/dev/gitwash/git_links.inc @@ -47,7 +47,7 @@ .. _ipython git workflow: https://mail.python.org/pipermail/ipython-dev/2010-October/005632.html .. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html .. _git foundation: http://matthew-brett.github.com/pydagogue/foundation.html -.. _numpy/master: https://github.com/numpy/numpy +.. _numpy/main: https://github.com/numpy/numpy .. _git cherry-pick: https://www.kernel.org/pub/software/scm/git/docs/git-cherry-pick.html .. _git blame: https://www.kernel.org/pub/software/scm/git/docs/git-blame.html .. 
_this blog post: https://github.com/blog/612-introducing-github-compare-view diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index 9209f26b7bd5..8c9cc0825eb0 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -381,7 +381,7 @@ A list of current Institutional Partners is maintained at the page Document history ================ -https://github.com/numpy/numpy/commits/master/doc/source/dev/governance/governance.rst +https://github.com/numpy/numpy/commits/main/doc/source/dev/governance/governance.rst Acknowledgements ================ diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index bcd144d71cc7..30d9742a0816 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -78,8 +78,8 @@ Here's the short summary, complete TOC links are below: * Pull the latest changes from upstream:: - git checkout master - git pull upstream master + git checkout main + git pull upstream main * Create a branch for the feature you want to work on. Since the branch name will appear in the merge message, use a sensible name @@ -173,13 +173,13 @@ Here's the short summary, complete TOC links are below: For a more detailed discussion, read on and follow the links at the bottom of this page. -Divergence between ``upstream/master`` and your feature branch --------------------------------------------------------------- +Divergence between ``upstream/main`` and your feature branch +------------------------------------------------------------ If GitHub indicates that the branch of your Pull Request can no longer be merged automatically, you have to incorporate changes that have been made since you started into your branch. Our recommended way to do this is to -:ref:`rebase on master`. +:ref:`rebase on main `. .. _guidelines: diff --git a/doc/source/dev/reviewer_guidelines.rst b/doc/source/dev/reviewer_guidelines.rst index 0b225b9b6b14..1d93bc1361f5 100644 --- a/doc/source/dev/reviewer_guidelines.rst +++ b/doc/source/dev/reviewer_guidelines.rst @@ -74,8 +74,8 @@ For maintainers - Make sure all automated CI tests pass before merging a PR, and that the :ref:`documentation builds ` without any errors. -- In case of merge conflicts, ask the PR submitter to :ref:`rebase on master - `. +- In case of merge conflicts, ask the PR submitter to :ref:`rebase on main + `. - For PRs that add new features or are in some way complex, wait at least a day or two before merging it. That way, others get a chance to comment before the code goes in. Consider adding it to the release notes. diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst index 29912a5af8f1..5db67b9b8231 100644 --- a/doc/source/docs/howto_build_docs.rst +++ b/doc/source/docs/howto_build_docs.rst @@ -81,7 +81,7 @@ pdf format is also built with ``make dist``. See `HOWTO RELEASE`_ for details on how to update https://numpy.org/doc. .. _Matplotlib: https://matplotlib.org/ -.. _HOWTO RELEASE: https://github.com/numpy/numpy/blob/master/doc/HOWTO_RELEASE.rst.txt +.. _HOWTO RELEASE: https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst.txt Sphinx extensions ----------------- diff --git a/doc/source/user/how-to-how-to.rst b/doc/source/user/how-to-how-to.rst index de8afc28ae9e..a3be0729be58 100644 --- a/doc/source/user/how-to-how-to.rst +++ b/doc/source/user/how-to-how-to.rst @@ -115,4 +115,4 @@ Is this page an example of a how-to? 
****************************************************************************** Yes -- until the sections with question-mark headings; they explain rather -than giving directions. In a how-to, those would be links. \ No newline at end of file +than giving directions. In a how-to, those would be links. diff --git a/numpy/_version.py b/numpy/_version.py index 6605bf4afa36..dcc0c5573521 100644 --- a/numpy/_version.py +++ b/numpy/_version.py @@ -1,4 +1,3 @@ - # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build @@ -191,7 +190,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". + # "stabilization", as well as "HEAD" and "main". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 52f21b61f8b7..70ca96a2d5ef 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -1150,7 +1150,7 @@ _array_from_buffer_3118(PyObject *memoryview) * bpo-32780 * bpo-32782 * - * Note that even if the above are fixed in master, we have to drop the + * Note that even if the above are fixed in main, we have to drop the * early patch versions of python to actually make use of the fixes. */ if (!npy_ctypes_check(Py_TYPE(view->obj))) { diff --git a/shippable.yml b/shippable.yml index 2843377e26e2..f26055232014 100644 --- a/shippable.yml +++ b/shippable.yml @@ -1,6 +1,6 @@ branches: only: - - master + - main - maintenance/* language: python diff --git a/tools/ci/push_docs_to_repo.py b/tools/ci/push_docs_to_repo.py index ae530548459b..939a09c583b5 100755 --- a/tools/ci/push_docs_to_repo.py +++ b/tools/ci/push_docs_to_repo.py @@ -56,7 +56,7 @@ def run(cmd, stdout=True): print('- uploading as %s <%s>' % (args.committer, args.email)) if args.force: - run(['git', 'push', 'origin', 'master', '--force']) + run(['git', 'push', 'origin', 'main', '--force']) else: print('\n!! No `--force` argument specified; aborting') print('!! Before enabling that flag, make sure you know what it does\n') diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 6d8d147572f0..ac514541111a 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -42,7 +42,7 @@ setup_base() # Travis typically has a stable numpy release pre-installed, and if we # don't remove it, then we can accidentally end up e.g. running old # test modules that were in the stable release but have been removed - # from master. (See gh-2765, gh-2768.) Using 'pip install' also has + # from main. (See gh-2765, gh-2768.) Using 'pip install' also has # the advantage that it tests that numpy is 'pip install' compatible, # see e.g. gh-2766... 
if [ -z "$USE_DEBUG" ]; then From 267a435d9f0875f96508e055acb71145959c18bb Mon Sep 17 00:00:00 2001 From: Kulin Seth Date: Thu, 4 Mar 2021 13:36:09 -0800 Subject: [PATCH 0689/1270] Update pyproject.toml Co-authored-by: Matti Picus --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index cd46aaf521df..14f275e97bbb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [build-system] # Minimum requirements for the build system to execute. requires = [ - "packaging==20.5", + "packaging==20.5; platform_machine=='arm64'", # macos M1 "setuptools<49.2.0", "wheel==0.36.2", "Cython>=0.29.21,<3.0", # Note: keep in sync with tools/cythonize.py @@ -74,4 +74,3 @@ requires = [ directory = "change" name = "Changes" showcontent = true - From e78ec032a0cc5497a5d4683b0fbb635b9a20ab67 Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Thu, 4 Feb 2021 08:45:48 -0600 Subject: [PATCH 0690/1270] DOC: Added links to text and table I found these convenient to have on hand while reading --- numpy/polynomial/__init__.py | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py index c832094e277d..d629df29f974 100644 --- a/numpy/polynomial/__init__.py +++ b/numpy/polynomial/__init__.py @@ -15,25 +15,24 @@ This package provides *convenience classes* for each of six different kinds of polynomials: - ============ ================ - **Name** **Provides** - ============ ================ - Polynomial Power series - Chebyshev Chebyshev series - Legendre Legendre series - Laguerre Laguerre series - Hermite Hermite series - HermiteE HermiteE series - ============ ================ + ======================== ================ + **Name** **Provides** + ======================== ================ + `~polynomial.Polynomial` Power series + `~chebyshev.Chebyshev` Chebyshev series + `~legendre.Legendre` Legendre series + `~laguerre.Laguerre` Laguerre series + `~hermite.Hermite` Hermite series + `~hermite_e.HermiteE` HermiteE series + ======================== ================ These *convenience classes* provide a consistent interface for creating, manipulating, and fitting data with polynomials of different bases. The convenience classes are the preferred interface for the `~numpy.polynomial` -package, and are available from the `numpy.polynomial` namespace. -This eliminates the need to -navigate to the corresponding submodules, e.g. ``np.polynomial.Polynomial`` -or ``np.polynomial.Chebyshev`` instead of -``np.polynomial.polynomial.Polynomial`` or +package, and are available from the ``numpy.polynomial`` namespace. +This eliminates the need to navigate to the corresponding submodules, e.g. +``np.polynomial.Polynomial`` or ``np.polynomial.Chebyshev`` instead of +``np.polynomial.polynomial.Polynomial`` or ``np.polynomial.chebyshev.Chebyshev``, respectively. The classes provide a more consistent and concise interface than the type-specific functions defined in the submodules for each type of polynomial. @@ -45,7 +44,7 @@ >>> c = Chebyshev.fit(xdata, ydata, deg=1) is preferred over the `chebyshev.chebfit` function from the -`numpy.polynomial.chebyshev` module:: +``np.polynomial.chebyshev`` module:: >>> from numpy.polynomial.chebyshev import chebfit >>> c = chebfit(xdata, ydata, deg=1) @@ -58,8 +57,8 @@ The following lists the various constants and methods common to all of the classes representing the various kinds of polynomials. 
In the following, the term ``Poly`` represents any one of the convenience classes (e.g. -``Polynomial``, ``Chebyshev``, ``Hermite``, etc.) while the lowercase ``p`` -represents an **instance** of a polynomial class. +`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.) +while the lowercase ``p`` represents an **instance** of a polynomial class. Constants --------- From 7d33c0a910501ad4f609ce7f0b4e7d083b3bd2b6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 5 Mar 2021 08:26:30 -0700 Subject: [PATCH 0691/1270] CI: Pin docker image. The azure-pipeline test Linux_Python_38_32bit_full_with_asserts has been failing after the release of the latest manylinux2010. Pin the docker image to an earlier version to fix this. Closes #18553. --- azure-pipelines.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index d55bb6cef481..f1cf2a9b36ad 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -63,12 +63,11 @@ stages: - job: Linux_Python_38_32bit_full_with_asserts pool: - vmImage: 'ubuntu-18.04' + vmImage: 'ubuntu-20.04' steps: - script: | - docker pull quay.io/pypa/manylinux2010_i686 docker run -v $(pwd):/numpy -e CFLAGS="-msse2 -std=c99 -UNDEBUG" \ - -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2010_i686 \ + -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2010_i686:2021-02-28-1f32361 \ /bin/bash -xc "cd numpy && \ /opt/python/cp38-cp38/bin/python -mvenv venv &&\ source venv/bin/activate && \ From 1879a2ebe69c736ffe3abbc41acf341556de8a7f Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 6 Mar 2021 17:58:03 +0100 Subject: [PATCH 0692/1270] DOC: address remaining review comments [ci skip] --- doc/neps/nep-0048-spending-project-funds.rst | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/doc/neps/nep-0048-spending-project-funds.rst b/doc/neps/nep-0048-spending-project-funds.rst index b97d456b5f7b..3571eef2d8e6 100644 --- a/doc/neps/nep-0048-spending-project-funds.rst +++ b/doc/neps/nep-0048-spending-project-funds.rst @@ -95,6 +95,8 @@ The approach for spending funds will be: The next sections go into detail on each of these three points. +.. _section-what-to-pay-for: + What to pay for ``````````````` @@ -123,7 +125,7 @@ a volunteer in a reasonable amount of time. There are also many tasks, activities, and projects outside of development work that are important and could enhance the project or community - think of, for example, user surveys, translations, outreach, dedicated -mentoring of newcomers, community organization, website improvements, and +mentoring of newcomers, community organizating, website improvements, and administrative tasks. Time of people to perform tasks is also not the only thing that funds can be @@ -145,9 +147,10 @@ Whom to pay It will likely depend on the project/tasks whether there's already a clear best candidate within the NumPy team, or whether we look for new people to get -involved. Before making any decisions, the decision makers should think about -whether an opportunity should be advertised to give a wider group of people a -chance to apply for it. +involved. Before making any decisions, the decision makers (according to the +NumPy governance document - currently that's the Steering Council) should think +about whether an opportunity should be advertised to give a wider group of +people a chance to apply for it. 
Compensating fairly ``````````````````` @@ -251,7 +254,8 @@ Each listed idea must meet the following requirements: 1. It must be clearly scoped: its description must explain the importance to the project, referencing the NumPy Roadmap if possible, the items to pay for - or activities and deliverables, and why it should be a funded activity. + or activities and deliverables, and why it should be a funded activity (see + :ref:`section-what-to-pay-for`). 2. It must contain the following metadata: title, cost, time duration or effort estimate, and (if known) names of the team member(s) to execute or coordinate. 3. It must have an assigned priority (low, medium, or high). This discussion From 0e1fea2fb3cf33b25f57e71c4390a01d1c134780 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 16 Feb 2021 13:20:59 +0530 Subject: [PATCH 0693/1270] ENH: Added Linter script --- tools/lint_diff.ini | 5 +++++ tools/linter.py | 52 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 tools/lint_diff.ini create mode 100644 tools/linter.py diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini new file mode 100644 index 000000000000..ba091468eda7 --- /dev/null +++ b/tools/lint_diff.ini @@ -0,0 +1,5 @@ +[pycodestyle] +max_line_length = 79 +statistics = True +ignore = E121,E122,E123,E125,E126,E127,E128,E226,E251,E265,E266,E302,E402,E712,E721,E731,E741,W291,W293,W391,W503,W504 +exclude = numpy/__config__.py diff --git a/tools/linter.py b/tools/linter.py new file mode 100644 index 000000000000..f0c2b7927aab --- /dev/null +++ b/tools/linter.py @@ -0,0 +1,52 @@ +import os +import sys +import subprocess +from argparse import ArgumentParser +from git import Repo + +BASE_BRANCH = 'master' +CONFIG = os.path.join( + os.path.abspath(os.path.dirname(__file__)), + 'lint_diff.ini', +) + + +class DiffLinter: + def __init__(self, branch): + self.branch = branch + self.repo = Repo('.') + + def get_branch_diff(self): + commit = self.repo.merge_base(BASE_BRANCH, self.branch)[0] + diff = self.repo.git.diff(commit, self.branch, '*.py') + return diff + + def run_pycodestyle(self, diff): + """ + Original Author: Josh Wilson (@person142) + Source: https://github.com/scipy/scipy/blob/master/tools/lint_diff.py + Run pycodestyle on the given diff. 
+ """ + res = subprocess.run( + ['pycodestyle', '--diff', '--config', CONFIG], + input=diff, + stdout=subprocess.PIPE, + encoding='utf-8', + ) + return res.returncode, res.stdout + + def run_lint(self): + diff = self.get_branch_diff() + retcode, errors = self.run_pycodestyle(diff) + + errors and print(errors) + + sys.exit(retcode) + +if __name__ == '__main__': + parser = ArgumentParser() + parser.add_argument("--branch", type=str, default='master', + help="The branch to diff against") + args = parser.parse_args() + + DiffLinter(args.branch).run_lint() From 37ce99a4ab6f066f1363c33d1ec6f2b4c6c4a583 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 16 Feb 2021 13:34:59 +0530 Subject: [PATCH 0694/1270] ENH: Added uncommitted changes support --- tools/linter.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/tools/linter.py b/tools/linter.py index f0c2b7927aab..509e199d844b 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -16,15 +16,20 @@ def __init__(self, branch): self.branch = branch self.repo = Repo('.') - def get_branch_diff(self): + def get_branch_diff(self, uncommitted): commit = self.repo.merge_base(BASE_BRANCH, self.branch)[0] - diff = self.repo.git.diff(commit, self.branch, '*.py') + + if uncommitted: + diff = self.repo.git.diff(self.branch, '***.py') + else: + diff = self.repo.git.diff(commit, self.branch, '***.py') return diff def run_pycodestyle(self, diff): """ Original Author: Josh Wilson (@person142) - Source: https://github.com/scipy/scipy/blob/master/tools/lint_diff.py + Source: + https://github.com/scipy/scipy/blob/master/tools/lint_diff.py Run pycodestyle on the given diff. """ res = subprocess.run( @@ -35,18 +40,21 @@ def run_pycodestyle(self, diff): ) return res.returncode, res.stdout - def run_lint(self): - diff = self.get_branch_diff() + def run_lint(self, uncommitted): + diff = self.get_branch_diff(uncommitted) retcode, errors = self.run_pycodestyle(diff) errors and print(errors) sys.exit(retcode) + if __name__ == '__main__': parser = ArgumentParser() parser.add_argument("--branch", type=str, default='master', help="The branch to diff against") + parser.add_argument("--uncommitted", action='store_true', + help="Check only uncommitted changes") args = parser.parse_args() - DiffLinter(args.branch).run_lint() + DiffLinter(args.branch).run_lint(args.uncommitted) From f43ba6df89974d050f944987711366c5f0268f60 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 16 Feb 2021 14:40:38 +0530 Subject: [PATCH 0695/1270] BLD: Added linting check --- azure-pipelines.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index d2e251acd97a..5b6ec71ce89c 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -18,6 +18,26 @@ stages: - stage: InitialTests jobs: + - job: Lint + condition: and(succeeded(), ne(variables['Build.SourceBranch'], 'refs/heads/master')) # skip for PR merges + pool: + vmImage: 'ubuntu-18.04' + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '3.8' + addToPath: true + architecture: 'x64' + - script: >- + python -m pip install + pycodestyle==2.5.0 + GitPython==3.1.13 + displayName: 'Install tools' + failOnStderr: true + - script: | + python tools/linter.py --branch origin/$(System.PullRequest.SourceBranch) + displayName: 'Run Lint Checks' + failOnStderr: true # Native build is based on gcc flag `-march=native` - job: Linux_baseline_native pool: From 8e51b5b512a284cd580fdcdfff3cfbe556f013ac Mon Sep 17 00:00:00 2001 From: 
Ganesh Kathiresan Date: Tue, 16 Feb 2021 14:40:51 +0530 Subject: [PATCH 0696/1270] MAINT: Added exception handling and docs --- tools/linter.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/tools/linter.py b/tools/linter.py index 509e199d844b..bd54097e31a8 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -2,7 +2,7 @@ import sys import subprocess from argparse import ArgumentParser -from git import Repo +from git import Repo, exc BASE_BRANCH = 'master' CONFIG = os.path.join( @@ -16,8 +16,18 @@ def __init__(self, branch): self.branch = branch self.repo = Repo('.') - def get_branch_diff(self, uncommitted): - commit = self.repo.merge_base(BASE_BRANCH, self.branch)[0] + def get_branch_diff(self, uncommitted = False): + """ + Determine the first common ancestor commit. + Find diff between branch and FCA commit. + Note: if `uncommitted` is set, check only + uncommitted changes + """ + try: + commit = self.repo.merge_base(BASE_BRANCH, self.branch)[0] + except exc.GitCommandError: + print(f"Branch with name `{self.branch}` does not exist") + sys.exit(1) if uncommitted: diff = self.repo.git.diff(self.branch, '***.py') From 135963d9dc839456873f87e0a0d94fa37990ca9c Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 16 Feb 2021 20:19:40 +0530 Subject: [PATCH 0697/1270] MAINT: Changed branch argument to take target branch --- azure-pipelines.yml | 2 +- tools/linter.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 5b6ec71ce89c..c8aa5b1110c2 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -35,7 +35,7 @@ stages: displayName: 'Install tools' failOnStderr: true - script: | - python tools/linter.py --branch origin/$(System.PullRequest.SourceBranch) + python tools/linter.py --branch origin/$(System.PullRequest.TargetBranch) displayName: 'Run Lint Checks' failOnStderr: true # Native build is based on gcc flag `-march=native` diff --git a/tools/linter.py b/tools/linter.py index bd54097e31a8..68d869367253 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -4,7 +4,6 @@ from argparse import ArgumentParser from git import Repo, exc -BASE_BRANCH = 'master' CONFIG = os.path.join( os.path.abspath(os.path.dirname(__file__)), 'lint_diff.ini', @@ -15,6 +14,7 @@ class DiffLinter: def __init__(self, branch): self.branch = branch self.repo = Repo('.') + self.head = self.repo.head.commit def get_branch_diff(self, uncommitted = False): """ @@ -24,15 +24,15 @@ def get_branch_diff(self, uncommitted = False): uncommitted changes """ try: - commit = self.repo.merge_base(BASE_BRANCH, self.branch)[0] + commit = self.repo.merge_base(self.branch, self.head)[0] except exc.GitCommandError: print(f"Branch with name `{self.branch}` does not exist") sys.exit(1) if uncommitted: - diff = self.repo.git.diff(self.branch, '***.py') + diff = self.repo.git.diff(self.head, '***.py') else: - diff = self.repo.git.diff(commit, self.branch, '***.py') + diff = self.repo.git.diff(commit, self.head, '***.py') return diff def run_pycodestyle(self, diff): From 4e11afbd4faad41e0119abd55a5bc8dc4a37de29 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sun, 21 Feb 2021 11:40:18 +0530 Subject: [PATCH 0698/1270] ENH: Added github actions --- .github/workflows/build_test.yml | 40 +++++++++++++++++++++++--------- linter_requirements.txt | 2 ++ 2 files changed, 31 insertions(+), 11 deletions(-) create mode 100644 linter_requirements.txt diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml 
index 2f792339b1b6..45a7f9ce03cd 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -19,6 +19,24 @@ env: PYTHON_VERSION: 3.7 jobs: + lint: + if: "github.repository == 'numpy/numpy' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + fetch-depth: 0 + - uses: actions/setup-python@v2 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Install linter requirements + run: + python -m pip install -r linter_requirements.txt + - name: Run linter on PR diff + run: + python tools/linter.py --branch origin/${{ github.base_ref }} + smoke_test: if: "github.repository == 'numpy/numpy' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" runs-on: ubuntu-latest @@ -33,7 +51,7 @@ jobs: - uses: ./.github/actions basic: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-latest strategy: matrix: @@ -49,7 +67,7 @@ jobs: - uses: ./.github/actions debug: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-20.04 env: USE_DEBUG: 1 @@ -64,7 +82,7 @@ jobs: - uses: ./.github/actions blas64: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-latest env: NPY_USE_BLAS_ILP64: 1 @@ -79,7 +97,7 @@ jobs: - uses: ./.github/actions full: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-18.04 env: USE_WHEEL: 1 @@ -97,7 +115,7 @@ jobs: - uses: ./.github/actions benchmark: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-latest env: PYTHONOPTIMIZE: 2 @@ -118,7 +136,7 @@ jobs: - uses: ./.github/actions no_relaxed_strides: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-latest env: NPY_RELAXED_STRIDES_CHECKING: 0 @@ -135,7 +153,7 @@ jobs: - uses: ./.github/actions use_wheel: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-latest env: USE_WHEEL: 1 @@ -151,7 +169,7 @@ jobs: - uses: ./.github/actions no_array_func: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-latest env: NUMPY_EXPERIMENTAL_ARRAY_FUNCTION: 0 @@ -166,7 +184,7 @@ jobs: - uses: ./.github/actions no_openblas: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-latest env: BLAS: None @@ -184,7 +202,7 @@ jobs: - uses: ./.github/actions pypy37: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -197,7 +215,7 @@ jobs: - uses: ./.github/actions sdist: - needs: smoke_test + needs: [smoke_test, lint] runs-on: ubuntu-latest env: USE_SDIST: 1 diff --git a/linter_requirements.txt b/linter_requirements.txt new file mode 100644 index 000000000000..b5b49bc8cf72 --- /dev/null +++ b/linter_requirements.txt @@ -0,0 +1,2 @@ +pycodestyle==2.5.0 +GitPython==3.1.13 \ No newline at end of file From 6464b4bfd650f6033b2933b6857075dd27e270d5 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sun, 21 Feb 2021 11:40:29 +0530 Subject: [PATCH 0699/1270] MAINT: Use linter_requirements --- azure-pipelines.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index c8aa5b1110c2..60c3ebf74fdd 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -29,9 +29,7 @@ stages: addToPath: true architecture: 'x64' - script: >- - python -m pip install - 
pycodestyle==2.5.0 - GitPython==3.1.13 + python -m pip install -r linter_requirements.txt displayName: 'Install tools' failOnStderr: true - script: | From 11cfa1c942ce2ed3d0e08ab50eb1612bb51ec60e Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 22 Feb 2021 21:46:24 +0530 Subject: [PATCH 0700/1270] ENH, MAINT: Added runtest options | Added unified diff --- runtests.py | 31 +++++++++++++++++++++++++++++++ tools/linter.py | 5 +++-- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/runtests.py b/runtests.py index 4c9493a35b32..68a52115b42f 100755 --- a/runtests.py +++ b/runtests.py @@ -28,6 +28,12 @@ $ python runtests.py --gcov [...other args...] $ python runtests.py --lcov-html +Run lint checks. +Provide target branch name or `uncommitted` to check before committing: + + $ python runtests.py --lint master + $ python runtests.py --lint uncommitted + """ # # This is a generic test runner script for projects using NumPy's test @@ -84,6 +90,10 @@ def main(argv): parser.add_argument("--coverage", action="store_true", default=False, help=("report coverage of project code. HTML output goes " "under build/coverage")) + parser.add_argument("--lint", default=None, + help="'' or 'uncommitted', passed to " + "tools/linter.py [--branch BRANCH] " + "[--uncommitted]") parser.add_argument("--durations", action="store", default=-1, type=int, help=("Time N slowest tests, time all if 0, time none if < 0")) parser.add_argument("--gcov", action="store_true", default=False, @@ -162,6 +172,9 @@ def main(argv): print("*** Benchmarks should not be run against debug " "version; remove -g flag ***") + if args.lint: + check_lint(args.lint) + if not args.no_build: # we need the noarch path in case the package is pure python. site_dir, site_dir_noarch = build_project(args) @@ -637,6 +650,24 @@ def lcov_generate(): else: print("HTML output generated under build/lcov/") +def check_lint(lint_args): + """ + Adds ROOT_DIR to path and performs lint checks. + This functions exits the program with status code of lint check. + """ + sys.path.append(ROOT_DIR) + try: + from tools.linter import DiffLinter + except ModuleNotFoundError as e: + print(f"Error: {e.msg}. " + "Install using linter_requirements.txt.") + sys.exit(1) + + uncommitted = lint_args == "uncommitted" + branch = "master" if uncommitted else lint_args + + DiffLinter(branch).run_lint(uncommitted) + if __name__ == "__main__": main(argv=sys.argv[1:]) diff --git a/tools/linter.py b/tools/linter.py index 68d869367253..f51a8848b291 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -30,9 +30,10 @@ def get_branch_diff(self, uncommitted = False): sys.exit(1) if uncommitted: - diff = self.repo.git.diff(self.head, '***.py') + diff = self.repo.git.diff(self.head, '--unified=0', '***.py') else: - diff = self.repo.git.diff(commit, self.head, '***.py') + diff = self.repo.git.diff(commit, self.head, + '--unified=0', '***.py') return diff def run_pycodestyle(self, diff): From eb9bc0e8e564f39c7f9167b731ed12b21f5205a7 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sun, 7 Mar 2021 11:02:02 +0530 Subject: [PATCH 0701/1270] MAINT: Replaced master with main --- runtests.py | 4 ++-- tools/linter.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/runtests.py b/runtests.py index 68a52115b42f..5885d2df647c 100755 --- a/runtests.py +++ b/runtests.py @@ -31,7 +31,7 @@ Run lint checks. 
Provide target branch name or `uncommitted` to check before committing: - $ python runtests.py --lint master + $ python runtests.py --lint main $ python runtests.py --lint uncommitted """ @@ -664,7 +664,7 @@ def check_lint(lint_args): sys.exit(1) uncommitted = lint_args == "uncommitted" - branch = "master" if uncommitted else lint_args + branch = "main" if uncommitted else lint_args DiffLinter(branch).run_lint(uncommitted) diff --git a/tools/linter.py b/tools/linter.py index f51a8848b291..2952e91edb8b 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -62,7 +62,7 @@ def run_lint(self, uncommitted): if __name__ == '__main__': parser = ArgumentParser() - parser.add_argument("--branch", type=str, default='master', + parser.add_argument("--branch", type=str, default='main', help="The branch to diff against") parser.add_argument("--uncommitted", action='store_true', help="Check only uncommitted changes") From 3412ef6643a946a04f5e522e5a412ff24b0ea4b9 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 7 Mar 2021 17:36:13 +0100 Subject: [PATCH 0702/1270] CI: another master -> main fix (#18566) --- azure-pipelines.yml | 2 +- tools/linter.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 60c3ebf74fdd..dd27354fe792 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -19,7 +19,7 @@ stages: jobs: - job: Lint - condition: and(succeeded(), ne(variables['Build.SourceBranch'], 'refs/heads/master')) # skip for PR merges + condition: and(succeeded(), ne(variables['Build.SourceBranch'], 'refs/heads/main')) # skip for PR merges pool: vmImage: 'ubuntu-18.04' steps: diff --git a/tools/linter.py b/tools/linter.py index 2952e91edb8b..b1bab2b70fce 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -40,7 +40,7 @@ def run_pycodestyle(self, diff): """ Original Author: Josh Wilson (@person142) Source: - https://github.com/scipy/scipy/blob/master/tools/lint_diff.py + https://github.com/scipy/scipy/blob/main/tools/lint_diff.py Run pycodestyle on the given diff. 
""" res = subprocess.run( From 404e41fd16d456d83d4db538b7b39e0269522005 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 7 Mar 2021 18:00:16 +0100 Subject: [PATCH 0703/1270] CI: skip lint check on merges with main (#18567) There's no lint diff, so the check should not run --- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 45a7f9ce03cd..55013f3cf3fb 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -20,7 +20,7 @@ env: jobs: lint: - if: "github.repository == 'numpy/numpy' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" + if: "github.repository == 'numpy/numpy' && github.ref != 'refs/heads/main' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 From 9812cc869a1cbb47e4918e3eaff58d2a5e1b4d46 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 7 Mar 2021 11:48:39 -0600 Subject: [PATCH 0704/1270] CI: Ensure that doc-build uses "main" as branch name The default of git is still "master", so we need to set `--initial-branch=main` to ensure that we use the name `main` that is used in the devdoc repository. Closes gh-18568 --- tools/ci/push_docs_to_repo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/push_docs_to_repo.py b/tools/ci/push_docs_to_repo.py index 939a09c583b5..555a918e48b1 100755 --- a/tools/ci/push_docs_to_repo.py +++ b/tools/ci/push_docs_to_repo.py @@ -44,7 +44,7 @@ def run(cmd, stdout=True): workdir = tempfile.mkdtemp() os.chdir(workdir) -run(['git', 'init']) +run(['git', 'init', '--initial-branch=main']) run(['git', 'remote', 'add', 'origin', args.remote]) run(['git', 'config', '--local', 'user.name', args.committer]) run(['git', 'config', '--local', 'user.email', args.email]) From f2dd5ab63fad36c0dd6b5e16cd7883ad25b98100 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 7 Mar 2021 13:22:17 -0600 Subject: [PATCH 0705/1270] CI: Use `git branch -m` instead of `--initial-branch=main` It seems the git in the CircleCI environment does not have the `--initial-branch` option that my local git has, hopefully `git branch -m` works fine. 
--- tools/ci/push_docs_to_repo.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/ci/push_docs_to_repo.py b/tools/ci/push_docs_to_repo.py index 555a918e48b1..00c4f7074f9a 100755 --- a/tools/ci/push_docs_to_repo.py +++ b/tools/ci/push_docs_to_repo.py @@ -44,7 +44,8 @@ def run(cmd, stdout=True): workdir = tempfile.mkdtemp() os.chdir(workdir) -run(['git', 'init', '--initial-branch=main']) +run(['git', 'init']) +run(['git', 'branch', '-m', 'master', 'main']) run(['git', 'remote', 'add', 'origin', args.remote]) run(['git', 'config', '--local', 'user.name', args.committer]) run(['git', 'config', '--local', 'user.email', args.email]) From 31e172ee5b34acd3243441480a8f431d5689fbc4 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Sun, 7 Mar 2021 13:06:08 -0800 Subject: [PATCH 0706/1270] Fix overflow warning on apple silicon --- numpy/core/src/umath/loops.c.src | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 570b3ec04bc7..bd8c78d2d43e 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -2526,6 +2526,21 @@ NPY_NO_EXPORT void const @ftype@ in1i = ((@ftype@ *)ip1)[1]; const @ftype@ in2r = ((@ftype@ *)ip2)[0]; const @ftype@ in2i = ((@ftype@ *)ip2)[1]; +#if defined(__APPLE__) && defined(__aarch64__) + // On macos-arm64, if this block of code was not there, + // when branch prediction goes wrong, the floating point exception + // register does not get cleared and an exception for the + // wrong branch is thrown. + if (in2i == 0) { + ((@ftype@ *)op1)[0] = npy_floor@c@(in1r/in2r); + ((@ftype@ *)op1)[1] = 0; + } + else if (in2r == 0) { + ((@ftype@ *)op1)[0] = npy_floor@c@(in1i/in2i); + ((@ftype@ *)op1)[1] = 0; + } + else +#endif if (npy_fabs@c@(in2r) >= npy_fabs@c@(in2i)) { const @ftype@ rat = in2i/in2r; ((@ftype@ *)op1)[0] = npy_floor@c@((in1r + in1i*rat)/(in2r + in2i*rat)); From 5631e8ee5aeea8fe645ee46aa12f76ecff1badf2 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 7 Mar 2021 15:05:35 -0600 Subject: [PATCH 0707/1270] CI: Do not pass original branch name to `git branch -m main` Should fix the current failure that "master" does not exist. I am a bit curious why master does not exist, maybe the CircleCI git is set up to use some other default. Move the branch rename to after the commit, to make sure that the fact that the branch is empty (and `git branch` also) cannot influence the result. 
[ci-skip] --- tools/ci/push_docs_to_repo.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/ci/push_docs_to_repo.py b/tools/ci/push_docs_to_repo.py index 00c4f7074f9a..058f748ec1af 100755 --- a/tools/ci/push_docs_to_repo.py +++ b/tools/ci/push_docs_to_repo.py @@ -45,7 +45,9 @@ def run(cmd, stdout=True): os.chdir(workdir) run(['git', 'init']) -run(['git', 'branch', '-m', 'master', 'main']) +# ensure the working branch is called "main" +# (`--initial-branch=main` appared to have failed on older git versions): +run(['git', 'checkout', '-b', 'main']) run(['git', 'remote', 'add', 'origin', args.remote]) run(['git', 'config', '--local', 'user.name', args.committer]) run(['git', 'config', '--local', 'user.email', args.email]) From a7621b15bfa86bb7476684980b44c60aee2dd9c4 Mon Sep 17 00:00:00 2001 From: InessaPawson Date: Mon, 8 Mar 2021 13:50:58 +1000 Subject: [PATCH 0708/1270] MAINT: Update the Call for Contributions section Expanding the section to provide info about the types of contribution that exist within the NumPy project and points of contact with the community coordinators and other contributors. --- README.md | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index a87563aec22f..e84066c22db1 100644 --- a/README.md +++ b/README.md @@ -30,13 +30,28 @@ Tests can then be run after installation with: Call for Contributions ---------------------- -NumPy appreciates help from a wide range of different backgrounds. -Work such as high level documentation or website improvements are valuable -and we would like to grow our team with people filling these roles. -Small improvements or fixes are always appreciated and issues labeled as easy -may be a good starting point. -If you are considering larger contributions outside the traditional coding work, -please contact us through the mailing list. +The NumPy project welcomes your expertise and enthusiasm! + +Small improvements or fixes are always appreciated. (Issues labeled as easy may be a good starting point.) If you are considering larger contributions to the source code, please contact us through the [mailing list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. + +Writing code isn’t the only way to contribute to NumPy. You can also: +- review pull requests +- triage issues +- develop tutorials, presentations, and other educational materials +- maintain and improve our website numpy.org +- develop graphic design for our brand assets and promotional materials +- translate website content +- serve as a community coordinator +- write grant proposals and help with other fundraising efforts + +If you’re unsure where to start or how your skills fit in, reach out! You can ask on the mailing list or here, on GitHub, by opening a new issue or leaving a comment on a relevant issue that is already open. + +Those are our preferred channels (open source is open by nature), but if you’d like to speak to us in private first, contact our community coordinators at numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for an invite). + +We also have a biweekly community call, details of which are announced on the mailing list. You are very welcome to join. + +If you are new to contributing to open source, we highly recommend reading [this guide](https://opensource.guide/how-to-contribute/). 
+
 [![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org)

From 6f4ba2ad101456b5fbebde9ed4331a1945ae2deb Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Sat, 16 Jan 2021 14:00:01 +0000
Subject: [PATCH 0709/1270] SIMD: add NPYV intrinsics that compute the parameters used for fast integer division

---
 numpy/core/src/common/simd/intdiv.h | 473 ++++++++++++++++++++++++++++
 numpy/core/src/common/simd/simd.h   |   5 +
 2 files changed, 478 insertions(+)
 create mode 100644 numpy/core/src/common/simd/intdiv.h

diff --git a/numpy/core/src/common/simd/intdiv.h b/numpy/core/src/common/simd/intdiv.h
new file mode 100644
index 000000000000..a6c293d87f64
--- /dev/null
+++ b/numpy/core/src/common/simd/intdiv.h
@@ -0,0 +1,473 @@
+/**
+ * This header implements the `npyv_divisor_*` intrinsics used for computing the parameters
+ * of fast integer division, while the division intrinsics `npyv_divc_*` are defined in
+ * {extension}/arithmetic.h.
+ */
+#ifndef NPY_SIMD
+    #error "Not a standalone header, use simd/simd.h instead"
+#endif
+#ifndef _NPY_SIMD_INTDIV_H
+#define _NPY_SIMD_INTDIV_H
+/**********************************************************************************
+ ** Integer division
+ **********************************************************************************
+ * Almost no architecture (except Power10) supports integer vector division;
+ * in addition, the cost of scalar division on architectures like x86 is too high:
+ * it can take 30 to 40 cycles on modern chips and up to 100 on old ones.
+ *
+ * Therefore we use division by multiplication with a precomputed reciprocal.
+ * The method used in this implementation is based on T. Granlund and P. L. Montgomery,
+ * “Division by invariant integers using multiplication” (see [Figure 4.1, 5.1]
+ * http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.2556)
+ *
+ * It shows a good impact for all architectures, especially on x86.
+ * However, computing the divisor parameters is fairly expensive, so this implementation
+ * should only be used when the divisor is a scalar that is reused multiple times.
+ *
+ * The division process is separated into two intrinsics for each data type:
+ *
+ *  1- npyv_{dtype}x3 npyv_divisor_{dtype} ({dtype} divisor);
+ *     For computing the divisor parameters (multiplier + shifters + sign of divisor (signed only))
+ *
+ *  2- npyv_{dtype} npyv_divc_{dtype} (npyv_{dtype} dividend, npyv_{dtype}x3 divisor_parms);
+ *     For performing the final division.
+ *
+ ** For example:
+ *    int vstep = npyv_nlanes_s32;                // number of lanes
+ *    int x     = 0x6e70;
+ *    npyv_s32x3 divisor = npyv_divisor_s32(x);   // init divisor params
+ *    for (; len >= vstep; src += vstep, dst += vstep, len -= vstep) {
+ *        npyv_s32 a = npyv_load_s32(src);        // load s32 vector from memory
+ *                 a = npyv_divc_s32(a, divisor); // divide all elements by x
+ *        npyv_store_s32(dst, a);                 // store s32 vector into memory
+ *    }
+ *
+ ** NOTES:
+ *  - For 64-bit division on Aarch64 and IBM/Power, we fall back to scalar division
+ *    since emulating multiply-high is expensive and both architectures have very fast dividers.
+ * + ** TODO: + * - Add support for Power10(VSX4) + * + *************************************************************** + ** Figure 4.1: Unsigned division by run–time invariant divisor + *************************************************************** + * Initialization (given uword d with 1 ≤ d < 2^N): + * int l = ceil(log2(d)); + * uword m = 2^N * (2^l− d) / d + 1; + * int sh1 = min(l, 1); + * int sh2 = max(l − 1, 0); + * + * For q = FLOOR(a/d), all uword: + * uword t1 = MULUH(m, a); + * q = SRL(t1 + SRL(a − t1, sh1), sh2); + * + ************************************************************************************ + ** Figure 5.1: Signed division by run–time invariant divisor, rounded towards zero + ************************************************************************************ + * Initialization (given constant sword d with d !=0): + * int l = max(ceil(log2(abs(d))), 1); + * udword m0 = 1 + (2^(N+l-1)) / abs(d); + * sword m = m0 − 2^N; + * sword dsign = XSIGN(d); + * int sh = l − 1; + * + * For q = TRUNC(a/d), all sword: + * sword q0 = a + MULSH(m, a); + * q0 = SRA(q0, sh) − XSIGN(a); + * q = EOR(q0, dsign) − dsign; + */ +/** + * bit-scan reverse for non-zeros. returns the index of the highest set bit. + * equivalent to floor(log2(a)) + */ +#ifdef _MSC_VER + #include // _BitScanReverse +#endif +NPY_FINLINE unsigned npyv__bitscan_revnz_u32(npy_uint32 a) +{ + assert(a > 0); // due to use __builtin_clz + unsigned r; +#if defined(NPY_HAVE_SSE2) && defined(_MSC_VER) + unsigned long rl; + (void)_BitScanReverse(&rl, (unsigned long)a); + r = (unsigned)rl; +#elif defined(NPY_HAVE_SSE2) && (defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)) + __asm__("bsr %1, %0" : "=r" (r) : "r"(a)); +#elif defined(__GNUC__) || defined(__clang__) + r = 31 - __builtin_clz(a); // performs on arm -> clz, ppc -> cntlzw +#else + r = 0; + while (a >>= 1) { + r++; + } +#endif + return r; +} +NPY_FINLINE unsigned npyv__bitscan_revnz_u64(npy_uint64 a) +{ + assert(a > 0); // due to use __builtin_clzll +#if defined(_M_AMD64) && defined(_MSC_VER) + unsigned long rl; + (void)_BitScanReverse64(&rl, a); + return (unsigned)rl; +#elif defined(__x86_64__) && (defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)) + npy_uint64 r; + __asm__("bsrq %1, %0" : "=r"(r) : "r"(a)); + return (unsigned)r; +#elif defined(__GNUC__) || defined(__clang__) + return 63 - __builtin_clzll(a); +#else + npy_uint64 a_hi = a >> 32; + if (a_hi == 0) { + return npyv__bitscan_revnz_u32((npy_uint32)a); + } + return 32 + npyv__bitscan_revnz_u32((npy_uint32)a_hi); +#endif +} +/** + * Divides 128-bit unsigned integer by a 64-bit when the lower + * 64-bit of the dividend is zero. + * + * This function is needed to calculate the multiplier of 64-bit integer division + * see npyv_divisor_u64/npyv_divisor_s64. + */ +NPY_FINLINE npy_uint64 npyv__divh128_u64(npy_uint64 high, npy_uint64 divisor) +{ + assert(divisor > 1); + npy_uint64 quotient; +#if defined(_M_X64) && defined(_MSC_VER) && _MSC_VER >= 1920 + npy_uint64 remainder; + quotient = _udiv128(high, 0, divisor, &remainder); + (void)remainder; +#elif defined(__x86_64__) && (defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)) + __asm__("divq %[d]" : "=a"(quotient) : [d] "r"(divisor), "a"(0), "d"(high)); +#elif defined(__SIZEOF_INT128__) + quotient = (npy_uint64)((((__uint128_t)high) << 64) / divisor); +#else + /** + * Minified version based on Donald Knuth’s Algorithm D (Division of nonnegative integers), + * and Generic implementation in Hacker’s Delight. 
+ * + * See https://skanthak.homepage.t-online.de/division.html + * with respect to the license of the Hacker's Delight book + * (https://web.archive.org/web/20190408122508/http://www.hackersdelight.org/permissions.htm) + */ + // shift amount for normalize + unsigned ldz = 63 - npyv__bitscan_revnz_u64(divisor); + // normalize divisor + divisor <<= ldz; + high <<= ldz; + // break divisor up into two 32-bit digits + npy_uint32 divisor_hi = divisor >> 32; + npy_uint32 divisor_lo = divisor & 0xFFFFFFFF; + // compute high quotient digit + npy_uint32 quotient_hi = (npy_uint32)(high / divisor_hi); + npy_uint64 remainder = high - divisor_hi * quotient_hi; + npy_uint64 base32 = 1ULL << 32; + while (quotient_hi >= base32 || quotient_hi*divisor_lo > base32*remainder) { + remainder += --divisor_hi; + if (remainder >= base32) { + break; + } + } + // compute dividend digit pairs + npy_uint64 dividend_pairs = base32*high - divisor*quotient_hi; + // compute second quotient digit for lower zeros + npy_uint32 quotient_lo = (npy_uint32)(dividend_pairs / divisor_hi); + quotient = base32*quotient_hi + quotient_lo; +#endif + return quotient; +} +// Initializing divisor parameters for unsigned 8-bit division +NPY_FINLINE npyv_u8x3 npyv_divisor_u8(npy_uint8 d) +{ + unsigned l, l2, sh1, sh2, m; + switch (d) { + case 0: // LCOV_EXCL_LINE + // for potential divide by zero, On x86 GCC inserts `ud2` instruction + // instead of letting the HW/CPU trap it which leads to illegal instruction exception. + // 'volatile' should suppress this behavior and allow us to raise HW/CPU + // arithmetic exception. + m = sh1 = sh2 = 1 / ((npy_uint8 volatile *)&d)[0]; + break; + case 1: + m = 1; sh1 = sh2 = 0; + break; + case 2: + m = 1; sh1 = 1; sh2 = 0; + break; + default: + l = npyv__bitscan_revnz_u32(d - 1) + 1; // ceil(log2(d)) + l2 = (npy_uint8)(1 << l); // 2^l, overflow to 0 if l = 8 + m = ((l2 - d) << 8) / d + 1; // multiplier + sh1 = 1; sh2 = l - 1; // shift counts + } + npyv_u8x3 divisor; + divisor.val[0] = npyv_setall_u8(m); +#ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 + divisor.val[1] = npyv_set_u8(sh1); + divisor.val[2] = npyv_set_u8(sh2); +#elif defined(NPY_HAVE_VSX2) + divisor.val[1] = npyv_setall_u8(sh1); + divisor.val[2] = npyv_setall_u8(sh2); +#elif defined(NPY_HAVE_NEON) + divisor.val[1] = npyv_reinterpret_u8_s8(npyv_setall_s8(-sh1)); + divisor.val[2] = npyv_reinterpret_u8_s8(npyv_setall_s8(-sh2)); +#else + #error "please initialize the shifting operand for the new architecture" +#endif + return divisor; +} +// Initializing divisor parameters for signed 8-bit division +NPY_FINLINE npyv_s16x3 npyv_divisor_s16(npy_int16 d); +NPY_FINLINE npyv_s8x3 npyv_divisor_s8(npy_int8 d) +{ +#ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 + npyv_s16x3 p = npyv_divisor_s16(d); + npyv_s8x3 r; + r.val[0] = npyv_reinterpret_s8_s16(p.val[0]); + r.val[1] = npyv_reinterpret_s8_s16(p.val[1]); + r.val[2] = npyv_reinterpret_s8_s16(p.val[2]); + return r; +#else + int d1 = abs(d); + int sh, m; + if (d1 > 1) { + sh = (int)npyv__bitscan_revnz_u32(d1-1); // ceil(log2(abs(d))) - 1 + m = (1 << (8 + sh)) / d1 + 1; // multiplier + } + else if (d1 == 1) { + sh = 0; m = 1; + } + else { + // raise arithmetic exception for d == 0 + sh = m = 1 / ((npy_int8 volatile *)&d)[0]; // LCOV_EXCL_LINE + } + npyv_s8x3 divisor; + divisor.val[0] = npyv_setall_s8(m); + divisor.val[2] = npyv_setall_s8(d < 0 ? 
-1 : 0); + #ifdef NPY_HAVE_VSX2 + divisor.val[1] = npyv_setall_s8(sh); + #elif defined(NPY_HAVE_NEON) + divisor.val[1] = npyv_setall_s8(-sh); + #else + #error "please initialize the shifting operand for the new architecture" + #endif + return divisor; +#endif +} +// Initializing divisor parameters for unsigned 16-bit division +NPY_FINLINE npyv_u16x3 npyv_divisor_u16(npy_uint16 d) +{ + unsigned l, l2, sh1, sh2, m; + switch (d) { + case 0: // LCOV_EXCL_LINE + // raise arithmetic exception for d == 0 + m = sh1 = sh2 = 1 / ((npy_uint16 volatile *)&d)[0]; + break; + case 1: + m = 1; sh1 = sh2 = 0; + break; + case 2: + m = 1; sh1 = 1; sh2 = 0; + break; + default: + l = npyv__bitscan_revnz_u32(d - 1) + 1; // ceil(log2(d)) + l2 = (npy_uint16)(1 << l); // 2^l, overflow to 0 if l = 16 + m = ((l2 - d) << 16) / d + 1; // multiplier + sh1 = 1; sh2 = l - 1; // shift counts + } + npyv_u16x3 divisor; + divisor.val[0] = npyv_setall_u16(m); +#ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 + divisor.val[1] = npyv_set_u16(sh1); + divisor.val[2] = npyv_set_u16(sh2); +#elif defined(NPY_HAVE_VSX2) + divisor.val[1] = npyv_setall_u16(sh1); + divisor.val[2] = npyv_setall_u16(sh2); +#elif defined(NPY_HAVE_NEON) + divisor.val[1] = npyv_reinterpret_u16_s16(npyv_setall_s16(-sh1)); + divisor.val[2] = npyv_reinterpret_u16_s16(npyv_setall_s16(-sh2)); +#else + #error "please initialize the shifting operand for the new architecture" +#endif + return divisor; +} +// Initializing divisor parameters for signed 16-bit division +NPY_FINLINE npyv_s16x3 npyv_divisor_s16(npy_int16 d) +{ + int d1 = abs(d); + int sh, m; + if (d1 > 1) { + sh = (int)npyv__bitscan_revnz_u32(d1 - 1); // ceil(log2(abs(d))) - 1 + m = (1 << (16 + sh)) / d1 + 1; // multiplier + } + else if (d1 == 1) { + sh = 0; m = 1; + } + else { + // raise arithmetic exception for d == 0 + sh = m = 1 / ((npy_int16 volatile *)&d)[0]; // LCOV_EXCL_LINE + } + npyv_s16x3 divisor; + divisor.val[0] = npyv_setall_s16(m); + divisor.val[2] = npyv_setall_s16(d < 0 ? 
-1 : 0); // sign of divisor +#ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 + divisor.val[1] = npyv_set_s16(sh); +#elif defined(NPY_HAVE_VSX2) + divisor.val[1] = npyv_setall_s16(sh); +#elif defined(NPY_HAVE_NEON) + divisor.val[1] = npyv_setall_s16(-sh); +#else + #error "please initialize the shifting operand for the new architecture" +#endif + return divisor; +} +// Initializing divisor parameters for unsigned 32-bit division +NPY_FINLINE npyv_u32x3 npyv_divisor_u32(npy_uint32 d) +{ + npy_uint32 l, l2, sh1, sh2, m; + switch (d) { + case 0: // LCOV_EXCL_LINE + // raise arithmetic exception for d == 0 + m = sh1 = sh2 = 1 / ((npy_uint32 volatile *)&d)[0]; // LCOV_EXCL_LINE + break; + case 1: + m = 1; sh1 = sh2 = 0; + break; + case 2: + m = 1; sh1 = 1; sh2 = 0; + break; + default: + l = npyv__bitscan_revnz_u32(d - 1) + 1; // ceil(log2(d)) + l2 = (npy_uint32)(1ULL << l); // 2^l, overflow to 0 if l = 32 + m = ((npy_uint64)(l2 - d) << 32) / d + 1; // multiplier + sh1 = 1; sh2 = l - 1; // shift counts + } + npyv_u32x3 divisor; + divisor.val[0] = npyv_setall_u32(m); +#ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 + divisor.val[1] = npyv_set_u32(sh1); + divisor.val[2] = npyv_set_u32(sh2); +#elif defined(NPY_HAVE_VSX2) + divisor.val[1] = npyv_setall_u32(sh1); + divisor.val[2] = npyv_setall_u32(sh2); +#elif defined(NPY_HAVE_NEON) + divisor.val[1] = npyv_reinterpret_u32_s32(npyv_setall_s32(-sh1)); + divisor.val[2] = npyv_reinterpret_u32_s32(npyv_setall_s32(-sh2)); +#else + #error "please initialize the shifting operand for the new architecture" +#endif + return divisor; +} +// Initializing divisor parameters for signed 32-bit division +NPY_FINLINE npyv_s32x3 npyv_divisor_s32(npy_int32 d) +{ + npy_int32 d1 = abs(d); + npy_int32 sh, m; + if (d1 > 1) { + sh = npyv__bitscan_revnz_u32(d1 - 1); // ceil(log2(abs(d))) - 1 + m = (1ULL << (32 + sh)) / d1 + 1; // multiplier + } + else if (d1 == 1) { + sh = 0; m = 1; + } + // fix abs overflow + else if (d == (1 << 31)) { + m = d + 1; + sh = 30; + } + else { + // raise arithmetic exception for d == 0 + sh = m = 1 / ((npy_int32 volatile *)&d)[0]; // LCOV_EXCL_LINE + } + npyv_s32x3 divisor; + divisor.val[0] = npyv_setall_s32(m); + divisor.val[2] = npyv_setall_s32(d < 0 ? -1 : 0); // sign of divisor +#ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 + divisor.val[1] = npyv_set_s32(sh); +#elif defined(NPY_HAVE_VSX2) + divisor.val[1] = npyv_setall_s32(sh); +#elif defined(NPY_HAVE_NEON) + divisor.val[1] = npyv_setall_s32(-sh); +#else + #error "please initialize the shifting operand for the new architecture" +#endif + return divisor; +} +// Initializing divisor parameters for unsigned 64-bit division +NPY_FINLINE npyv_u64x3 npyv_divisor_u64(npy_uint64 d) +{ + npyv_u64x3 divisor; +#if defined(NPY_HAVE_VSX2) || defined(NPY_HAVE_NEON) + divisor.val[0] = npyv_setall_u64(d); +#else + npy_uint64 l, l2, sh1, sh2, m; + switch (d) { + case 0: // LCOV_EXCL_LINE + // raise arithmetic exception for d == 0 + m = sh1 = sh2 = 1 / ((npy_uint64 volatile *)&d)[0]; // LCOV_EXCL_LINE + break; + case 1: + m = 1; sh1 = sh2 = 0; + break; + case 2: + m = 1; sh1 = 1; sh2 = 0; + break; + default: + l = npyv__bitscan_revnz_u64(d - 1) + 1; // ceil(log2(d)) + l2 = l < 64 ? 
1ULL << l : 0; // 2^l + m = npyv__divh128_u64(l2 - d, d) + 1; // multiplier + sh1 = 1; sh2 = l - 1; // shift counts + } + divisor.val[0] = npyv_setall_u64(m); + #ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 + divisor.val[1] = npyv_set_u64(sh1); + divisor.val[2] = npyv_set_u64(sh2); + #else + #error "please initialize the shifting operand for the new architecture" + #endif +#endif + return divisor; +} +// Initializing divisor parameters for signed 64-bit division +NPY_FINLINE npyv_s64x3 npyv_divisor_s64(npy_int64 d) +{ + npyv_s64x3 divisor; +#if defined(NPY_HAVE_VSX2) || defined(NPY_HAVE_NEON) + divisor.val[0] = npyv_setall_s64(d); + divisor.val[1] = npyv_cvt_s64_b64( + npyv_cmpeq_s64(npyv_setall_s64(-1), divisor.val[0]) + ); +#else + npy_int64 d1 = llabs(d); + npy_int64 sh, m; + if (d1 > 1) { + sh = npyv__bitscan_revnz_u64(d1 - 1); // ceil(log2(abs(d))) - 1 + m = npyv__divh128_u64(1ULL << sh, d1) + 1; // multiplier + } + else if (d1 == 1) { + sh = 0; m = 1; + } + // fix abs overflow + else if (d == (1LL << 63)) { + m = d + 1; + sh = 62; + } + else { + // raise arithmetic exception for d == 0 + sh = m = 1 / ((npy_int64 volatile *)&d)[0]; // LCOV_EXCL_LINE + } + divisor.val[0] = npyv_setall_s64(m); + divisor.val[2] = npyv_setall_s64(d < 0 ? -1 : 0); // sign of divisor + #ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 + divisor.val[1] = npyv_set_s64(sh); + #else + #error "please initialize the shifting operand for the new architecture" + #endif +#endif + return divisor; +} + +#endif // _NPY_SIMD_INTDIV_H diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index d6c14228d7a4..a3e2b95de9bd 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -56,6 +56,11 @@ typedef double npyv_lanetype_f64; #include "emulate_maskop.h" #endif +// enable integer divisor generator for all SIMD extensions +#if NPY_SIMD + #include "intdiv.h" +#endif + /** * Some SIMD extensions currently(AVX2, AVX512F) require (de facto) * a maximum number of strides sizes when dealing with non-contiguous memory access. 
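For reference, the Figure 4.1 recipe quoted in the intdiv.h header of the patch above can be checked with a small scalar sketch. The following is a minimal C99 sketch, not part of the patch: it splits parameter computation from the division the same way npyv_divisor_u32/npyv_divc_u32 do for unsigned 32-bit lanes, and the names udiv32_params, udiv32_precompute and udiv32_divide are made up for illustration only.

/* scalar reference of unsigned division by a run-time invariant divisor */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t m; int sh1, sh2; } udiv32_params;

/* precompute multiplier and shift counts for floor(a/d), d >= 1 */
static udiv32_params udiv32_precompute(uint32_t d)
{
    assert(d != 0);
    int l = 0;                              /* l = ceil(log2(d)) */
    while (((uint64_t)1 << l) < d) {
        l++;
    }
    uint64_t l2 = (uint64_t)1 << l;         /* 2^l */
    udiv32_params p;
    p.m   = (uint32_t)((((l2 - d) << 32) / d) + 1);  /* 2^32*(2^l - d)/d + 1 */
    p.sh1 = l < 1 ? l : 1;                  /* min(l, 1) */
    p.sh2 = l > 1 ? l - 1 : 0;              /* max(l - 1, 0) */
    return p;
}

/* q = floor(a/d) using only a high multiply, an add and two shifts */
static uint32_t udiv32_divide(uint32_t a, udiv32_params p)
{
    uint32_t t1 = (uint32_t)(((uint64_t)p.m * a) >> 32);   /* MULUH(m, a) */
    return (t1 + ((a - t1) >> p.sh1)) >> p.sh2;
}

int main(void)
{
    uint32_t d = 0x6e70;                    /* same divisor as the header example */
    udiv32_params p = udiv32_precompute(d); /* pay the setup cost once */
    for (uint32_t a = 0; a < 1000000; a += 7919) {
        assert(udiv32_divide(a, p) == a / d);
    }
    puts("ok");
    return 0;
}

The precompute-once, divide-many pattern shown here is exactly what makes the vectorized npyv_divc_* intrinsics worthwhile despite the cost of computing the parameters.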
From f4d06c77e1873759e573176b50dfcd41e2fa9982 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 16 Jan 2021 14:00:54 +0000 Subject: [PATCH 0710/1270] SIMD: add NPYV fast integer division intrinsics for SSE --- numpy/core/src/common/simd/sse/arithmetic.h | 177 +++++++++++++++++++- 1 file changed, 176 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index c21b7da2d7e4..a1ec88f56489 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -85,6 +85,181 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b) // saturated // TODO: after implment Packs intrins +/*************************** + * Integer Division + ***************************/ +// See simd/intdiv.h for more clarification +// divide each unsigned 8-bit element by a precomputed divisor +NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) +{ + const __m128i bmask = _mm_set1_epi32(0xFF00FF00); + const __m128i shf1b = _mm_set1_epi8(0xFFU >> _mm_cvtsi128_si32(divisor.val[1])); + const __m128i shf2b = _mm_set1_epi8(0xFFU >> _mm_cvtsi128_si32(divisor.val[2])); + // high part of unsigned multiplication + __m128i mulhi_odd = _mm_mulhi_epu16(a, divisor.val[0]); + __m128i mulhi_even = _mm_mulhi_epu16(_mm_slli_epi16(a, 8), divisor.val[0]); + mulhi_even = _mm_srli_epi16(mulhi_even, 8); + __m128i mulhi = npyv_select_u8(bmask, mulhi_odd, mulhi_even); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m128i q = _mm_sub_epi8(a, mulhi); + q = _mm_and_si128(_mm_srl_epi16(q, divisor.val[1]), shf1b); + q = _mm_add_epi8(mulhi, q); + q = _mm_and_si128(_mm_srl_epi16(q, divisor.val[2]), shf2b); + return q; +} +// divide each signed 8-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor); +NPY_FINLINE npyv_s8 npyv_divc_s8(npyv_s8 a, const npyv_s8x3 divisor) +{ + const __m128i bmask = _mm_set1_epi32(0x00FF00FF); + // instead of _mm_cvtepi8_epi16/_mm_packs_epi16 to wrap around overflow + __m128i divc_even = npyv_divc_s16(_mm_srai_epi16(_mm_slli_epi16(a, 8), 8), divisor); + __m128i divc_odd = npyv_divc_s16(_mm_srai_epi16(a, 8), divisor); + divc_odd = _mm_slli_epi16(divc_odd, 8); + return npyv_select_u8(bmask, divc_even, divc_odd); +} +// divide each unsigned 16-bit element by a precomputed divisor +NPY_FINLINE npyv_u16 npyv_divc_u16(npyv_u16 a, const npyv_u16x3 divisor) +{ + // high part of unsigned multiplication + __m128i mulhi = _mm_mulhi_epu16(a, divisor.val[0]); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m128i q = _mm_sub_epi16(a, mulhi); + q = _mm_srl_epi16(q, divisor.val[1]); + q = _mm_add_epi16(mulhi, q); + q = _mm_srl_epi16(q, divisor.val[2]); + return q; +} +// divide each signed 16-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor) +{ + // high part of signed multiplication + __m128i mulhi = _mm_mulhi_epi16(a, divisor.val[0]); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + __m128i q = _mm_sra_epi16(_mm_add_epi16(a, mulhi), divisor.val[1]); + q = _mm_sub_epi16(q, _mm_srai_epi16(a, 15)); + q = _mm_sub_epi16(_mm_xor_si128(q, divisor.val[2]), divisor.val[2]); + return q; +} +// divide each unsigned 32-bit element by a precomputed divisor +NPY_FINLINE npyv_u32 npyv_divc_u32(npyv_u32 a, const npyv_u32x3 divisor) +{ + // high part of unsigned multiplication + __m128i mulhi_even = 
_mm_srli_epi64(_mm_mul_epu32(a, divisor.val[0]), 32); + __m128i mulhi_odd = _mm_mul_epu32(_mm_srli_epi64(a, 32), divisor.val[0]); +#ifdef NPY_HAVE_SSE41 + __m128i mulhi = _mm_blend_epi16(mulhi_even, mulhi_odd, 0xCC); +#else + __m128i mask_13 = _mm_setr_epi32(0, -1, 0, -1); + mulhi_odd = _mm_and_si128(mulhi_odd, mask_13); + __m128i mulhi = _mm_or_si128(mulhi_even, mulhi_odd); +#endif + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m128i q = _mm_sub_epi32(a, mulhi); + q = _mm_srl_epi32(q, divisor.val[1]); + q = _mm_add_epi32(mulhi, q); + q = _mm_srl_epi32(q, divisor.val[2]); + return q; +} +// divide each signed 32-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s32 npyv_divc_s32(npyv_s32 a, const npyv_s32x3 divisor) +{ + __m128i asign = _mm_srai_epi32(a, 31); +#ifdef NPY_HAVE_SSE41 + // high part of signed multiplication + __m128i mulhi_even = _mm_srli_epi64(_mm_mul_epi32(a, divisor.val[0]), 32); + __m128i mulhi_odd = _mm_mul_epi32(_mm_srli_epi64(a, 32), divisor.val[0]); + __m128i mulhi = _mm_blend_epi16(mulhi_even, mulhi_odd, 0xCC); +#else // not SSE4.1 + // high part of "unsigned" multiplication + __m128i mulhi_even = _mm_srli_epi64(_mm_mul_epu32(a, divisor.val[0]), 32); + __m128i mulhi_odd = _mm_mul_epu32(_mm_srli_epi64(a, 32), divisor.val[0]); + __m128i mask_13 = _mm_setr_epi32(0, -1, 0, -1); + mulhi_odd = _mm_and_si128(mulhi_odd, mask_13); + __m128i mulhi = _mm_or_si128(mulhi_even, mulhi_odd); + // convert unsigned to signed high multiplication + // mulhi - ((a < 0) ? m : 0) - ((m < 0) ? a : 0); + const __m128i msign= _mm_srai_epi32(divisor.val[0], 31); + __m128i m_asign = _mm_and_si128(divisor.val[0], asign); + __m128i a_msign = _mm_and_si128(a, msign); + mulhi = _mm_sub_epi32(mulhi, m_asign); + mulhi = _mm_sub_epi32(mulhi, a_msign); +#endif + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + __m128i q = _mm_sra_epi32(_mm_add_epi32(a, mulhi), divisor.val[1]); + q = _mm_sub_epi32(q, asign); + q = _mm_sub_epi32(_mm_xor_si128(q, divisor.val[2]), divisor.val[2]); + return q; +} +// returns the high 64 bits of unsigned 64-bit multiplication +// xref https://stackoverflow.com/a/28827013 +NPY_FINLINE npyv_u64 npyv__mullhi_u64(npyv_u64 a, npyv_u64 b) +{ + __m128i lomask = npyv_setall_s64(0xffffffff); + __m128i a_hi = _mm_srli_epi64(a, 32); // a0l, a0h, a1l, a1h + __m128i b_hi = _mm_srli_epi64(b, 32); // b0l, b0h, b1l, b1h + // compute partial products + __m128i w0 = _mm_mul_epu32(a, b); // a0l*b0l, a1l*b1l + __m128i w1 = _mm_mul_epu32(a, b_hi); // a0l*b0h, a1l*b1h + __m128i w2 = _mm_mul_epu32(a_hi, b); // a0h*b0l, a1h*b0l + __m128i w3 = _mm_mul_epu32(a_hi, b_hi); // a0h*b0h, a1h*b1h + // sum partial products + __m128i w0h = _mm_srli_epi64(w0, 32); + __m128i s1 = _mm_add_epi64(w1, w0h); + __m128i s1l = _mm_and_si128(s1, lomask); + __m128i s1h = _mm_srli_epi64(s1, 32); + + __m128i s2 = _mm_add_epi64(w2, s1l); + __m128i s2h = _mm_srli_epi64(s2, 32); + + __m128i hi = _mm_add_epi64(w3, s1h); + hi = _mm_add_epi64(hi, s2h); + return hi; +} +// divide each unsigned 64-bit element by a precomputed divisor +NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor) +{ + // high part of unsigned multiplication + __m128i mulhi = npyv__mullhi_u64(a, divisor.val[0]); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m128i q = _mm_sub_epi64(a, mulhi); + q = _mm_srl_epi64(q, divisor.val[1]); + q = _mm_add_epi64(mulhi, q); + q = _mm_srl_epi64(q, divisor.val[2]); + return q; +} +// divide each signed 64-bit element by a 
precomputed divisor (round towards zero) +NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) +{ + // high part of unsigned multiplication + __m128i mulhi = npyv__mullhi_u64(a, divisor.val[0]); + // convert unsigned to signed high multiplication + // mulhi - ((a < 0) ? m : 0) - ((m < 0) ? a : 0); +#ifdef NPY_HAVE_SSE42 + const __m128i msign= _mm_cmpgt_epi64(_mm_setzero_si128(), divisor.val[0]); + __m128i asign = _mm_cmpgt_epi64(_mm_setzero_si128(), a); +#else + const __m128i msign= _mm_srai_epi32(_mm_shuffle_epi32(divisor.val[0], _MM_SHUFFLE(3, 3, 1, 1)), 31); + __m128i asign = _mm_srai_epi32(_mm_shuffle_epi32(a, _MM_SHUFFLE(3, 3, 1, 1)), 31); +#endif + __m128i m_asign = _mm_and_si128(divisor.val[0], asign); + __m128i a_msign = _mm_and_si128(a, msign); + mulhi = _mm_sub_epi64(mulhi, m_asign); + mulhi = _mm_sub_epi64(mulhi, a_msign); + // q = (a + mulhi) >> sh + __m128i q = _mm_add_epi64(a, mulhi); + // emulate arithmetic right shift + const __m128i sigb = npyv_setall_s64(1LL << 63); + q = _mm_srl_epi64(_mm_add_epi64(q, sigb), divisor.val[1]); + q = _mm_sub_epi64(q, _mm_srl_epi64(sigb, divisor.val[1])); + // q = q - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + q = _mm_sub_epi64(q, asign); + q = _mm_sub_epi64(_mm_xor_si128(q, divisor.val[2]), divisor.val[2]); + return q; +} /*************************** * Division ***************************/ @@ -175,7 +350,7 @@ NPY_FINLINE float npyv_sum_f32(npyv_f32 a) __m128 t2 = _mm_add_ps(a, t1); __m128 t3 = _mm_shuffle_ps(t2, t2, 1); __m128 t4 = _mm_add_ss(t2, t3); - return _mm_cvtss_f32(t4); + return _mm_cvtss_f32(t4); #endif } From a40a3537796e8f4d3fc45ff3942ba6327df19739 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 16 Jan 2021 14:01:11 +0000 Subject: [PATCH 0711/1270] SIMD: add NPYV fast integer division intrinsics for AVX2 --- numpy/core/src/common/simd/avx2/arithmetic.h | 160 ++++++++++++++++++- 1 file changed, 158 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index 4b8258759266..7cd5a0ea67f1 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -66,6 +66,164 @@ // saturated // TODO: after implment Packs intrins +/*************************** + * Integer Division + ***************************/ +// See simd/intdiv.h for more clarification +// divide each unsigned 8-bit element by a precomputed divisor +NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) +{ + const __m256i bmask = _mm256_set1_epi32(0xFF00FF00); + const __m128i shf1 = _mm256_castsi256_si128(divisor.val[1]); + const __m128i shf2 = _mm256_castsi256_si128(divisor.val[2]); + const __m256i shf1b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf1)); + const __m256i shf2b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf2)); + // high part of unsigned multiplication + __m256i mulhi_odd = _mm256_mulhi_epu16(a, divisor.val[0]); + __m256i mulhi_even = _mm256_mulhi_epu16(_mm256_slli_epi16(a, 8), divisor.val[0]); + mulhi_even = _mm256_srli_epi16(mulhi_even, 8); + __m256i mulhi = _mm256_blendv_epi8(mulhi_even, mulhi_odd, bmask); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m256i q = _mm256_sub_epi8(a, mulhi); + q = _mm256_and_si256(_mm256_srl_epi16(q, shf1), shf1b); + q = _mm256_add_epi8(mulhi, q); + q = _mm256_and_si256(_mm256_srl_epi16(q, shf2), shf2b); + return q; +} +// divide each signed 8-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s16 
npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor); +NPY_FINLINE npyv_s8 npyv_divc_s8(npyv_s8 a, const npyv_s8x3 divisor) +{ + const __m256i bmask = _mm256_set1_epi32(0x00FF00FF); + // instead of _mm256_cvtepi8_epi16/_mm256_packs_epi16 to wrap around overflow + __m256i divc_even = npyv_divc_s16(_mm256_srai_epi16(_mm256_slli_epi16(a, 8), 8), divisor); + __m256i divc_odd = npyv_divc_s16(_mm256_srai_epi16(a, 8), divisor); + divc_odd = _mm256_slli_epi16(divc_odd, 8); + return _mm256_blendv_epi8(divc_odd, divc_even, bmask); +} +// divide each unsigned 16-bit element by a precomputed divisor +NPY_FINLINE npyv_u16 npyv_divc_u16(npyv_u16 a, const npyv_u16x3 divisor) +{ + const __m128i shf1 = _mm256_castsi256_si128(divisor.val[1]); + const __m128i shf2 = _mm256_castsi256_si128(divisor.val[2]); + // high part of unsigned multiplication + __m256i mulhi = _mm256_mulhi_epu16(a, divisor.val[0]); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m256i q = _mm256_sub_epi16(a, mulhi); + q = _mm256_srl_epi16(q, shf1); + q = _mm256_add_epi16(mulhi, q); + q = _mm256_srl_epi16(q, shf2); + return q; +} +// divide each signed 16-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor) +{ + const __m128i shf1 = _mm256_castsi256_si128(divisor.val[1]); + // high part of signed multiplication + __m256i mulhi = _mm256_mulhi_epi16(a, divisor.val[0]); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + __m256i q = _mm256_sra_epi16(_mm256_add_epi16(a, mulhi), shf1); + q = _mm256_sub_epi16(q, _mm256_srai_epi16(a, 15)); + q = _mm256_sub_epi16(_mm256_xor_si256(q, divisor.val[2]), divisor.val[2]); + return q; +} +// divide each unsigned 32-bit element by a precomputed divisor +NPY_FINLINE npyv_u32 npyv_divc_u32(npyv_u32 a, const npyv_u32x3 divisor) +{ + const __m128i shf1 = _mm256_castsi256_si128(divisor.val[1]); + const __m128i shf2 = _mm256_castsi256_si128(divisor.val[2]); + // high part of unsigned multiplication + __m256i mulhi_even = _mm256_srli_epi64(_mm256_mul_epu32(a, divisor.val[0]), 32); + __m256i mulhi_odd = _mm256_mul_epu32(_mm256_srli_epi64(a, 32), divisor.val[0]); + __m256i mulhi = _mm256_blend_epi32(mulhi_even, mulhi_odd, 0xAA); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m256i q = _mm256_sub_epi32(a, mulhi); + q = _mm256_srl_epi32(q, shf1); + q = _mm256_add_epi32(mulhi, q); + q = _mm256_srl_epi32(q, shf2); + return q; +} +// divide each signed 32-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s32 npyv_divc_s32(npyv_s32 a, const npyv_s32x3 divisor) +{ + const __m128i shf1 = _mm256_castsi256_si128(divisor.val[1]); + // high part of signed multiplication + __m256i mulhi_even = _mm256_srli_epi64(_mm256_mul_epi32(a, divisor.val[0]), 32); + __m256i mulhi_odd = _mm256_mul_epi32(_mm256_srli_epi64(a, 32), divisor.val[0]); + __m256i mulhi = _mm256_blend_epi32(mulhi_even, mulhi_odd, 0xAA); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + __m256i q = _mm256_sra_epi32(_mm256_add_epi32(a, mulhi), shf1); + q = _mm256_sub_epi32(q, _mm256_srai_epi32(a, 31)); + q = _mm256_sub_epi32(_mm256_xor_si256(q, divisor.val[2]), divisor.val[2]); + return q; +} +// returns the high 64 bits of unsigned 64-bit multiplication +// xref https://stackoverflow.com/a/28827013 +NPY_FINLINE npyv_u64 npyv__mullhi_u64(npyv_u64 a, npyv_u64 b) +{ + __m256i lomask = npyv_setall_s64(0xffffffff); + __m256i a_hi = _mm256_srli_epi64(a, 32); // a0l, a0h, a1l, a1h + __m256i 
b_hi = _mm256_srli_epi64(b, 32); // b0l, b0h, b1l, b1h + // compute partial products + __m256i w0 = _mm256_mul_epu32(a, b); // a0l*b0l, a1l*b1l + __m256i w1 = _mm256_mul_epu32(a, b_hi); // a0l*b0h, a1l*b1h + __m256i w2 = _mm256_mul_epu32(a_hi, b); // a0h*b0l, a1h*b0l + __m256i w3 = _mm256_mul_epu32(a_hi, b_hi); // a0h*b0h, a1h*b1h + // sum partial products + __m256i w0h = _mm256_srli_epi64(w0, 32); + __m256i s1 = _mm256_add_epi64(w1, w0h); + __m256i s1l = _mm256_and_si256(s1, lomask); + __m256i s1h = _mm256_srli_epi64(s1, 32); + + __m256i s2 = _mm256_add_epi64(w2, s1l); + __m256i s2h = _mm256_srli_epi64(s2, 32); + + __m256i hi = _mm256_add_epi64(w3, s1h); + hi = _mm256_add_epi64(hi, s2h); + return hi; +} +// divide each unsigned 64-bit element by a divisor +NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor) +{ + const __m128i shf1 = _mm256_castsi256_si128(divisor.val[1]); + const __m128i shf2 = _mm256_castsi256_si128(divisor.val[2]); + // high part of unsigned multiplication + __m256i mulhi = npyv__mullhi_u64(a, divisor.val[0]); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m256i q = _mm256_sub_epi64(a, mulhi); + q = _mm256_srl_epi64(q, shf1); + q = _mm256_add_epi64(mulhi, q); + q = _mm256_srl_epi64(q, shf2); + return q; +} +// divide each unsigned 64-bit element by a divisor (round towards zero) +NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) +{ + const __m128i shf1 = _mm256_castsi256_si128(divisor.val[1]); + // high part of unsigned multiplication + __m256i mulhi = npyv__mullhi_u64(a, divisor.val[0]); + // convert unsigned to signed high multiplication + // mulhi - ((a < 0) ? m : 0) - ((m < 0) ? a : 0); + __m256i asign = _mm256_cmpgt_epi64(_mm256_setzero_si256(), a); + __m256i msign = _mm256_cmpgt_epi64(_mm256_setzero_si256(), divisor.val[0]); + __m256i m_asign = _mm256_and_si256(divisor.val[0], asign); + __m256i a_msign = _mm256_and_si256(a, msign); + mulhi = _mm256_sub_epi64(mulhi, m_asign); + mulhi = _mm256_sub_epi64(mulhi, a_msign); + // q = (a + mulhi) >> sh + __m256i q = _mm256_add_epi64(a, mulhi); + // emulate arithmetic right shift + const __m256i sigb = npyv_setall_s64(1LL << 63); + q = _mm256_srl_epi64(_mm256_add_epi64(q, sigb), shf1); + q = _mm256_sub_epi64(q, _mm256_srl_epi64(sigb, shf1)); + // q = q - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + q = _mm256_sub_epi64(q, asign); + q = _mm256_sub_epi64(_mm256_xor_si256(q, divisor.val[2]), divisor.val[2]); + return q; +} /*************************** * Division ***************************/ @@ -176,5 +334,3 @@ NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) } #endif // _NPY_SIMD_AVX2_ARITHMETIC_H - - From 6ab925dbf6548e384b08ad88f5951eb8c81e905e Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 16 Jan 2021 14:01:21 +0000 Subject: [PATCH 0712/1270] SIMD: add NPYV fast integer division intrinsics for AVX512 --- .../core/src/common/simd/avx512/arithmetic.h | 224 +++++++++++++++++- 1 file changed, 222 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index 450da7ea5484..29e1af7e806d 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -106,6 +106,226 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) // saturated // TODO: after implment Packs intrins +/*************************** + * Integer Division + ***************************/ +// See simd/intdiv.h for more clarification +// divide each unsigned 8-bit 
element by divisor +NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) +{ + const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); + const __m128i shf2 = _mm512_castsi512_si128(divisor.val[2]); +#ifdef NPY_HAVE_AVX512BW + const __m512i shf1b = _mm512_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf1)); + const __m512i shf2b = _mm512_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf2)); + // high part of unsigned multiplication + __m512i mulhi_odd = _mm512_mulhi_epu16(a, divisor.val[0]); + __m512i mulhi_even = _mm512_mulhi_epu16(_mm512_slli_epi16(a, 8), divisor.val[0]); + mulhi_even = _mm512_srli_epi16(mulhi_even, 8); + __m512i mulhi = _mm512_mask_mov_epi8(mulhi_even, 0xAAAAAAAAAAAAAAAA, mulhi_odd); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m512i q = _mm512_sub_epi8(a, mulhi); + q = _mm512_and_si512(_mm512_srl_epi16(q, shf1), shf1b); + q = _mm512_add_epi8(mulhi, q); + q = _mm512_and_si512(_mm512_srl_epi16(q, shf2), shf2b); + return q; +#else + const __m256i bmask = _mm256_set1_epi32(0xFF00FF00); + const __m256i shf1b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf1)); + const __m256i shf2b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf2)); + const __m512i shf2bw= npyv512_combine_si256(shf2b, shf2b); + const __m256i mulc = npyv512_lower_si256(divisor.val[0]); + //// lower 256-bit + __m256i lo_a = npyv512_lower_si256(a); + // high part of unsigned multiplication + __m256i mulhi_odd = _mm256_mulhi_epu16(lo_a, mulc); + __m256i mulhi_even = _mm256_mulhi_epu16(_mm256_slli_epi16(lo_a, 8), mulc); + mulhi_even = _mm256_srli_epi16(mulhi_even, 8); + __m256i mulhi = _mm256_blendv_epi8(mulhi_even, mulhi_odd, bmask); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m256i lo_q = _mm256_sub_epi8(lo_a, mulhi); + lo_q = _mm256_and_si256(_mm256_srl_epi16(lo_q, shf1), shf1b); + lo_q = _mm256_add_epi8(mulhi, lo_q); + lo_q = _mm256_srl_epi16(lo_q, shf2); // no sign extend + + //// higher 256-bit + __m256i hi_a = npyv512_higher_si256(a); + // high part of unsigned multiplication + mulhi_odd = _mm256_mulhi_epu16(hi_a, mulc); + mulhi_even = _mm256_mulhi_epu16(_mm256_slli_epi16(hi_a, 8), mulc); + mulhi_even = _mm256_srli_epi16(mulhi_even, 8); + mulhi = _mm256_blendv_epi8(mulhi_even, mulhi_odd, bmask); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m256i hi_q = _mm256_sub_epi8(hi_a, mulhi); + hi_q = _mm256_and_si256(_mm256_srl_epi16(hi_q, shf1), shf1b); + hi_q = _mm256_add_epi8(mulhi, hi_q); + hi_q = _mm256_srl_epi16(hi_q, shf2); // no sign extend + return _mm512_and_si512(npyv512_combine_si256(lo_q, hi_q), shf2bw); // extend sign +#endif +} +// divide each signed 8-bit element by divisor (round towards zero) +NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor); +NPY_FINLINE npyv_s8 npyv_divc_s8(npyv_s8 a, const npyv_s8x3 divisor) +{ + __m512i divc_even = npyv_divc_s16(npyv_shri_s16(npyv_shli_s16(a, 8), 8), divisor); + __m512i divc_odd = npyv_divc_s16(npyv_shri_s16(a, 8), divisor); + divc_odd = npyv_shli_s16(divc_odd, 8); +#ifdef NPY_HAVE_AVX512BW + return _mm512_mask_mov_epi8(divc_even, 0xAAAAAAAAAAAAAAAA, divc_odd); +#else + const __m512i bmask = _mm512_set1_epi32(0x00FF00FF); + return npyv_select_u8(bmask, divc_even, divc_odd); +#endif +} +// divide each unsigned 16-bit element by divisor +NPY_FINLINE npyv_u16 npyv_divc_u16(npyv_u16 a, const npyv_u16x3 divisor) +{ + const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); + const __m128i shf2 = _mm512_castsi512_si128(divisor.val[2]); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + 
#define NPYV__DIVC_U16(RLEN, A, MULC, R) \ + mulhi = _mm##RLEN##_mulhi_epu16(A, MULC); \ + R = _mm##RLEN##_sub_epi16(A, mulhi); \ + R = _mm##RLEN##_srl_epi16(R, shf1); \ + R = _mm##RLEN##_add_epi16(mulhi, R); \ + R = _mm##RLEN##_srl_epi16(R, shf2); + +#ifdef NPY_HAVE_AVX512BW + __m512i mulhi, q; + NPYV__DIVC_U16(512, a, divisor.val[0], q) + return q; +#else + const __m256i m = npyv512_lower_si256(divisor.val[0]); + __m256i lo_a = npyv512_lower_si256(a); + __m256i hi_a = npyv512_higher_si256(a); + + __m256i mulhi, lo_q, hi_q; + NPYV__DIVC_U16(256, lo_a, m, lo_q) + NPYV__DIVC_U16(256, hi_a, m, hi_q) + return npyv512_combine_si256(lo_q, hi_q); +#endif + #undef NPYV__DIVC_U16 +} +// divide each signed 16-bit element by divisor (round towards zero) +NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor) +{ + const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + #define NPYV__DIVC_S16(RLEN, A, MULC, DSIGN, R) \ + mulhi = _mm##RLEN##_mulhi_epi16(A, MULC); \ + R = _mm##RLEN##_sra_epi16(_mm##RLEN##_add_epi16(A, mulhi), shf1); \ + R = _mm##RLEN##_sub_epi16(R, _mm##RLEN##_srai_epi16(A, 15)); \ + R = _mm##RLEN##_sub_epi16(_mm##RLEN##_xor_si##RLEN(R, DSIGN), DSIGN); + +#ifdef NPY_HAVE_AVX512BW + __m512i mulhi, q; + NPYV__DIVC_S16(512, a, divisor.val[0], divisor.val[2], q) + return q; +#else + const __m256i m = npyv512_lower_si256(divisor.val[0]); + const __m256i dsign = npyv512_lower_si256(divisor.val[2]); + __m256i lo_a = npyv512_lower_si256(a); + __m256i hi_a = npyv512_higher_si256(a); + + __m256i mulhi, lo_q, hi_q; + NPYV__DIVC_S16(256, lo_a, m, dsign, lo_q) + NPYV__DIVC_S16(256, hi_a, m, dsign, hi_q) + return npyv512_combine_si256(lo_q, hi_q); +#endif + #undef NPYV__DIVC_S16 +} +// divide each unsigned 32-bit element by divisor +NPY_FINLINE npyv_u32 npyv_divc_u32(npyv_u32 a, const npyv_u32x3 divisor) +{ + const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); + const __m128i shf2 = _mm512_castsi512_si128(divisor.val[2]); + // high part of unsigned multiplication + __m512i mulhi_even = _mm512_srli_epi64(_mm512_mul_epu32(a, divisor.val[0]), 32); + __m512i mulhi_odd = _mm512_mul_epu32(_mm512_srli_epi64(a, 32), divisor.val[0]); + __m512i mulhi = _mm512_mask_mov_epi32(mulhi_even, 0xAAAA, mulhi_odd); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m512i q = _mm512_sub_epi32(a, mulhi); + q = _mm512_srl_epi32(q, shf1); + q = _mm512_add_epi32(mulhi, q); + q = _mm512_srl_epi32(q, shf2); + return q; +} +// divide each signed 32-bit element by divisor (round towards zero) +NPY_FINLINE npyv_s32 npyv_divc_s32(npyv_s32 a, const npyv_s32x3 divisor) +{ + const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); + // high part of signed multiplication + __m512i mulhi_even = _mm512_srli_epi64(_mm512_mul_epi32(a, divisor.val[0]), 32); + __m512i mulhi_odd = _mm512_mul_epi32(_mm512_srli_epi64(a, 32), divisor.val[0]); + __m512i mulhi = _mm512_mask_mov_epi32(mulhi_even, 0xAAAA, mulhi_odd); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + __m512i q = _mm512_sra_epi32(_mm512_add_epi32(a, mulhi), shf1); + q = _mm512_sub_epi32(q, _mm512_srai_epi32(a, 31)); + q = _mm512_sub_epi32(_mm512_xor_si512(q, divisor.val[2]), divisor.val[2]); + return q; +} +// returns the high 64 bits of unsigned 64-bit multiplication +// xref https://stackoverflow.com/a/28827013 +NPY_FINLINE npyv_u64 npyv__mullhi_u64(npyv_u64 a, npyv_u64 b) +{ + __m512i lomask = npyv_setall_s64(0xffffffff); + 
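// Illustrative sketch (not part of this patch): AVX-512 offers no instruction
// for the high 64 bits of a 64x64-bit product, so the code below assembles it
// from 32x32 -> 64-bit partial products. A scalar equivalent of the same
// summation (requires <stdint.h>):
static inline uint64_t mullhi_u64_scalar(uint64_t a, uint64_t b)
{
    uint64_t a_lo = a & 0xffffffff, a_hi = a >> 32;
    uint64_t b_lo = b & 0xffffffff, b_hi = b >> 32;
    uint64_t w0 = a_lo * b_lo;            // low  * low
    uint64_t w1 = a_lo * b_hi;            // low  * high
    uint64_t w2 = a_hi * b_lo;            // high * low
    uint64_t w3 = a_hi * b_hi;            // high * high
    uint64_t s1 = w1 + (w0 >> 32);        // add the carry out of w0; cannot overflow
    uint64_t s2 = w2 + (s1 & 0xffffffff);
    return w3 + (s1 >> 32) + (s2 >> 32);  // high 64 bits of a*b
}
// The vector lines that follow (w0..w3, s1, s2, hi) are the lane-wise version
// of these statements, using _mm512_mul_epu32 for the 32x32-bit products.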
__m512i a_hi = _mm512_srli_epi64(a, 32); // a0l, a0h, a1l, a1h
+    __m512i b_hi = _mm512_srli_epi64(b, 32); // b0l, b0h, b1l, b1h
+    // compute partial products
+    __m512i w0  = _mm512_mul_epu32(a, b);        // a0l*b0l, a1l*b1l
+    __m512i w1  = _mm512_mul_epu32(a, b_hi);     // a0l*b0h, a1l*b1h
+    __m512i w2  = _mm512_mul_epu32(a_hi, b);     // a0h*b0l, a1h*b1l
+    __m512i w3  = _mm512_mul_epu32(a_hi, b_hi);  // a0h*b0h, a1h*b1h
+    // sum partial products
+    __m512i w0h = _mm512_srli_epi64(w0, 32);
+    __m512i s1  = _mm512_add_epi64(w1, w0h);
+    __m512i s1l = _mm512_and_si512(s1, lomask);
+    __m512i s1h = _mm512_srli_epi64(s1, 32);
+
+    __m512i s2  = _mm512_add_epi64(w2, s1l);
+    __m512i s2h = _mm512_srli_epi64(s2, 32);
+
+    __m512i hi  = _mm512_add_epi64(w3, s1h);
+    hi          = _mm512_add_epi64(hi, s2h);
+    return hi;
+}
+// divide each unsigned 64-bit element by a divisor
+NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor)
+{
+    const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]);
+    const __m128i shf2 = _mm512_castsi512_si128(divisor.val[2]);
+    // high part of unsigned multiplication
+    __m512i mulhi = npyv__mullhi_u64(a, divisor.val[0]);
+    // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2
+    __m512i q = _mm512_sub_epi64(a, mulhi);
+    q = _mm512_srl_epi64(q, shf1);
+    q = _mm512_add_epi64(mulhi, q);
+    q = _mm512_srl_epi64(q, shf2);
+    return q;
+}
+// divide each signed 64-bit element by a divisor (round towards zero)
+NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor)
+{
+    const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]);
+    // high part of unsigned multiplication
+    __m512i mulhi = npyv__mullhi_u64(a, divisor.val[0]);
+    // convert unsigned to signed high multiplication
+    // mulhi - ((a < 0) ? m : 0) - ((m < 0) ? a : 0);
+    __m512i asign   = _mm512_srai_epi64(a, 63);
+    __m512i msign   = _mm512_srai_epi64(divisor.val[0], 63);
+    __m512i m_asign = _mm512_and_si512(divisor.val[0], asign);
+    __m512i a_msign = _mm512_and_si512(a, msign);
+    mulhi = _mm512_sub_epi64(mulhi, m_asign);
+    mulhi = _mm512_sub_epi64(mulhi, a_msign);
+    // q = ((a + mulhi) >> sh1) - XSIGN(a)
+    // trunc(a/d) = (q ^ dsign) - dsign
+    __m512i q = _mm512_sra_epi64(_mm512_add_epi64(a, mulhi), shf1);
+    q = _mm512_sub_epi64(q, asign);
+    q = _mm512_sub_epi64(_mm512_xor_si512(q, divisor.val[2]), divisor.val[2]);
+    return q;
+}
 /***************************
  * Division
  ***************************/
@@ -136,11 +356,11 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b)
  * 2- shuff(cross) /add /shuff(cross) /add /shuff /add /shuff /add /extract
  * 3- _mm512_reduce_add_ps/pd
  * The first one is been widely used by many projects
- * 
+ *
  * the second one is used by Intel Compiler, maybe because the
  * latency of hadd increased by (2-3) starting from Skylake-X which makes two
  * extra shuffles(non-cross) cheaper. check https://godbolt.org/z/s3G9Er for more info.
- * 
+ *
  * The third one is almost the same as the second one but only works for
  * intel compiler/GCC 7.1/Clang 4, we still need to support older GCC.
***************************/ From 5c185cc7c104928cea93917ebb806797d5d8d7dd Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 16 Jan 2021 14:01:34 +0000 Subject: [PATCH 0713/1270] SIMD: add NPYV fast integer division intrinsics for VSX --- numpy/core/src/common/simd/vsx/arithmetic.h | 132 ++++++++++++++++++++ 1 file changed, 132 insertions(+) diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h index 7c4e32f27ad1..123fcaf92d6b 100644 --- a/numpy/core/src/common/simd/vsx/arithmetic.h +++ b/numpy/core/src/common/simd/vsx/arithmetic.h @@ -94,6 +94,138 @@ #define npyv_mul_f32 vec_mul #define npyv_mul_f64 vec_mul +/*************************** + * Integer Division + ***************************/ +/*** + * TODO: Add support for VSX4(Power10) + */ +// See simd/intdiv.h for more clarification +// divide each unsigned 8-bit element by a precomputed divisor +NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) +{ + const npyv_u8 mergeo_perm = { + 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 + }; + // high part of unsigned multiplication + npyv_u16 mul_even = vec_mule(a, divisor.val[0]); + npyv_u16 mul_odd = vec_mulo(a, divisor.val[0]); + npyv_u8 mulhi = (npyv_u8)vec_perm(mul_even, mul_odd, mergeo_perm); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + npyv_u8 q = vec_sub(a, mulhi); + q = vec_sr(q, divisor.val[1]); + q = vec_add(mulhi, q); + q = vec_sr(q, divisor.val[2]); + return q; +} +// divide each signed 8-bit element by a precomputed divisor +NPY_FINLINE npyv_s8 npyv_divc_s8(npyv_s8 a, const npyv_s8x3 divisor) +{ + const npyv_u8 mergeo_perm = { + 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 + }; + // high part of signed multiplication + npyv_s16 mul_even = vec_mule(a, divisor.val[0]); + npyv_s16 mul_odd = vec_mulo(a, divisor.val[0]); + npyv_s8 mulhi = (npyv_s8)vec_perm(mul_even, mul_odd, mergeo_perm); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + npyv_s8 q = vec_sra(vec_add(a, mulhi), (npyv_u8)divisor.val[1]); + q = vec_sub(q, vec_sra(a, npyv_setall_u8(7))); + q = vec_sub(vec_xor(q, divisor.val[2]), divisor.val[2]); + return q; +} +// divide each unsigned 16-bit element by a precomputed divisor +NPY_FINLINE npyv_u16 npyv_divc_u16(npyv_u16 a, const npyv_u16x3 divisor) +{ + const npyv_u8 mergeo_perm = { + 2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31 + }; + // high part of unsigned multiplication + npyv_u32 mul_even = vec_mule(a, divisor.val[0]); + npyv_u32 mul_odd = vec_mulo(a, divisor.val[0]); + npyv_u16 mulhi = (npyv_u16)vec_perm(mul_even, mul_odd, mergeo_perm); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + npyv_u16 q = vec_sub(a, mulhi); + q = vec_sr(q, divisor.val[1]); + q = vec_add(mulhi, q); + q = vec_sr(q, divisor.val[2]); + return q; +} +// divide each signed 16-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor) +{ + const npyv_u8 mergeo_perm = { + 2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31 + }; + // high part of signed multiplication + npyv_s32 mul_even = vec_mule(a, divisor.val[0]); + npyv_s32 mul_odd = vec_mulo(a, divisor.val[0]); + npyv_s16 mulhi = (npyv_s16)vec_perm(mul_even, mul_odd, mergeo_perm); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + npyv_s16 q = vec_sra(vec_add(a, mulhi), (npyv_u16)divisor.val[1]); + q = vec_sub(q, vec_sra(a, npyv_setall_u16(15))); + q = vec_sub(vec_xor(q, 
divisor.val[2]), divisor.val[2]); + return q; +} +// divide each unsigned 32-bit element by a precomputed divisor +NPY_FINLINE npyv_u32 npyv_divc_u32(npyv_u32 a, const npyv_u32x3 divisor) +{ +#if defined(__GNUC__) && __GNUC__ < 8 + // Doubleword integer wide multiplication supported by GCC 8+ + npyv_u64 mul_even, mul_odd; + __asm__ ("vmulouw %0,%1,%2" : "=v" (mul_even) : "v" (a), "v" (divisor.val[0])); + __asm__ ("vmuleuw %0,%1,%2" : "=v" (mul_odd) : "v" (a), "v" (divisor.val[0])); +#else + // Doubleword integer wide multiplication supported by GCC 8+ + npyv_u64 mul_even = vec_mule(a, divisor.val[0]); + npyv_u64 mul_odd = vec_mulo(a, divisor.val[0]); +#endif + // high part of unsigned multiplication + npyv_u32 mulhi = vec_mergeo((npyv_u32)mul_even, (npyv_u32)mul_odd); + // floor(x/d) = (((a-mulhi) >> sh1) + mulhi) >> sh2 + npyv_u32 q = vec_sub(a, mulhi); + q = vec_sr(q, divisor.val[1]); + q = vec_add(mulhi, q); + q = vec_sr(q, divisor.val[2]); + return q; +} +// divide each signed 32-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s32 npyv_divc_s32(npyv_s32 a, const npyv_s32x3 divisor) +{ +#if defined(__GNUC__) && __GNUC__ < 8 + // Doubleword integer wide multiplication supported by GCC8+ + npyv_s64 mul_even, mul_odd; + __asm__ ("vmulosw %0,%1,%2" : "=v" (mul_even) : "v" (a), "v" (divisor.val[0])); + __asm__ ("vmulesw %0,%1,%2" : "=v" (mul_odd) : "v" (a), "v" (divisor.val[0])); +#else + // Doubleword integer wide multiplication supported by GCC8+ + npyv_s64 mul_even = vec_mule(a, divisor.val[0]); + npyv_s64 mul_odd = vec_mulo(a, divisor.val[0]); +#endif + // high part of signed multiplication + npyv_s32 mulhi = vec_mergeo((npyv_s32)mul_even, (npyv_s32)mul_odd); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + npyv_s32 q = vec_sra(vec_add(a, mulhi), (npyv_u32)divisor.val[1]); + q = vec_sub(q, vec_sra(a, npyv_setall_u32(31))); + q = vec_sub(vec_xor(q, divisor.val[2]), divisor.val[2]); + return q; +} +// divide each unsigned 64-bit element by a precomputed divisor +NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor) +{ + const npy_uint64 d = vec_extract(divisor.val[0], 0); + return npyv_set_u64(vec_extract(a, 0) / d, vec_extract(a, 1) / d); +} +// divide each signed 64-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) +{ + npyv_b64 overflow = vec_and(vec_cmpeq(a, npyv_setall_s64(-1LL << 63)), (npyv_b64)divisor.val[1]); + npyv_s64 d = vec_sel(divisor.val[0], npyv_setall_s64(1), overflow); + return vec_div(a, d); +} /*************************** * Division ***************************/ From 2da9858aed5efca22d92682000f03e341dc46d64 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 16 Jan 2021 14:01:44 +0000 Subject: [PATCH 0714/1270] SIMD: add NPYV fast integer division intrinsics for NEON --- numpy/core/src/common/simd/neon/arithmetic.h | 150 ++++++++++++++++++- 1 file changed, 149 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h index 69a49f571e1f..00994806df68 100644 --- a/numpy/core/src/common/simd/neon/arithmetic.h +++ b/numpy/core/src/common/simd/neon/arithmetic.h @@ -60,6 +60,154 @@ #define npyv_mul_f32 vmulq_f32 #define npyv_mul_f64 vmulq_f64 +/*************************** + * Integer Division + ***************************/ +// See simd/intdiv.h for more clarification +// divide each unsigned 8-bit element by a precomputed divisor +NPY_FINLINE 
npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) +{ + const uint8x8_t mulc_lo = vget_low_u8(divisor.val[0]); + // high part of unsigned multiplication + uint16x8_t mull_lo = vmull_u8(vget_low_u8(a), mulc_lo); +#if NPY_SIMD_F64 + uint16x8_t mull_hi = vmull_high_u8(a, divisor.val[0]); + // get the high unsigned bytes + uint8x16_t mulhi = vuzp2q_u8(vreinterpretq_u8_u16(mull_lo), vreinterpretq_u8_u16(mull_hi)); +#else + const uint8x8_t mulc_hi = vget_high_u8(divisor.val[0]); + uint16x8_t mull_hi = vmull_u8(vget_high_u8(a), mulc_hi); + uint8x16_t mulhi = vuzpq_u8(vreinterpretq_u8_u16(mull_lo), vreinterpretq_u8_u16(mull_hi)).val[1]; +#endif + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + uint8x16_t q = vsubq_u8(a, mulhi); + q = vshlq_u8(q, vreinterpretq_s8_u8(divisor.val[1])); + q = vaddq_u8(mulhi, q); + q = vshlq_u8(q, vreinterpretq_s8_u8(divisor.val[2])); + return q; +} +// divide each signed 8-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s8 npyv_divc_s8(npyv_s8 a, const npyv_s8x3 divisor) +{ + const int8x8_t mulc_lo = vget_low_s8(divisor.val[0]); + // high part of signed multiplication + int16x8_t mull_lo = vmull_s8(vget_low_s8(a), mulc_lo); +#if NPY_SIMD_F64 + int16x8_t mull_hi = vmull_high_s8(a, divisor.val[0]); + // get the high unsigned bytes + int8x16_t mulhi = vuzp2q_s8(vreinterpretq_s8_s16(mull_lo), vreinterpretq_s8_s16(mull_hi)); +#else + const int8x8_t mulc_hi = vget_high_s8(divisor.val[0]); + int16x8_t mull_hi = vmull_s8(vget_high_s8(a), mulc_hi); + int8x16_t mulhi = vuzpq_s8(vreinterpretq_s8_s16(mull_lo), vreinterpretq_s8_s16(mull_hi)).val[1]; +#endif + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + int8x16_t q = vshlq_s8(vaddq_s8(a, mulhi), divisor.val[1]); + q = vsubq_s8(q, vshrq_n_s8(a, 7)); + q = vsubq_s8(veorq_s8(q, divisor.val[2]), divisor.val[2]); + return q; +} +// divide each unsigned 16-bit element by a precomputed divisor +NPY_FINLINE npyv_u16 npyv_divc_u16(npyv_u16 a, const npyv_u16x3 divisor) +{ + const uint16x4_t mulc_lo = vget_low_u16(divisor.val[0]); + // high part of unsigned multiplication + uint32x4_t mull_lo = vmull_u16(vget_low_u16(a), mulc_lo); +#if NPY_SIMD_F64 + uint32x4_t mull_hi = vmull_high_u16(a, divisor.val[0]); + // get the high unsigned bytes + uint16x8_t mulhi = vuzp2q_u16(vreinterpretq_u16_u32(mull_lo), vreinterpretq_u16_u32(mull_hi)); +#else + const uint16x4_t mulc_hi = vget_high_u16(divisor.val[0]); + uint32x4_t mull_hi = vmull_u16(vget_high_u16(a), mulc_hi); + uint16x8_t mulhi = vuzpq_u16(vreinterpretq_u16_u32(mull_lo), vreinterpretq_u16_u32(mull_hi)).val[1]; +#endif + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + uint16x8_t q = vsubq_u16(a, mulhi); + q = vshlq_u16(q, vreinterpretq_s16_u16(divisor.val[1])); + q = vaddq_u16(mulhi, q); + q = vshlq_u16(q, vreinterpretq_s16_u16(divisor.val[2])); + return q; +} +// divide each signed 16-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor) +{ + const int16x4_t mulc_lo = vget_low_s16(divisor.val[0]); + // high part of signed multiplication + int32x4_t mull_lo = vmull_s16(vget_low_s16(a), mulc_lo); +#if NPY_SIMD_F64 + int32x4_t mull_hi = vmull_high_s16(a, divisor.val[0]); + // get the high unsigned bytes + int16x8_t mulhi = vuzp2q_s16(vreinterpretq_s16_s32(mull_lo), vreinterpretq_s16_s32(mull_hi)); +#else + const int16x4_t mulc_hi = vget_high_s16(divisor.val[0]); + int32x4_t mull_hi = vmull_s16(vget_high_s16(a), mulc_hi); + int16x8_t mulhi = 
vuzpq_s16(vreinterpretq_s16_s32(mull_lo), vreinterpretq_s16_s32(mull_hi)).val[1];
+#endif
+    // q = ((a + mulhi) >> sh1) - XSIGN(a)
+    // trunc(a/d) = (q ^ dsign) - dsign
+    int16x8_t q = vshlq_s16(vaddq_s16(a, mulhi), divisor.val[1]);
+    q = vsubq_s16(q, vshrq_n_s16(a, 15));
+    q = vsubq_s16(veorq_s16(q, divisor.val[2]), divisor.val[2]);
+    return q;
+}
+// divide each unsigned 32-bit element by a precomputed divisor
+NPY_FINLINE npyv_u32 npyv_divc_u32(npyv_u32 a, const npyv_u32x3 divisor)
+{
+    const uint32x2_t mulc_lo = vget_low_u32(divisor.val[0]);
+    // high part of unsigned multiplication
+    uint64x2_t mull_lo = vmull_u32(vget_low_u32(a), mulc_lo);
+#if NPY_SIMD_F64
+    uint64x2_t mull_hi = vmull_high_u32(a, divisor.val[0]);
+    // get the high unsigned bytes
+    uint32x4_t mulhi = vuzp2q_u32(vreinterpretq_u32_u64(mull_lo), vreinterpretq_u32_u64(mull_hi));
+#else
+    const uint32x2_t mulc_hi = vget_high_u32(divisor.val[0]);
+    uint64x2_t mull_hi = vmull_u32(vget_high_u32(a), mulc_hi);
+    uint32x4_t mulhi = vuzpq_u32(vreinterpretq_u32_u64(mull_lo), vreinterpretq_u32_u64(mull_hi)).val[1];
+#endif
+    // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2
+    uint32x4_t q = vsubq_u32(a, mulhi);
+    q = vshlq_u32(q, vreinterpretq_s32_u32(divisor.val[1]));
+    q = vaddq_u32(mulhi, q);
+    q = vshlq_u32(q, vreinterpretq_s32_u32(divisor.val[2]));
+    return q;
+}
+// divide each signed 32-bit element by a precomputed divisor (round towards zero)
+NPY_FINLINE npyv_s32 npyv_divc_s32(npyv_s32 a, const npyv_s32x3 divisor)
+{
+    const int32x2_t mulc_lo = vget_low_s32(divisor.val[0]);
+    // high part of signed multiplication
+    int64x2_t mull_lo = vmull_s32(vget_low_s32(a), mulc_lo);
+#if NPY_SIMD_F64
+    int64x2_t mull_hi = vmull_high_s32(a, divisor.val[0]);
+    // get the high unsigned bytes
+    int32x4_t mulhi = vuzp2q_s32(vreinterpretq_s32_s64(mull_lo), vreinterpretq_s32_s64(mull_hi));
+#else
+    const int32x2_t mulc_hi = vget_high_s32(divisor.val[0]);
+    int64x2_t mull_hi = vmull_s32(vget_high_s32(a), mulc_hi);
+    int32x4_t mulhi = vuzpq_s32(vreinterpretq_s32_s64(mull_lo), vreinterpretq_s32_s64(mull_hi)).val[1];
+#endif
+    // q = ((a + mulhi) >> sh1) - XSIGN(a)
+    // trunc(a/d) = (q ^ dsign) - dsign
+    int32x4_t q = vshlq_s32(vaddq_s32(a, mulhi), divisor.val[1]);
+    q = vsubq_s32(q, vshrq_n_s32(a, 31));
+    q = vsubq_s32(veorq_s32(q, divisor.val[2]), divisor.val[2]);
+    return q;
+}
+// divide each unsigned 64-bit element by a divisor
+NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor)
+{
+    const uint64_t d = vgetq_lane_u64(divisor.val[0], 0);
+    return npyv_set_u64(vgetq_lane_u64(a, 0) / d, vgetq_lane_u64(a, 1) / d);
+}
+// divide each signed 64-bit element by a divisor (round towards zero)
+NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor)
+{
+    const int64_t d = vgetq_lane_s64(divisor.val[0], 0);
+    return npyv_set_s64(vgetq_lane_s64(a, 0) / d, vgetq_lane_s64(a, 1) / d);
+}
 /***************************
  * Division
  ***************************/
@@ -148,7 +296,7 @@ NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a)
 {
-    uint32x2_t a0 = vpadd_u32(vget_low_u32(a), vget_high_u32(a)); 
+    uint32x2_t a0 = vpadd_u32(vget_low_u32(a), vget_high_u32(a));
     return (unsigned)vget_lane_u32(vpadd_u32(a0, vget_high_u32(a)),0);
 }
 

From 6c94b4c2c7d48acf08a0f4d2d5844f7d7f3669de Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Sat, 16 Jan 2021 14:01:56 +0000
Subject: [PATCH 0715/1270] SIMD, TST: add test cases for NPYV fast integer division

---
 numpy/core/src/_simd/_simd.dispatch.c.src | 19 +++-
 numpy/core/tests/test_simd.py | 70 
+++++++++++++++++++++++ 2 files changed, 86 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index e5b58a8d296e..4acd20a75cd1 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -15,8 +15,8 @@ /**begin repeat * #sfx = u8, s8, u16, s16, u32, s32, u64, s64, f32, f64# * #bsfx = b8, b8, b16, b16, b32, b32, b64, b64, b32, b64# - * #esfx = u16, s8, u32, s16, u32, s32, u64, s64, f32, f64# - * #expand_sup =1, 0, 1, 0, 0, 0, 0, 0, 0, 0# + * #esfx = u16, s8, u32,s16, u32, s32, u64, s64, f32, f64# + * #expand_sup= 1, 0, 1, 0, 0, 0, 0, 0, 0, 0# * #simd_sup = 1, 1, 1, 1, 1, 1, 1, 1, 1, NPY_SIMD_F64# * #fp_only = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# * #sat_sup = 1, 1, 1, 1, 0, 0, 0, 0, 0, 0# @@ -27,6 +27,7 @@ * #sum_sup = 0, 0, 0, 0, 1, 0, 1, 0, 1, 1# * #rev64_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 0# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# + * #intdiv_sup= 1, 1, 1, 1, 1, 1, 1, 1, 0, 0# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# * #shr_imm = 0, 0, 16, 16, 32, 32, 64, 64, 0, 0# */ @@ -354,6 +355,11 @@ SIMD_IMPL_INTRIN_2(mul_@sfx@, v@sfx@, v@sfx@, v@sfx@) SIMD_IMPL_INTRIN_2(div_@sfx@, v@sfx@, v@sfx@, v@sfx@) #endif // div_sup +#if @intdiv_sup@ +SIMD_IMPL_INTRIN_1(divisor_@sfx@, v@sfx@x3, @sfx@) +SIMD_IMPL_INTRIN_2(divc_@sfx@, v@sfx@, v@sfx@, v@sfx@x3) +#endif // intdiv_sup + #if @fused_sup@ /**begin repeat1 * #intrin = muladd, mulsub, nmuladd, nmulsub# @@ -442,6 +448,7 @@ SIMD_IMPL_INTRIN_1(not_@bsfx@, v@bsfx@, v@bsfx@) SIMD_IMPL_INTRIN_1(tobits_@bsfx@, u64, v@bsfx@) /**end repeat**/ + //######################################################################### //## Attach module functions //######################################################################### @@ -449,7 +456,7 @@ static PyMethodDef simd__intrinsics_methods[] = { /**begin repeat * #sfx = u8, s8, u16, s16, u32, s32, u64, s64, f32, f64# * #bsfx = b8, b8, b16, b16, b32, b32, b64, b64, b32, b64# - * #esfx = u16, s8, u32, s16, u32, s32, u64, s64, f32, f64# + * #esfx = u16, s8, u32,s16, u32, s32, u64, s64, f32, f64# * #expand_sup =1, 0, 1, 0, 0, 0, 0, 0, 0, 0# * #simd_sup = 1, 1, 1, 1, 1, 1, 1, 1, 1, NPY_SIMD_F64# * #fp_only = 0, 0, 0, 0, 0, 0, 0, 0, 1, 1# @@ -461,6 +468,7 @@ static PyMethodDef simd__intrinsics_methods[] = { * #sum_sup = 0, 0, 0, 0, 1, 0, 1, 0, 1, 1# * #rev64_sup = 1, 1, 1, 1, 1, 1, 0, 0, 1, 0# * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# + * #intdiv_sup= 1, 1, 1, 1, 1, 1, 1, 1, 0, 0# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# * #shr_imm = 0, 0, 16, 16, 32, 32, 64, 64, 0, 0# */ @@ -568,6 +576,11 @@ SIMD_INTRIN_DEF(mul_@sfx@) SIMD_INTRIN_DEF(div_@sfx@) #endif // div_sup +#if @intdiv_sup@ +SIMD_INTRIN_DEF(divisor_@sfx@) +SIMD_INTRIN_DEF(divc_@sfx@) +#endif // intdiv_sup + #if @fused_sup@ /**begin repeat1 * #intrin = muladd, mulsub, nmuladd, nmulsub# diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 1d1a111be83f..8306a86d32fb 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -733,6 +733,76 @@ def test_arithmetic_div(self): div = self.div(vdata_a, vdata_b) assert div == data_div + def test_arithmetic_intdiv(self): + """ + Test integer division intrinics: + npyv_divisor_##sfx + npyv_divc_##sfx + """ + if self._is_fp(): + return + + def trunc_div(a, d): + """ + Divide towards zero works with large integers > 2^53, + equivalent to int(a/d) + """ + sign_a, sign_d = a < 0, d < 0 + if a == 0 or sign_a == sign_d: + return a // d + return 
(a + sign_d - sign_a) // d + 1 + + int_min = self._int_min() if self._is_signed() else 1 + int_max = self._int_max() + rdata = ( + 0, 1, self.nlanes, int_max-self.nlanes, + int_min, int_min//2 + 1 + ) + divisors = (1, 2, self.nlanes, int_min, int_max, int_max//2) + + for x, d in zip(rdata, divisors): + data = self._data(x) + vdata = self.load(data) + data_divc = [trunc_div(a, d) for a in data] + divisor = self.divisor(d) + divc = self.divc(vdata, divisor) + assert divc == data_divc + + if not self._is_signed(): + return + + safe_neg = lambda x: -x-1 if -x > int_max else -x + # test round divison for signed integers + for x, d in zip(rdata, divisors): + d_neg = safe_neg(d) + data = self._data(x) + data_neg = [safe_neg(a) for a in data] + vdata = self.load(data) + vdata_neg = self.load(data_neg) + divisor = self.divisor(d) + divisor_neg = self.divisor(d_neg) + + # round towards zero + data_divc = [trunc_div(a, d_neg) for a in data] + divc = self.divc(vdata, divisor_neg) + assert divc == data_divc + data_divc = [trunc_div(a, d) for a in data_neg] + divc = self.divc(vdata_neg, divisor) + assert divc == data_divc + + # test truncate sign if the dividend is zero + vzero = self.zero() + for d in (-1, -10, -100, int_min//2, int_min): + divisor = self.divisor(d) + divc = self.divc(vzero, divisor) + assert divc == vzero + + # test overflow + vmin = self.setall(int_min) + divisor = self.divisor(-1) + divc = self.divc(vmin, divisor) + assert divc == vmin + def test_arithmetic_reduce_sum(self): """ Test reduce sum intrinics: From 8aae310556359517394df263f3b5819eff8c3daa Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 4 Feb 2021 21:35:03 +0000 Subject: [PATCH 0716/1270] SIMD, BUG: fix passing immediate values to npyv_setall_u64 on SSE/32-bit --- numpy/core/src/common/simd/sse/misc.h | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/numpy/core/src/common/simd/sse/misc.h b/numpy/core/src/common/simd/sse/misc.h index 7ba47bc688df..1099c491d072 100644 --- a/numpy/core/src/common/simd/sse/misc.h +++ b/numpy/core/src/common/simd/sse/misc.h @@ -18,21 +18,16 @@ #define npyv_zero_f64 _mm_setzero_pd // vector with a specific value set to all lanes -#define npyv_setall_u8(VAL) _mm_set1_epi8((char)VAL) -#define npyv_setall_s8(VAL) _mm_set1_epi8((char)VAL) -#define npyv_setall_u16(VAL) _mm_set1_epi16((short)VAL) -#define npyv_setall_s16(VAL) _mm_set1_epi16((short)VAL) -#define npyv_setall_u32(VAL) _mm_set1_epi32((int)VAL) -#define npyv_setall_s32(VAL) _mm_set1_epi32(VAL) -#if !defined(__x86_64__) && !defined(_M_X64) - #define npyv_setall_u64(VAL) _mm_set_epi32((int)(VAL >> 32), (int)VAL, (int)(VAL >> 32), (int)VAL) - #define npyv_setall_s64 npyv_setall_u64 -#else - #define npyv_setall_u64(VAL) _mm_set1_epi64x(VAL) - #define npyv_setall_s64(VAL) _mm_set1_epi64x(VAL) -#endif -#define npyv_setall_f32(VAL) _mm_set1_ps(VAL) -#define npyv_setall_f64(VAL) _mm_set1_pd(VAL) +#define npyv_setall_u8(VAL) _mm_set1_epi8((char)(VAL)) +#define npyv_setall_s8(VAL) _mm_set1_epi8((char)(VAL)) +#define npyv_setall_u16(VAL) _mm_set1_epi16((short)(VAL)) +#define npyv_setall_s16(VAL) _mm_set1_epi16((short)(VAL)) +#define npyv_setall_u32(VAL) _mm_set1_epi32((int)(VAL)) +#define npyv_setall_s32(VAL) _mm_set1_epi32((int)(VAL)) +#define npyv_setall_u64(VAL) _mm_set1_epi64x((npy_int64)(VAL)) +#define npyv_setall_s64(VAL) _mm_set1_epi64x((npy_int64)(VAL)) +#define npyv_setall_f32 _mm_set1_ps +#define npyv_setall_f64 _mm_set1_pd /** * vector with specific values set to each lane and From 
64cdbccda17ec9e1d04495adc4e2b1bdcdd39ba5 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 8 Mar 2021 07:45:23 +0000 Subject: [PATCH 0717/1270] MAINT: Bump sphinx from 3.5.1 to 3.5.2 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 3.5.1 to 3.5.2. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/3.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v3.5.1...v3.5.2) Signed-off-by: dependabot-preview[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index a34a894180a0..3d74d7b87b47 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx==3.5.1 +sphinx==3.5.2 numpydoc==1.1.0 ipython scipy From b648095952bf7d7ba88b13f416dca798ab3fd2e7 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 8 Mar 2021 07:45:56 +0000 Subject: [PATCH 0718/1270] MAINT: Bump hypothesis from 6.3.4 to 6.6.0 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.3.4 to 6.6.0. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.3.4...hypothesis-python-6.6.0) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 02caa8748620..ee37626df217 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.22 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.3.4 +hypothesis==6.6.0 pytest==6.2.2 pytz==2021.1 pytest-cov==2.11.1 From d8dabe53ff0850e1a34a39bd56b6423678a40067 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 8 Mar 2021 07:48:04 +0000 Subject: [PATCH 0719/1270] MAINT: Bump pycodestyle from 2.5.0 to 2.6.0 Bumps [pycodestyle](https://github.com/PyCQA/pycodestyle) from 2.5.0 to 2.6.0. 
- [Release notes](https://github.com/PyCQA/pycodestyle/releases) - [Changelog](https://github.com/PyCQA/pycodestyle/blob/master/CHANGES.txt) - [Commits](https://github.com/PyCQA/pycodestyle/compare/2.5.0...2.6.0) Signed-off-by: dependabot-preview[bot] --- linter_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linter_requirements.txt b/linter_requirements.txt index b5b49bc8cf72..9df85e7c3b1b 100644 --- a/linter_requirements.txt +++ b/linter_requirements.txt @@ -1,2 +1,2 @@ -pycodestyle==2.5.0 +pycodestyle==2.6.0 GitPython==3.1.13 \ No newline at end of file From 8045d1c79bd0615c8760d2423976196a1ca32d65 Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Mon, 8 Mar 2021 22:25:15 +1300 Subject: [PATCH 0720/1270] MAINT: OrderedDict is no longer necessary from Python 3.7 --- doc/source/reference/random/performance.py | 9 ++++--- numpy/core/_ufunc_config.py | 28 ++++++++++------------ numpy/core/records.py | 15 ++---------- numpy/testing/_private/parameterized.py | 14 +---------- 4 files changed, 19 insertions(+), 47 deletions(-) diff --git a/doc/source/reference/random/performance.py b/doc/source/reference/random/performance.py index 28a42eb0dce2..3267197f53fb 100644 --- a/doc/source/reference/random/performance.py +++ b/doc/source/reference/random/performance.py @@ -1,4 +1,3 @@ -from collections import OrderedDict from timeit import repeat import pandas as pd @@ -8,7 +7,7 @@ PRNGS = [MT19937, PCG64, Philox, SFC64] -funcs = OrderedDict() +funcs = {} integers = 'integers(0, 2**{bits},size=1000000, dtype="uint{bits}")' funcs['32-bit Unsigned Ints'] = integers.format(bits=32) funcs['64-bit Unsigned Ints'] = integers.format(bits=64) @@ -26,10 +25,10 @@ """ test = "rg.{func}" -table = OrderedDict() +table = {} for prng in PRNGS: print(prng) - col = OrderedDict() + col = {} for key in funcs: t = repeat(test.format(func=funcs[key]), setup.format(prng=prng().__class__.__name__), @@ -38,7 +37,7 @@ col = pd.Series(col) table[prng().__class__.__name__] = col -npfuncs = OrderedDict() +npfuncs = {} npfuncs.update(funcs) npfuncs['32-bit Unsigned Ints'] = 'randint(2**32,dtype="uint32",size=1000000)' npfuncs['64-bit Unsigned Ints'] = 'randint(2**64,dtype="uint64",size=1000000)' diff --git a/numpy/core/_ufunc_config.py b/numpy/core/_ufunc_config.py index 454d911cf575..b40e7445ec5b 100644 --- a/numpy/core/_ufunc_config.py +++ b/numpy/core/_ufunc_config.py @@ -98,10 +98,9 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): File "", line 1, in FloatingPointError: overflow encountered in short_scalars - >>> from collections import OrderedDict >>> old_settings = np.seterr(all='print') - >>> OrderedDict(np.geterr()) - OrderedDict([('divide', 'print'), ('over', 'print'), ('under', 'print'), ('invalid', 'print')]) + >>> np.geterr() + {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} >>> np.int16(32000) * np.int16(3) 30464 @@ -153,15 +152,14 @@ def geterr(): Examples -------- - >>> from collections import OrderedDict - >>> sorted(np.geterr().items()) - [('divide', 'warn'), ('invalid', 'warn'), ('over', 'warn'), ('under', 'ignore')] + >>> np.geterr() + {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} >>> np.arange(3.) / np.arange(3.) 
array([nan, 1., 1.]) >>> oldsettings = np.seterr(all='warn', over='raise') - >>> OrderedDict(sorted(np.geterr().items())) - OrderedDict([('divide', 'warn'), ('invalid', 'warn'), ('over', 'raise'), ('under', 'warn')]) + >>> np.geterr() + {'divide': 'warn', 'over': 'raise', 'under': 'warn', 'invalid': 'warn'} >>> np.arange(3.) / np.arange(3.) array([nan, 1., 1.]) @@ -270,7 +268,6 @@ def seterrcall(func): >>> saved_handler = np.seterrcall(err_handler) >>> save_err = np.seterr(all='call') - >>> from collections import OrderedDict >>> np.array([1, 2, 3]) / 0.0 Floating point error (divide by zero), with flag 1 @@ -278,8 +275,8 @@ def seterrcall(func): >>> np.seterrcall(saved_handler) - >>> OrderedDict(sorted(np.seterr(**save_err).items())) - OrderedDict([('divide', 'call'), ('invalid', 'call'), ('over', 'call'), ('under', 'call')]) + >>> np.seterr(**save_err) + {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'} Log error message: @@ -298,8 +295,8 @@ def seterrcall(func): >>> np.seterrcall(saved_handler) - >>> OrderedDict(sorted(np.seterr(**save_err).items())) - OrderedDict([('divide', 'log'), ('invalid', 'log'), ('over', 'log'), ('under', 'log')]) + >>> np.seterr(**save_err) + {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'} """ if func is not None and not isinstance(func, collections.abc.Callable): @@ -402,7 +399,6 @@ class errstate(contextlib.ContextDecorator): Examples -------- - >>> from collections import OrderedDict >>> olderr = np.seterr(all='ignore') # Set error handling to known state. >>> np.arange(3) / 0. @@ -421,8 +417,8 @@ class errstate(contextlib.ContextDecorator): Outside the context the error handling behavior has not changed: - >>> OrderedDict(sorted(np.geterr().items())) - OrderedDict([('divide', 'ignore'), ('invalid', 'ignore'), ('over', 'ignore'), ('under', 'ignore')]) + >>> np.geterr() + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} """ diff --git a/numpy/core/records.py b/numpy/core/records.py index a626a05893d8..a32f5abf1524 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -35,7 +35,7 @@ """ import os import warnings -from collections import Counter, OrderedDict +from collections import Counter from contextlib import nullcontext from . 
import numeric as sb @@ -75,23 +75,12 @@ numfmt = nt.typeDict -# taken from OrderedDict recipes in the Python documentation -# https://docs.python.org/3.3/library/collections.html#ordereddict-examples-and-recipes -class _OrderedCounter(Counter, OrderedDict): - """Counter that remembers the order elements are first encountered""" - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, OrderedDict(self)) - - def __reduce__(self): - return self.__class__, (OrderedDict(self),) - def find_duplicate(list): """Find duplication in a list, return a list of duplicated elements""" return [ item - for item, counts in _OrderedCounter(list).items() + for item, counts in Counter(list).items() if counts > 1 ] diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py index 55a204e3f887..db9629a94680 100644 --- a/numpy/testing/_private/parameterized.py +++ b/numpy/testing/_private/parameterized.py @@ -37,11 +37,6 @@ from types import MethodType from collections import namedtuple -try: - from collections import OrderedDict as MaybeOrderedDict -except ImportError: - MaybeOrderedDict = dict - from unittest import TestCase _param = namedtuple("param", "args kwargs") @@ -113,13 +108,6 @@ def __repr__(self): return "param(*%r, **%r)" %self -class QuietOrderedDict(MaybeOrderedDict): - """ When OrderedDict is available, use it to make sure that the kwargs in - doc strings are consistently ordered. """ - __str__ = dict.__str__ - __repr__ = dict.__repr__ - - def parameterized_argument_value_pairs(func, p): """Return tuples of parameterized arguments and their values. @@ -165,7 +153,7 @@ def parameterized_argument_value_pairs(func, p): ]) seen_arg_names = {n for (n, _) in result} - keywords = QuietOrderedDict(sorted([ + keywords = dict(sorted([ (name, p.kwargs[name]) for name in p.kwargs if name not in seen_arg_names From 0edf15195fe54472d02985f1d7fea9f80a7f5ad6 Mon Sep 17 00:00:00 2001 From: InessaPawson Date: Tue, 9 Mar 2021 07:30:17 +1000 Subject: [PATCH 0721/1270] Update README.md Co-authored-by: Matti Picus --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index e84066c22db1..593c6f937623 100644 --- a/README.md +++ b/README.md @@ -44,7 +44,9 @@ Writing code isn’t the only way to contribute to NumPy. You can also: - serve as a community coordinator - write grant proposals and help with other fundraising efforts -If you’re unsure where to start or how your skills fit in, reach out! You can ask on the mailing list or here, on GitHub, by opening a new issue or leaving a comment on a relevant issue that is already open. +If you’re unsure where to start or how your skills fit in, reach out! You can +ask on the mailing list or here, on GitHub, by opening a new issue or leaving a +comment on a relevant issue that is already open. Those are our preferred channels (open source is open by nature), but if you’d like to speak to us in private first, contact our community coordinators at numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for an invite). From d3388145860c1b2aff958cade49532cc76254e7c Mon Sep 17 00:00:00 2001 From: InessaPawson Date: Tue, 9 Mar 2021 07:30:42 +1000 Subject: [PATCH 0722/1270] Update README.md Co-authored-by: Matti Picus --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 593c6f937623..4bab18c95d91 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ Writing code isn’t the only way to contribute to NumPy. 
You can also: - maintain and improve our website numpy.org - develop graphic design for our brand assets and promotional materials - translate website content -- serve as a community coordinator +- help with outreach and onboard new contributors - write grant proposals and help with other fundraising efforts If you’re unsure where to start or how your skills fit in, reach out! You can From 2506e80b86c6fee443c377bc4c4250c58d89b0af Mon Sep 17 00:00:00 2001 From: InessaPawson Date: Tue, 9 Mar 2021 07:31:17 +1000 Subject: [PATCH 0723/1270] Update README.md Co-authored-by: Matti Picus --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 4bab18c95d91..4d18f959d35d 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,10 @@ Call for Contributions The NumPy project welcomes your expertise and enthusiasm! -Small improvements or fixes are always appreciated. (Issues labeled as easy may be a good starting point.) If you are considering larger contributions to the source code, please contact us through the [mailing list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. +Small improvements or fixes are always appreciated. (Issues labeled as "good +first issue" may be a good starting point.) If you are considering larger +contributions to the source code, please contact us through the [mailing +list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. Writing code isn’t the only way to contribute to NumPy. You can also: - review pull requests From 1164c12557cee2540edb76c68b0f0cfa5f4538cc Mon Sep 17 00:00:00 2001 From: InessaPawson Date: Tue, 9 Mar 2021 07:31:48 +1000 Subject: [PATCH 0724/1270] Update README.md Co-authored-by: Matti Picus --- README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4d18f959d35d..a66bdc00158a 100644 --- a/README.md +++ b/README.md @@ -53,9 +53,12 @@ comment on a relevant issue that is already open. Those are our preferred channels (open source is open by nature), but if you’d like to speak to us in private first, contact our community coordinators at numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for an invite). -We also have a biweekly community call, details of which are announced on the mailing list. You are very welcome to join. +We also have a biweekly community call, details of which are announced on the +mailing list. You are very welcome to join. -If you are new to contributing to open source, we highly recommend reading [this guide](https://opensource.guide/how-to-contribute/). +If you are new to contributing to open source, [this +guide](https://opensource.guide/how-to-contribute/) helps explain why, what, +and how to successfully get involved. From 0b26d7cb49d6727bd4172d7837c2cff3da5c4435 Mon Sep 17 00:00:00 2001 From: InessaPawson Date: Tue, 9 Mar 2021 07:32:38 +1000 Subject: [PATCH 0725/1270] Update README.md Co-authored-by: Matti Picus --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a66bdc00158a..a156bb04860b 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,10 @@ If you’re unsure where to start or how your skills fit in, reach out! You can ask on the mailing list or here, on GitHub, by opening a new issue or leaving a comment on a relevant issue that is already open. 
-Those are our preferred channels (open source is open by nature), but if you’d like to speak to us in private first, contact our community coordinators at numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for an invite). +Our preferred channels of communication are all public, but if you’d like to +speak to us in private first, contact our community coordinators at +numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for +an invite). We also have a biweekly community call, details of which are announced on the mailing list. You are very welcome to join. From 72261a2e3291bb87abe9fbd75a5197cfc0b41f1f Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 9 Mar 2021 01:13:48 +0200 Subject: [PATCH 0726/1270] use pypy nightly to work around bug --- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 55013f3cf3fb..7f155097b333 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -211,7 +211,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v2 with: - python-version: pypy-3.7 + python-version: pypy-3.7-nightly - uses: ./.github/actions sdist: From ada38d2229f87823fd3b2a9d8ed104c3c22bbf4e Mon Sep 17 00:00:00 2001 From: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Date: Tue, 9 Mar 2021 16:52:31 +0100 Subject: [PATCH 0727/1270] DOC: Clarify docs for fliplr() / flipud() --- numpy/lib/twodim_base.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index afd50b9c6881..58a097f99ba5 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -47,10 +47,11 @@ def _flip_dispatcher(m): @array_function_dispatch(_flip_dispatcher) def fliplr(m): """ - Flip array in the left/right direction. + Reverse the order of elements along axis 1 (left/right). - Flip the entries in each row in the left/right direction. - Columns are preserved, but appear in a different order than before. + For a 2-D array, this flips the entries in each row in the left/right + direction. Columns are preserved, but appear in a different order than + before. Parameters ---------- @@ -66,11 +67,13 @@ def fliplr(m): See Also -------- flipud : Flip array in the up/down direction. + flip : Flip array in one or more dimesions. rot90 : Rotate array counterclockwise. Notes ----- - Equivalent to m[:,::-1]. Requires the array to be at least 2-D. + Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``. + Requires the array to be at least 2-D. Examples -------- @@ -98,10 +101,10 @@ def fliplr(m): @array_function_dispatch(_flip_dispatcher) def flipud(m): """ - Flip array in the up/down direction. + Reverse the order of elements along axis 0 (up/down). - Flip the entries in each column in the up/down direction. - Rows are preserved, but appear in a different order than before. + For a 2-D array, this flips the entries in each column in the up/down + direction. Rows are preserved, but appear in a different order than before. Parameters ---------- @@ -117,12 +120,13 @@ def flipud(m): See Also -------- fliplr : Flip array in the left/right direction. + flip : Flip array in one or more dimesions. rot90 : Rotate array counterclockwise. Notes ----- - Equivalent to ``m[::-1,...]``. - Does not require the array to be two-dimensional. + Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``. + Requires the array to be at least 1-D. 
Examples -------- From 482359aac063b0d56310edfb3ce14ea17396152e Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 10 Mar 2021 19:50:13 +0530 Subject: [PATCH 0728/1270] DOC: Added documentation for linter (#18423) --- doc/source/dev/development_environment.rst | 31 ++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 013414568a2c..fb1b8cd6ae45 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -188,6 +188,35 @@ For more extensive information, see :ref:`testing-guidelines` *Note: do not run the tests from the root directory of your numpy git repo without ``runtests.py``, that will result in strange test errors.* +Running Linting +--------------- +Lint checks can be performed on newly added lines of Python code. + +Install all dependent packages using pip:: + + $ python -m pip install -r linter_requirements.txt + +To run lint checks before committing new code, run:: + + $ python runtests.py --lint uncommitted + +To check all changes in newly added Python code of current branch with target branch, run:: + + $ python runtests.py --lint main + +If there are no errors, the script exits with no message. In case of errors:: + + $ python runtests.py --lint main + ./numpy/core/tests/test_scalarmath.py:34:5: E303 too many blank lines (3) + 1 E303 too many blank lines (3) + +It is advisable to run lint checks before pushing commits to a remote branch +since the linter runs as part of the CI pipeline. + +For more details on Style Guidelines: + + - `Python Style Guide`_ + - `C Style Guide`_ Rebuilding & cleaning the workspace ----------------------------------- @@ -264,6 +293,8 @@ typically packaged as ``python-dbg``) is highly recommended. .. _virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/ .. _Waf: https://code.google.com/p/waf/ .. _`match test names using python operators`: https://docs.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests +.. _`Python Style Guide`: https://www.python.org/dev/peps/pep-0008/ +.. _`C Style Guide`: https://numpy.org/neps/nep-0045-c_style_guide.html Understanding the code & getting started ---------------------------------------- From fd0ba3f1a7a71ca40c3a47d715af00cdb74a863d Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 10 Mar 2021 16:48:01 -0600 Subject: [PATCH 0729/1270] MAINT: Remove strange `op == NULL` check While there is a weird scary comment about it being important, it doesn't actually seem important. The ufunc machinery used to be able to return NotImplemented possibly that has something to do with it... 
--- numpy/core/src/multiarray/number.c | 34 ++---------------------------- 1 file changed, 2 insertions(+), 32 deletions(-) diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index a629dfe97fde..78f21db4fe67 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -227,10 +227,7 @@ PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis, { PyObject *args, *ret = NULL, *meth; PyObject *kwds; - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } + args = Py_BuildValue("(Oi)", m1, axis); kwds = _get_keywords(rtype, out); meth = PyObject_GetAttrString(op, "reduce"); @@ -250,10 +247,7 @@ PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, { PyObject *args, *ret = NULL, *meth; PyObject *kwds; - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } + args = Py_BuildValue("(Oi)", m1, axis); kwds = _get_keywords(rtype, out); meth = PyObject_GetAttrString(op, "accumulate"); @@ -270,28 +264,12 @@ PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, NPY_NO_EXPORT PyObject * PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op) { - /* - * I suspect that the next few lines are buggy and cause NotImplemented to - * be returned at weird times... but if we raise an error here, then - * *everything* breaks. (Like, 'arange(10) + 1' and just - * 'repr(arange(10))' both blow up with an error here.) Not sure what's - * going on with that, but I'll leave it alone for now. - njs, 2015-06-21 - */ - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - return PyObject_CallFunctionObjArgs(op, m1, m2, NULL); } NPY_NO_EXPORT PyObject * PyArray_GenericUnaryFunction(PyArrayObject *m1, PyObject *op) { - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } return PyObject_CallFunctionObjArgs(op, m1, NULL); } @@ -299,20 +277,12 @@ static PyObject * PyArray_GenericInplaceBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op) { - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } return PyObject_CallFunctionObjArgs(op, m1, m2, m1, NULL); } static PyObject * PyArray_GenericInplaceUnaryFunction(PyArrayObject *m1, PyObject *op) { - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } return PyObject_CallFunctionObjArgs(op, m1, m1, NULL); } From 1feda6ee96db40f05a83ebea7e41d2a01973975a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 10 Mar 2021 17:17:41 -0600 Subject: [PATCH 0730/1270] MAINT: Do not claim input to binops is `self` (array object) On the C-side binops only guarantee that one of the operands is an instance of `PyArrayObject *`, but we do not know which one. 
Typing the first as `PyArrayObject *` is just misleading for almost no actual payoff --- numpy/core/src/multiarray/arrayobject.c | 23 +++++---- numpy/core/src/multiarray/calculation.c | 4 +- numpy/core/src/multiarray/number.c | 68 +++++++++++++------------ numpy/core/src/multiarray/number.h | 2 +- numpy/core/src/multiarray/temp_elide.c | 13 ++--- numpy/core/src/multiarray/temp_elide.h | 2 +- 6 files changed, 58 insertions(+), 54 deletions(-) diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index 1326140d5f32..e7fbb88cd282 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -1356,11 +1356,13 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) switch (cmp_op) { case Py_LT: RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other); - result = PyArray_GenericBinaryFunction(self, other, n_ops.less); + result = PyArray_GenericBinaryFunction( + (PyObject *)self, other, n_ops.less); break; case Py_LE: RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other); - result = PyArray_GenericBinaryFunction(self, other, n_ops.less_equal); + result = PyArray_GenericBinaryFunction( + (PyObject *)self, other, n_ops.less_equal); break; case Py_EQ: RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other); @@ -1410,9 +1412,8 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) return result; } - result = PyArray_GenericBinaryFunction(self, - (PyObject *)other, - n_ops.equal); + result = PyArray_GenericBinaryFunction( + (PyObject *)self, (PyObject *)other, n_ops.equal); break; case Py_NE: RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other); @@ -1462,18 +1463,18 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) return result; } - result = PyArray_GenericBinaryFunction(self, (PyObject *)other, - n_ops.not_equal); + result = PyArray_GenericBinaryFunction( + (PyObject *)self, (PyObject *)other, n_ops.not_equal); break; case Py_GT: RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other); - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater); + result = PyArray_GenericBinaryFunction( + (PyObject *)self, other, n_ops.greater); break; case Py_GE: RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other); - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater_equal); + result = PyArray_GenericBinaryFunction( + (PyObject *)self, other, n_ops.greater_equal); break; default: Py_INCREF(Py_NotImplemented); diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c index 43d88271b10d..9fe05f7a393d 100644 --- a/numpy/core/src/multiarray/calculation.c +++ b/numpy/core/src/multiarray/calculation.c @@ -423,7 +423,7 @@ __New_PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, return NULL; } arr2 = (PyArrayObject *)PyArray_EnsureAnyArray( - PyArray_GenericBinaryFunction(arr1, obj3, n_ops.multiply)); + PyArray_GenericBinaryFunction((PyObject *)arr1, obj3, n_ops.multiply)); Py_DECREF(arr1); Py_DECREF(obj3); if (arr2 == NULL) { @@ -1211,7 +1211,7 @@ PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) n_ops.conjugate); } else { - return PyArray_GenericBinaryFunction(self, + return PyArray_GenericBinaryFunction((PyObject *)self, (PyObject *)out, n_ops.conjugate); } diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index a629dfe97fde..70729b4d79f7 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -268,7 +268,7 @@ PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, 
NPY_NO_EXPORT PyObject * -PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op) +PyArray_GenericBinaryFunction(PyObject *m1, PyObject *m2, PyObject *op) { /* * I suspect that the next few lines are buggy and cause NotImplemented to @@ -317,7 +317,7 @@ PyArray_GenericInplaceUnaryFunction(PyArrayObject *m1, PyObject *op) } static PyObject * -array_add(PyArrayObject *m1, PyObject *m2) +array_add(PyObject *m1, PyObject *m2) { PyObject *res; @@ -329,7 +329,7 @@ array_add(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_subtract(PyArrayObject *m1, PyObject *m2) +array_subtract(PyObject *m1, PyObject *m2) { PyObject *res; @@ -341,7 +341,7 @@ array_subtract(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_multiply(PyArrayObject *m1, PyObject *m2) +array_multiply(PyObject *m1, PyObject *m2) { PyObject *res; @@ -353,14 +353,14 @@ array_multiply(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_remainder(PyArrayObject *m1, PyObject *m2) +array_remainder(PyObject *m1, PyObject *m2) { BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_remainder, array_remainder); return PyArray_GenericBinaryFunction(m1, m2, n_ops.remainder); } static PyObject * -array_divmod(PyArrayObject *m1, PyObject *m2) +array_divmod(PyObject *m1, PyObject *m2) { BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_divmod, array_divmod); return PyArray_GenericBinaryFunction(m1, m2, n_ops.divmod); @@ -368,7 +368,7 @@ array_divmod(PyArrayObject *m1, PyObject *m2) /* Need this to be version dependent on account of the slot check */ static PyObject * -array_matrix_multiply(PyArrayObject *m1, PyObject *m2) +array_matrix_multiply(PyObject *m1, PyObject *m2) { BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_matrix_multiply, array_matrix_multiply); return PyArray_GenericBinaryFunction(m1, m2, n_ops.matmul); @@ -472,15 +472,16 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent) * the result is in value (can be NULL if an error occurred) */ static int -fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace, +fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **value) { double exponent; NPY_SCALARKIND kind; /* NPY_NOSCALAR is not scalar */ - if (PyArray_Check(a1) && - !PyArray_ISOBJECT(a1) && + if (PyArray_Check(o1) && + !PyArray_ISOBJECT((PyArrayObject *)o1) && ((kind=is_scalar_with_conversion(o2, &exponent))>0)) { + PyArrayObject *a1 = (PyArrayObject *)o1; PyObject *fastop = NULL; if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) { if (exponent == 1.0) { @@ -544,7 +545,7 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace, } static PyObject * -array_power(PyArrayObject *a1, PyObject *o2, PyObject *modulo) +array_power(PyObject *a1, PyObject *o2, PyObject *modulo) { PyObject *value = NULL; @@ -635,7 +636,7 @@ array_invert(PyArrayObject *m1) } static PyObject * -array_left_shift(PyArrayObject *m1, PyObject *m2) +array_left_shift(PyObject *m1, PyObject *m2) { PyObject *res; @@ -647,7 +648,7 @@ array_left_shift(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_right_shift(PyArrayObject *m1, PyObject *m2) +array_right_shift(PyObject *m1, PyObject *m2) { PyObject *res; @@ -659,7 +660,7 @@ array_right_shift(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_bitwise_and(PyArrayObject *m1, PyObject *m2) +array_bitwise_and(PyObject *m1, PyObject *m2) { PyObject *res; @@ -671,7 +672,7 @@ array_bitwise_and(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_bitwise_or(PyArrayObject *m1, PyObject *m2) +array_bitwise_or(PyObject *m1, PyObject *m2) { PyObject *res; @@ 
-683,7 +684,7 @@ array_bitwise_or(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_bitwise_xor(PyArrayObject *m1, PyObject *m2) +array_bitwise_xor(PyObject *m1, PyObject *m2) { PyObject *res; @@ -734,7 +735,7 @@ array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo INPLACE_GIVE_UP_IF_NEEDED( a1, o2, nb_inplace_power, array_inplace_power); - if (fast_scalar_power(a1, o2, 1, &value) != 0) { + if (fast_scalar_power((PyObject *)a1, o2, 1, &value) != 0) { value = PyArray_GenericInplaceBinaryFunction(a1, o2, n_ops.power); } return value; @@ -781,7 +782,7 @@ array_inplace_bitwise_xor(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_floor_divide(PyArrayObject *m1, PyObject *m2) +array_floor_divide(PyObject *m1, PyObject *m2) { PyObject *res; @@ -793,13 +794,14 @@ array_floor_divide(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_true_divide(PyArrayObject *m1, PyObject *m2) +array_true_divide(PyObject *m1, PyObject *m2) { PyObject *res; + PyArrayObject *a1 = (PyArrayObject *)m1; BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_true_divide, array_true_divide); if (PyArray_CheckExact(m1) && - (PyArray_ISFLOAT(m1) || PyArray_ISCOMPLEX(m1)) && + (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) && try_binary_elide(m1, m2, &array_inplace_true_divide, &res, 0)) { return res; } @@ -930,22 +932,22 @@ array_index(PyArrayObject *v) NPY_NO_EXPORT PyNumberMethods array_as_number = { - .nb_add = (binaryfunc)array_add, - .nb_subtract = (binaryfunc)array_subtract, - .nb_multiply = (binaryfunc)array_multiply, - .nb_remainder = (binaryfunc)array_remainder, - .nb_divmod = (binaryfunc)array_divmod, + .nb_add = array_add, + .nb_subtract = array_subtract, + .nb_multiply = array_multiply, + .nb_remainder = array_remainder, + .nb_divmod = array_divmod, .nb_power = (ternaryfunc)array_power, .nb_negative = (unaryfunc)array_negative, .nb_positive = (unaryfunc)array_positive, .nb_absolute = (unaryfunc)array_absolute, .nb_bool = (inquiry)_array_nonzero, .nb_invert = (unaryfunc)array_invert, - .nb_lshift = (binaryfunc)array_left_shift, - .nb_rshift = (binaryfunc)array_right_shift, - .nb_and = (binaryfunc)array_bitwise_and, - .nb_xor = (binaryfunc)array_bitwise_xor, - .nb_or = (binaryfunc)array_bitwise_or, + .nb_lshift = array_left_shift, + .nb_rshift = array_right_shift, + .nb_and = array_bitwise_and, + .nb_xor = array_bitwise_xor, + .nb_or = array_bitwise_or, .nb_int = (unaryfunc)array_int, .nb_float = (unaryfunc)array_float, @@ -962,11 +964,11 @@ NPY_NO_EXPORT PyNumberMethods array_as_number = { .nb_inplace_xor = (binaryfunc)array_inplace_bitwise_xor, .nb_inplace_or = (binaryfunc)array_inplace_bitwise_or, - .nb_floor_divide = (binaryfunc)array_floor_divide, - .nb_true_divide = (binaryfunc)array_true_divide, + .nb_floor_divide = array_floor_divide, + .nb_true_divide = array_true_divide, .nb_inplace_floor_divide = (binaryfunc)array_inplace_floor_divide, .nb_inplace_true_divide = (binaryfunc)array_inplace_true_divide, - .nb_matrix_multiply = (binaryfunc)array_matrix_multiply, + .nb_matrix_multiply = array_matrix_multiply, .nb_inplace_matrix_multiply = (binaryfunc)array_inplace_matrix_multiply, }; diff --git a/numpy/core/src/multiarray/number.h b/numpy/core/src/multiarray/number.h index 643241b3d02f..4f426f964ca7 100644 --- a/numpy/core/src/multiarray/number.h +++ b/numpy/core/src/multiarray/number.h @@ -56,7 +56,7 @@ NPY_NO_EXPORT PyObject * _PyArray_GetNumericOps(void); NPY_NO_EXPORT PyObject * -PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op); 
+PyArray_GenericBinaryFunction(PyObject *m1, PyObject *m2, PyObject *op); NPY_NO_EXPORT PyObject * PyArray_GenericUnaryFunction(PyArrayObject *m1, PyObject *op); diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c index b19dee418821..2b4621744427 100644 --- a/numpy/core/src/multiarray/temp_elide.c +++ b/numpy/core/src/multiarray/temp_elide.c @@ -274,13 +274,14 @@ check_callers(int * cannot) * "cannot" is set to true if it cannot be done even with swapped arguments */ static int -can_elide_temp(PyArrayObject * alhs, PyObject * orhs, int * cannot) +can_elide_temp(PyObject *olhs, PyObject *orhs, int *cannot) { /* * to be a candidate the array needs to have reference count 1, be an exact * array of a basic type, own its data and size larger than threshold */ - if (Py_REFCNT(alhs) != 1 || !PyArray_CheckExact(alhs) || + PyArrayObject *alhs = (PyArrayObject *)olhs; + if (Py_REFCNT(olhs) != 1 || !PyArray_CheckExact(olhs) || !PyArray_ISNUMBER(alhs) || !PyArray_CHKFLAGS(alhs, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(alhs) || @@ -328,22 +329,22 @@ can_elide_temp(PyArrayObject * alhs, PyObject * orhs, int * cannot) * try eliding a binary op, if commutative is true also try swapped arguments */ NPY_NO_EXPORT int -try_binary_elide(PyArrayObject * m1, PyObject * m2, +try_binary_elide(PyObject * m1, PyObject * m2, PyObject * (inplace_op)(PyArrayObject * m1, PyObject * m2), PyObject ** res, int commutative) { /* set when no elision can be done independent of argument order */ int cannot = 0; if (can_elide_temp(m1, m2, &cannot)) { - *res = inplace_op(m1, m2); + *res = inplace_op((PyArrayObject *)m1, m2); #if NPY_ELIDE_DEBUG != 0 puts("elided temporary in binary op"); #endif return 1; } else if (commutative && !cannot) { - if (can_elide_temp((PyArrayObject *)m2, (PyObject *)m1, &cannot)) { - *res = inplace_op((PyArrayObject *)m2, (PyObject *)m1); + if (can_elide_temp(m2, m1, &cannot)) { + *res = inplace_op((PyArrayObject *)m2, m1); #if NPY_ELIDE_DEBUG != 0 puts("elided temporary in commutative binary op"); #endif diff --git a/numpy/core/src/multiarray/temp_elide.h b/numpy/core/src/multiarray/temp_elide.h index d073adf28ec6..206bb025381e 100644 --- a/numpy/core/src/multiarray/temp_elide.h +++ b/numpy/core/src/multiarray/temp_elide.h @@ -8,7 +8,7 @@ NPY_NO_EXPORT int can_elide_temp_unary(PyArrayObject * m1); NPY_NO_EXPORT int -try_binary_elide(PyArrayObject * m1, PyObject * m2, +try_binary_elide(PyObject * m1, PyObject * m2, PyObject * (inplace_op)(PyArrayObject * m1, PyObject * m2), PyObject ** res, int commutative); From 3cd03898cff204aa2589735e39da7630b347c647 Mon Sep 17 00:00:00 2001 From: InessaPawson Date: Thu, 11 Mar 2021 18:52:05 +1000 Subject: [PATCH 0731/1270] Commit textual suggestions from PR review --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a156bb04860b..cc34f47bf6c7 100644 --- a/README.md +++ b/README.md @@ -32,8 +32,8 @@ Call for Contributions The NumPy project welcomes your expertise and enthusiasm! -Small improvements or fixes are always appreciated. (Issues labeled as "good -first issue" may be a good starting point.) If you are considering larger +Small improvements or fixes are always appreciated; issues labeled as "good +first issue" may be a good starting point. If you are considering larger contributions to the source code, please contact us through the [mailing list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. 
@@ -41,7 +41,7 @@ Writing code isn’t the only way to contribute to NumPy. You can also: - review pull requests - triage issues - develop tutorials, presentations, and other educational materials -- maintain and improve our website numpy.org +- maintain and improve [our website](https://github.com/numpy/numpy.org) - develop graphic design for our brand assets and promotional materials - translate website content - help with outreach and onboard new contributors From 989adad03995109f0b5daeaaf8fe92d9fd98d302 Mon Sep 17 00:00:00 2001 From: aerikpawson <45904740+aerikpawson@users.noreply.github.com> Date: Thu, 11 Mar 2021 20:43:53 +1000 Subject: [PATCH 0732/1270] Commit suggestions from PR review Removing the unofficial names of the logo colors. --- branding/logo/logoguidelines.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/branding/logo/logoguidelines.md b/branding/logo/logoguidelines.md index 44bd51ece6c7..0c37e3dd455e 100644 --- a/branding/logo/logoguidelines.md +++ b/branding/logo/logoguidelines.md @@ -4,7 +4,7 @@ These guidelines are meant to help keep the NumPy logo consistent and recognizab The primary logo is the horizontal option (logomark and text next to each other) and the secondary logo is the stacked version (logomark over text). I’ve also provided the logomark on its own (meaning it doesn’t have text). When in doubt, it’s preferable to use primary or secondary options over the logomark alone. ## Color -The full color options are a combo of Maximum Blue/rgb(77, 171, 207) and Han Blue/rgb(77, 119, 207), while light options are White/rgb(255, 255, 255) and dark options are Gunmetal/rgb(1, 50, 67). +The full color options are a combo of two shades of blue, rgb(77, 171, 207) and rgb(77, 119, 207), while light options are rgb(255, 255, 255) and dark options are rgb(1, 50, 67). Whenever possible, use the full color logos. One color logos (light or dark) are to be used when full color will not have enough contrast, usually when logos must be on colored backgrounds. From 80b2e5dcdcac7e52ce4b37ec5afaa5de005f9aa5 Mon Sep 17 00:00:00 2001 From: Mainak Debnath Date: Thu, 11 Mar 2021 16:19:39 +0530 Subject: [PATCH 0733/1270] added exception chaining in index_tricks.py and mrecords.py --- numpy/lib/index_tricks.py | 26 ++++++++++++++++++-------- numpy/ma/mrecords.py | 36 ++++++++++++++++++++---------------- 2 files changed, 38 insertions(+), 24 deletions(-) diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index 9d3de69ddc5d..fa7c518326ae 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -6,7 +6,7 @@ import numpy.core.numeric as _nx from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod, arange, ndim - ) +) from numpy.core.numerictypes import find_common_type, issubdtype import numpy.matrixlib as matrixlib @@ -25,7 +25,7 @@ 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', 'diag_indices', 'diag_indices_from' - ] +] def _ix__dispatcher(*args): @@ -106,6 +106,7 @@ def ix_(*args): out.append(new) return tuple(out) + class nd_grid: """ Construct a multi-dimensional "meshgrid". @@ -167,7 +168,7 @@ def __getitem__(self, key): typ = float if self.sparse: nn = [_nx.arange(_x, dtype=_t) - for _x, _t in zip(size, (typ,)*len(size))] + for _x, _t in zip(size, (typ,)*len(size))] else: nn = _nx.indices(size, typ) for k in range(len(size)): @@ -248,11 +249,14 @@ class MGridClass(nd_grid): array([-1. , -0.5, 0. , 0.5, 1. 
]) """ + def __init__(self): super(MGridClass, self).__init__(sparse=False) + mgrid = MGridClass() + class OGridClass(nd_grid): """ `nd_grid` instance which returns an open multi-dimensional "meshgrid". @@ -292,9 +296,11 @@ class OGridClass(nd_grid): [4]]), array([[0, 1, 2, 3, 4]])] """ + def __init__(self): super(OGridClass, self).__init__(sparse=True) + ogrid = OGridClass() @@ -357,7 +363,7 @@ def __getitem__(self, key): elif isinstance(item, str): if k != 0: raise ValueError("special directives must be the " - "first entry.") + "first entry.") if item in ('r', 'c'): matrix = True col = (item == 'c') @@ -376,8 +382,8 @@ def __getitem__(self, key): try: axis = int(item) continue - except (ValueError, TypeError): - raise ValueError("unknown special directive") + except (ValueError, TypeError) as e: + raise ValueError("unknown special directive") from e elif type(item) in ScalarType: newobj = array(item, ndmin=ndmin) scalars.append(len(objs)) @@ -420,6 +426,7 @@ def __len__(self): # etc. because otherwise we couldn't get the doc string to come out right # in help(r_) + class RClass(AxisConcatenator): """ Translates slice objects to concatenation along the first axis. @@ -518,8 +525,10 @@ class RClass(AxisConcatenator): def __init__(self): AxisConcatenator.__init__(self, 0) + r_ = RClass() + class CClass(AxisConcatenator): """ Translates slice objects to concatenation along the second axis. @@ -528,7 +537,7 @@ class CClass(AxisConcatenator): useful because of its common occurrence. In particular, arrays will be stacked along their last axis after being upgraded to at least 2-D with 1's post-pended to the shape (column vectors made out of 1-D arrays). - + See Also -------- column_stack : Stack 1-D arrays as columns into a 2-D array. @@ -751,6 +760,7 @@ def __getitem__(self, item): else: return item + index_exp = IndexExpression(maketuple=True) s_ = IndexExpression(maketuple=False) @@ -885,7 +895,7 @@ def fill_diagonal(a, val, wrap=False): # Explicit, fast formula for the common case. For 2-d arrays, we # accept rectangular ones. step = a.shape[1] + 1 - #This is needed to don't have tall matrix have the diagonal wrap. + # This is needed to don't have tall matrix have the diagonal wrap. if not wrap: end = a.shape[1] * a.shape[1] else: diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index 70087632e9f6..126bdd9dc8b0 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -13,23 +13,23 @@ # first place, and then rename the invalid fields with a trailing # underscore. Maybe we could just overload the parser function ? 
+from numpy.ma import ( + MAError, MaskedArray, masked, nomask, masked_array, getdata, + getmaskarray, filled +) +import numpy.ma as ma import warnings import numpy as np from numpy import ( - bool_, dtype, ndarray, recarray, array as narray - ) + bool_, dtype, ndarray, recarray, array as narray +) from numpy.core.records import ( - fromarrays as recfromarrays, fromrecords as recfromrecords - ) + fromarrays as recfromarrays, fromrecords as recfromrecords +) _byteorderconv = np.core.records._byteorderconv -import numpy.ma as ma -from numpy.ma import ( - MAError, MaskedArray, masked, nomask, masked_array, getdata, - getmaskarray, filled - ) _check_fill_value = ma.core._check_fill_value @@ -37,7 +37,7 @@ __all__ = [ 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords', 'fromtextfile', 'addfield', - ] +] reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] @@ -199,7 +199,8 @@ def __getattribute__(self, attr): try: res = fielddict[attr][:2] except (TypeError, KeyError) as e: - raise AttributeError(f'record array has no attribute {attr}') from e + raise AttributeError( + f'record array has no attribute {attr}') from e # So far, so good _localdict = ndarray.__getattribute__(self, '__dict__') _data = ndarray.view(self, _localdict['_baseclass']) @@ -273,8 +274,9 @@ def __setattr__(self, attr, val): # Let's try to set the field try: res = fielddict[attr][:2] - except (TypeError, KeyError): - raise AttributeError(f'record array has no attribute {attr}') + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e if val is masked: _fill_value = _localdict['_fill_value'] @@ -355,7 +357,7 @@ def __repr__(self): reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] reprstr.insert(0, 'masked_records(') reprstr.extend([fmt % (' fill_value', self.fill_value), - ' )']) + ' )']) return str("\n".join(reprstr)) def view(self, dtype=None, type=None): @@ -483,6 +485,7 @@ def __reduce__(self): (self.__class__, self._baseclass, (0,), 'b',), self.__getstate__()) + def _mrreconstruct(subtype, baseclass, baseshape, basetype,): """ Build a new MaskedArray from the information stored in a pickle. 
@@ -492,6 +495,7 @@ def _mrreconstruct(subtype, baseclass, baseshape, basetype,): _mask = ndarray.__new__(ndarray, baseshape, 'b1') return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + mrecarray = MaskedRecords @@ -752,7 +756,7 @@ def addfield(mrecord, newfield, newfieldname=None): newdata = recarray(_data.shape, newdtype) # Add the existing field [newdata.setfield(_data.getfield(*f), *f) - for f in _data.dtype.fields.values()] + for f in _data.dtype.fields.values()] # Add the new field newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) newdata = newdata.view(MaskedRecords) @@ -762,7 +766,7 @@ def addfield(mrecord, newfield, newfieldname=None): newmask = recarray(_data.shape, newmdtype) # Add the old masks [newmask.setfield(_mask.getfield(*f), *f) - for f in _mask.dtype.fields.values()] + for f in _mask.dtype.fields.values()] # Add the mask of the new field newmask.setfield(getmaskarray(newfield), *newmask.dtype.fields[newfieldname]) From 2fad7d21fa0d38368f345e5483f7e4594bff9723 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 11 Mar 2021 13:08:35 +0100 Subject: [PATCH 0734/1270] MAINT: Add annotations for `dtype.__getitem__`, `__mul__` and `names` --- numpy/__init__.pyi | 20 ++++++++++++++++++++ numpy/typing/tests/data/pass/dtype.py | 14 ++++++++++++++ numpy/typing/tests/data/reveal/dtype.py | 16 ++++++++++++++++ 3 files changed, 50 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9cdc4f2e1b72..2c6d885160a8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -736,6 +736,24 @@ class dtype(Generic[_DTypeScalar_co]): align: bool = ..., copy: bool = ..., ) -> dtype[void]: ... + + @overload + def __getitem__(self: dtype[void], key: List[str]) -> dtype[void]: ... + @overload + def __getitem__(self: dtype[void], key: str | int) -> dtype[Any]: ... + + # NOTE: In the future 1-based multiplications will also yield `void` dtypes + @overload + def __mul__(self, value: Literal[0]) -> None: ... # type: ignore[misc] + @overload + def __mul__(self: _DType, value: Literal[1]) -> _DType: ... + @overload + def __mul__(self, value: int) -> dtype[void]: ... + + # NOTE: `__rmul__` seems to be broken when used in combination with + # literals as of mypy 0.800. Set the return-type to `Any` for now. + def __rmul__(self, value: int) -> Any: ... + def __eq__(self, other: DTypeLike) -> bool: ... def __ne__(self, other: DTypeLike) -> bool: ... def __gt__(self, other: DTypeLike) -> bool: ... @@ -775,6 +793,8 @@ class dtype(Generic[_DTypeScalar_co]): @property def name(self) -> str: ... @property + def names(self) -> Optional[Tuple[str, ...]]: ... + @property def num(self) -> int: ... @property def shape(self) -> _Shape: ... 
diff --git a/numpy/typing/tests/data/pass/dtype.py b/numpy/typing/tests/data/pass/dtype.py index 4f93426f3a90..e849cfdd4e11 100644 --- a/numpy/typing/tests/data/pass/dtype.py +++ b/numpy/typing/tests/data/pass/dtype.py @@ -1,6 +1,7 @@ import numpy as np dtype_obj = np.dtype(np.str_) +void_dtype_obj = np.dtype([("f0", np.float64), ("f1", np.float32)]) np.dtype(dtype=np.int64) np.dtype(int) @@ -41,3 +42,16 @@ class Test: dtype_obj.subdtype dtype_obj.newbyteorder() dtype_obj.type +dtype_obj.name +dtype_obj.names + +dtype_obj * 0 +dtype_obj * 2 + +0 * dtype_obj +2 * dtype_obj + +void_dtype_obj["f0"] +void_dtype_obj[0] +void_dtype_obj[["f0", "f1"]] +void_dtype_obj[["f0"]] diff --git a/numpy/typing/tests/data/reveal/dtype.py b/numpy/typing/tests/data/reveal/dtype.py index b30a0ad6ebe3..215d89ead66c 100644 --- a/numpy/typing/tests/data/reveal/dtype.py +++ b/numpy/typing/tests/data/reveal/dtype.py @@ -1,6 +1,7 @@ import numpy as np dtype_obj: np.dtype[np.str_] +void_dtype_obj: np.dtype[np.void] reveal_type(np.dtype(np.float64)) # E: numpy.dtype[{float64}] reveal_type(np.dtype(np.int64)) # E: numpy.dtype[{int64}] @@ -39,3 +40,18 @@ reveal_type(dtype_obj.subdtype) # E: Union[Tuple[numpy.dtype[numpy.str_], builtins.tuple[builtins.int]], None] reveal_type(dtype_obj.newbyteorder()) # E: numpy.dtype[numpy.str_] reveal_type(dtype_obj.type) # E: Type[numpy.str_] +reveal_type(dtype_obj.name) # E: str +reveal_type(dtype_obj.names) # E: Union[builtins.tuple[builtins.str], None] + +reveal_type(dtype_obj * 0) # E: None +reveal_type(dtype_obj * 1) # E: numpy.dtype[numpy.str_] +reveal_type(dtype_obj * 2) # E: numpy.dtype[numpy.void] + +reveal_type(0 * dtype_obj) # E: Any +reveal_type(1 * dtype_obj) # E: Any +reveal_type(2 * dtype_obj) # E: Any + +reveal_type(void_dtype_obj["f0"]) # E: numpy.dtype[Any] +reveal_type(void_dtype_obj[0]) # E: numpy.dtype[Any] +reveal_type(void_dtype_obj[["f0", "f1"]]) # E: numpy.dtype[numpy.void] +reveal_type(void_dtype_obj[["f0"]]) # E: numpy.dtype[numpy.void] From 87a3925bd23326e5aeade707a04a0f29f73f1348 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Thu, 11 Mar 2021 23:18:02 +0530 Subject: [PATCH 0735/1270] MAINT: Added continue-on-error --- .github/workflows/build_test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 7f155097b333..540baa25d6f0 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -22,6 +22,7 @@ jobs: lint: if: "github.repository == 'numpy/numpy' && github.ref != 'refs/heads/main' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" runs-on: ubuntu-latest + continue-on-error: true steps: - uses: actions/checkout@v2 with: From abf593e91023aca8f1efbd74581a754cdeaf3593 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 11 Mar 2021 12:30:28 -0600 Subject: [PATCH 0736/1270] TST: Fix some uninitialized memory in the tests These don't typically show up, because of our caches, but if array-data cache is disabled (which can be convenient for valgrind), these do show up. 
--- numpy/core/tests/test_regression.py | 3 ++- numpy/core/tests/test_umath.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 5faa9923c568..09929330788a 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -2104,7 +2104,8 @@ def test_searchsorted_wrong_dtype(self): assert_raises(TypeError, np.searchsorted, a, 1.2) # Ticket #2066, similar problem: dtype = np.format_parser(['i4', 'i4'], [], []) - a = np.recarray((2, ), dtype) + a = np.recarray((2,), dtype) + a[...] = [(1, 2), (3, 4)] assert_raises(TypeError, np.searchsorted, a, 1) def test_complex64_alignment(self): diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index bc72aa862a73..2249c866caf5 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -1952,7 +1952,7 @@ class StoreArrayPrepareWrap(np.ndarray): _wrap_args = None _prepare_args = None def __new__(cls): - return np.empty(()).view(cls) + return np.zeros(()).view(cls) def __array_wrap__(self, obj, context): self._wrap_args = context[1] return obj From c5de5b5c2cf048e1556f31dfcfa031c8f624b98e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com> Date: Thu, 11 Mar 2021 23:59:16 +0100 Subject: [PATCH 0737/1270] BUG: Fixed ``where`` keyword for ``np.mean`` & ``np.var`` methods (gh-18560) * Fixed keyword bug * Added test case * Reverted to original notation * Added tests for var and std Closes gh-18552 --- numpy/core/_methods.py | 4 ++-- numpy/core/tests/test_multiarray.py | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index 1867ba68c4e6..09147fe5b7aa 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -165,7 +165,7 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): is_float16_result = False rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) - if rcount == 0 if where is True else umr_any(rcount == 0): + if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) # Cast bool, unsigned int, and int to float64 by default @@ -198,7 +198,7 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) # Make this warning show up on top. 
- if ddof >= rcount if where is True else umr_any(ddof >= rcount): + if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None): warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index b30fcb812aa5..cffb1af99c31 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -5720,6 +5720,15 @@ def test_mean_where(self): np.array(_res)) assert_allclose(np.mean(a, axis=_ax, where=_wh), np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[1.5, 5.5], [9.5, 13.5]] + assert_allclose(a3d.mean(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.mean(a3d, axis=2, where=_wh_partial), + np.array(_res)) + with pytest.warns(RuntimeWarning) as w: assert_allclose(a.mean(axis=1, where=wh_partial), np.array([np.nan, 5.5, 9.5, np.nan])) @@ -5795,6 +5804,15 @@ def test_var_where(self): np.array(_res)) assert_allclose(np.var(a, axis=_ax, where=_wh), np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.25, 0.25], [0.25, 0.25]] + assert_allclose(a3d.var(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.var(a3d, axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.var(a, axis=1, where=wh_full), np.var(a[wh_full].reshape((5, 3)), axis=1)) assert_allclose(np.var(a, axis=0, where=wh_partial), @@ -5834,6 +5852,14 @@ def test_std_where(self): assert_allclose(a.std(axis=_ax, where=_wh), _res) assert_allclose(np.std(a, axis=_ax, where=_wh), _res) + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.5, 0.5], [0.5, 0.5]] + assert_allclose(a3d.std(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.std(a3d, axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(a.std(axis=1, where=whf), np.std(a[whf].reshape((5,3)), axis=1)) assert_allclose(np.std(a, axis=1, where=whf), From 54dcb5cf1290098cc7a2a11cf3661c2875d6ac37 Mon Sep 17 00:00:00 2001 From: leonarduschen Date: Mon, 26 Oct 2020 01:57:26 +0800 Subject: [PATCH 0738/1270] Update navbar linked page back to index --- doc/source/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index dcf60ada9297..95865c0249c7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -161,6 +161,7 @@ def setup(app): html_logo = '_static/numpylogo.svg' html_theme_options = { + "logo_link": "index", "github_url": "https://github.com/numpy/numpy", "twitter_url": "https://twitter.com/numpy_team", } From 814ad699e99d5f8a1bc912a9ee95b5d7825cdf42 Mon Sep 17 00:00:00 2001 From: Leonardus Date: Fri, 12 Mar 2021 22:51:42 +0800 Subject: [PATCH 0739/1270] Bump pydata-sphinx-theme to 0.5.0 --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 3d74d7b87b47..e366020205f2 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -4,4 +4,4 @@ ipython scipy matplotlib pandas -pydata-sphinx-theme==0.4.3 +pydata-sphinx-theme==0.5.0 From 38e51251c0879a1412dc5e42e3f957f2421fc295 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 4 Mar 2021 13:52:10 +0100 Subject: [PATCH 0740/1270] ENH: Add annotations for `np.lib.arrayterator` --- numpy/lib/__init__.pyi | 5 +++- numpy/lib/arrayterator.pyi | 53 ++++++++++++++++++++++++++++++++++++++ 2 files 
changed, 57 insertions(+), 1 deletion(-) create mode 100644 numpy/lib/arrayterator.pyi diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index c7fab69439c6..ec528d2200d0 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -5,6 +5,10 @@ from numpy import ( ndindex as ndindex, ) +from numpy.lib.arrayterator import ( + Arrayterator as Arrayterator, +) + from numpy.lib.index_tricks import ( ravel_multi_index as ravel_multi_index, unravel_index as unravel_index, @@ -31,7 +35,6 @@ __all__: List[str] emath: Any math: Any tracemalloc_domain: Any -Arrayterator: Any iscomplexobj: Any isrealobj: Any imag: Any diff --git a/numpy/lib/arrayterator.pyi b/numpy/lib/arrayterator.pyi new file mode 100644 index 000000000000..39d6fd84354e --- /dev/null +++ b/numpy/lib/arrayterator.pyi @@ -0,0 +1,53 @@ +import sys +from typing import ( + List, + Any, + TypeVar, + Generator, + List, + Union, + Tuple, + overload, +) + +from numpy import ndarray, dtype, generic +from numpy.typing import DTypeLike + +# TODO: Set a shape bound once we've got proper shape support +_Shape = TypeVar("_Shape", bound=Any) +_DType = TypeVar("_DType", bound=dtype[Any]) +_ScalarType = TypeVar("_ScalarType", bound=generic) + +_Index = Union[ + Union[ellipsis, int, slice], + Tuple[Union[ellipsis, int, slice], ...], +] + +__all__: List[str] + +# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, +# but its ``__getattr__` method does wrap around the former and thus has +# access to all its methods + +class Arrayterator(ndarray[_Shape, _DType]): + var: ndarray[_Shape, _DType] # type: ignore[assignment] + buf_size: None | int + start: List[int] + stop: List[int] + step: List[int] + + @property # type: ignore[misc] + def shape(self) -> Tuple[int, ...]: ... + @property + def flat( # type: ignore[override] + self: ndarray[Any, dtype[_ScalarType]] + ) -> Generator[_ScalarType, None, None]: ... + def __init__( + self, var: ndarray[_Shape, _DType], buf_size: None | int = ... + ) -> None: ... + @overload + def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... + @overload + def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... + def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ... + def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ... 
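
A minimal usage sketch (not part of the patch itself) of the class annotated above, assuming the documented behaviour that `Arrayterator` wraps an array and yields ordinary sub-arrays of at most `buf_size` elements per iteration step:

    import numpy as np

    a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
    # Iterate in buffers of at most 10 elements; each chunk is a plain
    # ndarray, so the whole array never has to be processed in one block.
    total = 0
    for chunk in np.lib.Arrayterator(a, buf_size=10):
        total += int(chunk.sum())
    assert total == a.sum()
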
From f75336e55c10ebcacf55aec5505fd8c0b93ba81a Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 4 Mar 2021 13:52:32 +0100 Subject: [PATCH 0741/1270] TST: Add typing tests for `np.lib.arrayterator` --- numpy/typing/tests/data/fail/arrayterator.py | 14 ++++++++++ numpy/typing/tests/data/pass/arrayterator.py | 27 +++++++++++++++++++ .../typing/tests/data/reveal/arrayterator.py | 24 +++++++++++++++++ 3 files changed, 65 insertions(+) create mode 100644 numpy/typing/tests/data/fail/arrayterator.py create mode 100644 numpy/typing/tests/data/pass/arrayterator.py create mode 100644 numpy/typing/tests/data/reveal/arrayterator.py diff --git a/numpy/typing/tests/data/fail/arrayterator.py b/numpy/typing/tests/data/fail/arrayterator.py new file mode 100644 index 000000000000..c50fb2ec4e52 --- /dev/null +++ b/numpy/typing/tests/data/fail/arrayterator.py @@ -0,0 +1,14 @@ +from typing import Any +import numpy as np + +AR_i8: np.ndarray[Any, np.dtype[np.int64]] +ar_iter = np.lib.Arrayterator(AR_i8) + +np.lib.Arrayterator(np.int64()) # E: incompatible type +ar_iter.shape = (10, 5) # E: is read-only +ar_iter[None] # E: Invalid index type +ar_iter[None, 1] # E: Invalid index type +ar_iter[np.intp()] # E: Invalid index type +ar_iter[np.intp(), ...] # E: Invalid index type +ar_iter[AR_i8] # E: Invalid index type +ar_iter[AR_i8, :] # E: Invalid index type diff --git a/numpy/typing/tests/data/pass/arrayterator.py b/numpy/typing/tests/data/pass/arrayterator.py new file mode 100644 index 000000000000..572be5e2fe29 --- /dev/null +++ b/numpy/typing/tests/data/pass/arrayterator.py @@ -0,0 +1,27 @@ + +from __future__ import annotations + +from typing import Any +import numpy as np + +AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) +ar_iter = np.lib.Arrayterator(AR_i8) + +ar_iter.var +ar_iter.buf_size +ar_iter.start +ar_iter.stop +ar_iter.step +ar_iter.shape +ar_iter.flat + +ar_iter.__array__() + +for i in ar_iter: + pass + +ar_iter[0] +ar_iter[...] 
+ar_iter[:] +ar_iter[0, 0, 0] +ar_iter[..., 0, :] diff --git a/numpy/typing/tests/data/reveal/arrayterator.py b/numpy/typing/tests/data/reveal/arrayterator.py new file mode 100644 index 000000000000..b57861d00f05 --- /dev/null +++ b/numpy/typing/tests/data/reveal/arrayterator.py @@ -0,0 +1,24 @@ +from typing import Any +import numpy as np + +AR_i8: np.ndarray[Any, np.dtype[np.int64]] +ar_iter = np.lib.Arrayterator(AR_i8) + +reveal_type(ar_iter.var) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter.buf_size) # E: Union[None, builtins.int] +reveal_type(ar_iter.start) # E: builtins.list[builtins.int] +reveal_type(ar_iter.stop) # E: builtins.list[builtins.int] +reveal_type(ar_iter.step) # E: builtins.list[builtins.int] +reveal_type(ar_iter.shape) # E: builtins.tuple[builtins.int] +reveal_type(ar_iter.flat) # E: 'typing.Generator[{int64}, None, None] + +reveal_type(ar_iter.__array__()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] + +for i in ar_iter: + reveal_type(i) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] + +reveal_type(ar_iter[0]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter[...]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter[:]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter[0, 0, 0]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter[..., 0, :]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] From 335b6cb399ecaa869cf78c49984eb1dcc412b8bc Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 11 Mar 2021 14:30:01 +0100 Subject: [PATCH 0742/1270] MAINT: Relax the signature of the `__array__` protocol Implementations of `__array__` do not require the `dtype` parameter --- numpy/typing/_array_like.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index ef6c061d1aa3..eb8d3f4e1abd 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -40,11 +40,12 @@ if TYPE_CHECKING or HAVE_PROTOCOL: # The `_SupportsArray` protocol only cares about the default dtype - # (i.e. `dtype=None`) of the to-be returned array. + # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned + # array. # Concrete implementations of the protocol are responsible for adding # any and all remaining overloads class _SupportsArray(Protocol[_DType_co]): - def __array__(self, dtype: None = ...) -> ndarray[Any, _DType_co]: ... + def __array__(self) -> ndarray[Any, _DType_co]: ... else: _SupportsArray = Any From 025dcca2cb3c240e0af1d075d24a5bba18421593 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 11 Mar 2021 14:45:22 +0100 Subject: [PATCH 0743/1270] MAINT: Tighten the type-constraints of the `__array__` dtype parameter The likes of `np.array(..., dtype=dtype)` will always pass `np.dtype` instances to `__array__(self, dtype=...)`, so that latter should not have to take arbitrary dtype-like objects but only actual `np.dtype` instances. 
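
As a hedged illustration of the reasoning above (not part of this patch; the `Wrapper` class is hypothetical), an `__array__` implementation can therefore annotate its `dtype` parameter as an actual `np.dtype` rather than an arbitrary dtype-like object:

    from typing import Any, Optional

    import numpy as np

    class Wrapper:
        # Hypothetical array-like container, used only for illustration.
        def __init__(self, data: Any) -> None:
            self._data = data

        # Per the reasoning above, NumPy hands over a normalized np.dtype
        # instance (or None), so the parameter need not accept DTypeLike.
        def __array__(self, dtype: Optional[np.dtype] = None) -> np.ndarray:
            return np.asarray(self._data, dtype=dtype)

    # The string "float64" is converted to np.dtype("float64") before it
    # reaches Wrapper.__array__.
    arr = np.array(Wrapper([1, 2, 3]), dtype="float64")
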
--- numpy/__init__.pyi | 6 +++--- numpy/typing/tests/data/pass/arithmetic.py | 2 +- numpy/typing/tests/data/pass/array_like.py | 12 ++++++------ numpy/typing/tests/data/pass/flatiter.py | 2 +- numpy/typing/tests/data/pass/ufunclike.py | 4 +--- numpy/typing/tests/data/reveal/flatiter.py | 2 +- 6 files changed, 13 insertions(+), 15 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9cdc4f2e1b72..b1ab7c14f68f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -853,7 +853,7 @@ class flatiter(Generic[_NdArraySubClass]): @overload def __array__(self: flatiter[ndarray[Any, _DType]], __dtype: None = ...) -> ndarray[Any, _DType]: ... @overload - def __array__(self, __dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... + def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ... _OrderKACF = Optional[Literal["K", "A", "C", "F"]] _OrderACF = Optional[Literal["A", "C", "F"]] @@ -1362,7 +1362,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __array__(self, __dtype: None = ...) -> ndarray[Any, _DType_co]: ... @overload - def __array__(self, __dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... + def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ... @property def ctypes(self) -> _ctypes: ... @property @@ -2409,7 +2409,7 @@ class generic(_ArrayOrScalarCommon): @overload def __array__(self: _ScalarType, __dtype: None = ...) -> ndarray[Any, dtype[_ScalarType]]: ... @overload - def __array__(self, __dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... + def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ... @property def base(self) -> None: ... @property diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 7a297cfc5df6..62bd79004a11 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -25,7 +25,7 @@ class Object: - def __array__(self, dtype: None = None) -> np.ndarray[Any, np.dtype[np.object_]]: + def __array__(self) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self return ret diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index 563fc08c7ffb..e16d196b6024 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -1,7 +1,7 @@ from typing import Any, List, Optional import numpy as np -from numpy.typing import ArrayLike, DTypeLike, _SupportsArray +from numpy.typing import ArrayLike, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 @@ -18,20 +18,20 @@ class A: - def __array__(self, dtype: DTypeLike = None) -> np.ndarray: + def __array__(self, dtype: Optional[np.dtype] = None) -> np.ndarray: return np.array([1, 2, 3]) x13: ArrayLike = A() scalar: _SupportsArray = np.int64(1) -scalar.__array__(None) +scalar.__array__() array: _SupportsArray = np.array(1) -array.__array__(None) +array.__array__() a: _SupportsArray = A() -a.__array__(None) -a.__array__(dtype=None) +a.__array__() +a.__array__() # Escape hatch for when you mean to make something like an object # array. diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py index 4fdf2529962b..63c839af4b23 100644 --- a/numpy/typing/tests/data/pass/flatiter.py +++ b/numpy/typing/tests/data/pass/flatiter.py @@ -13,4 +13,4 @@ a[...] 
a[:] a.__array__() -a.__array__(np.float64) +a.__array__(np.dtype(np.float64)) diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index 448ee66bb87c..7eac89e8f9aa 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -13,9 +13,7 @@ def __floor__(self) -> Object: def __ge__(self, value: object) -> bool: return True - def __array__( - self, dtype: None = None - ) -> np.ndarray[Any, np.dtype[np.object_]]: + def __array__(self) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self return ret diff --git a/numpy/typing/tests/data/reveal/flatiter.py b/numpy/typing/tests/data/reveal/flatiter.py index 221101ebb9f8..97776dd9f4f5 100644 --- a/numpy/typing/tests/data/reveal/flatiter.py +++ b/numpy/typing/tests/data/reveal/flatiter.py @@ -14,4 +14,4 @@ reveal_type(a[...]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] reveal_type(a[:]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] reveal_type(a.__array__()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] -reveal_type(a.__array__(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(a.__array__(np.dtype(np.float64))) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] From 601c43ff608bd3d0d846670a6b3d82b40c56d6fa Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 12 Mar 2021 11:16:19 -0600 Subject: [PATCH 0744/1270] BUG: Fix ma coercion list-of-ma-arrays if they do not cast to bool There was a regression here due to force casting to bool, but if that happens to fail (it does, but should not for strings). The mask would just be dropped. Of course masked arrays are held together by faith here, but its a regression. Closes gh-18551 --- numpy/ma/core.py | 5 +++-- numpy/ma/tests/test_core.py | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index cda2eeb343f2..10ee0fb06f56 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -2859,8 +2859,9 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, elif isinstance(data, (tuple, list)): try: # If data is a sequence of masked array - mask = np.array([getmaskarray(np.asanyarray(m, dtype=mdtype)) - for m in data], dtype=mdtype) + mask = np.array( + [getmaskarray(np.asanyarray(m, dtype=_data.dtype)) + for m in data], dtype=mdtype) except ValueError: # If data is nested mask = nomask diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index f4078062521f..e1cc47063261 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -238,6 +238,25 @@ def test_creation_with_list_of_maskedarrays(self): assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) assert_(data.mask is nomask) + def test_creation_with_list_of_maskedarrays_no_bool_cast(self): + # Tests the regression in gh-18551 + masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False]) + normal_int = np.arange(2) + res = np.ma.asarray([masked_str, normal_int]) + assert_array_equal(res.mask, [[True, False], [False, False]]) + # Te above only failed due a long chain of oddity, try also with + # an object array that cannot be converted to bool always: + class NotBool(): + def __bool__(self): + raise ValueError("not a bool!") + masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) + # Check that the NotBool actually fails like we would expect: + with pytest.raises(ValueError, match="not a bool!"): + np.asarray([masked_obj], dtype=bool) + + res = 
np.ma.asarray([masked_obj, normal_int]) + assert_array_equal(res.mask, [[True, False], [False, False]]) + def test_creation_from_ndarray_with_padding(self): x = np.array([('A', 0)], dtype={'names':['f0','f1'], 'formats':['S4','i8'], From 8d297a801eabaa740fc07d4826cb03951cc1d729 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 12 Mar 2021 11:46:05 -0600 Subject: [PATCH 0745/1270] Apply suggestions from code review --- numpy/ma/tests/test_core.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index e1cc47063261..ad395f169dd3 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -244,7 +244,8 @@ def test_creation_with_list_of_maskedarrays_no_bool_cast(self): normal_int = np.arange(2) res = np.ma.asarray([masked_str, normal_int]) assert_array_equal(res.mask, [[True, False], [False, False]]) - # Te above only failed due a long chain of oddity, try also with + + # The above only failed due a long chain of oddity, try also with # an object array that cannot be converted to bool always: class NotBool(): def __bool__(self): From 54a2c49afaae10ba4de29b465174a184580b3252 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 13 Mar 2021 14:39:45 -0600 Subject: [PATCH 0746/1270] Update numpy/ma/tests/test_core.py --- numpy/ma/tests/test_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index ad395f169dd3..9bfb82d1ff1d 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -242,7 +242,7 @@ def test_creation_with_list_of_maskedarrays_no_bool_cast(self): # Tests the regression in gh-18551 masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False]) normal_int = np.arange(2) - res = np.ma.asarray([masked_str, normal_int]) + res = np.ma.asarray([masked_str, normal_int], dtype="U21") assert_array_equal(res.mask, [[True, False], [False, False]]) # The above only failed due a long chain of oddity, try also with From c8aa075cb2b1f0a59fba7fd1eadb5bc2951ba974 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Sun, 14 Mar 2021 10:39:35 -0500 Subject: [PATCH 0747/1270] Update comment Co-authored-by: Matti Picus --- numpy/core/src/umath/loops.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index bd8c78d2d43e..68e209fe9312 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -2527,7 +2527,7 @@ NPY_NO_EXPORT void const @ftype@ in2r = ((@ftype@ *)ip2)[0]; const @ftype@ in2i = ((@ftype@ *)ip2)[1]; #if defined(__APPLE__) && defined(__aarch64__) - // On macos-arm64, if this block of code was not there, + // On macos-arm64 without this block of code, // when branch prediction goes wrong, the floating point exception // register does not get cleared and an exception for the // wrong branch is thrown. From e8ddc947e8e20c5972adaa1a9c198ead9b9c7c7d Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 15 Mar 2021 07:37:51 +0000 Subject: [PATCH 0748/1270] MAINT: Bump pycodestyle from 2.6.0 to 2.7.0 Bumps [pycodestyle](https://github.com/PyCQA/pycodestyle) from 2.6.0 to 2.7.0. 
- [Release notes](https://github.com/PyCQA/pycodestyle/releases) - [Changelog](https://github.com/PyCQA/pycodestyle/blob/master/CHANGES.txt) - [Commits](https://github.com/PyCQA/pycodestyle/commits) Signed-off-by: dependabot-preview[bot] --- linter_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linter_requirements.txt b/linter_requirements.txt index 9df85e7c3b1b..51a769ee0aa4 100644 --- a/linter_requirements.txt +++ b/linter_requirements.txt @@ -1,2 +1,2 @@ -pycodestyle==2.6.0 +pycodestyle==2.7.0 GitPython==3.1.13 \ No newline at end of file From b8069779c990f04d06399432e0b8276ac96a109f Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 15 Mar 2021 07:38:53 +0000 Subject: [PATCH 0749/1270] MAINT: Bump hypothesis from 6.6.0 to 6.8.1 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.6.0 to 6.8.1. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.6.0...hypothesis-python-6.8.1) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index ee37626df217..0762d5acf586 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.22 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.6.0 +hypothesis==6.8.1 pytest==6.2.2 pytz==2021.1 pytest-cov==2.11.1 From d9c187101adf8bc8a505b85ef9cd8e39e699bf17 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 15 Mar 2021 07:44:40 -0600 Subject: [PATCH 0750/1270] CI: Update apt package list before Python install Closes #18613. --- azure-pipelines.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index dd27354fe792..936ef7473a76 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -284,6 +284,7 @@ stages: vmImage: 'ubuntu-18.04' steps: - script: | + sudo apt update sudo apt install python3.7 sudo apt install python3.7-dev if ! `gcc-4.8 2>/dev/null`; then From 5db3366c4e879b8f779a0ae4048c7637854f1588 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 15 Mar 2021 14:50:43 +0100 Subject: [PATCH 0751/1270] API: Formally classify `np.lib.stride_tricks` as part of the public API With `as_strided`, and the newly introduced `sliding_window_view` function, there are currently 2 public objects that can: a. Only be imported from a private module b. Are publicly documented to-be imported from aforementioned module Both observations are problematic and in need of rectification. This commit therefore moves `np.lib.stride_tricks` to the `PUBLIC_MODULES` list. 
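
For illustration only (not part of the patch): once `numpy.lib.stride_tricks` is treated as public, the import path that the documentation already advertises can be used directly. A minimal sketch, assuming NumPy 1.20+ where `sliding_window_view` exists:

    import numpy as np
    from numpy.lib.stride_tricks import sliding_window_view

    x = np.arange(6)
    # Overlapping length-3 windows over x; the result is a read-only view,
    # no data is copied.
    windows = sliding_window_view(x, window_shape=3)
    # windows == [[0 1 2]
    #             [1 2 3]
    #             [2 3 4]
    #             [3 4 5]]
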
--- numpy/tests/test_public_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 69430eeda089..c7a789b21521 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -154,6 +154,7 @@ def test_NPY_NO_EXPORT(): "lib.mixins", "lib.recfunctions", "lib.scimath", + "lib.stride_tricks", "linalg", "ma", "ma.extras", @@ -280,7 +281,6 @@ def test_NPY_NO_EXPORT(): "lib.npyio", "lib.polynomial", "lib.shape_base", - "lib.stride_tricks", "lib.twodim_base", "lib.type_check", "lib.ufunclike", From 564a71c29fae0b09eb8a44a96600a1f9a5b00a16 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 15 Mar 2021 14:56:31 +0100 Subject: [PATCH 0752/1270] MAINT: Re-export a number of sub-modules Ensures that type checkers will allow the likes of: >>> import numpy as np >>> out = np.lib.stride_tricks.sliding_window_view(...) --- numpy/lib/__init__.pyi | 7 +++++++ numpy/ma/__init__.pyi | 2 ++ numpy/polynomial/__init__.pyi | 10 ++++++++++ numpy/typing/tests/data/fail/modules.py | 5 +++++ numpy/typing/tests/data/pass/modules.py | 13 +++++++++++++ numpy/typing/tests/data/reveal/modules.py | 13 +++++++++++++ 6 files changed, 50 insertions(+) diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index ec528d2200d0..02ed56c8b8c8 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -5,6 +5,13 @@ from numpy import ( ndindex as ndindex, ) +from numpy.lib import ( + format as format, + mixins as mixins, + scimath as scimath, + stride_tricks as stride_stricks, +) + from numpy.lib.arrayterator import ( Arrayterator as Arrayterator, ) diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi index 66dfe40de6a5..16e026272605 100644 --- a/numpy/ma/__init__.pyi +++ b/numpy/ma/__init__.pyi @@ -1,5 +1,7 @@ from typing import Any, List +from numpy.ma import extras as extras + __all__: List[str] core: Any diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 817ba22ac83f..5f4d11e9097a 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -1,5 +1,15 @@ from typing import Any +from numpy.polynomial import ( + chebyshev as chebyshev, + hermite as hermite, + hermite_e as hermite_e, + laguerre as laguerre, + legendre as legendre, + polynomial as polynomial, + polyutils as polyutils, +) + Polynomial: Any Chebyshev: Any Legendre: Any diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.py index b80fd9edeae2..7b9309329ac8 100644 --- a/numpy/typing/tests/data/fail/modules.py +++ b/numpy/typing/tests/data/fail/modules.py @@ -9,6 +9,11 @@ np.os # E: Module has no attribute np.math # E: Module has no attribute +# Public sub-modules that are not imported to their parent module by default; +# e.g. 
one must first execute `import numpy.lib.recfunctions` +np.lib.recfunctions # E: Module has no attribute +np.ma.mrecords # E: Module has no attribute + np.__NUMPY_SETUP__ # E: Module has no attribute np.__deprecated_attrs__ # E: Module has no attribute np.__expired_functions__ # E: Module has no attribute diff --git a/numpy/typing/tests/data/pass/modules.py b/numpy/typing/tests/data/pass/modules.py index 2fdb69eb3eab..5ca9691ac880 100644 --- a/numpy/typing/tests/data/pass/modules.py +++ b/numpy/typing/tests/data/pass/modules.py @@ -15,6 +15,19 @@ np.testing np.version +np.lib.format +np.lib.mixins +np.lib.scimath +np.lib.stride_tricks +np.ma.extras +np.polynomial.chebyshev +np.polynomial.hermite +np.polynomial.hermite_e +np.polynomial.laguerre +np.polynomial.legendre +np.polynomial.polynomial +np.polynomial.polyutils + np.__path__ np.__version__ np.__git_version__ diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py index 75513f2b08c9..4852d60b79e9 100644 --- a/numpy/typing/tests/data/reveal/modules.py +++ b/numpy/typing/tests/data/reveal/modules.py @@ -17,6 +17,19 @@ reveal_type(np.testing) # E: ModuleType reveal_type(np.version) # E: ModuleType +reveal_type(np.lib.format) # E: ModuleType +reveal_type(np.lib.mixins) # E: ModuleType +reveal_type(np.lib.scimath) # E: ModuleType +reveal_type(np.lib.stride_tricks) # E: ModuleType +reveal_type(np.ma.extras) # E: ModuleType +reveal_type(np.polynomial.chebyshev) # E: ModuleType +reveal_type(np.polynomial.hermite) # E: ModuleType +reveal_type(np.polynomial.hermite_e) # E: ModuleType +reveal_type(np.polynomial.laguerre) # E: ModuleType +reveal_type(np.polynomial.legendre) # E: ModuleType +reveal_type(np.polynomial.polynomial) # E: ModuleType +reveal_type(np.polynomial.polyutils) # E: ModuleType + # TODO: Remove when annotations have been added to `np.testing.assert_equal` reveal_type(np.testing.assert_equal) # E: Any From 0f5d8ccdea3088db4f4e88d8832474bdb69766ff Mon Sep 17 00:00:00 2001 From: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Date: Mon, 15 Mar 2021 22:00:20 +0100 Subject: [PATCH 0753/1270] DOC: Consistently use rng as variable name for random generators --- doc/source/reference/random/generator.rst | 14 +++++------ doc/source/reference/random/index.rst | 24 +++++++++---------- .../reference/random/new-or-different.rst | 20 ++++++++-------- doc/source/reference/routines.polynomials.rst | 4 ++-- numpy/random/_generator.pyx | 8 +++---- 5 files changed, 35 insertions(+), 35 deletions(-) diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst index a359d22538e7..7934be98a6d4 100644 --- a/doc/source/reference/random/generator.rst +++ b/doc/source/reference/random/generator.rst @@ -71,13 +71,13 @@ By default, `Generator.permuted` returns a copy. To operate in-place with `Generator.permuted`, pass the same array as the first argument *and* as the value of the ``out`` parameter. For example, - >>> rg = np.random.default_rng() + >>> rng = np.random.default_rng() >>> x = np.arange(0, 15).reshape(3, 5) >>> x array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]) - >>> y = rg.permuted(x, axis=1, out=x) + >>> y = rng.permuted(x, axis=1, out=x) >>> x array([[ 1, 0, 2, 4, 3], # random [ 6, 7, 8, 9, 5], @@ -97,13 +97,13 @@ which dimension of the input array to use as the sequence. In the case of a two-dimensional array, ``axis=0`` will, in effect, rearrange the rows of the array, and ``axis=1`` will rearrange the columns. 
For example - >>> rg = np.random.default_rng() + >>> rng = np.random.default_rng() >>> x = np.arange(0, 15).reshape(3, 5) >>> x array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]) - >>> rg.permutation(x, axis=1) + >>> rng.permutation(x, axis=1) array([[ 1, 3, 2, 0, 4], # random [ 6, 8, 7, 5, 9], [11, 13, 12, 10, 14]]) @@ -116,7 +116,7 @@ how `numpy.sort` treats it. Each slice along the given axis is shuffled independently of the others. Compare the following example of the use of `Generator.permuted` to the above example of `Generator.permutation`: - >>> rg.permuted(x, axis=1) + >>> rng.permuted(x, axis=1) array([[ 1, 0, 2, 4, 3], # random [ 5, 7, 6, 9, 8], [10, 14, 12, 13, 11]]) @@ -131,9 +131,9 @@ Shuffling non-NumPy sequences a sequence that is not a NumPy array, it shuffles that sequence in-place. For example, - >>> rg = np.random.default_rng() + >>> rng = np.random.default_rng() >>> a = ['A', 'B', 'C', 'D', 'E'] - >>> rg.shuffle(a) # shuffle the list in-place + >>> rng.shuffle(a) # shuffle the list in-place >>> a ['B', 'D', 'A', 'E', 'C'] # random diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 13ce7c40cf26..69d597874a07 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -84,10 +84,10 @@ different .. code-block:: python try: - rg_integers = rg.integers + rng_integers = rng.integers except AttributeError: - rg_integers = rg.randint - a = rg_integers(1000) + rng_integers = rng.randint + a = rng_integers(1000) Seeds can be passed to any of the BitGenerators. The provided value is mixed via `SeedSequence` to spread a possible sequence of seeds across a wider @@ -97,8 +97,8 @@ is wrapped with a `Generator`. .. code-block:: python from numpy.random import Generator, PCG64 - rg = Generator(PCG64(12345)) - rg.standard_normal() + rng = Generator(PCG64(12345)) + rng.standard_normal() Here we use `default_rng` to create an instance of `Generator` to generate a random float: @@ -146,10 +146,10 @@ As a convenience NumPy provides the `default_rng` function to hide these details: >>> from numpy.random import default_rng ->>> rg = default_rng(12345) ->>> print(rg) +>>> rng = default_rng(12345) +>>> print(rng) Generator(PCG64) ->>> print(rg.random()) +>>> print(rng.random()) 0.22733602246716966 One can also instantiate `Generator` directly with a `BitGenerator` instance. 
@@ -158,16 +158,16 @@ To use the default `PCG64` bit generator, one can instantiate it directly and pass it to `Generator`: >>> from numpy.random import Generator, PCG64 ->>> rg = Generator(PCG64(12345)) ->>> print(rg) +>>> rng = Generator(PCG64(12345)) +>>> print(rng) Generator(PCG64) Similarly to use the older `MT19937` bit generator (not recommended), one can instantiate it directly and pass it to `Generator`: >>> from numpy.random import Generator, MT19937 ->>> rg = Generator(MT19937(12345)) ->>> print(rg) +>>> rng = Generator(MT19937(12345)) +>>> print(rng) Generator(MT19937) What's New or Different diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst index 6cab0f729e64..8904209d686a 100644 --- a/doc/source/reference/random/new-or-different.rst +++ b/doc/source/reference/random/new-or-different.rst @@ -58,18 +58,18 @@ And in more detail: from numpy.random import Generator, PCG64 import numpy.random - rg = Generator(PCG64()) - %timeit -n 1 rg.standard_normal(100000) + rng = Generator(PCG64()) + %timeit -n 1 rng.standard_normal(100000) %timeit -n 1 numpy.random.standard_normal(100000) .. ipython:: python - %timeit -n 1 rg.standard_exponential(100000) + %timeit -n 1 rng.standard_exponential(100000) %timeit -n 1 numpy.random.standard_exponential(100000) .. ipython:: python - %timeit -n 1 rg.standard_gamma(3.0, 100000) + %timeit -n 1 rng.standard_gamma(3.0, 100000) %timeit -n 1 numpy.random.standard_gamma(3.0, 100000) @@ -94,9 +94,9 @@ And in more detail: .. ipython:: python - rg = Generator(PCG64(0)) - rg.random(3, dtype='d') - rg.random(3, dtype='f') + rng = Generator(PCG64(0)) + rng.random(3, dtype='d') + rng.random(3, dtype='f') * Optional ``out`` argument that allows existing arrays to be filled for select distributions @@ -121,9 +121,9 @@ And in more detail: .. ipython:: python - rg = Generator(PCG64(123456789)) + rng = Generator(PCG64(123456789)) a = np.arange(12).reshape((3, 4)) a - rg.choice(a, axis=1, size=5) - rg.shuffle(a, axis=1) # Shuffle in-place + rng.choice(a, axis=1, size=5) + rng.shuffle(a, axis=1) # Shuffle in-place a diff --git a/doc/source/reference/routines.polynomials.rst b/doc/source/reference/routines.polynomials.rst index da481ae4c602..ecfb012f0c6a 100644 --- a/doc/source/reference/routines.polynomials.rst +++ b/doc/source/reference/routines.polynomials.rst @@ -118,9 +118,9 @@ example, consider a simple linear fit to the following data: .. ipython:: python - rg = np.random.default_rng() + rng = np.random.default_rng() x = np.arange(10) - y = np.arange(10) + rg.standard_normal(10) + y = np.arange(10) + rng.standard_normal(10) With the legacy polynomial module, a linear fit (i.e. polynomial of degree 1) could be applied to these data with `~numpy.polyfit`: diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ee2dcb24b7f8..73d295602faf 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -176,8 +176,8 @@ cdef class Generator: Examples -------- >>> from numpy.random import Generator, PCG64 - >>> rg = Generator(PCG64()) - >>> rg.standard_normal() + >>> rng = Generator(PCG64()) + >>> rng.standard_normal() -0.203 # random See Also @@ -997,8 +997,8 @@ cdef class Generator: ----- For random samples from :math:`N(\\mu, \\sigma^2)`, use one of:: - mu + sigma * gen.standard_normal(size=...) - gen.normal(mu, sigma, size=...) + mu + sigma * rng.standard_normal(size=...) + rng.normal(mu, sigma, size=...) 
Examples -------- From 497804069a78360b54ae8fccdd0f8a79520e99ba Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 15 Mar 2021 16:08:34 -0600 Subject: [PATCH 0754/1270] MAINT: Fix missed replacement of rg by rng. --- doc/source/reference/random/new-or-different.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst index 8904209d686a..a815439267fc 100644 --- a/doc/source/reference/random/new-or-different.rst +++ b/doc/source/reference/random/new-or-different.rst @@ -112,7 +112,7 @@ And in more detail: .. ipython:: python existing = np.zeros(4) - rg.random(out=existing[:2]) + rng.random(out=existing[:2]) print(existing) * Optional ``axis`` argument for methods like `~.Generator.choice`, From f0fa0662bb1cf116193b3a6e628c18f595937da7 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 16 Mar 2021 17:14:39 +0100 Subject: [PATCH 0755/1270] API: Move `polynomial.polyutils` to the `PRIVATE_BUT_PRESENT_MODULES` list Aforementioned module was accidently marked as public --- numpy/polynomial/__init__.pyi | 1 - numpy/tests/test_public_api.py | 2 +- numpy/typing/tests/data/pass/modules.py | 1 - numpy/typing/tests/data/reveal/modules.py | 1 - 4 files changed, 1 insertion(+), 4 deletions(-) diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 5f4d11e9097a..755f7521bfb2 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -7,7 +7,6 @@ from numpy.polynomial import ( laguerre as laguerre, legendre as legendre, polynomial as polynomial, - polyutils as polyutils, ) Polynomial: Any diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index c7a789b21521..6e4a8dee0a7c 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -167,7 +167,6 @@ def test_NPY_NO_EXPORT(): "polynomial.laguerre", "polynomial.legendre", "polynomial.polynomial", - "polynomial.polyutils", "random", "testing", "typing", @@ -294,6 +293,7 @@ def test_NPY_NO_EXPORT(): "ma.timer_comparison", "matrixlib", "matrixlib.defmatrix", + "polynomial.polyutils", "random.mtrand", "random.bit_generator", "testing.print_coercion_tables", diff --git a/numpy/typing/tests/data/pass/modules.py b/numpy/typing/tests/data/pass/modules.py index 5ca9691ac880..9261874d565a 100644 --- a/numpy/typing/tests/data/pass/modules.py +++ b/numpy/typing/tests/data/pass/modules.py @@ -26,7 +26,6 @@ np.polynomial.laguerre np.polynomial.legendre np.polynomial.polynomial -np.polynomial.polyutils np.__path__ np.__version__ diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py index 4852d60b79e9..fa356969aa92 100644 --- a/numpy/typing/tests/data/reveal/modules.py +++ b/numpy/typing/tests/data/reveal/modules.py @@ -28,7 +28,6 @@ reveal_type(np.polynomial.laguerre) # E: ModuleType reveal_type(np.polynomial.legendre) # E: ModuleType reveal_type(np.polynomial.polynomial) # E: ModuleType -reveal_type(np.polynomial.polyutils) # E: ModuleType # TODO: Remove when annotations have been added to `np.testing.assert_equal` reveal_type(np.testing.assert_equal) # E: Any From eeba278c401dea4f38d92897ced61ec49775def4 Mon Sep 17 00:00:00 2001 From: christos val Date: Tue, 16 Mar 2021 18:15:09 -0400 Subject: [PATCH 0756/1270] add note to rint docstrings --- numpy/core/code_generators/ufunc_docstrings.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/code_generators/ufunc_docstrings.py 
b/numpy/core/code_generators/ufunc_docstrings.py index a9dbbf92e809..5026bfae8b5b 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -3489,6 +3489,12 @@ def add_newdoc(place, name, doc): -------- fix, ceil, floor, trunc + Notes + ----- + For values exactly halfway between rounded decimal values, NumPy + rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, + -0.5 and 0.5 round to 0.0, etc. + Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) From 398b01f346116e7974ef9bacf0a2b29f1b3492e4 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 6 Aug 2020 10:59:32 +0100 Subject: [PATCH 0757/1270] BUG: np.random: Use log1p to improve precision This reduces the number of cases when floating point precision makes the argument to `log` become `0` --- .../random/src/distributions/distributions.c | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index f47c54a538bc..3df819d9da7e 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -47,7 +47,7 @@ static double standard_exponential_unlikely(bitgen_t *bitgen_state, uint8_t idx, double x) { if (idx == 0) { /* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */ - return ziggurat_exp_r - log(1.0 - next_double(bitgen_state)); + return ziggurat_exp_r - npy_log1p(-next_double(bitgen_state)); } else if ((fe_double[idx - 1] - fe_double[idx]) * next_double(bitgen_state) + fe_double[idx] < exp(-x)) { @@ -84,7 +84,7 @@ static float standard_exponential_unlikely_f(bitgen_t *bitgen_state, uint8_t idx, float x) { if (idx == 0) { /* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */ - return ziggurat_exp_r_f - logf(1.0f - next_float(bitgen_state)); + return ziggurat_exp_r_f - npy_log1pf(-next_float(bitgen_state)); } else if ((fe_float[idx - 1] - fe_float[idx]) * next_float(bitgen_state) + fe_float[idx] < expf(-x)) { @@ -121,7 +121,7 @@ void random_standard_exponential_inv_fill(bitgen_t * bitgen_state, npy_intp cnt, { npy_intp i; for (i = 0; i < cnt; i++) { - out[i] = -log(1.0 - next_double(bitgen_state)); + out[i] = -npy_log1p(-next_double(bitgen_state)); } } @@ -129,7 +129,7 @@ void random_standard_exponential_inv_fill_f(bitgen_t * bitgen_state, npy_intp cn { npy_intp i; for (i = 0; i < cnt; i++) { - out[i] = -log(1.0 - next_float(bitgen_state)); + out[i] = -npy_log1p(-next_float(bitgen_state)); } } @@ -155,8 +155,8 @@ double random_standard_normal(bitgen_t *bitgen_state) { if (idx == 0) { for (;;) { /* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */ - xx = -ziggurat_nor_inv_r * log(1.0 - next_double(bitgen_state)); - yy = -log(1.0 - next_double(bitgen_state)); + xx = -ziggurat_nor_inv_r * npy_log1p(-next_double(bitgen_state)); + yy = -npy_log1p(-next_double(bitgen_state)); if (yy + yy > xx * xx) return ((rabs >> 8) & 0x1) ? -(ziggurat_nor_r + xx) : ziggurat_nor_r + xx; @@ -196,8 +196,8 @@ float random_standard_normal_f(bitgen_t *bitgen_state) { if (idx == 0) { for (;;) { /* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */ - xx = -ziggurat_nor_inv_r_f * logf(1.0f - next_float(bitgen_state)); - yy = -logf(1.0f - next_float(bitgen_state)); + xx = -ziggurat_nor_inv_r_f * npy_log1pf(-next_float(bitgen_state)); + yy = -npy_log1pf(-next_float(bitgen_state)); if (yy + yy > xx * xx) return ((rabs >> 8) & 0x1) ? 
-(ziggurat_nor_r_f + xx) : ziggurat_nor_r_f + xx; @@ -508,7 +508,7 @@ double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) { } double random_rayleigh(bitgen_t *bitgen_state, double mode) { - return mode * sqrt(-2.0 * log(1.0 - next_double(bitgen_state))); + return mode * sqrt(-2.0 * npy_log1p(-next_double(bitgen_state))); } double random_standard_t(bitgen_t *bitgen_state, double df) { @@ -916,7 +916,7 @@ RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p) { double q, r, U, V; RAND_INT_TYPE result; - r = log(1.0 - p); + r = npy_log1p(-p); while (1) { V = next_double(bitgen_state); @@ -958,7 +958,7 @@ RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p) { } RAND_INT_TYPE random_geometric_inversion(bitgen_t *bitgen_state, double p) { - return (RAND_INT_TYPE)ceil(log(1.0 - next_double(bitgen_state)) / log(1.0 - p)); + return (RAND_INT_TYPE)ceil(npy_log1p(-next_double(bitgen_state)) / npy_log1p(-p)); } RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p) { From 3b67a2acca7d53585b7bc30a8f839f1bf3040d78 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Wed, 17 Mar 2021 12:59:51 +0000 Subject: [PATCH 0758/1270] ENH: Improve the exception for default low in Generator.integers Improve the exception when low is 0 in case the single input form was used. closes #14333 --- numpy/random/_bounded_integers.pyx.in | 27 ++++++++++++-------- numpy/random/tests/test_generator_mt19937.py | 15 +++++++++++ 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/numpy/random/_bounded_integers.pyx.in b/numpy/random/_bounded_integers.pyx.in index 9f46685d3258..7eb6aff1e9f1 100644 --- a/numpy/random/_bounded_integers.pyx.in +++ b/numpy/random/_bounded_integers.pyx.in @@ -8,6 +8,7 @@ __all__ = [] np.import_array() + cdef extern from "numpy/random/distributions.h": # Generate random numbers in closed interval [off, off + rng]. uint64_t random_bounded_uint64(bitgen_t *bitgen_state, @@ -51,6 +52,17 @@ cdef extern from "numpy/random/distributions.h": np.npy_bool *out) nogil +cdef object format_bounds_error(bint closed, object low): + # Special case low == 0 to provide a better exception for users + # since low = 0 is the default single-argument case. 
+ if not np.any(low): + comp = '<' if closed else '<=' + return f'high {comp} 0' + else: + comp = '>' if closed else '>=' + return f'low {comp} high' + + {{ py: type_info = (('uint32', 'uint32', 'uint64', 'NPY_UINT64', 0, 0, 0, '0X100000000ULL'), @@ -99,8 +111,7 @@ cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object s if np.any(high_comp(high_arr, {{ub}})): raise ValueError('high is out of bounds for {{nptype}}') if np.any(low_high_comp(low_arr, high_arr)): - comp = '>' if closed else '>=' - raise ValueError('low {comp} high'.format(comp=comp)) + raise ValueError(format_bounds_error(closed, low_arr)) low_arr = np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST) high_arr = np.PyArray_FROM_OTF(high, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST) @@ -173,8 +184,7 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size, # Avoid object dtype path if already an integer high_lower_comp = np.less if closed else np.less_equal if np.any(high_lower_comp(high_arr, {{lb}})): - comp = '>' if closed else '>=' - raise ValueError('low {comp} high'.format(comp=comp)) + raise ValueError(format_bounds_error(closed, low_arr)) high_m1 = high_arr if closed else high_arr - dt.type(1) if np.any(np.greater(high_m1, {{ub}})): raise ValueError('high is out of bounds for {{nptype}}') @@ -191,13 +201,11 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size, if closed_upper > {{ub}}: raise ValueError('high is out of bounds for {{nptype}}') if closed_upper < {{lb}}: - comp = '>' if closed else '>=' - raise ValueError('low {comp} high'.format(comp=comp)) + raise ValueError(format_bounds_error(closed, low_arr)) highm1_data[i] = <{{nptype}}_t>closed_upper if np.any(np.greater(low_arr, highm1_arr)): - comp = '>' if closed else '>=' - raise ValueError('low {comp} high'.format(comp=comp)) + raise ValueError(format_bounds_error(closed, low_arr)) high_arr = highm1_arr low_arr = np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST) @@ -316,8 +324,7 @@ cdef object _rand_{{nptype}}(object low, object high, object size, if high > {{ub}}: raise ValueError("high is out of bounds for {{nptype}}") if low > high: # -1 already subtracted, closed interval - comp = '>' if closed else '>=' - raise ValueError('low {comp} high'.format(comp=comp)) + raise ValueError(format_bounds_error(closed, low)) rng = <{{utype}}_t>(high - low) off = <{{utype}}_t>(<{{nptype}}_t>low) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 446b350dd8de..310545e0d8ea 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -2562,3 +2562,18 @@ def test_ragged_shuffle(): gen = Generator(MT19937(0)) assert_no_warnings(gen.shuffle, seq) assert seq == [1, [], []] + + +@pytest.mark.parametrize("high", [-2, [-2]]) +@pytest.mark.parametrize("endpoint", [True, False]) +def test_single_arg_integer_exception(high, endpoint): + # GH 14333 + gen = Generator(MT19937(0)) + msg = 'high < 0' if endpoint else 'high <= 0' + with pytest.raises(ValueError, match=msg): + gen.integers(high, endpoint=endpoint) + msg = 'low > high' if endpoint else 'low >= high' + with pytest.raises(ValueError, match=msg): + gen.integers(-1, high, endpoint=endpoint) + with pytest.raises(ValueError, match=msg): + gen.integers([-1], high, endpoint=endpoint) From 4572e12e199c2032c159121ae9afd12c3d3d5a5a Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Wed, 17 Mar 2021 
10:46:14 +0000 Subject: [PATCH 0759/1270] BUG: Use lop1p to improve numerical precision Use log1p(-x) instead of log(1 - x) Seperate legacy version from current closes #17020 --- .../core/include/numpy/random/distributions.h | 5 +- numpy/random/_examples/cython/setup.py | 15 +++--- numpy/random/include/legacy-distributions.h | 2 +- numpy/random/mtrand.pyx | 4 +- numpy/random/setup.py | 7 +-- .../random/src/distributions/distributions.c | 26 +++++----- .../random/src/legacy/legacy-distributions.c | 51 +++++++++++++++---- 7 files changed, 74 insertions(+), 36 deletions(-) diff --git a/numpy/core/include/numpy/random/distributions.h b/numpy/core/include/numpy/random/distributions.h index 3ffacc8f9ee0..c58024605ff5 100644 --- a/numpy/core/include/numpy/random/distributions.h +++ b/numpy/core/include/numpy/random/distributions.h @@ -123,8 +123,9 @@ DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial); -DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p); -DECLDIR RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p); +DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p); +DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p); DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a); DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample); diff --git a/numpy/random/_examples/cython/setup.py b/numpy/random/_examples/cython/setup.py index 83f06fde8581..7e0dd3e057a6 100644 --- a/numpy/random/_examples/cython/setup.py +++ b/numpy/random/_examples/cython/setup.py @@ -4,19 +4,20 @@ Usage: python setup.py build_ext -i """ +from distutils.core import setup +from os.path import dirname, join, abspath import numpy as np -from distutils.core import setup from Cython.Build import cythonize +from numpy.distutils.misc_util import get_info from setuptools.extension import Extension -from os.path import join, dirname path = dirname(__file__) src_dir = join(dirname(path), '..', 'src') defs = [('NPY_NO_DEPRECATED_API', 0)] inc_path = np.get_include() -# not so nice. 
We need the random/lib library from numpy -lib_path = join(np.get_include(), '..', '..', 'random', 'lib') +lib_path = [abspath(join(np.get_include(), '..', '..', 'random', 'lib'))] +lib_path += get_info('npymath')['library_dirs'] extending = Extension("extending", sources=[join('.', 'extending.pyx')], @@ -29,10 +30,10 @@ distributions = Extension("extending_distributions", sources=[join('.', 'extending_distributions.pyx')], include_dirs=[inc_path], - library_dirs=[lib_path], - libraries=['npyrandom'], + library_dirs=lib_path, + libraries=['npyrandom', 'npymath'], define_macros=defs, - ) + ) extensions = [extending, distributions] diff --git a/numpy/random/include/legacy-distributions.h b/numpy/random/include/legacy-distributions.h index f7ccd2cb5111..3d882b73ba00 100644 --- a/numpy/random/include/legacy-distributions.h +++ b/numpy/random/include/legacy-distributions.h @@ -39,7 +39,7 @@ extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, extern int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample); -extern int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p); +extern int64_t legacy_logseries(bitgen_t *bitgen_state, double p); extern int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam); extern int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a); extern int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p); diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 4e12f8e59264..d2fa7a0749e0 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -89,7 +89,7 @@ cdef extern from "include/legacy-distributions.h": int64_t n, binomial_t *binomial) nogil int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) nogil int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample) nogil - int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) nogil + int64_t legacy_logseries(bitgen_t *bitgen_state, double p) nogil int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) nogil int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) nogil int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) nogil @@ -3954,7 +3954,7 @@ cdef class RandomState: >>> plt.show() """ - out = disc(&legacy_random_logseries, &self._bitgen, size, self.lock, 1, 0, + out = disc(&legacy_logseries, &self._bitgen, size, self.lock, 1, 0, p, 'p', CONS_BOUNDED_0_1, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE) diff --git a/numpy/random/setup.py b/numpy/random/setup.py index bfd08e4691aa..dce9a101ebce 100644 --- a/numpy/random/setup.py +++ b/numpy/random/setup.py @@ -23,7 +23,7 @@ def generate_libraries(ext, build_dir): # enable unix large file support on 32 bit systems # (64 bit off_t, lseek -> lseek64 etc.) 
- if sys.platform[:3] == "aix": + if sys.platform[:3] == 'aix': defs = [('_LARGE_FILES', None)] else: defs = [('_FILE_OFFSET_BITS', '64'), @@ -116,7 +116,7 @@ def generate_libraries(ext, build_dir): # gen.pyx, src/distributions/distributions.c config.add_extension(gen, sources=[f'{gen}.c'], - libraries=EXTRA_LIBRARIES, + libraries=EXTRA_LIBRARIES + ['npymath'], extra_compile_args=EXTRA_COMPILE_ARGS, include_dirs=['.', 'src'], extra_link_args=EXTRA_LINK_ARGS, @@ -124,13 +124,14 @@ def generate_libraries(ext, build_dir): define_macros=defs, ) config.add_data_files('_bounded_integers.pxd') + mtrand_libs = ['m', 'npymath'] if os.name != 'nt' else ['npymath'] config.add_extension('mtrand', sources=['mtrand.c', 'src/legacy/legacy-distributions.c', 'src/distributions/distributions.c', ], include_dirs=['.', 'src', 'src/legacy'], - libraries=['m'] if os.name != 'nt' else [], + libraries=mtrand_libs, extra_compile_args=EXTRA_COMPILE_ARGS, extra_link_args=EXTRA_LINK_ARGS, depends=depends + ['mtrand.pyx'], diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 3df819d9da7e..6b4deb925ff5 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -906,15 +906,9 @@ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { } } -/* - * RAND_INT_TYPE is used to share integer generators with RandomState which - * used long in place of int64_t. If changing a distribution that uses - * RAND_INT_TYPE, then the original unmodified copy must be retained for - * use in RandomState by copying to the legacy distributions source file. - */ -RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p) { +int64_t random_logseries(bitgen_t *bitgen_state, double p) { double q, r, U, V; - RAND_INT_TYPE result; + int64_t result; r = npy_log1p(-p); @@ -926,7 +920,7 @@ RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p) { U = next_double(bitgen_state); q = 1.0 - exp(r * U); if (V <= q * q) { - result = (RAND_INT_TYPE)floor(1 + log(V) / log(q)); + result = (int64_t)floor(1 + log(V) / log(q)); if ((result < 1) || (V == 0.0)) { continue; } else { @@ -940,6 +934,14 @@ RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p) { } } +/* + * RAND_INT_TYPE is used to share integer generators with RandomState which + * used long in place of int64_t. If changing a distribution that uses + * RAND_INT_TYPE, then the original unmodified copy must be retained for + * use in RandomState by copying to the legacy distributions source file. 
+ */ + +/* Still used but both generator and mtrand via legacy_random_geometric */ RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p) { double U; RAND_INT_TYPE X; @@ -957,11 +959,11 @@ RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p) { return X; } -RAND_INT_TYPE random_geometric_inversion(bitgen_t *bitgen_state, double p) { - return (RAND_INT_TYPE)ceil(npy_log1p(-next_double(bitgen_state)) / npy_log1p(-p)); +int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) { + return (int64_t)ceil(npy_log1p(-next_double(bitgen_state)) / npy_log1p(-p)); } -RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p) { +int64_t random_geometric(bitgen_t *bitgen_state, double p) { if (p >= 0.333333333333333333333333) { return random_geometric_search(bitgen_state, p); } else { diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index 5b17401dd984..bfea15e40361 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -379,23 +379,28 @@ int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, } -int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) { - return (int64_t)random_logseries(bitgen_state, p); -} - - int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) { +int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) { return (int64_t)random_poisson(bitgen_state, lam); } - int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) { +int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) { return (int64_t)random_zipf(bitgen_state, a); } - int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) { - return (int64_t)random_geometric(bitgen_state, p); + +static long legacy_geometric_inversion(bitgen_t *bitgen_state, double p) { + return (long)ceil(npy_log1p(-next_double(bitgen_state)) / log(1 - p)); +} + +int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) { + if (p >= 0.333333333333333333333333) { + return (int64_t)random_geometric_search(bitgen_state, p); + } else { + return (int64_t)legacy_geometric_inversion(bitgen_state, p); + } } - void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, +void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, double *pix, npy_intp d, binomial_t *binomial) { return random_multinomial(bitgen_state, n, mnix, pix, d, binomial); @@ -457,4 +462,32 @@ double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { return mod; } +} + +int64_t legacy_logseries(bitgen_t *bitgen_state, double p) { + double q, r, U, V; + long result; + + r = log(1.0 - p); + + while (1) { + V = next_double(bitgen_state); + if (V >= p) { + return 1; + } + U = next_double(bitgen_state); + q = 1.0 - exp(r * U); + if (V <= q * q) { + result = (long)floor(1 + log(V) / log(q)); + if ((result < 1) || (V == 0.0)) { + continue; + } else { + return (int64_t)result; + } + } + if (V >= q) { + return 1; + } + return 2; + } } \ No newline at end of file From e7df4a4e2f0e21f110b177241f32c84548a95bd4 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 17 Mar 2021 09:51:26 -0600 Subject: [PATCH 0760/1270] STY: Break long line. 
--- numpy/random/tests/test_random.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 2b3b65c19d3c..8ec73519a782 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -529,7 +529,9 @@ def test_shuffle_no_object_unpacking(self, random, use_array_like): class MyArr(np.ndarray): pass - items = [None, np.array([3]), np.float64(3), np.array(10), np.float64(7)] + items = [None, np.array([3]), np.float64(3), np.array(10), + np.float64(7) + ] arr = np.array(items, dtype=object) item_ids = {id(i) for i in items} if use_array_like: From 14583921acf39ccf1f40ceda40af325adeba81ad Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 17 Mar 2021 09:58:08 -0600 Subject: [PATCH 0761/1270] STY: Fix hanging indentation. --- numpy/random/tests/test_random.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 8ec73519a782..6a584a511e1c 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -529,9 +529,9 @@ def test_shuffle_no_object_unpacking(self, random, use_array_like): class MyArr(np.ndarray): pass - items = [None, np.array([3]), np.float64(3), np.array(10), - np.float64(7) - ] + items = [ + None, np.array([3]), np.float64(3), np.array(10), np.float64(7) + ] arr = np.array(items, dtype=object) item_ids = {id(i) for i in items} if use_array_like: From 1258a0361886d21cca89d337fff8f431946ec66c Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 17 Mar 2021 19:03:08 -0500 Subject: [PATCH 0762/1270] TST: Add tests for datetime byteswaps and unicode byteswap casts It seemst he unicode didn't actually help coverage, but here we go. --- numpy/core/tests/test_casting_unittests.py | 22 ++++++++++++++++++++++ numpy/core/tests/test_datetime.py | 18 ++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py index 674583d471cc..45d58a23a270 100644 --- a/numpy/core/tests/test_casting_unittests.py +++ b/numpy/core/tests/test_casting_unittests.py @@ -619,6 +619,28 @@ def test_string_to_string_cancast(self, other_dt, string_char): elif change_length > 0: assert safety == Casting.safe + @pytest.mark.parametrize("order1", [">", "<"]) + @pytest.mark.parametrize("order2", [">", "<"]) + def test_unicode_byteswapped_cast(self, order1, order2): + # Very specific tests (not using the castingimpl directly) + # that tests unicode bytedwaps including for unaligned array data. + dtype1 = np.dtype(f"{order1}U30") + dtype2 = np.dtype(f"{order2}U30") + data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[None, 1:].view(dtype1) + data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[None, 1:].view(dtype2) + if dtype1.alignment != 1: + # alignment should always be >1, but skip the check if not + assert not data1.flags.aligned + assert not data2.flags.aligned + + element = "this is a ünicode string‽" + data1[0] = element + # Test both `data1` and `data1.copy()` (which should be aligned) + for data in [data1, data1.copy()]: + data2[...] 
= data1 + assert data2[0] == element + assert data2.copy()[0] == element + def test_void_to_string_special_case(self): # Cover a small special case in void to string casting that could # probably just as well be turned into an error (compare diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 99885da47db1..d2c04a506df9 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -690,10 +690,28 @@ def test_datetime_string_conversion(self): def test_time_byteswapping(self, time_dtype): times = np.array(["2017", "NaT"], dtype=time_dtype) times_swapped = times.astype(times.dtype.newbyteorder()) + assert_array_equal(times, times_swapped) unswapped = times_swapped.view(np.int64).newbyteorder() assert_array_equal(unswapped, times.view(np.int64)) + @pytest.mark.parametrize(["time1", "time2"], + [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")]) + def test_time_byteswapped_cast(self, time1, time2): + dtype1 = np.dtype(time1) + dtype2 = np.dtype(time2) + times = np.array(["2017", "NaT"], dtype=dtype1) + expected = times.astype(dtype2) + + # Test that every byte-swapping combination also returns the same + # results (previous tests check that this comparison works fine). + res1 = times.astype(dtype1.newbyteorder()).astype(dtype2) + assert_array_equal(res1, expected) + res2 = times.astype(dtype2.newbyteorder()) + assert_array_equal(res2, expected) + res3 = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder()) + assert_array_equal(res3, expected) + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) @pytest.mark.parametrize("str_dtype", ["U", "S"]) def test_datetime_conversions_byteorders(self, str_dtype, time_dtype): From b32198f058ebb5b81f9818fbaf2ad832bea16dba Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 17 Mar 2021 19:21:56 -0500 Subject: [PATCH 0763/1270] TST: Expend write-mask buffered nditer tests for high dim arrays This is utterly confusing, but adds coverage to another uncovered pass. I honestly do not really understand right now, why "writemasked" and "buffered" combination can bypass the buffering here. I guess there some special logic about that in the `where=` ufunc code that uses the masked versions here? --- numpy/core/tests/test_nditer.py | 43 +++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 411095199c3c..ca67ac074d70 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2705,9 +2705,25 @@ def test_iter_writemasked_badinput(): op_dtypes=['f4', None], casting='same_kind') -def test_iter_writemasked(): - a = np.zeros((3,), dtype='f8') - msk = np.array([True, True, False]) +def _is_buffered(iterator): + try: + iterator.itviews + except ValueError: + return True + return False + +@pytest.mark.parametrize("a", + [np.zeros((3,), dtype='f8'), + np.zeros((9876, 3*5), dtype='f8')[::2, :], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :]]) +def test_iter_writemasked(a): + # Note, the slicing above is to ensure that nditer cannot combine multiple + # axes into one. The repetition is just to make things a bit more + # interesting. + shape = a.shape + reps = shape[-1] // 3 + msk = np.empty(shape, dtype=bool) + msk[...] = [True, True, False] * reps # When buffering is unused, 'writemasked' effectively does nothing. # It's up to the user of the iterator to obey the requested semantics. 
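For readers unfamiliar with the flags exercised by these tests, a minimal
self-contained sketch of the 'writemasked'/'arraymask' pattern (illustrative
only, not part of the patch; it forces buffering through a dtype cast so that
the mask is honoured on write-back):

    import numpy as np

    a = np.zeros(3, dtype='f8')
    msk = np.array([True, True, False])
    # The 'f4' op_dtype forces a cast, so the iterator must buffer; only the
    # elements selected by the 'arraymask' operand are copied back into `a`.
    it = np.nditer([a, msk], ['buffered'],
                   [['readwrite', 'writemasked'],
                    ['readonly', 'arraymask']],
                   op_dtypes=['f4', None], casting='same_kind')
    with it:
        for x, m in it:
            x[...] = 3
    # a is now [3., 3., 0.]: the masked-off element kept its original value.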
@@ -2718,18 +2734,31 @@ def test_iter_writemasked():
         for x, m in it:
             x[...] = 1
     # Because we violated the semantics, all the values became 1
-    assert_equal(a, [1, 1, 1])
+    assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape))
 
     # Even if buffering is enabled, we still may be accessing the array
     # directly.
     it = np.nditer([a, msk], ['buffered'],
                    [['readwrite', 'writemasked'],
                     ['readonly', 'arraymask']])
+    # @seberg: I honestly don't currently understand why a "buffered" iterator
+    # would end up not using a buffer for the small array here at least when
+    # "writemasked" is used, that seems confusing... Check by testing for
+    # actual memory overlap!
+    is_buffered = True
     with it:
         for x, m in it:
             x[...] = 2.5
-    # Because we violated the semantics, all the values became 2.5
-    assert_equal(a, [2.5, 2.5, 2.5])
+            if np.may_share_memory(x, a):
+                is_buffered = False
+
+    if not is_buffered:
+        # Because we violated the semantics, all the values became 2.5
+        assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape))
+    else:
+        # For large sizes, the iterator may be buffered:
+        assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape))
+        a[...] = 2.5
 
     # If buffering will definitely happening, for instance because of
     # a cast, only the items selected by the mask will be copied back from
@@ -2744,7 +2773,7 @@ def test_iter_writemasked():
             x[...] = 3
     # Even though we violated the semantics, only the selected values
     # were copied back
-    assert_equal(a, [3, 3, 2.5])
+    assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape))
 
 def test_iter_writemasked_decref():
     # force casting (to make it interesting) by using a structured dtype.

From f01b7f1c256bc5d1c8ea54d7aa8e9f9beb64c14c Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Sat, 5 Dec 2020 16:13:28 -0600
Subject: [PATCH 0764/1270] ENH: Add vectorcall/fastcall argument parsing
 capabilities

This is a fast argument parser (an original version also supported
dictionary unpacking for the args, kwargs call style) which supports
the new FASTCALL and VECTORCALL conventions of CPython.
Fastcall is supported by all Python versions we support.

This allows us to drastically reduce the overhead of methods when
keyword arguments are passed.
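A rough way to observe the overhead being targeted, from the Python side (an
illustrative benchmark sketch only; the particular methods and the
timeit-based measurement are chosen here as examples and are not part of the
patch):

    import timeit
    import numpy as np

    a = np.arange(6.0)
    # Methods converted to METH_FASTCALL parse keyword arguments through the
    # cached, interned-string path instead of PyArg_ParseTupleAndKeywords, so
    # repeated keyword-argument calls like these avoid per-call string
    # handling and argument tuple/dict packing.
    print(timeit.timeit(lambda: a.astype(np.float32, copy=False), number=100_000))
    print(timeit.timeit(lambda: a.argmax(axis=0), number=100_000))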
--- numpy/core/setup.py | 2 + numpy/core/src/common/npy_argparse.c | 421 +++++++++++++++++++++++++++ numpy/core/src/common/npy_argparse.h | 96 ++++++ 3 files changed, 519 insertions(+) create mode 100644 numpy/core/src/common/npy_argparse.c create mode 100644 numpy/core/src/common/npy_argparse.h diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 822f9f580d44..514ec1f23516 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -731,6 +731,7 @@ def get_mathlib_info(*args): join('src', 'common', 'cblasfuncs.h'), join('src', 'common', 'lowlevel_strided_loops.h'), join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_argparse.h'), join('src', 'common', 'npy_cblas.h'), join('src', 'common', 'npy_config.h'), join('src', 'common', 'npy_ctypes.h'), @@ -749,6 +750,7 @@ def get_mathlib_info(*args): common_src = [ join('src', 'common', 'array_assign.c'), join('src', 'common', 'mem_overlap.c'), + join('src', 'common', 'npy_argparse.c'), join('src', 'common', 'npy_longdouble.c'), join('src', 'common', 'templ_common.h.src'), join('src', 'common', 'ucsnarrow.c'), diff --git a/numpy/core/src/common/npy_argparse.c b/numpy/core/src/common/npy_argparse.c new file mode 100644 index 000000000000..3df780990422 --- /dev/null +++ b/numpy/core/src/common/npy_argparse.c @@ -0,0 +1,421 @@ +#include "Python.h" + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include "numpy/ndarraytypes.h" +#include "npy_argparse.h" +#include "npy_pycompat.h" +#include "npy_import.h" + +#include "arrayfunction_override.h" + + +/** + * Small wrapper converting to array just like CPython does. + * + * We could use our own PyArray_PyIntAsInt function, but it handles floats + * differently. + * A disadvantage of this function compared to ``PyArg_*("i")`` code is that + * it will not say which parameter is wrong. + * + * @param obj The python object to convert + * @param value The output value + * + * @returns 0 on failure and 1 on success (`NPY_FAIL`, `NPY_SUCCEED`) + */ +NPY_NO_EXPORT int +PyArray_PythonPyIntFromInt(PyObject *obj, int *value) +{ + /* Pythons behaviour is to check only for float explicitly... */ + if (NPY_UNLIKELY(PyFloat_Check(obj))) { + PyErr_SetString(PyExc_TypeError, + "integer argument expected, got float"); + return NPY_FAIL; + } + + long result = PyLong_AsLong(obj); + if (NPY_UNLIKELY((result == -1) && PyErr_Occurred())) { + return NPY_FAIL; + } + if (NPY_UNLIKELY((result > INT_MAX) || (result < INT_MIN))) { + PyErr_SetString(PyExc_OverflowError, + "Python int too large to convert to C int"); + return NPY_FAIL; + } + else { + *value = (int)result; + return NPY_SUCCEED; + } +} + + +typedef int convert(PyObject *, void *); + +/** + * Internal function to initialize keyword argument parsing. + * + * This does a few simple jobs: + * + * * Check the input for consistency to find coding errors, for example + * a parameter not marked with | after one marked with | (optional). + * 2. Find the number of positional-only arguments, the number of + * total, required, and keyword arguments. + * 3. Intern all keyword arguments strings to allow fast, identity based + * parsing and avoid string creation overhead on each call. + * + * @param funcname Name of the function, mainly used for errors. 
+ * @param cache A cache object stored statically in the parsing function + * @param va_orig Argument list to npy_parse_arguments + * @return 0 on success, -1 on failure + */ +static int +initialize_keywords(const char *funcname, + _NpyArgParserCache *cache, va_list va_orig) { + va_list va; + int nargs = 0; + int nkwargs = 0; + int npositional_only = 0; + int nrequired = 0; + int npositional = 0; + char state = '\0'; + + va_copy(va, va_orig); + while (1) { + /* Count length first: */ + char *name = va_arg(va, char *); + convert *converter = va_arg(va, convert *); + void *data = va_arg(va, void *); + + /* Check if this is the sentinel, only converter may be NULL */ + if ((name == NULL) && (converter == NULL) && (data == NULL)) { + break; + } + + if (name == NULL) { + PyErr_Format(PyExc_SystemError, + "NumPy internal error: name is NULL in %s() at " + "argument %d.", funcname, nargs); + va_end(va); + return -1; + } + if (data == NULL) { + PyErr_Format(PyExc_SystemError, + "NumPy internal error: data is NULL in %s() at " + "argument %d.", funcname, nargs); + va_end(va); + return -1; + } + + nargs += 1; + if (*name == '|') { + if (state == '$') { + PyErr_Format(PyExc_SystemError, + "NumPy internal error: positional argument `|` " + "after keyword only `$` one to %s() at argument %d.", + funcname, nargs); + va_end(va); + return -1; + } + state = '|'; + name++; /* advance to actual name. */ + npositional += 1; + } + else if (*name == '$') { + state = '$'; + name++; /* advance to actual name. */ + } + else { + if (state != '\0') { + PyErr_Format(PyExc_SystemError, + "NumPy internal error: non-required argument after " + "required | or $ one to %s() at argument %d.", + funcname, nargs); + va_end(va); + return -1; + } + + nrequired += 1; + npositional += 1; + } + + if (*name == '\0') { + /* Empty string signals positional only */ + if (state != '\0') { + PyErr_Format(PyExc_SystemError, + "NumPy internal error: non-kwarg marked with | or $ " + "to %s() at argument %d.", funcname, nargs); + va_end(va); + return -1; + } + npositional_only += 1; + } + else { + nkwargs += 1; + } + } + va_end(va); + + if (npositional == -1) { + npositional = nargs; + } + + if (nargs > _NPY_MAX_KWARGS) { + PyErr_Format(PyExc_SystemError, + "NumPy internal error: function %s() has %d arguments, but " + "the maximum is currently limited to %d for easier parsing; " + "it can be increased by modifying `_NPY_MAX_KWARGS`.", + funcname, nargs, _NPY_MAX_KWARGS); + return -1; + } + + /* + * Do any necessary string allocation and interning, + * creating a caching object. + */ + cache->nargs = nargs; + cache->npositional_only = npositional_only; + cache->npositional = npositional; + cache->nrequired = nrequired; + + /* NULL kw_strings for easier cleanup (and NULL termination) */ + memset(cache->kw_strings, 0, sizeof(PyObject *) * (nkwargs + 1)); + + va_copy(va, va_orig); + for (int i = 0; i < nargs; i++) { + /* Advance through non-kwargs, which do not require setup. 
*/ + char *name = va_arg(va, char *); + va_arg(va, convert *); + va_arg(va, void *); + + if (*name == '|' || *name == '$') { + name++; /* ignore | and $ */ + } + if (i >= npositional_only) { + int i_kwarg = i - npositional_only; + cache->kw_strings[i_kwarg] = PyUString_InternFromString(name); + if (cache->kw_strings[i_kwarg] == NULL) { + va_end(va); + goto error; + } + } + } + + va_end(va); + return 0; + +error: + for (int i = 0; i < nkwargs; i++) { + Py_XDECREF(cache->kw_strings[i]); + } + cache->npositional = -1; /* not initialized */ + return -1; +} + + +static int +raise_incorrect_number_of_positional_args(const char *funcname, + const _NpyArgParserCache *cache, Py_ssize_t len_args) +{ + if (cache->npositional == cache->nrequired) { + PyErr_Format(PyExc_TypeError, + "%s() takes %d positional arguments but %zd were given", + funcname, cache->npositional, len_args); + } + else { + PyErr_Format(PyExc_TypeError, + "%s() takes from %d to %d positional arguments but " + "%zd were given", + funcname, cache->nrequired, cache->npositional, len_args); + } + return -1; +} + +static void +raise_missing_argument(const char *funcname, + const _NpyArgParserCache *cache, int i) +{ + if (i < cache->npositional_only) { + PyErr_Format(PyExc_TypeError, + "%s() missing required positional argument %d", + funcname, i); + } + else { + PyObject *kw = cache->kw_strings[i - cache->npositional_only]; + PyErr_Format(PyExc_TypeError, + "%s() missing required argument '%S' (pos %d)", + funcname, kw, i); + } +} + + +/** + * Generic helper for argument parsing + * + * See macro version for an example pattern of how to use this function. + * + * @param funcname + * @param cache + * @param args Python passed args (METH_FASTCALL) + * @param len_args + * @param kwnames + * @param ... List of arguments (see macro version). + * + * @return Returns 0 on success and -1 on failure. + */ +NPY_NO_EXPORT int +_npy_parse_arguments(const char *funcname, + /* cache_ptr is a NULL initialized persistent storage for data */ + _NpyArgParserCache *cache, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, + /* ... is NULL, NULL, NULL terminated: name, converter, value */ + ...) +{ + if (NPY_UNLIKELY(cache->npositional == -1)) { + va_list va; + va_start(va, kwnames); + + int res = initialize_keywords(funcname, cache, va); + va_end(va); + if (res < 0) { + return -1; + } + } + + if (NPY_UNLIKELY(len_args > cache->npositional)) { + return raise_incorrect_number_of_positional_args( + funcname, cache, len_args); + } + + /* NOTE: Could remove the limit but too many kwargs are slow anyway. */ + PyObject *all_arguments[NPY_MAXARGS]; + + for (Py_ssize_t i = 0; i < len_args; i++) { + all_arguments[i] = args[i]; + } + + /* Without kwargs, do not iterate all converters. 
*/ + int max_nargs = (int)len_args; + Py_ssize_t len_kwargs = 0; + + /* If there are any kwargs, first handle them */ + if (NPY_LIKELY(kwnames != NULL)) { + len_kwargs = PyTuple_GET_SIZE(kwnames); + max_nargs = cache->nargs; + + for (int i = len_args; i < cache->nargs; i++) { + all_arguments[i] = NULL; + } + + for (Py_ssize_t i = 0; i < len_kwargs; i++) { + PyObject *key = PyTuple_GET_ITEM(kwnames, i); + PyObject *value = args[i + len_args]; + PyObject *const *name; + + /* Super-fast path, check identity: */ + for (name = cache->kw_strings; *name != NULL; name++) { + if (*name == key) { + break; + } + } + if (NPY_UNLIKELY(*name == NULL)) { + /* Slow fallback, if identity checks failed for some reason */ + for (name = cache->kw_strings; *name != NULL; name++) { + int eq = PyObject_RichCompareBool(*name, key, Py_EQ); + if (eq == -1) { + return -1; + } + else if (eq) { + break; + } + } + if (NPY_UNLIKELY(*name == NULL)) { + /* Invalid keyword argument. */ + PyErr_Format(PyExc_TypeError, + "%s() got an unexpected keyword argument '%S'", + funcname, key); + return -1; + } + } + + ssize_t param_pos = ( + (name - cache->kw_strings) + cache->npositional_only); + + /* There could be an identical positional argument */ + if (NPY_UNLIKELY(all_arguments[param_pos] != NULL)) { + PyErr_Format(PyExc_TypeError, + "argument for %s() given by name ('%S') and position " + "(position %zd)", funcname, key, param_pos); + return -1; + } + + all_arguments[param_pos] = value; + } + } + + /* + * There cannot be too many args, too many kwargs would find an + * incorrect one above. + */ + assert(len_args + len_kwargs <= cache->nargs); + + /* At this time `all_arguments` holds either NULLs or the objects */ + va_list va; + va_start(va, kwnames); + + for (int i = 0; i < max_nargs; i++) { + va_arg(va, char *); + convert *converter = va_arg(va, convert *); + void *data = va_arg(va, void *); + + if (all_arguments[i] == NULL) { + continue; + } + + int res; + if (converter == NULL) { + *((PyObject **) data) = all_arguments[i]; + continue; + } + res = converter(all_arguments[i], data); + + if (NPY_UNLIKELY(res == NPY_SUCCEED)) { + continue; + } + else if (NPY_UNLIKELY(res == NPY_FAIL)) { + /* It is usually the users responsibility to clean up. */ + goto converting_failed; + } + else if (NPY_UNLIKELY(res == Py_CLEANUP_SUPPORTED)) { + /* TODO: Implementing cleanup if/when needed should not be hard */ + PyErr_Format(PyExc_SystemError, + "converter cleanup of parameter %d to %s() not supported.", + i, funcname); + goto converting_failed; + } + assert(0); + } + + /* Required arguments are typically not passed as keyword arguments */ + if (NPY_UNLIKELY(len_args < cache->nrequired)) { + /* (PyArg_* also does this after the actual parsing is finished) */ + if (NPY_UNLIKELY(max_nargs < cache->nrequired)) { + raise_missing_argument(funcname, cache, max_nargs); + goto converting_failed; + } + for (int i = 0; i < cache->nrequired; i++) { + if (NPY_UNLIKELY(all_arguments[i] == NULL)) { + raise_missing_argument(funcname, cache, i); + goto converting_failed; + } + } + } + + va_end(va); + return 0; + +converting_failed: + va_end(va); + return -1; + +} diff --git a/numpy/core/src/common/npy_argparse.h b/numpy/core/src/common/npy_argparse.h new file mode 100644 index 000000000000..5da535c9171f --- /dev/null +++ b/numpy/core/src/common/npy_argparse.h @@ -0,0 +1,96 @@ +#ifndef _NPY_ARGPARSE_H +#define _NPY_ARGPARSE_H + +#include "Python.h" +#include "numpy/ndarraytypes.h" + +/* + * This file defines macros to help with keyword argument parsing. 
+ * This solves two issues as of now: + * 1. Pythons C-API PyArg_* keyword argument parsers are slow, due to + * not caching the strings they use. + * 2. It allows the use of METH_ARGPARSE (and `tp_vectorcall`) + * when available in Python, which removes a large chunk of overhead. + * + * Internally CPython achieves similar things by using a code generator + * argument clinic. NumPy may well decide to use argument clinic or a different + * solution in the future. + */ + +NPY_NO_EXPORT int +PyArray_PythonPyIntFromInt(PyObject *obj, int *value); + + +#define _NPY_MAX_KWARGS 15 + +typedef struct { + int npositional; + int nargs; + int npositional_only; + int nrequired; + /* Null terminated list of keyword argument name strings */ + PyObject *kw_strings[_NPY_MAX_KWARGS+1]; +} _NpyArgParserCache; + + +/* + * The sole purpose of this macro is to hide the argument parsing cache. + * Since this cache must be static, this also removes a source of error. + */ +#define NPY_PREPARE_ARGPARSER static _NpyArgParserCache __argparse_cache = {-1} + +/** + * Macro to help with argument parsing. + * + * The pattern for using this macro is by defining the method as: + * + * @code + * static PyObject * + * my_method(PyObject *self, + * PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) + * { + * NPY_PREPARE_ARGPARSER; + * + * PyObject *argument1, *argument3; + * int argument2 = -1; + * if (npy_parse_arguments("method", args, len_args, kwnames), + * "argument1", NULL, &argument1, + * "|argument2", &PyArray_PythonPyIntFromInt, &argument2, + * "$argument3", NULL, &argument3, + * NULL, NULL, NULL) < 0) { + * return NULL; + * } + * } + * @endcode + * + * The `NPY_PREPARE_ARGPARSER` macro sets up a static cache variable necessary + * to hold data for speeding up the parsing. `npy_parse_arguments` must be + * used in cunjunction with the macro defined in the same scope. + * (No two `npy_parse_arguments` may share a single `NPY_PREPARE_ARGPARSER`.) + * + * @param funcname + * @param args Python passed args (METH_FASTCALL) + * @param len_args Number of arguments (not flagged) + * @param kwnames Tuple as passed by METH_FASTCALL or NULL. + * @param ... List of arguments must be param1_name, param1_converter, + * *param1_outvalue, param2_name, ..., NULL, NULL, NULL. + * Where name is ``char *``, ``converter`` a python converter + * function or NULL and ``outvalue`` is the ``void *`` passed to + * the converter (holding the converted data or a borrowed + * reference if converter is NULL). + * + * @return Returns 0 on success and -1 on failure. + */ +NPY_NO_EXPORT int +_npy_parse_arguments(const char *funcname, + /* cache_ptr is a NULL initialized persistent storage for data */ + _NpyArgParserCache *cache_ptr, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, + /* va_list is NULL, NULL, NULL terminated: name, converter, value */ + ...) NPY_GCC_NONNULL(1); + +#define npy_parse_arguments(funcname, args, len_args, kwnames, ...) 
\ + _npy_parse_arguments(funcname, &__argparse_cache, \ + args, len_args, kwnames, __VA_ARGS__) + +#endif /* _NPY_ARGPARSE_H */ From f473d571f58c78f89979138c8cc79fcf5db05f42 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 17 Mar 2021 20:41:49 -0500 Subject: [PATCH 0765/1270] TST: Add specific argument parsing tests --- numpy/core/setup.py | 4 +- .../src/multiarray/_multiarray_tests.c.src | 23 +++++++ numpy/core/tests/test_argparse.py | 62 +++++++++++++++++++ 3 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 numpy/core/tests/test_argparse.py diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 514ec1f23516..8c34a3286d72 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -716,8 +716,10 @@ def get_mathlib_info(*args): config.add_extension('_multiarray_tests', sources=[join('src', 'multiarray', '_multiarray_tests.c.src'), - join('src', 'common', 'mem_overlap.c')], + join('src', 'common', 'mem_overlap.c'), + join('src', 'common', 'npy_argparse.c')], depends=[join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_argparse.h'), join('src', 'common', 'npy_extint128.h')], libraries=['npymath']) diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src index 3c8caefcef1f..febcc8512750 100644 --- a/numpy/core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/core/src/multiarray/_multiarray_tests.c.src @@ -7,6 +7,7 @@ #include "numpy/npy_math.h" #include "numpy/halffloat.h" #include "common.h" +#include "npy_argparse.h" #include "mem_overlap.h" #include "npy_extint128.h" #include "array_method.h" @@ -19,6 +20,25 @@ #define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) + +static PyObject * +argparse_example_function(PyObject *NPY_UNUSED(mod), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + NPY_PREPARE_ARGPARSER; + int arg1; + PyObject *arg2, *arg3, *arg4; + if (npy_parse_arguments("func", args, len_args, kwnames, + "", &PyArray_PythonPyIntFromInt, &arg1, + "arg2", NULL, &arg2, + "|arg3", NULL, &arg3, + "$arg3", NULL, &arg4, + NULL, NULL, NULL) < 0) { + return NULL; + } + Py_RETURN_NONE; +} + /* test PyArray_IsPythonScalar, before including private py3 compat header */ static PyObject * IsPythonScalar(PyObject * dummy, PyObject *args) @@ -2255,6 +2275,9 @@ run_intp_converter(PyObject* NPY_UNUSED(self), PyObject *args) } static PyMethodDef Multiarray_TestsMethods[] = { + {"argparse_example_function", + (PyCFunction)argparse_example_function, + METH_KEYWORDS | METH_FASTCALL, NULL}, {"IsPythonScalar", IsPythonScalar, METH_VARARGS, NULL}, diff --git a/numpy/core/tests/test_argparse.py b/numpy/core/tests/test_argparse.py new file mode 100644 index 000000000000..63a01dee404f --- /dev/null +++ b/numpy/core/tests/test_argparse.py @@ -0,0 +1,62 @@ +""" +Tests for the private NumPy argument parsing functionality. +They mainly exists to ensure good test coverage without having to try the +weirder cases on actual numpy functions but test them in one place. + +The test function is defined in C to be equivalent to (errors may not always +match exactly, and could be adjusted): + + def func(arg1, /, arg2, *, arg3): + i = integer(arg1) # reproducing the 'i' parsing in Python. + return None +""" + +import pytest + +import numpy as np +from numpy.core._multiarray_tests import argparse_example_function as func + + +def test_invalid_integers(): + with pytest.raises(TypeError, + match="integer argument expected, got float"): + func(1.) 
+ with pytest.raises(OverflowError): + func(2**100) + + +def test_missing_arguments(): + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func() + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func(arg2=1, arg3=4) + with pytest.raises(TypeError, + match=r"missing required argument \'arg2\' \(pos 1\)"): + func(1, arg3=5) + + +def test_too_many_positional(): + # the second argument is positional but can be passed as keyword. + with pytest.raises(TypeError, + match="takes from 2 to 3 positional arguments but 4 were given"): + func(1, 2, 3, 4) + + +def test_multiple_values(): + with pytest.raises(TypeError, + match=r"given by name \('arg2'\) and position \(position 1\)"): + func(1, 2, arg2=3) + + +def test_string_fallbacks(): + # We can (currently?) use numpy strings to test the "slow" fallbacks + # that should normally not be taken due to string interning. + arg2 = np.unicode_("arg2") + missing_arg = np.unicode_("missing_arg") + func(1, **{arg2: 3}) + with pytest.raises(TypeError, + match="got an unexpected keyword argument 'missing_arg'"): + func(2, **{missing_arg: 3}) + From 9a4533246596919315095ff02a982d7c94281796 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 5 Dec 2020 16:15:41 -0600 Subject: [PATCH 0766/1270] ENH: Use fast parsing for methods and undispatched functions Array methods are the easiest target for the new parser. They do not require any larger improvements and most functions in multiarray are wrapped. Similarly the printing functions are not dispatched through `__array_function__` and thus have the most to gain. --- numpy/core/src/multiarray/methods.c | 254 +++++++++++-------- numpy/core/src/multiarray/multiarraymodule.c | 167 ++++++------ 2 files changed, 238 insertions(+), 183 deletions(-) diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 04ce53ed73d5..ff5a5d8bcab9 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -10,6 +10,7 @@ #include "numpy/arrayscalars.h" #include "arrayfunction_override.h" +#include "npy_argparse.h" #include "npy_config.h" #include "npy_pycompat.h" #include "npy_import.h" @@ -103,20 +104,23 @@ forward_ndarray_method(PyArrayObject *self, PyObject *args, PyObject *kwds, static PyObject * -array_take(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_take(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int dimension = NPY_MAXDIMS; PyObject *indices; PyArrayObject *out = NULL; NPY_CLIPMODE mode = NPY_RAISE; - static char *kwlist[] = {"indices", "axis", "out", "mode", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O&:take", kwlist, - &indices, - PyArray_AxisConverter, &dimension, - PyArray_OutputConverter, &out, - PyArray_ClipmodeConverter, &mode)) + if (npy_parse_arguments("take", args, len_args, kwnames, + "indices", NULL, &indices, + "|axis", &PyArray_AxisConverter, &dimension, + "|out", &PyArray_OutputConverter, &out, + "|mode", &PyArray_ClipmodeConverter, &mode, + NULL, NULL, NULL) < 0) { return NULL; + } PyObject *ret = PyArray_TakeFrom(self, indices, dimension, out, mode); @@ -199,14 +203,16 @@ array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds) } static PyObject * -array_squeeze(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_squeeze(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *axis_in = NULL; npy_bool 
axis_flags[NPY_MAXDIMS]; + NPY_PREPARE_ARGPARSER; - static char *kwlist[] = {"axis", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O:squeeze", kwlist, - &axis_in)) { + if (npy_parse_arguments("squeeze", args, len_args, kwnames, + "|axis", NULL, &axis_in, + NULL, NULL, NULL) < 0) { return NULL; } @@ -224,16 +230,18 @@ array_squeeze(PyArrayObject *self, PyObject *args, PyObject *kwds) } static PyObject * -array_view(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_view(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *out_dtype = NULL; PyObject *out_type = NULL; PyArray_Descr *dtype = NULL; + NPY_PREPARE_ARGPARSER; - static char *kwlist[] = {"dtype", "type", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO:view", kwlist, - &out_dtype, - &out_type)) { + if (npy_parse_arguments("view", args, len_args, kwnames, + "|dtype", NULL, &out_dtype, + "|type", NULL, &out_type, + NULL, NULL, NULL) < 0) { return NULL; } @@ -271,16 +279,19 @@ array_view(PyArrayObject *self, PyObject *args, PyObject *kwds) } static PyObject * -array_argmax(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_argmax(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis = NPY_MAXDIMS; PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&:argmax", kwlist, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) + if (npy_parse_arguments("argmax", args, len_args, kwnames, + "|axis", &PyArray_AxisConverter, &axis, + "|out", &PyArray_OutputConverter, &out, + NULL, NULL, NULL) < 0) { return NULL; + } PyObject *ret = PyArray_ArgMax(self, axis, out); @@ -294,16 +305,19 @@ array_argmax(PyArrayObject *self, PyObject *args, PyObject *kwds) } static PyObject * -array_argmin(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_argmin(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis = NPY_MAXDIMS; PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&:argmin", kwlist, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) + if (npy_parse_arguments("argmin", args, len_args, kwnames, + "|axis", &PyArray_AxisConverter, &axis, + "|out", &PyArray_OutputConverter, &out, + NULL, NULL, NULL) < 0) { return NULL; + } PyObject *ret = PyArray_ArgMin(self, axis, out); @@ -804,10 +818,9 @@ array_setscalar(PyArrayObject *self, PyObject *args) static PyObject * -array_astype(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_astype(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - static char *kwlist[] = {"dtype", "order", "casting", - "subok", "copy", NULL}; PyArray_Descr *dtype = NULL; /* * TODO: UNSAFE default for compatibility, I think @@ -816,13 +829,15 @@ array_astype(PyArrayObject *self, PyObject *args, PyObject *kwds) NPY_CASTING casting = NPY_UNSAFE_CASTING; NPY_ORDER order = NPY_KEEPORDER; int forcecopy = 1, subok = 1; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&ii:astype", kwlist, - PyArray_DescrConverter, &dtype, - PyArray_OrderConverter, &order, - PyArray_CastingConverter, &casting, - &subok, - &forcecopy)) { + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments("astype", args, len_args, kwnames, + "dtype", &PyArray_DescrConverter, &dtype, + "|order", &PyArray_OrderConverter, &order, 
+ "|casting", &PyArray_CastingConverter, &casting, + "|subok", &PyArray_PythonPyIntFromInt, &subok, + "|copy", &PyArray_PythonPyIntFromInt, &forcecopy, + NULL, NULL, NULL) < 0) { Py_XDECREF(dtype); return NULL; } @@ -1143,13 +1158,15 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw } static PyObject * -array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_copy(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { NPY_ORDER order = NPY_CORDER; - static char *kwlist[] = {"order", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:copy", kwlist, - PyArray_OrderConverter, &order)) { + if (npy_parse_arguments("copy", args, len_args, kwnames, + "|order", PyArray_OrderConverter, &order, + NULL, NULL, NULL) < 0) { return NULL; } @@ -1257,7 +1274,8 @@ array_choose(PyArrayObject *self, PyObject *args, PyObject *kwds) } static PyObject * -array_sort(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_sort(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis=-1; int val; @@ -1265,12 +1283,13 @@ array_sort(PyArrayObject *self, PyObject *args, PyObject *kwds) PyObject *order = NULL; PyArray_Descr *saved = NULL; PyArray_Descr *newd; - static char *kwlist[] = {"axis", "kind", "order", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&O:sort", kwlist, - &axis, - PyArray_SortkindConverter, &sortkind, - &order)) { + if (npy_parse_arguments("sort", args, len_args, kwnames, + "|axis", &PyArray_PythonPyIntFromInt, &axis, + "|kind", &PyArray_SortkindConverter, &sortkind, + "|order", NULL, &order, + NULL, NULL, NULL) < 0) { return NULL; } if (order == Py_None) { @@ -1313,7 +1332,8 @@ array_sort(PyArrayObject *self, PyObject *args, PyObject *kwds) } static PyObject * -array_partition(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_partition(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis=-1; int val; @@ -1321,16 +1341,16 @@ array_partition(PyArrayObject *self, PyObject *args, PyObject *kwds) PyObject *order = NULL; PyArray_Descr *saved = NULL; PyArray_Descr *newd; - static char *kwlist[] = {"kth", "axis", "kind", "order", NULL}; PyArrayObject * ktharray; PyObject * kthobj; + NPY_PREPARE_ARGPARSER; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iO&O:partition", kwlist, - &kthobj, - &axis, - PyArray_SelectkindConverter, &sortkind, - &order)) { + if (npy_parse_arguments("partition", args, len_args, kwnames, + "kth", NULL, &kthobj, + "|axis", &PyArray_PythonPyIntFromInt, &axis, + "|kind", &PyArray_SelectkindConverter, &sortkind, + "|order", NULL, &order, + NULL, NULL, NULL) < 0) { return NULL; } @@ -1381,18 +1401,20 @@ array_partition(PyArrayObject *self, PyObject *args, PyObject *kwds) } static PyObject * -array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_argsort(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis = -1; NPY_SORTKIND sortkind = NPY_QUICKSORT; PyObject *order = NULL, *res; PyArray_Descr *newd, *saved=NULL; - static char *kwlist[] = {"axis", "kind", "order", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O:argsort", kwlist, - PyArray_AxisConverter, &axis, - PyArray_SortkindConverter, &sortkind, - &order)) { + if (npy_parse_arguments("argsort", args, len_args, kwnames, + "|axis", &PyArray_AxisConverter, &axis, + "|kind", 
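The same pattern applies to ``sort`` and ``partition`` above: ``kth`` carries no ``|`` prefix and is therefore required, while ``axis``, ``kind`` and ``order`` remain optional. A short illustration, outside the diff, of the calls the new parsers accept::

    import numpy as np

    a = np.array([5, 2, 4, 1, 3])
    a.sort(kind='stable')                        # axis, kind, order all optional
    print(a)                                     # [1 2 3 4 5]

    b = np.array([5, 2, 4, 1, 3])
    b.partition(2, axis=-1, kind='introselect')  # kth is required
    print(b[2])                                  # 3, the third-smallest element
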
&PyArray_SortkindConverter, &sortkind, + "|order", NULL, &order, + NULL, NULL, NULL) < 0) { return NULL; } if (order == Py_None) { @@ -1433,21 +1455,23 @@ array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds) static PyObject * -array_argpartition(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_argpartition(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis = -1; NPY_SELECTKIND sortkind = NPY_INTROSELECT; PyObject *order = NULL, *res; PyArray_Descr *newd, *saved=NULL; - static char *kwlist[] = {"kth", "axis", "kind", "order", NULL}; PyObject * kthobj; PyArrayObject * ktharray; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O:argpartition", kwlist, - &kthobj, - PyArray_AxisConverter, &axis, - PyArray_SelectkindConverter, &sortkind, - &order)) { + if (npy_parse_arguments("argpartition", args, len_args, kwnames, + "kth", NULL, &kthobj, + "|axis", &PyArray_AxisConverter, &axis, + "|kind", &PyArray_SelectkindConverter, &sortkind, + "|order", NULL, &order, + NULL, NULL, NULL) < 0) { return NULL; } if (order == Py_None) { @@ -1494,17 +1518,20 @@ array_argpartition(PyArrayObject *self, PyObject *args, PyObject *kwds) } static PyObject * -array_searchsorted(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_searchsorted(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - static char *kwlist[] = {"v", "side", "sorter", NULL}; PyObject *keys; PyObject *sorter; NPY_SEARCHSIDE side = NPY_SEARCHLEFT; + NPY_PREPARE_ARGPARSER; sorter = NULL; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O:searchsorted", - kwlist, &keys, - PyArray_SearchsideConverter, &side, &sorter)) { + if (npy_parse_arguments("searchsorted", args, len_args, kwnames, + "v", NULL, &keys, + "|side", &PyArray_SearchsideConverter, &side, + "|sorter", NULL, &sorter, + NULL, NULL, NULL) < 0) { return NULL; } if (sorter == Py_None) { @@ -2285,14 +2312,17 @@ array_cumprod(PyArrayObject *self, PyObject *args, PyObject *kwds) static PyObject * -array_dot(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_dot(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *a = (PyObject *)self, *b, *o = NULL; PyArrayObject *ret; - static char* kwlist[] = {"b", "out", NULL}; + NPY_PREPARE_ARGPARSER; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:dot", kwlist, &b, &o)) { + if (npy_parse_arguments("dot", args, len_args, kwnames, + "b", NULL, &b, + "|out", NULL, &o, + NULL, NULL, NULL) < 0) { return NULL; } @@ -2374,20 +2404,22 @@ array_nonzero(PyArrayObject *self, PyObject *args) static PyObject * -array_trace(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_trace(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis1 = 0, axis2 = 1, offset = 0; PyArray_Descr *dtype = NULL; PyArrayObject *out = NULL; int rtype; - static char *kwlist[] = {"offset", "axis1", "axis2", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiO&O&:trace", kwlist, - &offset, - &axis1, - &axis2, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out)) { + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments("trace", args, len_args, kwnames, + "|offset", &PyArray_PythonPyIntFromInt, &offset, + "|axis1", &PyArray_PythonPyIntFromInt, &axis1, + "|axis2", &PyArray_PythonPyIntFromInt, &axis2, + "|dtype", &PyArray_DescrConverter2, &dtype, + "|out", &PyArray_OutputConverter, &out, + NULL, NULL, NULL) 
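Likewise for ``argsort``, ``argpartition`` and ``searchsorted``: only ``kth`` (for ``argpartition``) and ``v`` (for ``searchsorted``) are required. A brief example, for illustration only, exercising the optional keywords::

    import numpy as np

    a = np.array([3, 1, 2])
    print(a.argsort(kind='stable'))          # [1 2 0]; axis/kind/order optional

    b = np.array([1, 3, 5, 7])
    print(b.searchsorted(4, side='right'))   # 2; only 'v' is required
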
< 0) { Py_XDECREF(dtype); return NULL; } @@ -2448,13 +2480,15 @@ array_diagonal(PyArrayObject *self, PyObject *args, PyObject *kwds) static PyObject * -array_flatten(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_flatten(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { NPY_ORDER order = NPY_CORDER; - static char *kwlist[] = {"order", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:flatten", kwlist, - PyArray_OrderConverter, &order)) { + if (npy_parse_arguments("flatten", args, len_args, kwnames, + "|order", PyArray_OrderConverter, &order, + NULL, NULL, NULL) < 0) { return NULL; } return PyArray_Flatten(self, order); @@ -2462,13 +2496,15 @@ array_flatten(PyArrayObject *self, PyObject *args, PyObject *kwds) static PyObject * -array_ravel(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_ravel(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { NPY_ORDER order = NPY_CORDER; - static char *kwlist[] = {"order", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:ravel", kwlist, - PyArray_OrderConverter, &order)) { + if (npy_parse_arguments("ravel", args, len_args, kwnames, + "|order", PyArray_OrderConverter, &order, + NULL, NULL, NULL) < 0) { return NULL; } return PyArray_Ravel(self, order); @@ -2724,19 +2760,19 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"argmax", (PyCFunction)array_argmax, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"argmin", (PyCFunction)array_argmin, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"argpartition", (PyCFunction)array_argpartition, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"argsort", (PyCFunction)array_argsort, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"astype", (PyCFunction)array_astype, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"byteswap", (PyCFunction)array_byteswap, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2757,7 +2793,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { METH_VARARGS, NULL}, {"copy", (PyCFunction)array_copy, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"cumprod", (PyCFunction)array_cumprod, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2769,13 +2805,13 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"dot", (PyCFunction)array_dot, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"fill", (PyCFunction)array_fill, METH_VARARGS, NULL}, {"flatten", (PyCFunction)array_flatten, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"getfield", (PyCFunction)array_getfield, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2802,7 +2838,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { METH_VARARGS, NULL}, {"partition", (PyCFunction)array_partition, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"prod", (PyCFunction)array_prod, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2814,7 +2850,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"ravel", (PyCFunction)array_ravel, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"repeat", (PyCFunction)array_repeat, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2829,7 +2865,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { 
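For ``trace``, ``flatten`` and ``ravel`` every argument is optional, and the switch to ``npy_parse_arguments`` is invisible to Python callers: positional and keyword spellings keep working, only the parsing overhead changes. A small sanity check, not part of the patch::

    import numpy as np

    a = np.arange(12.0).reshape(3, 4)
    assert a.trace(0, 0, 1) == a.trace(offset=0, axis1=0, axis2=1)  # same parser
    print(a.flatten(order='F'))
    print(a.ravel(order='F'))
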
METH_VARARGS | METH_KEYWORDS, NULL}, {"searchsorted", (PyCFunction)array_searchsorted, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"setfield", (PyCFunction)array_setfield, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2838,10 +2874,10 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"sort", (PyCFunction)array_sort, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"squeeze", (PyCFunction)array_squeeze, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"std", (PyCFunction)array_stddev, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2853,7 +2889,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { METH_VARARGS, NULL}, {"take", (PyCFunction)array_take, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"tobytes", (PyCFunction)array_tobytes, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2868,7 +2904,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"trace", (PyCFunction)array_trace, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"transpose", (PyCFunction)array_transpose, METH_VARARGS, NULL}, @@ -2877,6 +2913,6 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"view", (PyCFunction)array_view, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 7915c75be5ce..a0f7afeb5938 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -26,7 +26,7 @@ #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" - +#include "npy_argparse.h" #include "npy_config.h" #include "npy_pycompat.h" #include "npy_import.h" @@ -2829,28 +2829,36 @@ array_fastCopyAndTranspose(PyObject *NPY_UNUSED(dummy), PyObject *args) } static PyObject * -array_correlate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +array_correlate(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *shape, *a0; int mode = 0; - static char *kwlist[] = {"a", "v", "mode", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i:correlate", kwlist, - &a0, &shape, &mode)) { + if (npy_parse_arguments("correlate", args, len_args, kwnames, + "a", NULL, &a0, + "v", NULL, &shape, + "|mode", &PyArray_PythonPyIntFromInt, &mode, + NULL, NULL, NULL) < 0) { return NULL; } return PyArray_Correlate(a0, shape, mode); } static PyObject* -array_correlate2(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +array_correlate2(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *shape, *a0; int mode = 0; - static char *kwlist[] = {"a", "v", "mode", NULL}; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i:correlate2", kwlist, - &a0, &shape, &mode)) { + if (npy_parse_arguments("correlate2", args, len_args, kwnames, + "a", NULL, &a0, + "v", NULL, &shape, + "|mode", &PyArray_PythonPyIntFromInt, &mode, + NULL, NULL, NULL) < 0) { return NULL; } return PyArray_Correlate2(a0, shape, mode); @@ -3426,6 +3434,42 @@ array_datetime_data(PyObject *NPY_UNUSED(dummy), PyObject *args) return res; } + +static int +trimmode_converter(PyObject *obj, TrimMode *trim) +{ + if (!PyUnicode_Check(obj) || PyUnicode_GetLength(obj) != 1) { 
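``correlate`` and ``correlate2`` in the module above get the same treatment: ``a`` and ``v`` are required, ``mode`` is optional, and nothing changes in behaviour from Python. A quick illustration outside the diff::

    import numpy as np

    a = np.array([1.0, 2.0, 3.0])
    v = np.array([0.0, 1.0, 0.5])
    print(np.correlate(a, v))               # default mode='valid' -> [3.5]
    print(np.correlate(a, v, mode='full'))  # [0.5 2.  3.5 3.  0. ]
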
+ goto error; + } + const char *trimstr = PyUnicode_AsUTF8AndSize(obj, NULL); + + if (trimstr != NULL) { + if (trimstr[0] == 'k') { + *trim = TrimMode_None; + } + else if (trimstr[0] == '.') { + *trim = TrimMode_Zeros; + } + else if (trimstr[0] == '0') { + *trim = TrimMode_LeaveOneZero; + } + else if (trimstr[0] == '-') { + *trim = TrimMode_DptZeros; + } + else { + goto error; + } + } + return NPY_SUCCEED; + +error: + PyErr_Format(PyExc_TypeError, + "if supplied, trim must be 'k', '.', '0' or '-' found `%100S`", + obj); + return NPY_FAIL; +} + + /* * Prints floating-point scalars using the Dragon4 algorithm, scientific mode. * See docstring of `np.format_float_scientific` for description of arguments. @@ -3433,43 +3477,28 @@ array_datetime_data(PyObject *NPY_UNUSED(dummy), PyObject *args) * precision, which is equivalent to `None`. */ static PyObject * -dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +dragon4_scientific(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *obj; - static char *kwlist[] = {"x", "precision", "unique", "sign", "trim", - "pad_left", "exp_digits", NULL}; int precision=-1, pad_left=-1, exp_digits=-1; - char *trimstr=NULL; DigitMode digit_mode; TrimMode trim = TrimMode_None; int sign=0, unique=1; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iiisii:dragon4_scientific", - kwlist, &obj, &precision, &unique, &sign, &trimstr, &pad_left, - &exp_digits)) { + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments("dragon4_scientific", args, len_args, kwnames, + "x", NULL , &obj, + "|precision", &PyArray_PythonPyIntFromInt, &precision, + "|unique", &PyArray_PythonPyIntFromInt, &unique, + "|sign", &PyArray_PythonPyIntFromInt, &sign, + "|trim", &trimmode_converter, &trim, + "|pad_left", &PyArray_PythonPyIntFromInt, &pad_left, + "|exp_digits", &PyArray_PythonPyIntFromInt, &exp_digits, + NULL, NULL, NULL) < 0) { return NULL; } - if (trimstr != NULL) { - if (strcmp(trimstr, "k") == 0) { - trim = TrimMode_None; - } - else if (strcmp(trimstr, ".") == 0) { - trim = TrimMode_Zeros; - } - else if (strcmp(trimstr, "0") == 0) { - trim = TrimMode_LeaveOneZero; - } - else if (strcmp(trimstr, "-") == 0) { - trim = TrimMode_DptZeros; - } - else { - PyErr_SetString(PyExc_TypeError, - "if supplied, trim must be 'k', '.', '0' or '-'"); - return NULL; - } - } - digit_mode = unique ? DigitMode_Unique : DigitMode_Exact; if (unique == 0 && precision < 0) { @@ -3489,44 +3518,30 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) * precision, which is equivalent to `None`. 
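The new ``trimmode_converter`` accepts exactly the four one-character codes that ``np.format_float_scientific`` and ``np.format_float_positional`` document for ``trim``. A short demonstration of what they mean (illustrative only; ``unique=False`` is used so trailing zeros are actually generated)::

    import numpy as np

    # 'k' keeps trailing zeros, '.' trims them but keeps the decimal point,
    # '0' leaves one zero after the point, '-' trims the point as well.
    for trim in ('k', '.', '0', '-'):
        print(trim, np.format_float_scientific(1.0, unique=False,
                                               precision=3, trim=trim))
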
*/ static PyObject * -dragon4_positional(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +dragon4_positional(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *obj; - static char *kwlist[] = {"x", "precision", "unique", "fractional", - "sign", "trim", "pad_left", "pad_right", NULL}; int precision=-1, pad_left=-1, pad_right=-1; - char *trimstr=NULL; CutoffMode cutoff_mode; DigitMode digit_mode; TrimMode trim = TrimMode_None; int sign=0, unique=1, fractional=0; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iiiisii:dragon4_positional", - kwlist, &obj, &precision, &unique, &fractional, &sign, &trimstr, - &pad_left, &pad_right)) { + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments("dragon4_positional", args, len_args, kwnames, + "x", NULL , &obj, + "|precision", &PyArray_PythonPyIntFromInt, &precision, + "|unique", &PyArray_PythonPyIntFromInt, &unique, + "|fractional", &PyArray_PythonPyIntFromInt, &fractional, + "|sign", &PyArray_PythonPyIntFromInt, &sign, + "|trim", &trimmode_converter, &trim, + "|pad_left", &PyArray_PythonPyIntFromInt, &pad_left, + "|pad_right", &PyArray_PythonPyIntFromInt, &pad_right, + NULL, NULL, NULL) < 0) { return NULL; } - if (trimstr != NULL) { - if (strcmp(trimstr, "k") == 0) { - trim = TrimMode_None; - } - else if (strcmp(trimstr, ".") == 0) { - trim = TrimMode_Zeros; - } - else if (strcmp(trimstr, "0") == 0) { - trim = TrimMode_LeaveOneZero; - } - else if (strcmp(trimstr, "-") == 0) { - trim = TrimMode_DptZeros; - } - else { - PyErr_SetString(PyExc_TypeError, - "if supplied, trim must be 'k', '.', '0' or '-'"); - return NULL; - } - } - digit_mode = unique ? DigitMode_Unique : DigitMode_Exact; cutoff_mode = fractional ? CutoffMode_FractionLength : CutoffMode_TotalLength; @@ -4052,15 +4067,19 @@ array_may_share_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject * } static PyObject * -normalize_axis_index(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) +normalize_axis_index(PyObject *NPY_UNUSED(self), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - static char *kwlist[] = {"axis", "ndim", "msg_prefix", NULL}; int axis; int ndim; PyObject *msg_prefix = Py_None; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "ii|O:normalize_axis_index", - kwlist, &axis, &ndim, &msg_prefix)) { + if (npy_parse_arguments("normalize_axis_index", args, len_args, kwnames, + "axis", &PyArray_PythonPyIntFromInt, &axis, + "ndim", &PyArray_PythonPyIntFromInt, &ndim, + "|msg_prefix", NULL, &msg_prefix, + NULL, NULL, NULL) < 0) { return NULL; } if (check_and_adjust_axis_msg(&axis, ndim, msg_prefix) < 0) { @@ -4191,10 +4210,10 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS, NULL}, {"correlate", (PyCFunction)array_correlate, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"correlate2", (PyCFunction)array_correlate2, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"frombuffer", (PyCFunction)array_frombuffer, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -4241,10 +4260,10 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"dragon4_positional", (PyCFunction)dragon4_positional, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"dragon4_scientific", (PyCFunction)dragon4_scientific, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"compare_chararrays", 
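``dragon4_positional`` and ``normalize_axis_index`` below follow the same conversion, and the public wrappers behave as before. A small usage sketch, not part of the patch; ``normalize_axis_index`` is an internal helper that in this era of NumPy is importable from ``numpy.core.multiarray``::

    import numpy as np
    from numpy.core.multiarray import normalize_axis_index

    print(np.format_float_positional(0.5, precision=4, fractional=True,
                                     trim='-', pad_left=2, pad_right=4))
    print(normalize_axis_index(axis=-2, ndim=3))   # -> 1
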
(PyCFunction)compare_chararrays, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -4277,7 +4296,7 @@ static struct PyMethodDef array_module_methods[] = { {"unpackbits", (PyCFunction)io_unpack, METH_VARARGS | METH_KEYWORDS, NULL}, {"normalize_axis_index", (PyCFunction)normalize_axis_index, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"set_legacy_print_mode", (PyCFunction)set_legacy_print_mode, METH_VARARGS, NULL}, {"_discover_array_parameters", (PyCFunction)_discover_array_parameters, From b39dc56bcbfb2990e970e405b863218980b6d68a Mon Sep 17 00:00:00 2001 From: Giulio Procopio <61784957+Giuppox@users.noreply.github.com> Date: Thu, 18 Mar 2021 13:07:30 +0100 Subject: [PATCH 0767/1270] ENH: Removed useless declarations in `bad_commands` When creating the `bad_commands` dictionary it is useless to declare commands that are already set with a default message in the 329th line's for loop --- setup.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/setup.py b/setup.py index 4c7d825df781..247dc512a2b1 100755 --- a/setup.py +++ b/setup.py @@ -313,8 +313,6 @@ def parse_setuppy_commands(): Instead, build what you want to upload and upload those files with `twine upload -s ` instead. """, - upload_docs="`setup.py upload_docs` is not supported", - easy_install="`setup.py easy_install` is not supported", clean=""" `setup.py clean` is not supported, use one of the following instead: @@ -322,10 +320,6 @@ def parse_setuppy_commands(): - `git clean -Xdf` (cleans all versioned files, doesn't touch files that aren't checked into the git repo) """, - check="`setup.py check` is not supported", - register="`setup.py register` is not supported", - bdist_dumb="`setup.py bdist_dumb` is not supported", - bdist="`setup.py bdist` is not supported", build_sphinx=""" `setup.py build_sphinx` is not supported, use the Makefile under doc/""", From b5de1ceb1f1707d0539446b3d8883f3f8f80cb69 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 18 Mar 2021 11:28:18 -0500 Subject: [PATCH 0768/1270] MAINT: Add comment to unused func, silence linter and avoid non-contig error --- .../src/multiarray/lowlevel_strided_loops.c.src | 5 +++++ numpy/core/tests/test_casting_unittests.py | 10 +++++----- numpy/core/tests/test_datetime.py | 15 ++++++++------- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index 8a6948fa4025..631042dae1d7 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -285,6 +285,11 @@ _strided_to_strided( return 0; } +/* + * NOTE: This function is currently unused. It would currently be used for + * builtin dtypes that have an elsize other than 2, 4, 8, or 16 bytes. + * Since unicode and complex swap differently, no such dtype exists. + */ static int _swap_strided_to_strided( PyArrayMethod_Context *context, char *const *args, diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py index 45d58a23a270..c8fcd4b42522 100644 --- a/numpy/core/tests/test_casting_unittests.py +++ b/numpy/core/tests/test_casting_unittests.py @@ -626,20 +626,20 @@ def test_unicode_byteswapped_cast(self, order1, order2): # that tests unicode bytedwaps including for unaligned array data. 
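On the ``setup.py`` cleanup in PATCH 0767 above: the removed entries are redundant because a later loop already assigns the generic "not supported" message for those commands. A hedged sketch of that pattern (the command names come from the removed entries, but the structure and message wording here are illustrative, not the actual ``setup.py`` code)::

    # Commands with a dedicated message are spelled out; everything else
    # gets the generic default in a loop, so listing it twice is redundant.
    bad_commands = dict(
        clean="`setup.py clean` is not supported, use `git clean -xdf`",
    )
    for command in ('upload_docs', 'easy_install', 'check', 'register',
                    'bdist_dumb', 'bdist'):
        bad_commands.setdefault(command,
                                f"`setup.py {command}` is not supported")
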
dtype1 = np.dtype(f"{order1}U30") dtype2 = np.dtype(f"{order2}U30") - data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[None, 1:].view(dtype1) - data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[None, 1:].view(dtype2) + data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1) + data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2) if dtype1.alignment != 1: # alignment should always be >1, but skip the check if not assert not data1.flags.aligned assert not data2.flags.aligned element = "this is a ünicode string‽" - data1[0] = element + data1[()] = element # Test both `data1` and `data1.copy()` (which should be aligned) for data in [data1, data1.copy()]: data2[...] = data1 - assert data2[0] == element - assert data2.copy()[0] == element + assert data2[()] == element + assert data2.copy()[()] == element def test_void_to_string_special_case(self): # Cover a small special case in void to string casting that could diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index d2c04a506df9..b4146eadf3f2 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -705,12 +705,12 @@ def test_time_byteswapped_cast(self, time1, time2): # Test that every byte-swapping combination also returns the same # results (previous tests check that this comparison works fine). - res1 = times.astype(dtype1.newbyteorder()).astype(dtype2) - assert_array_equal(res1, expected) - res2 = times.astype(dtype2.newbyteorder()) - assert_array_equal(res2, expected) - res3 = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder()) - assert_array_equal(res3, expected) + res = times.astype(dtype1.newbyteorder()).astype(dtype2) + assert_array_equal(res, expected) + res = times.astype(dtype2.newbyteorder()) + assert_array_equal(res, expected) + res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder()) + assert_array_equal(res, expected) @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) @pytest.mark.parametrize("str_dtype", ["U", "S"]) @@ -732,7 +732,8 @@ def test_datetime_conversions_byteorders(self, str_dtype, time_dtype): assert_array_equal(res, to_strings) # Check that conversion from string to times works if src is swapped: - from_strings_swapped = from_strings.astype(from_strings.dtype.newbyteorder()) + from_strings_swapped = from_strings.astype( + from_strings.dtype.newbyteorder()) res = from_strings_swapped.astype(time_dtype) assert_array_equal(res, times) # And if both are swapped: From f53eaf7780cf2c92157cb8dc650447792476e2eb Mon Sep 17 00:00:00 2001 From: Aitik Gupta Date: Sat, 10 Oct 2020 18:04:04 +0530 Subject: [PATCH 0769/1270] DEP: Deprecate inexact matches of mode, shift parameter parsing to C --- numpy/core/include/numpy/ndarraytypes.h | 6 ++ numpy/core/numeric.py | 13 ---- numpy/core/src/multiarray/conversion_utils.c | 71 ++++++++++++++++++++ numpy/core/src/multiarray/conversion_utils.h | 3 + numpy/core/src/multiarray/multiarraymodule.c | 4 +- 5 files changed, 82 insertions(+), 15 deletions(-) diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 63e8bf974e96..f616268f1a10 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -236,6 +236,12 @@ typedef enum { NPY_RAISE=2 } NPY_CLIPMODE; +typedef enum { + NPY_VALID=0, + NPY_SAME=1, + NPY_FULL=2 +} NPY_CORRELATEMODE; + /* The special not-a-time (NaT) value */ #define NPY_DATETIME_NAT NPY_MIN_INT64 diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 
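The hunk above drops the extra leading axis while keeping the data unaligned (offset by one byte). A stand-alone sketch of the same setup, outside the patch, showing an unaligned byte-swapped unicode cast::

    import numpy as np

    dt_be, dt_le = np.dtype(">U30"), np.dtype("<U30")
    # Slicing one byte off a uint8 buffer yields an unaligned one-element view.
    data = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dt_be)
    data[()] = "byteswapped text"
    # Casting between byte orders must round-trip the value even when unaligned.
    assert data.astype(dt_le)[()] == "byteswapped text"
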
7675386e70b1..a6ee9eba9979 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -662,17 +662,6 @@ def flatnonzero(a): return np.nonzero(np.ravel(a))[0] -_mode_from_name_dict = {'v': 0, - 's': 1, - 'f': 2} - - -def _mode_from_name(mode): - if isinstance(mode, str): - return _mode_from_name_dict[mode.lower()[0]] - return mode - - def _correlate_dispatcher(a, v, mode=None): return (a, v) @@ -748,7 +737,6 @@ def correlate(a, v, mode='valid'): array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) """ - mode = _mode_from_name(mode) return multiarray.correlate2(a, v, mode) @@ -852,7 +840,6 @@ def convolve(a, v, mode='full'): raise ValueError('a cannot be empty') if len(v) == 0: raise ValueError('v cannot be empty') - mode = _mode_from_name(mode) return multiarray.correlate(a, v[::-1], mode) diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c index dd18f71fd03e..bcb9167914a0 100644 --- a/numpy/core/src/multiarray/conversion_utils.c +++ b/numpy/core/src/multiarray/conversion_utils.c @@ -715,6 +715,77 @@ PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE *modes, int n) return NPY_SUCCEED; } +static int correlatemode_parser(char const *str, Py_ssize_t length, void *data) +{ + NPY_CORRELATEMODE *val = (NPY_CORRELATEMODE *)data; + int is_exact = 0; + + if (length < 1) { + return -1; + } + if (str[0] == 'V' || str[0] == 'v') { + *val = NPY_VALID; + is_exact = (length == 5 && strcmp(str, "valid") == 0); + } + else if (str[0] == 'S' || str[0] == 's') { + *val = NPY_SAME; + is_exact = (length == 4 && strcmp(str, "same") == 0); + } + else if (str[0] == 'F' || str[0] == 'f') { + *val = NPY_FULL; + is_exact = (length == 4 && strcmp(str, "full") == 0); + } + else { + return -1; + } + + /* Filters out the case sensitive/non-exact + * match inputs and other inputs and outputs DeprecationWarning + */ + if (!is_exact) { + if (DEPRECATE("inexact matches and case insensitive matches for " + "convolve/correlate mode are deprecated, please " + "use one of 'valid', 'same', or 'full' instead.") < 0) { + return -1; + } + } + + return 0; +} + +/* + * Convert an object to NPY_VALID / NPY_SAME / NPY_FULL + */ +NPY_NO_EXPORT int +PyArray_CorrelatemodeConverter(PyObject *object, NPY_CORRELATEMODE *val) +{ + if (PyUnicode_Check(object)) { + return string_converter_helper( + object, (void *)val, correlatemode_parser, "mode", + "must be one of 'valid', 'same', or 'full'"); + } + + else { + /* For users passing integers */ + int number = PyArray_PyIntAsInt(object); + if (error_converting(number)) { + PyErr_SetString(PyExc_TypeError, + "convolve/correlate mode not understood"); + return NPY_FAIL; + } + if (number <= (int) NPY_FULL + && number >= (int) NPY_VALID) { + *val = (NPY_CORRELATEMODE) number; + return NPY_SUCCEED; + } + else { + PyErr_Format(PyExc_ValueError, + "integer convolve/correlate mode must be 0, 1, or 2"); + return NPY_FAIL; + } + } +} + static int casting_parser(char const *str, Py_ssize_t length, void *data) { NPY_CASTING *casting = (NPY_CASTING *)data; diff --git a/numpy/core/src/multiarray/conversion_utils.h b/numpy/core/src/multiarray/conversion_utils.h index bee0c6064d05..7d1871c43ddb 100644 --- a/numpy/core/src/multiarray/conversion_utils.h +++ b/numpy/core/src/multiarray/conversion_utils.h @@ -42,6 +42,9 @@ PyArray_TypestrConvert(int itemsize, int gentype); NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp(int len, npy_intp const *vals); +NPY_NO_EXPORT int +PyArray_CorrelatemodeConverter(PyObject *object, NPY_CORRELATEMODE 
*val); + NPY_NO_EXPORT int PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind); diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index a0f7afeb5938..12705dc19177 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -2839,7 +2839,7 @@ array_correlate(PyObject *NPY_UNUSED(dummy), if (npy_parse_arguments("correlate", args, len_args, kwnames, "a", NULL, &a0, "v", NULL, &shape, - "|mode", &PyArray_PythonPyIntFromInt, &mode, + "|mode", &PyArray_CorrelatemodeConverter, &mode, NULL, NULL, NULL) < 0) { return NULL; } @@ -2857,7 +2857,7 @@ array_correlate2(PyObject *NPY_UNUSED(dummy), if (npy_parse_arguments("correlate2", args, len_args, kwnames, "a", NULL, &a0, "v", NULL, &shape, - "|mode", &PyArray_PythonPyIntFromInt, &mode, + "|mode", &PyArray_CorrelatemodeConverter, &mode, NULL, NULL, NULL) < 0) { return NULL; } From 6256db85dd510600a46e5192a22774bc0ababcf0 Mon Sep 17 00:00:00 2001 From: Aitik Gupta Date: Sat, 10 Oct 2020 18:07:12 +0530 Subject: [PATCH 0770/1270] TST: Added test for inexact matches of mode --- numpy/core/tests/test_numeric.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 8d3cec708ce9..ed76ca3c9d19 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -2947,6 +2947,18 @@ def test_zero_size(self): with pytest.raises(ValueError): np.correlate(np.ones(1000), np.array([]), mode='full') + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.correlate(d, k, mode='valid') + with assert_warns(DeprecationWarning): + valid_mode = np.correlate(d, k, mode='v') + assert_array_equal(valid_mode, default_mode) + # integer mode + with assert_raises(ValueError): + np.correlate(d, k, mode=-1) + assert_array_equal(np.correlate(d, k, mode=0), valid_mode) + class TestConvolve: def test_object(self): d = [1.] 
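The behaviour pinned down by the new test can be reproduced directly; this snippet is not part of the patch and assumes a NumPy that already contains the deprecation::

    import numpy as np
    from numpy.testing import assert_warns, assert_array_equal

    d, k = np.ones(100), np.ones(3)
    with assert_warns(DeprecationWarning):
        shorthand = np.correlate(d, k, mode='v')     # inexact match of 'valid'
    assert_array_equal(shorthand, np.correlate(d, k, mode='valid'))
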
* 100 @@ -2960,6 +2972,17 @@ def test_no_overwrite(self): assert_array_equal(d, np.ones(100)) assert_array_equal(k, np.ones(3)) + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.convolve(d, k, mode='full') + with assert_warns(DeprecationWarning): + full_mode = np.convolve(d, k, mode='f') + assert_array_equal(full_mode, default_mode) + # integer mode + with assert_raises(ValueError): + np.convolve(d, k, mode=-1) + assert_array_equal(np.convolve(d, k, mode=2), full_mode) class TestArgwhere: From 35158c723787c46014ff2bf8332e0aa639dcb0c7 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Tue, 19 Jan 2021 10:59:23 +0000 Subject: [PATCH 0771/1270] Update numpy/core/src/multiarray/conversion_utils.c --- numpy/core/src/multiarray/conversion_utils.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c index bcb9167914a0..3c4c21dedd23 100644 --- a/numpy/core/src/multiarray/conversion_utils.c +++ b/numpy/core/src/multiarray/conversion_utils.c @@ -743,6 +743,7 @@ static int correlatemode_parser(char const *str, Py_ssize_t length, void *data) * match inputs and other inputs and outputs DeprecationWarning */ if (!is_exact) { + /* Numpy 1.21, 2021-01-19 */ if (DEPRECATE("inexact matches and case insensitive matches for " "convolve/correlate mode are deprecated, please " "use one of 'valid', 'same', or 'full' instead.") < 0) { From 4bcfc9df533dc888f637d5291cd184c90288487c Mon Sep 17 00:00:00 2001 From: Aitik Gupta Date: Tue, 19 Jan 2021 17:27:00 +0530 Subject: [PATCH 0772/1270] REL: Add deprecation release note --- doc/release/upcoming_changes/17492.deprecation.rst | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/release/upcoming_changes/17492.deprecation.rst diff --git a/doc/release/upcoming_changes/17492.deprecation.rst b/doc/release/upcoming_changes/17492.deprecation.rst new file mode 100644 index 000000000000..e0a2a78afba9 --- /dev/null +++ b/doc/release/upcoming_changes/17492.deprecation.rst @@ -0,0 +1,8 @@ +Inexact matches for `numpy.convolve` and `numpy.correlate` are deprecated +------------------------------------------------------------------------- + +`numpy.convolve` and `numpy.correlate` now parses its ``mode`` argument in C +instead of Python, and raises a warning when there are case insensitive and/or +inexact matches found for ``mode`` argument in the functions. +Pass full ``"same"``, ``"valid"``, ``"full"`` strings instead of +``"s"``, ``"v"``, ``"f"`` for the ``mode`` argument. From 153c8ed8e716446e1bf063b5543b03dd317d2e29 Mon Sep 17 00:00:00 2001 From: Aitik Gupta Date: Tue, 19 Jan 2021 18:52:46 +0530 Subject: [PATCH 0773/1270] Remove extra information Co-authored-by: Eric Wieser --- doc/release/upcoming_changes/17492.deprecation.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/release/upcoming_changes/17492.deprecation.rst b/doc/release/upcoming_changes/17492.deprecation.rst index e0a2a78afba9..50005aed75b6 100644 --- a/doc/release/upcoming_changes/17492.deprecation.rst +++ b/doc/release/upcoming_changes/17492.deprecation.rst @@ -1,8 +1,7 @@ Inexact matches for `numpy.convolve` and `numpy.correlate` are deprecated ------------------------------------------------------------------------- -`numpy.convolve` and `numpy.correlate` now parses its ``mode`` argument in C -instead of Python, and raises a warning when there are case insensitive and/or -inexact matches found for ``mode`` argument in the functions. 
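Integer modes remain supported: they map straight onto the ``NPY_CORRELATEMODE`` enum introduced earlier in this series (0 = 'valid', 1 = 'same', 2 = 'full'), which is what the tests above rely on. A compact check, for illustration only::

    import numpy as np

    d, k = np.ones(10), np.ones(3)
    assert np.array_equal(np.convolve(d, k, mode=2),
                          np.convolve(d, k, mode='full'))
    assert np.array_equal(np.correlate(d, k, mode=0),
                          np.correlate(d, k, mode='valid'))
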
+`numpy.convolve` and `numpy.correlate` now emits a warning when there are case +insensitive and/or inexact matches found for ``mode`` argument in the functions. Pass full ``"same"``, ``"valid"``, ``"full"`` strings instead of ``"s"``, ``"v"``, ``"f"`` for the ``mode`` argument. From f5a2b486238183018092bb6a1f43ecd3f2d79ff9 Mon Sep 17 00:00:00 2001 From: Aitik Gupta Date: Tue, 19 Jan 2021 18:53:34 +0530 Subject: [PATCH 0774/1270] Add one more test for illegal arguments Co-authored-by: Eric Wieser --- numpy/core/tests/test_numeric.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index ed76ca3c9d19..22c9a5d62c6a 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -2983,6 +2983,9 @@ def test_mode(self): with assert_raises(ValueError): np.convolve(d, k, mode=-1) assert_array_equal(np.convolve(d, k, mode=2), full_mode) + # illegal arguments + with assert_raises(TypeError): + np.convolve(d, k, mode=None) class TestArgwhere: From edfd440a38d4f57000e1ca8d98d9f0033e6bd389 Mon Sep 17 00:00:00 2001 From: Aitik Gupta Date: Tue, 19 Jan 2021 19:01:07 +0530 Subject: [PATCH 0775/1270] The same test for np.correlate --- numpy/core/tests/test_numeric.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 22c9a5d62c6a..20c0462ecaab 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -2958,6 +2958,10 @@ def test_mode(self): with assert_raises(ValueError): np.correlate(d, k, mode=-1) assert_array_equal(np.correlate(d, k, mode=0), valid_mode) + # illegal arguments + with assert_raises(TypeError): + np.correlate(d, k, mode=None) + class TestConvolve: def test_object(self): @@ -2987,6 +2991,7 @@ def test_mode(self): with assert_raises(TypeError): np.convolve(d, k, mode=None) + class TestArgwhere: @pytest.mark.parametrize('nd', [0, 1, 2]) From 89de3d9d21e38e4070681e455d98575df3f3b51f Mon Sep 17 00:00:00 2001 From: Aitik Gupta Date: Tue, 19 Jan 2021 19:05:14 +0530 Subject: [PATCH 0776/1270] A few whitespace cleanups --- numpy/core/tests/test_numeric.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 20c0462ecaab..aba90ece5e11 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -296,6 +296,7 @@ def test_var(self): B[0] = 1j assert_almost_equal(np.var(B), 0.25) + class TestIsscalar: def test_isscalar(self): assert_(np.isscalar(3.1)) @@ -2362,8 +2363,8 @@ def test_clip_property(self, data, shape): base_shape=shape, # Commenting out the min_dims line allows zero-dimensional arrays, # and zero-dimensional arrays containing NaN make the test fail. 
- min_dims=1 - + min_dims=1 + ) ) amin = data.draw( @@ -2896,10 +2897,10 @@ def _setup(self, dt): self.x = np.array([1, 2, 3, 4, 5], dtype=dt) self.xs = np.arange(1, 20)[::3] self.y = np.array([-1, -2, -3], dtype=dt) - self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt) + self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt) self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) - self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) - self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) + self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) + self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) self.zs = np.array([-3., -14., -30., -48., -66., -84., -102., -54., -19.], dtype=dt) From 9c68c2f7b1b2128a3b4af2134565f60d286fa8b9 Mon Sep 17 00:00:00 2001 From: Abhay Raghuvanshi Date: Fri, 19 Mar 2021 01:49:20 +0530 Subject: [PATCH 0777/1270] MAINT: Added Chain exceptions where appropriate (#18394) * Added chain exception in _bits_of func * Added chain exception * Added chain exception in unixccompiler.py * Added chain exception in config.py * Added chain exception in fcompiler __init__.py * Added chain exception in compaq.py * Added chain exception in format.py * Updated raise chain exception * STY: Break long line. Co-authored-by: Charles Harris --- numpy/core/_type_aliases.py | 3 ++- numpy/distutils/command/config.py | 5 ++--- numpy/distutils/conv_template.py | 4 ++-- numpy/distutils/fcompiler/__init__.py | 4 ++-- numpy/distutils/fcompiler/compaq.py | 4 ++-- numpy/distutils/unixccompiler.py | 4 ++-- numpy/lib/format.py | 8 ++++---- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/numpy/core/_type_aliases.py b/numpy/core/_type_aliases.py index de90fd818686..67addef483f6 100644 --- a/numpy/core/_type_aliases.py +++ b/numpy/core/_type_aliases.py @@ -46,7 +46,8 @@ def _bits_of(obj): info = next(v for v in _concrete_typeinfo.values() if v.type is obj) except StopIteration: if obj in _abstract_types.values(): - raise ValueError("Cannot count the bits of an abstract type") + msg = "Cannot count the bits of an abstract type" + raise ValueError(msg) from None # some third-party type - make a best-guess return dtype(obj).itemsize * 8 diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py index 8b735677a46e..1f4037bb5c7a 100644 --- a/numpy/distutils/command/config.py +++ b/numpy/distutils/command/config.py @@ -64,7 +64,7 @@ def _check_compiler (self): % (e, self.compiler.__class__.__name__) print(textwrap.dedent("""\ ============================================================================""")) - raise distutils.errors.DistutilsPlatformError(msg) + raise distutils.errors.DistutilsPlatformError(msg) from e # After MSVC is initialized, add an explicit /MANIFEST to linker # flags. See issues gh-4245 and gh-4101 for details. 
Also @@ -97,9 +97,8 @@ def _wrap_method(self, mth, lang, args): try: ret = mth(*((self,)+args)) except (DistutilsExecError, CompileError) as e: - str(e) self.compiler = save_compiler - raise CompileError + raise CompileError from e self.compiler = save_compiler return ret diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py index 65efab0625e0..90e07f8b1036 100644 --- a/numpy/distutils/conv_template.py +++ b/numpy/distutils/conv_template.py @@ -285,7 +285,7 @@ def process_file(source): try: code = process_str(''.join(lines)) except ValueError as e: - raise ValueError('In "%s" loop at %s' % (sourcefile, e)) + raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None return '#line 1 "%s"\n%s' % (sourcefile, code) @@ -322,7 +322,7 @@ def main(): try: writestr = process_str(allstr) except ValueError as e: - raise ValueError("In %s loop at %s" % (file, e)) + raise ValueError("In %s loop at %s" % (file, e)) from None outfile.write(writestr) diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py index 812461538dc9..d7579e976801 100644 --- a/numpy/distutils/fcompiler/__init__.py +++ b/numpy/distutils/fcompiler/__init__.py @@ -610,7 +610,7 @@ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): self.spawn(command, display=display) except DistutilsExecError as e: msg = str(e) - raise CompileError(msg) + raise CompileError(msg) from None def module_options(self, module_dirs, module_build_dir): options = [] @@ -678,7 +678,7 @@ def link(self, target_desc, objects, self.spawn(command) except DistutilsExecError as e: msg = str(e) - raise LinkError(msg) + raise LinkError(msg) from None else: log.debug("skipping %s (up-to-date)", output_filename) diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py index 1a356866a283..351a43dd7618 100644 --- a/numpy/distutils/fcompiler/compaq.py +++ b/numpy/distutils/fcompiler/compaq.py @@ -87,11 +87,11 @@ class CompaqVisualFCompiler(FCompiler): except IOError as e: if not "vcvarsall.bat" in str(e): print("Unexpected IOError in", __file__) - raise e + raise except ValueError as e: if not "'path'" in str(e): print("Unexpected ValueError in", __file__) - raise e + raise executables = { 'version_cmd' : ['', "/what"], diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py index 0cd2d243ee83..fb91f178935f 100644 --- a/numpy/distutils/unixccompiler.py +++ b/numpy/distutils/unixccompiler.py @@ -54,7 +54,7 @@ def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts extra_postargs, display = display) except DistutilsExecError as e: msg = str(e) - raise CompileError(msg) + raise CompileError(msg) from None # add commandline flags to dependency file if deps: @@ -131,7 +131,7 @@ def UnixCCompiler_create_static_lib(self, objects, output_libname, display = display) except DistutilsExecError as e: msg = str(e) - raise LibError(msg) + raise LibError(msg) from None else: log.debug("skipping %s (up-to-date)", output_filename) return diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 5d951e262570..ac5f75fba1cf 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -378,7 +378,7 @@ def _wrap_header(header, version): header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) except struct.error: msg = "Header length {} too big for version={}".format(hlen, version) - raise ValueError(msg) + raise ValueError(msg) from None # Pad the header with spaces and a final newline such that the magic # string, 
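All of the hunks in this patch use one of two idioms: ``raise NewError(...) from err`` preserves the original exception as ``__cause__`` for debugging, while ``raise NewError(...) from None`` suppresses the implicit context so the user only sees the rewritten message. A self-contained illustration with made-up function names, not taken from the patch::

    def parse_count(text):
        try:
            return int(text)
        except ValueError as e:
            # chained: the original ValueError shows up as __cause__
            raise RuntimeError(f"bad count field: {text!r}") from e

    def parse_count_quietly(text):
        try:
            return int(text)
        except ValueError:
            # suppressed: only the RuntimeError traceback is shown
            raise RuntimeError(f"bad count field: {text!r}") from None
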
the header-length short and the header are aligned on a @@ -595,7 +595,7 @@ def _read_array_header(fp, version): d = safe_eval(header) except SyntaxError as e: msg = "Cannot parse header: {!r}\nException: {!r}" - raise ValueError(msg.format(header, e)) + raise ValueError(msg.format(header, e)) from None if not isinstance(d, dict): msg = "Header is not a dictionary: {!r}" raise ValueError(msg.format(d)) @@ -614,9 +614,9 @@ def _read_array_header(fp, version): raise ValueError(msg.format(d['fortran_order'])) try: dtype = descr_to_dtype(d['descr']) - except TypeError: + except TypeError as e: msg = "descr is not a valid dtype descriptor: {!r}" - raise ValueError(msg.format(d['descr'])) + raise ValueError(msg.format(d['descr'])) from e return d['shape'], d['fortran_order'], dtype From f531110689a646f574ad1529d78b6047cf397f3e Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 5 Dec 2020 22:38:47 -0600 Subject: [PATCH 0778/1270] ENH: Use new argument parsing for array creation functions The array creation functions have the most to gain: 1. np.asarray is 4 times faster and commonly used. 2. Other functions are wrapped using __array_function__ in Python making it more difficult This commit (unfortunatly) has to do a few things: * Modify __array_function__ C-side dispatching to accomodate the fastcall argument convention. * Move asarray, etc. to C after removing all "fast paths" from np.array (simplifying the code) * Fixup imports, since asarray was imported directly in a few places * Replace some places where `np.array` was probably used for speed instead of np.asarray or similar. (or by accident in 1 or 2 places) --- numpy/core/_add_newdocs.py | 205 +++++++ numpy/core/_asarray.py | 273 +-------- numpy/core/_methods.py | 2 +- numpy/core/fromnumeric.py | 3 +- numpy/core/multiarray.py | 7 +- numpy/core/numeric.py | 9 +- numpy/core/shape_base.py | 3 +- .../src/multiarray/arrayfunction_override.c | 103 ++-- .../src/multiarray/arrayfunction_override.h | 4 +- numpy/core/src/multiarray/multiarraymodule.c | 516 +++++++++++------- numpy/core/tests/test_overrides.py | 3 +- numpy/ctypeslib.py | 4 +- numpy/lib/function_base.py | 9 +- numpy/lib/shape_base.py | 2 +- numpy/ma/core.py | 2 +- numpy/ma/testutils.py | 4 +- numpy/testing/_private/utils.py | 24 +- 17 files changed, 629 insertions(+), 544 deletions(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index fb293014395e..1acf4e7d6280 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -904,6 +904,211 @@ array_function_like_doc, )) +add_newdoc('numpy.core.multiarray', 'asarray', + """ + asarray(a, dtype=None, order=None) + + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major (C-style) or + column-major (Fortran-style) memory representation. + Defaults to 'C'. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray with matching dtype and order. If `a` is a + subclass of ndarray, a base class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. 
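The commit message's performance claim about ``np.asarray`` can be sanity-checked with a micro-benchmark along these lines; this is only a measurement sketch, and the absolute numbers depend on the machine and the NumPy build::

    import timeit
    import numpy as np

    a = np.arange(100)
    # Passing an existing ndarray, so both calls are pure call/dispatch overhead.
    print(timeit.timeit(lambda: np.asarray(a), number=100_000))
    print(timeit.timeit(lambda: np.array(a, copy=False), number=100_000))
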
+ asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.recarray, np.ndarray) + True + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """) + +add_newdoc('numpy.core.multiarray', 'asanyarray', + """ + asanyarray(a, dtype=None, order=None) + + Convert the input to an ndarray, but pass ndarray subclasses through. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes scalars, lists, lists of tuples, tuples, tuples of tuples, + tuples of lists, and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major (C-style) or column-major + (Fortran-style) memory representation. Defaults to 'C'. + + Returns + ------- + out : ndarray or an ndarray subclass + Array interpretation of `a`. If `a` is an ndarray or a subclass + of ndarray, it is returned as-is and no copy is performed. + + See Also + -------- + asarray : Similar function which always returns ndarrays. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and + Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asanyarray(a) + array([1, 2]) + + Instances of `ndarray` subclasses are passed through as-is: + + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) + >>> np.asanyarray(a) is a + True + + """) + +add_newdoc('numpy.core.multiarray', 'ascontiguousarray', + """ + ascontiguousarray(a, dtype=None) + + Return a contiguous array (ndim >= 1) in memory (C order). + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + Data-type of returned array. + + Returns + ------- + out : ndarray + Contiguous array of same shape and content as `a`, with type `dtype` + if specified. + + See Also + -------- + asfortranarray : Convert input to an ndarray with column-major + memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. 
+ + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> np.ascontiguousarray(x, dtype=np.float32) + array([[0., 1., 2.], + [3., 4., 5.]], dtype=float32) + >>> x.flags['C_CONTIGUOUS'] + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """) + +add_newdoc('numpy.core.multiarray', 'asfortranarray', + """ + asfortranarray(a, dtype=None) + + Return an array (ndim >= 1) laid out in Fortran order in memory. + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + By default, the data-type is inferred from the input data. + + Returns + ------- + out : ndarray + The input `a` in Fortran, or column-major, order. + + See Also + -------- + ascontiguousarray : Convert input to a contiguous (C order) array. + asanyarray : Convert input to an ndarray with either row or + column-major memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> y = np.asfortranarray(x) + >>> x.flags['F_CONTIGUOUS'] + False + >>> y.flags['F_CONTIGUOUS'] + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """) + add_newdoc('numpy.core.multiarray', 'empty', """ empty(shape, dtype=float, order='C', *, like=None) diff --git a/numpy/core/_asarray.py b/numpy/core/_asarray.py index a406308f3dd3..9522b7e38833 100644 --- a/numpy/core/_asarray.py +++ b/numpy/core/_asarray.py @@ -11,280 +11,9 @@ from .multiarray import array -__all__ = [ - "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "require", -] +__all__ = ["require"] -def _asarray_dispatcher(a, dtype=None, order=None, *, like=None): - return (like,) - - -@set_array_function_like_doc -@set_module('numpy') -def asarray(a, dtype=None, order=None, *, like=None): - """Convert the input to an array. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists and ndarrays. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. - ${ARRAY_FUNCTION_LIKE} - - .. versionadded:: 1.20.0 - - Returns - ------- - out : ndarray - Array interpretation of `a`. No copy is performed if the input - is already an ndarray with matching dtype and order. If `a` is a - subclass of ndarray, a base class ndarray is returned. - - See Also - -------- - asanyarray : Similar function which passes through subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and Infs. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. 
- - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asarray(a) - array([1, 2]) - - Existing arrays are not copied: - - >>> a = np.array([1, 2]) - >>> np.asarray(a) is a - True - - If `dtype` is set, array is copied only if dtype does not match: - - >>> a = np.array([1, 2], dtype=np.float32) - >>> np.asarray(a, dtype=np.float32) is a - True - >>> np.asarray(a, dtype=np.float64) is a - False - - Contrary to `asanyarray`, ndarray subclasses are not passed through: - - >>> issubclass(np.recarray, np.ndarray) - True - >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) - >>> np.asarray(a) is a - False - >>> np.asanyarray(a) is a - True - - """ - if like is not None: - return _asarray_with_like(a, dtype=dtype, order=order, like=like) - - return array(a, dtype, copy=False, order=order) - - -_asarray_with_like = array_function_dispatch( - _asarray_dispatcher -)(asarray) - - -@set_array_function_like_doc -@set_module('numpy') -def asanyarray(a, dtype=None, order=None, *, like=None): - """Convert the input to an ndarray, but pass ndarray subclasses through. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes scalars, lists, lists of tuples, tuples, tuples of tuples, - tuples of lists, and ndarrays. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. - ${ARRAY_FUNCTION_LIKE} - - .. versionadded:: 1.20.0 - - Returns - ------- - out : ndarray or an ndarray subclass - Array interpretation of `a`. If `a` is an ndarray or a subclass - of ndarray, it is returned as-is and no copy is performed. - - See Also - -------- - asarray : Similar function which always returns ndarrays. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and - Infs. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asanyarray(a) - array([1, 2]) - - Instances of `ndarray` subclasses are passed through as-is: - - >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) - >>> np.asanyarray(a) is a - True - - """ - if like is not None: - return _asanyarray_with_like(a, dtype=dtype, order=order, like=like) - - return array(a, dtype, copy=False, order=order, subok=True) - - -_asanyarray_with_like = array_function_dispatch( - _asarray_dispatcher -)(asanyarray) - - -def _asarray_contiguous_fortran_dispatcher(a, dtype=None, *, like=None): - return (like,) - - -@set_array_function_like_doc -@set_module('numpy') -def ascontiguousarray(a, dtype=None, *, like=None): - """ - Return a contiguous array (ndim >= 1) in memory (C order). - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - Data-type of returned array. - ${ARRAY_FUNCTION_LIKE} - - .. 
versionadded:: 1.20.0 - - Returns - ------- - out : ndarray - Contiguous array of same shape and content as `a`, with type `dtype` - if specified. - - See Also - -------- - asfortranarray : Convert input to an ndarray with column-major - memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> np.ascontiguousarray(x, dtype=np.float32) - array([[0., 1., 2.], - [3., 4., 5.]], dtype=float32) - >>> x.flags['C_CONTIGUOUS'] - True - - Note: This function returns an array with at least one-dimension (1-d) - so it will not preserve 0-d arrays. - - """ - if like is not None: - return _ascontiguousarray_with_like(a, dtype=dtype, like=like) - - return array(a, dtype, copy=False, order='C', ndmin=1) - - -_ascontiguousarray_with_like = array_function_dispatch( - _asarray_contiguous_fortran_dispatcher -)(ascontiguousarray) - - -@set_array_function_like_doc -@set_module('numpy') -def asfortranarray(a, dtype=None, *, like=None): - """ - Return an array (ndim >= 1) laid out in Fortran order in memory. - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - By default, the data-type is inferred from the input data. - ${ARRAY_FUNCTION_LIKE} - - .. versionadded:: 1.20.0 - - Returns - ------- - out : ndarray - The input `a` in Fortran, or column-major, order. - - See Also - -------- - ascontiguousarray : Convert input to a contiguous (C order) array. - asanyarray : Convert input to an ndarray with either row or - column-major memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> y = np.asfortranarray(x) - >>> x.flags['F_CONTIGUOUS'] - False - >>> y.flags['F_CONTIGUOUS'] - True - - Note: This function returns an array with at least one-dimension (1-d) - so it will not preserve 0-d arrays. - - """ - if like is not None: - return _asfortranarray_with_like(a, dtype=dtype, like=like) - - return array(a, dtype, copy=False, order='F', ndmin=1) - - -_asfortranarray_with_like = array_function_dispatch( - _asarray_contiguous_fortran_dispatcher -)(asfortranarray) - def _require_dispatcher(a, dtype=None, requirements=None, *, like=None): return (like,) diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index 09147fe5b7aa..e475b94dfb4e 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -8,7 +8,7 @@ from numpy.core import multiarray as mu from numpy.core import umath as um -from numpy.core._asarray import asanyarray +from numpy.core.multiarray import asanyarray from numpy.core import numerictypes as nt from numpy.core import _exceptions from numpy._globals import _NoValue diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index bb736d1a0e51..3646b39b0c70 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -10,8 +10,7 @@ from . import overrides from . import umath as um from . import numerictypes as nt -from ._asarray import asarray, array, asanyarray -from .multiarray import concatenate +from .multiarray import asarray, array, asanyarray, concatenate from . 
import _methods _dt_ = nt.sctype2char diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index b7277ac240d2..b7a3a8d67534 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -26,7 +26,8 @@ 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose', '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity', - 'add_docstring', 'arange', 'array', 'bincount', 'broadcast', + 'add_docstring', 'arange', 'array', 'asarray', 'asanyarray', + 'ascontiguousarray', 'asfortranarray', 'bincount', 'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', @@ -49,6 +50,10 @@ arange.__module__ = 'numpy' array.__module__ = 'numpy' +asarray.__module__ = 'numpy' +asanyarray.__module__ = 'numpy' +ascontiguousarray.__module__ = 'numpy' +asfortranarray.__module__ = 'numpy' datetime_data.__module__ = 'numpy' empty.__module__ = 'numpy' frombuffer.__module__ = 'numpy' diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 7675386e70b1..45d41fb1852b 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -10,7 +10,8 @@ from .multiarray import ( _fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS, BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE, - WRAP, arange, array, broadcast, can_cast, compare_chararrays, + WRAP, arange, array, asarray, asanyarray, ascontiguousarray, + asfortranarray, broadcast, can_cast, compare_chararrays, concatenate, copyto, dot, dtype, empty, empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring, inner, lexsort, matmul, may_share_memory, @@ -26,7 +27,6 @@ from . import numerictypes from .numerictypes import longlong, intc, int_, float_, complex_, bool_ from ._exceptions import TooHardError, AxisError -from ._asarray import asarray, asanyarray from ._ufunc_config import errstate bitwise_not = invert @@ -39,7 +39,8 @@ __all__ = [ 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', - 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', + 'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray', + 'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type', @@ -2362,7 +2363,7 @@ def within_tol(x, y, atol, rtol): # Although, the default tolerances are unlikely to be useful if y.dtype.kind != "m": dt = multiarray.result_type(y, 1.) - y = array(y, dtype=dt, copy=False, subok=True) + y = asanyarray(y, dtype=dt) xfin = isfinite(x) yfin = isfinite(y) diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 89e98ab3072d..a81a04f7ff0e 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -8,8 +8,7 @@ from . import numeric as _nx from . import overrides -from ._asarray import array, asanyarray -from .multiarray import normalize_axis_index +from .multiarray import array, asanyarray, normalize_axis_index from . 
import fromnumeric as _from_nx diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c index 2c07cdebc625..899919ff70c6 100644 --- a/numpy/core/src/multiarray/arrayfunction_override.c +++ b/numpy/core/src/multiarray/arrayfunction_override.c @@ -213,7 +213,7 @@ call_array_function(PyObject* argument, PyObject* method, * to NotImplemented to indicate the default implementation should * be used. */ -NPY_NO_EXPORT PyObject * +static PyObject * array_implement_array_function_internal( PyObject *public_api, PyObject *relevant_args, PyObject *args, PyObject *kwargs) @@ -364,66 +364,99 @@ array_implement_array_function( return res; } - /* * Implements the __array_function__ protocol for C array creation functions * only. Added as an extension to NEP-18 in an effort to bring NEP-35 to * life with minimal dispatch overhead. + * + * The caller must ensure that `like != NULL`. */ NPY_NO_EXPORT PyObject * array_implement_c_array_function_creation( - const char *function_name, PyObject *args, PyObject *kwargs) + const char *function_name, PyObject *like, + PyObject *args, PyObject *kwargs, + PyObject *const *fast_args, Py_ssize_t len_args, PyObject *kwnames) { - if (kwargs == NULL) { - return Py_NotImplemented; + PyObject *relevant_args = NULL; + PyObject *numpy_module = NULL; + PyObject *public_api = NULL; + PyObject *result = NULL; + + if (!get_array_function(like)) { + return PyErr_Format(PyExc_TypeError, + "The `like` argument must be an array-like that implements " + "the `__array_function__` protocol."); } - /* Remove `like=` kwarg, which is NumPy-exclusive and thus not present - * in downstream libraries. If that key isn't present, return NULL and - * let originating call to continue. If the key is present but doesn't - * implement `__array_function__`, raise a `TypeError`. - */ - if (!PyDict_Contains(kwargs, npy_ma_str_like)) { - return Py_NotImplemented; + if (fast_args != NULL) { + /* + * Convert from vectorcall convention, since the protocol requires + * the normal convention. We have to do this late to ensure the + * normal path where NotImplemented is returned is fast. 
+ */ + assert(args == NULL); + assert(kwargs == NULL); + args = PyTuple_New(len_args); + if (args == NULL) { + return NULL; + } + for (Py_ssize_t i = 0; i < len_args; i++) { + Py_INCREF(fast_args[i]); + PyTuple_SET_ITEM(args, i, fast_args[i]); + } + if (kwnames != NULL) { + kwargs = PyDict_New(); + if (kwargs == NULL) { + Py_DECREF(args); + } + Py_ssize_t nkwargs = PyTuple_GET_SIZE(kwnames); + for (Py_ssize_t i = 0; i < nkwargs; i++) { + PyObject *key = PyTuple_GET_ITEM(kwnames, i); + PyObject *value = fast_args[i+len_args]; + if (PyDict_SetItem(kwargs, key, value) < 0) { + Py_DECREF(args); + Py_DECREF(kwargs); + } + } + } } - PyObject *like_arg = PyDict_GetItem(kwargs, npy_ma_str_like); - if (like_arg == NULL) { - return NULL; + relevant_args = PyTuple_Pack(1, like); + if (relevant_args == NULL) { + goto finish; } - else if (!get_array_function(like_arg)) { - return PyErr_Format(PyExc_TypeError, - "The `like` argument must be an array-like that implements " - "the `__array_function__` protocol."); + /* The like argument must be present in the keyword arguments, remove it */ + if (PyDict_DelItem(kwargs, npy_ma_str_like) < 0) { + goto finish; } - PyObject *relevant_args = PyTuple_Pack(1, like_arg); - PyDict_DelItem(kwargs, npy_ma_str_like); - PyObject *numpy_module = PyImport_Import(npy_ma_str_numpy); + numpy_module = PyImport_Import(npy_ma_str_numpy); if (numpy_module == NULL) { - Py_DECREF(relevant_args); - return NULL; + goto finish; } - PyObject *public_api = PyObject_GetAttrString(numpy_module, function_name); + public_api = PyObject_GetAttrString(numpy_module, function_name); Py_DECREF(numpy_module); if (public_api == NULL) { - Py_DECREF(relevant_args); - return NULL; + goto finish; } if (!PyCallable_Check(public_api)) { - Py_DECREF(relevant_args); - Py_DECREF(public_api); - return PyErr_Format(PyExc_RuntimeError, - "numpy.%s is not callable.", - function_name); + PyErr_Format(PyExc_RuntimeError, + "numpy.%s is not callable.", function_name); + goto finish; } - PyObject* result = array_implement_array_function_internal( + result = array_implement_array_function_internal( public_api, relevant_args, args, kwargs); - Py_DECREF(relevant_args); - Py_DECREF(public_api); + finish: + if (kwnames != NULL) { + /* args and kwargs were converted from vectorcall convention */ + Py_XDECREF(args); + Py_XDECREF(kwargs); + } + Py_XDECREF(relevant_args); + Py_XDECREF(public_api); return result; } diff --git a/numpy/core/src/multiarray/arrayfunction_override.h b/numpy/core/src/multiarray/arrayfunction_override.h index fdcf1746d62d..fdf0dfcaf3f5 100644 --- a/numpy/core/src/multiarray/arrayfunction_override.h +++ b/numpy/core/src/multiarray/arrayfunction_override.h @@ -11,7 +11,9 @@ array__get_implementing_args( NPY_NO_EXPORT PyObject * array_implement_c_array_function_creation( - const char *function_name, PyObject *args, PyObject *kwargs); + const char *function_name, PyObject *like, + PyObject *args, PyObject *kwargs, + PyObject *const *fast_args, Py_ssize_t len_args, PyObject *kwnames); NPY_NO_EXPORT PyObject * array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index a0f7afeb5938..0da27ac171bd 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1556,135 +1556,23 @@ _prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order) ((order) == NPY_CORDER && PyArray_IS_C_CONTIGUOUS(op)) || \ ((order) == 
NPY_FORTRANORDER && PyArray_IS_F_CONTIGUOUS(op))) -static PyObject * -_array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) +static NPY_INLINE PyObject * +_array_fromobject_generic( + PyObject *op, PyArray_Descr *type, npy_bool copy, NPY_ORDER order, + npy_bool subok, int ndmin) { - PyObject *op; PyArrayObject *oparr = NULL, *ret = NULL; - npy_bool subok = NPY_FALSE; - npy_bool copy = NPY_TRUE; - int ndmin = 0, nd; - PyObject* like; - PyArray_Descr *type = NULL; PyArray_Descr *oldtype = NULL; - NPY_ORDER order = NPY_KEEPORDER; - int flags = 0; - - PyObject* array_function_result = NULL; - - static char *kwd[] = {"object", "dtype", "copy", "order", "subok", - "ndmin", "like", NULL}; - - if (PyTuple_GET_SIZE(args) > 2) { - PyErr_Format(PyExc_TypeError, - "array() takes from 1 to 2 positional arguments but " - "%zd were given", PyTuple_GET_SIZE(args)); - return NULL; - } - - array_function_result = array_implement_c_array_function_creation( - "array", args, kws); - if (array_function_result != Py_NotImplemented) { - return array_function_result; - } - - /* super-fast path for ndarray argument calls */ - if (PyTuple_GET_SIZE(args) == 0) { - goto full_path; - } - op = PyTuple_GET_ITEM(args, 0); - if (PyArray_CheckExact(op)) { - PyObject * dtype_obj = Py_None; - oparr = (PyArrayObject *)op; - /* get dtype which can be positional */ - if (PyTuple_GET_SIZE(args) == 2) { - dtype_obj = PyTuple_GET_ITEM(args, 1); - } - else if (kws) { - dtype_obj = PyDict_GetItemWithError(kws, npy_ma_str_dtype); - if (dtype_obj == NULL && PyErr_Occurred()) { - return NULL; - } - if (dtype_obj == NULL) { - dtype_obj = Py_None; - } - } - if (dtype_obj != Py_None) { - goto full_path; - } - - /* array(ndarray) */ - if (kws == NULL) { - ret = (PyArrayObject *)PyArray_NewCopy(oparr, order); - goto finish; - } - else { - /* fast path for copy=False rest default (np.asarray) */ - PyObject * copy_obj, * order_obj, *ndmin_obj; - copy_obj = PyDict_GetItemWithError(kws, npy_ma_str_copy); - if (copy_obj == NULL && PyErr_Occurred()) { - return NULL; - } - if (copy_obj != Py_False) { - goto full_path; - } - copy = NPY_FALSE; - - /* order does not matter for contiguous 1d arrays */ - if (PyArray_NDIM((PyArrayObject*)op) > 1 || - !PyArray_IS_C_CONTIGUOUS((PyArrayObject*)op)) { - order_obj = PyDict_GetItemWithError(kws, npy_ma_str_order); - if (order_obj == NULL && PyErr_Occurred()) { - return NULL; - } - else if (order_obj != Py_None && order_obj != NULL) { - goto full_path; - } - } - - ndmin_obj = PyDict_GetItemWithError(kws, npy_ma_str_ndmin); - if (ndmin_obj == NULL && PyErr_Occurred()) { - return NULL; - } - else if (ndmin_obj) { - long t = PyLong_AsLong(ndmin_obj); - if (error_converting(t)) { - goto clean_type; - } - else if (t > NPY_MAXDIMS) { - goto full_path; - } - ndmin = t; - } - - /* copy=False with default dtype, order (any is OK) and ndim */ - ret = oparr; - Py_INCREF(ret); - goto finish; - } - } - -full_path: - if (!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i$O:array", kwd, - &op, - PyArray_DescrConverter2, &type, - PyArray_BoolConverter, ©, - PyArray_OrderConverter, &order, - PyArray_BoolConverter, &subok, - &ndmin, - &like)) { - goto clean_type; - } + int nd, flags = 0; if (ndmin > NPY_MAXDIMS) { PyErr_Format(PyExc_ValueError, "ndmin bigger than allowable number of dimensions " "NPY_MAXDIMS (=%d)", NPY_MAXDIMS); - goto clean_type; + return NULL; } /* fast exit if simple call */ - if ((subok && PyArray_Check(op)) || - (!subok && PyArray_CheckExact(op))) { + if (PyArray_CheckExact(op) 
|| (subok && PyArray_Check(op))) { oparr = (PyArrayObject *)op; if (type == NULL) { if (!copy && STRIDING_OK(oparr, order)) { @@ -1739,8 +1627,7 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) ret = (PyArrayObject *)PyArray_CheckFromAny(op, type, 0, 0, flags, NULL); - finish: - Py_XDECREF(type); +finish: if (ret == NULL) { return NULL; } @@ -1754,16 +1641,215 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) * steals a reference to ret */ return _prepend_ones(ret, nd, ndmin, order); +} + +#undef STRIDING_OK + -clean_type: +static PyObject * +array_array(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + PyObject *op; + npy_bool subok = NPY_FALSE; + npy_bool copy = NPY_TRUE; + int ndmin = 0; + PyArray_Descr *type = NULL; + NPY_ORDER order = NPY_KEEPORDER; + PyObject *like = NULL; + NPY_PREPARE_ARGPARSER; + + if (len_args != 1 || (kwnames != NULL)) { + if (npy_parse_arguments("array", args, len_args, kwnames, + "object", NULL, &op, + "|dtype", &PyArray_DescrConverter2, &type, + "$copy", &PyArray_BoolConverter, ©, + "$order", &PyArray_OrderConverter, &order, + "$subok", &PyArray_BoolConverter, &subok, + "$ndmin", &PyArray_PythonPyIntFromInt, &ndmin, + "$like", NULL, &like, + NULL, NULL, NULL) < 0) { + Py_XDECREF(type); + return NULL; + } + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "array", like, NULL, NULL, args, len_args, kwnames); + if (deferred != Py_NotImplemented) { + Py_XDECREF(type); + return deferred; + } + } + } + else { + /* Fast path for symmetry (we copy by default which is slow) */ + op = args[0]; + } + + PyObject *res = _array_fromobject_generic( + op, type, copy, order, subok, ndmin); Py_XDECREF(type); - return NULL; + return res; } static PyObject * -array_copyto(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) +array_asarray(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + PyObject *op; + PyArray_Descr *type = NULL; + NPY_ORDER order = NPY_KEEPORDER; + PyObject *like = NULL; + NPY_PREPARE_ARGPARSER; + + if (len_args != 1 || (kwnames != NULL)) { + if (npy_parse_arguments("asarray", args, len_args, kwnames, + "object", NULL, &op, + "|dtype", &PyArray_DescrConverter2, &type, + "|order", &PyArray_OrderConverter, &order, + "$like", NULL, &like, + NULL, NULL, NULL) < 0) { + Py_XDECREF(type); + return NULL; + } + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "asarray", like, NULL, NULL, args, len_args, kwnames); + if (deferred != Py_NotImplemented) { + Py_XDECREF(type); + return deferred; + } + } + } + else { + op = args[0]; + } + + PyObject *res = _array_fromobject_generic( + op, type, NPY_FALSE, order, NPY_FALSE, 0); + Py_XDECREF(type); + return res; +} + +static PyObject * +array_asanyarray(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + PyObject *op; + PyArray_Descr *type = NULL; + NPY_ORDER order = NPY_KEEPORDER; + PyObject *like = NULL; + NPY_PREPARE_ARGPARSER; + + if (len_args != 1 || (kwnames != NULL)) { + if (npy_parse_arguments("asanyarray", args, len_args, kwnames, + "object", NULL, &op, + "|dtype", &PyArray_DescrConverter2, &type, + "|order", &PyArray_OrderConverter, &order, + "$like", NULL, &like, + NULL, NULL, NULL) < 0) { + Py_XDECREF(type); + return NULL; + } + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + 
"asanyarray", like, NULL, NULL, args, len_args, kwnames); + if (deferred != Py_NotImplemented) { + Py_XDECREF(type); + return deferred; + } + } + } + else { + op = args[0]; + } + + PyObject *res = _array_fromobject_generic( + op, type, NPY_FALSE, order, NPY_TRUE, 0); + Py_XDECREF(type); + return res; +} + + +static PyObject * +array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + PyObject *op; + PyArray_Descr *type = NULL; + PyObject *like = NULL; + NPY_PREPARE_ARGPARSER; + + if (len_args != 1 || (kwnames != NULL)) { + if (npy_parse_arguments("ascontiguousarray", args, len_args, kwnames, + "object", NULL, &op, + "|dtype", &PyArray_DescrConverter2, &type, + "$like", NULL, &like, + NULL, NULL, NULL) < 0) { + Py_XDECREF(type); + return NULL; + } + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "ascontiguousarray", like, NULL, NULL, args, len_args, kwnames); + if (deferred != Py_NotImplemented) { + Py_XDECREF(type); + return deferred; + } + } + } + else { + op = args[0]; + } + + PyObject *res = _array_fromobject_generic( + op, type, NPY_FALSE, NPY_CORDER, NPY_FALSE, 1); + Py_XDECREF(type); + return res; +} + + +static PyObject * +array_asfortranarray(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { + PyObject *op; + PyArray_Descr *type = NULL; + PyObject *like = NULL; + NPY_PREPARE_ARGPARSER; + + if (len_args != 1 || (kwnames != NULL)) { + if (npy_parse_arguments("asfortranarray", args, len_args, kwnames, + "object", NULL, &op, + "|dtype", &PyArray_DescrConverter2, &type, + "$like", NULL, &like, + NULL, NULL, NULL) < 0) { + Py_XDECREF(type); + return NULL; + } + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "asfortranarray", like, NULL, NULL, args, len_args, kwnames); + if (deferred != Py_NotImplemented) { + Py_XDECREF(type); + return deferred; + } + } + } + else { + op = args[0]; + } + + PyObject *res = _array_fromobject_generic( + op, type, NPY_FALSE, NPY_FORTRANORDER, NPY_FALSE, 1); + Py_XDECREF(type); + return res; +} + +static PyObject * +array_copyto(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) +{ static char *kwlist[] = {"dst", "src", "casting", "where", NULL}; PyObject *wheremask_in = NULL; PyArrayObject *dst = NULL, *src = NULL, *wheremask = NULL; @@ -1806,32 +1892,34 @@ array_copyto(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) } static PyObject * -array_empty(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) +array_empty(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - - static char *kwlist[] = {"shape", "dtype", "order", "like", NULL}; PyArray_Descr *typecode = NULL; PyArray_Dims shape = {NULL, 0}; NPY_ORDER order = NPY_CORDER; - PyObject *like = NULL; npy_bool is_f_order; - PyObject *array_function_result = NULL; PyArrayObject *ret = NULL; + PyObject *like = NULL; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&$O:empty", kwlist, - PyArray_IntpConverter, &shape, - PyArray_DescrConverter, &typecode, - PyArray_OrderConverter, &order, - &like)) { + if (npy_parse_arguments("empty", args, len_args, kwnames, + "shape", &PyArray_IntpConverter, &shape, + "|dtype", &PyArray_DescrConverter, &typecode, + "|order", &PyArray_OrderConverter, &order, + "$like", NULL, &like, + NULL, NULL, NULL) < 0) { goto fail; } - array_function_result = 
array_implement_c_array_function_creation( - "empty", args, kwds); - if (array_function_result != Py_NotImplemented) { - Py_XDECREF(typecode); - npy_free_cache_dim_obj(shape); - return array_function_result; + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "empty", like, NULL, NULL, args, len_args, kwnames); + if (deferred != Py_NotImplemented) { + Py_XDECREF(typecode); + npy_free_cache_dim_obj(shape); + return deferred; + } } switch (order) { @@ -2006,31 +2094,35 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) } static PyObject * -array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) +array_zeros(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - static char *kwlist[] = {"shape", "dtype", "order", "like", NULL}; PyArray_Descr *typecode = NULL; PyArray_Dims shape = {NULL, 0}; NPY_ORDER order = NPY_CORDER; - PyObject *like = NULL; npy_bool is_f_order = NPY_FALSE; - PyObject *array_function_result = NULL; PyArrayObject *ret = NULL; + PyObject *like = NULL; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&$O:zeros", kwlist, - PyArray_IntpConverter, &shape, - PyArray_DescrConverter, &typecode, - PyArray_OrderConverter, &order, - &like)) { + if (npy_parse_arguments("zeros", args, len_args, kwnames, + "shape", &PyArray_IntpConverter, &shape, + "|dtype", &PyArray_DescrConverter, &typecode, + "|order", &PyArray_OrderConverter, &order, + "$like", NULL, &like, + NULL, NULL, NULL) < 0) { goto fail; } - array_function_result = array_implement_c_array_function_creation( - "zeros", args, kwds); - if (array_function_result != Py_NotImplemented) { - Py_XDECREF(typecode); - npy_free_cache_dim_obj(shape); - return array_function_result; + + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "zeros", like, NULL, NULL, args, len_args, kwnames); + if (deferred != Py_NotImplemented) { + Py_XDECREF(typecode); + npy_free_cache_dim_obj(shape); + return deferred; + } } switch (order) { @@ -2088,7 +2180,6 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds static char *kwlist[] = {"string", "dtype", "count", "sep", "like", NULL}; PyObject *like = NULL; PyArray_Descr *descr = NULL; - PyObject *array_function_result = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "s#|O&" NPY_SSIZE_T_PYFMT "s$O:fromstring", kwlist, @@ -2096,11 +2187,13 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds Py_XDECREF(descr); return NULL; } - - array_function_result = array_implement_c_array_function_creation( - "fromstring", args, keywds); - if (array_function_result != Py_NotImplemented) { - return array_function_result; + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "fromstring", like, args, keywds, NULL, 0, NULL); + if (deferred != Py_NotImplemented) { + Py_XDECREF(descr); + return deferred; + } } /* binary mode, condition copied from PyArray_FromString */ @@ -2128,7 +2221,6 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) static char *kwlist[] = {"file", "dtype", "count", "sep", "offset", "like", NULL}; PyObject *like = NULL; PyArray_Descr *type = NULL; - PyObject *array_function_result = NULL; int own; npy_off_t orig_pos = 0, offset = 0; FILE *fp; @@ -2140,11 +2232,13 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) return NULL; } - 
array_function_result = array_implement_c_array_function_creation( - "fromfile", args, keywds); - if (array_function_result != Py_NotImplemented) { - Py_XDECREF(type); - return array_function_result; + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "fromfile", like, args, keywds, NULL, 0, NULL); + if (deferred != Py_NotImplemented) { + Py_XDECREF(type); + return deferred; + } } file = NpyPath_PathlikeToFspath(file); @@ -2217,7 +2311,6 @@ array_fromiter(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) static char *kwlist[] = {"iter", "dtype", "count", "like", NULL}; PyObject *like = NULL; PyArray_Descr *descr = NULL; - PyObject *array_function_result = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "OO&|" NPY_SSIZE_T_PYFMT "$O:fromiter", kwlist, @@ -2225,12 +2318,13 @@ array_fromiter(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) Py_XDECREF(descr); return NULL; } - - array_function_result = array_implement_c_array_function_creation( - "fromiter", args, keywds); - if (array_function_result != Py_NotImplemented) { - Py_DECREF(descr); - return array_function_result; + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "fromiter", like, args, keywds, NULL, 0, NULL); + if (deferred != Py_NotImplemented) { + Py_XDECREF(descr); + return deferred; + } } return PyArray_FromIter(iter, descr, (npy_intp)nin); @@ -2244,7 +2338,6 @@ array_frombuffer(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds static char *kwlist[] = {"buffer", "dtype", "count", "offset", "like", NULL}; PyObject *like = NULL; PyArray_Descr *type = NULL; - PyObject *array_function_result = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "O|O&" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT "$O:frombuffer", kwlist, @@ -2253,11 +2346,13 @@ array_frombuffer(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds return NULL; } - array_function_result = array_implement_c_array_function_creation( - "frombuffer", args, keywds); - if (array_function_result != Py_NotImplemented) { - Py_XDECREF(type); - return array_function_result; + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "frombuffer", like, args, keywds, NULL, 0, NULL); + if (deferred != Py_NotImplemented) { + Py_XDECREF(type); + return deferred; + } } if (type == NULL) { @@ -2865,25 +2960,35 @@ array_correlate2(PyObject *NPY_UNUSED(dummy), } static PyObject * -array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { +array_arange(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ PyObject *o_start = NULL, *o_stop = NULL, *o_step = NULL, *range=NULL; - PyObject *like = NULL; - PyObject *array_function_result = NULL; - static char *kwd[] = {"start", "stop", "step", "dtype", "like", NULL}; PyArray_Descr *typecode = NULL; + PyObject *like = NULL; + NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kws, "|OOOO&$O:arange", kwd, - &o_start, - &o_stop, - &o_step, - PyArray_DescrConverter2, &typecode, - &like)) { + if (npy_parse_arguments("arange", args, len_args, kwnames, + "|start", NULL, &o_start, + "|stop", NULL, &o_stop, + "|step", NULL, &o_step, + "|dtype", &PyArray_DescrConverter2, &typecode, + "$like", NULL, &like, + NULL, NULL, NULL) < 0) { Py_XDECREF(typecode); return NULL; } + if (like != NULL) { + PyObject *deferred = array_implement_c_array_function_creation( + "arange", like, NULL, NULL, args, len_args, kwnames); + if 
(deferred != Py_NotImplemented) { + Py_XDECREF(typecode); + return deferred; + } + } if (o_stop == NULL) { - if (args == NULL || PyTuple_GET_SIZE(args) == 0){ + if (len_args == 0){ PyErr_SetString(PyExc_TypeError, "arange() requires stop to be specified."); Py_XDECREF(typecode); @@ -2895,13 +3000,6 @@ array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { o_stop = NULL; } - array_function_result = array_implement_c_array_function_creation( - "arange", args, kws); - if (array_function_result != Py_NotImplemented) { - Py_XDECREF(typecode); - return array_function_result; - } - range = PyArray_ArangeObj(o_start, o_stop, o_step, typecode); Py_XDECREF(typecode); @@ -4149,8 +4247,20 @@ static struct PyMethodDef array_module_methods[] = { (PyCFunction)array_set_typeDict, METH_VARARGS, NULL}, {"array", - (PyCFunction)_array_fromobject, - METH_VARARGS|METH_KEYWORDS, NULL}, + (PyCFunction)array_array, + METH_FASTCALL | METH_KEYWORDS, NULL}, + {"asarray", + (PyCFunction)array_asarray, + METH_FASTCALL | METH_KEYWORDS, NULL}, + {"asanyarray", + (PyCFunction)array_asanyarray, + METH_FASTCALL | METH_KEYWORDS, NULL}, + {"ascontiguousarray", + (PyCFunction)array_ascontiguousarray, + METH_FASTCALL | METH_KEYWORDS, NULL}, + {"asfortranarray", + (PyCFunction)array_asfortranarray, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"copyto", (PyCFunction)array_copyto, METH_VARARGS|METH_KEYWORDS, NULL}, @@ -4159,16 +4269,16 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS|METH_KEYWORDS, NULL}, {"arange", (PyCFunction)array_arange, - METH_VARARGS|METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"zeros", (PyCFunction)array_zeros, - METH_VARARGS|METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"count_nonzero", (PyCFunction)array_count_nonzero, METH_VARARGS|METH_KEYWORDS, NULL}, {"empty", (PyCFunction)array_empty, - METH_VARARGS|METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"empty_like", (PyCFunction)array_empty_like, METH_VARARGS|METH_KEYWORDS, NULL}, diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py index 6862fca03826..0809e1e92da8 100644 --- a/numpy/core/tests/test_overrides.py +++ b/numpy/core/tests/test_overrides.py @@ -577,5 +577,6 @@ def test_exception_handling(self): ref = self.MyArray.array() - with assert_raises(ValueError): + with assert_raises(TypeError): + # Raises the error about `value_error` being invalid first np.array(1, value_error=True, like=ref) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index dbc683a6b24b..8ba6f15e5a6d 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -54,7 +54,7 @@ import os from numpy import ( - integer, ndarray, dtype as _dtype, array, frombuffer + integer, ndarray, dtype as _dtype, asarray, frombuffer ) from numpy.core.multiarray import _flagdict, flagsobj @@ -515,7 +515,7 @@ def as_array(obj, shape=None): p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) obj = ctypes.cast(obj, p_arr_type).contents - return array(obj, copy=False) + return asarray(obj) def as_ctypes(obj): diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index c6db42ce4905..44eac31ef5cf 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -593,7 +593,7 @@ def piecewise(x, condlist, funclist, *args, **kw): not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): condlist = [condlist] - condlist = array(condlist, dtype=bool) + condlist = asarray(condlist, dtype=bool) n = len(condlist) if n == n2 - 1: # compute the 
"otherwise" condition. @@ -2191,15 +2191,14 @@ def _vectorize_call(self, func, args): ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) # Convert args to object arrays first - inputs = [array(a, copy=False, subok=True, dtype=object) - for a in args] + inputs = [asanyarray(a, dtype=object) for a in args] outputs = ufunc(*inputs) if ufunc.nout == 1: - res = array(outputs, copy=False, subok=True, dtype=otypes[0]) + res = asanyarray(outputs, dtype=otypes[0]) else: - res = tuple([array(x, copy=False, subok=True, dtype=t) + res = tuple([asanyarray(x, dtype=t) for x, t in zip(outputs, otypes)]) return res diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index d19bfb8f8073..a3fbee3d5fb3 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -649,7 +649,7 @@ def column_stack(tup): arrays = [] for v in tup: - arr = array(v, copy=False, subok=True) + arr = asanyarray(v) if arr.ndim < 2: arr = array(arr, copy=False, subok=True, ndmin=2).T arrays.append(arr) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 10ee0fb06f56..90c808df4d98 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3895,7 +3895,7 @@ def compress(self, condition, axis=None, out=None): # Force the condition to a regular ndarray and forget the missing # values. - condition = np.array(condition, copy=False, subok=False) + condition = np.asarray(condition) _new = _data.compress(condition, axis=axis, out=out).view(type(self)) _new._update_from(self) diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index 8d55e1763da1..2dd479abe480 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -134,8 +134,8 @@ def assert_equal(actual, desired, err_msg=''): msg = build_err_msg([actual, desired], err_msg, header='', names=('x', 'y')) raise ValueError(msg) - actual = np.array(actual, copy=False, subok=True) - desired = np.array(desired, copy=False, subok=True) + actual = np.asanyarray(actual) + desired = np.asanyarray(desired) (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) if actual_dtype.char == "S" and desired_dtype.char == "S": return _assert_equal_on_sequences(actual.tolist(), diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index b4d42728e3cd..f0fd1857a9ee 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -17,6 +17,7 @@ from warnings import WarningMessage import pprint +import numpy as np from numpy.core import( intp, float32, empty, arange, array_repr, ndarray, isnat, array) import numpy.linalg.lapack_lite @@ -378,7 +379,8 @@ def assert_equal(actual, desired, err_msg='', verbose=True): try: isdesnat = isnat(desired) isactnat = isnat(actual) - dtypes_match = array(desired).dtype.type == array(actual).dtype.type + dtypes_match = (np.asarray(desired).dtype.type == + np.asarray(actual).dtype.type) if isdesnat and isactnat: # If both are NaT (and have the same dtype -- datetime or # timedelta) they are considered equal. 
@@ -398,8 +400,8 @@ def assert_equal(actual, desired, err_msg='', verbose=True): return # both nan, so equal # handle signed zero specially for floats - array_actual = array(actual) - array_desired = array(desired) + array_actual = np.asarray(actual) + array_desired = np.asarray(desired) if (array_actual.dtype.char in 'Mm' or array_desired.dtype.char in 'Mm'): # version 1.18 @@ -701,8 +703,8 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', __tracebackhide__ = True # Hide traceback for py.test from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_ - x = array(x, copy=False, subok=True) - y = array(y, copy=False, subok=True) + x = np.asanyarray(x) + y = np.asanyarray(y) # original array for output formatting ox, oy = x, y @@ -1033,7 +1035,7 @@ def compare(x, y): # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. dtype = result_type(y, 1.) - y = array(y, dtype=dtype, copy=False, subok=True) + y = np.asanyarray(y, dtype) z = abs(x - y) if not issubdtype(z.dtype, number): @@ -1678,11 +1680,11 @@ def nulp_diff(x, y, dtype=None): """ import numpy as np if dtype: - x = np.array(x, dtype=dtype) - y = np.array(y, dtype=dtype) + x = np.asarray(x, dtype=dtype) + y = np.asarray(y, dtype=dtype) else: - x = np.array(x) - y = np.array(y) + x = np.asarray(x) + y = np.asarray(y) t = np.common_type(x, y) if np.iscomplexobj(x) or np.iscomplexobj(y): @@ -1699,7 +1701,7 @@ def nulp_diff(x, y, dtype=None): (x.shape, y.shape)) def _diff(rx, ry, vdt): - diff = np.array(rx-ry, dtype=vdt) + diff = np.asarray(rx-ry, dtype=vdt) return np.abs(diff) rx = integer_repr(x) From 972127ddff5482ab32fa48eb175dd90ea7b7e993 Mon Sep 17 00:00:00 2001 From: Neil Date: Thu, 18 Mar 2021 17:13:49 -0400 Subject: [PATCH 0779/1270] Remove mention of nose from README As per NEP 29, Numpy < 1.15 is dead, so there is no need to mention it in the README. --- README.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/README.md b/README.md index cc34f47bf6c7..9ed3e7742243 100644 --- a/README.md +++ b/README.md @@ -19,10 +19,7 @@ It provides: Testing: -- NumPy versions ≥ 1.15 require `pytest` -- NumPy versions < 1.15 require `nose` - -Tests can then be run after installation with: +NumPy requires `pytest`. 
Tests can then be run after installation with: python -c 'import numpy; numpy.test()' From 0ea47e1901d08cad5fba9cba6ddb40a9ed947bd8 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 18 Mar 2021 16:18:52 -0500 Subject: [PATCH 0780/1270] BUG: import `asanyarray` to `_asarray.py` as its used in `np.require` --- numpy/core/_asarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/_asarray.py b/numpy/core/_asarray.py index 9522b7e38833..ecb4e7c39d0c 100644 --- a/numpy/core/_asarray.py +++ b/numpy/core/_asarray.py @@ -8,7 +8,7 @@ set_array_function_like_doc, set_module, ) -from .multiarray import array +from .multiarray import array, asanyarray __all__ = ["require"] From b2190e837e20f3720809791ccb4ba6f9d080f642 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 18 Mar 2021 16:27:41 -0500 Subject: [PATCH 0781/1270] Update numpy/core/src/multiarray/calculation.c Co-authored-by: Matti Picus --- numpy/core/src/multiarray/calculation.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c index 9fe05f7a393d..7308c6b714ec 100644 --- a/numpy/core/src/multiarray/calculation.c +++ b/numpy/core/src/multiarray/calculation.c @@ -423,7 +423,8 @@ __New_PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, return NULL; } arr2 = (PyArrayObject *)PyArray_EnsureAnyArray( - PyArray_GenericBinaryFunction((PyObject *)arr1, obj3, n_ops.multiply)); + PyArray_GenericBinaryFunction((PyObject *)arr1, obj3, + n_ops.multiply)); Py_DECREF(arr1); Py_DECREF(obj3); if (arr2 == NULL) { From d7af05a849b3b81922ec3da988494a70a875ca91 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 18 Mar 2021 16:35:36 -0500 Subject: [PATCH 0782/1270] TST: Add minimal test for passing an out argument to arr.conjugate() This can currently only be passed positionally, and this is undocumented. --- numpy/core/tests/test_multiarray.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index b30fcb812aa5..0b61140973d2 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -3390,6 +3390,15 @@ def test_conjugate(self): assert_raises(TypeError, lambda: a.conj()) assert_raises(TypeError, lambda: a.conjugate()) + def test_conjugate_out(self): + # Minimal test for the out argument being passed on correctly + # NOTE: The ability to pass `out` is currently undocumented! + a = np.array([1-1j, 1+1j, 23+23.0j]) + out = np.empty_like(a) + res = a.conjugate(out) + assert res is out + assert_array_equal(out, a.conjugate()) + def test__complex__(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', From ee296b231dc94f8908b06247e063d2a653de6c7b Mon Sep 17 00:00:00 2001 From: Andras Deak Date: Thu, 18 Mar 2021 21:16:59 +0100 Subject: [PATCH 0783/1270] DOC: Make inline code example consistent in ufunc reference --- doc/source/reference/ufuncs.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 06fbe28ddcba..c919ec9b8e65 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -628,8 +628,8 @@ Math operations for large calculations. If your arrays are large, complicated expressions can take longer than absolutely necessary due to the creation and (later) destruction of temporary calculation - spaces. 
For example, the expression ``G = a * b + c`` is equivalent to - ``t1 = A * B; G = T1 + C; del t1``. It will be more quickly executed + spaces. For example, the expression ``G = A * B + C`` is equivalent to + ``T1 = A * B; G = T1 + C; del T1``. It will be more quickly executed as ``G = A * B; add(G, C, G)`` which is the same as ``G = A * B; G += C``. From cb71c43d45c0ad6e14db97a51e317b771f6a9ebb Mon Sep 17 00:00:00 2001 From: Abhay Raghuvanshi Date: Fri, 19 Mar 2021 04:45:07 +0530 Subject: [PATCH 0784/1270] MAINT: Added Chain exceptions where appropriate (#18400) * Added chain exception in _pocketfft.py * Added chain exception in format.py * Added chain exception in make_lite.py * Added chain exception in mrecords.py * added from e for exceptions * Minor update for _read_array_header exception msg * Removed \n from excp msg and e from msg format. Co-authored-by: Eric Wieser * Update numpy/linalg/lapack_lite/make_lite.py Co-authored-by: Eric Wieser Co-authored-by: Eric Wieser Co-authored-by: Charles Harris --- numpy/fft/_pocketfft.py | 2 +- numpy/lib/format.py | 4 ++-- numpy/ma/mrecords.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index bf0e60b6de56..4ed3042a63e5 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -112,7 +112,7 @@ def _swap_direction(norm): return _SWAP_DIRECTION_MAP[norm] except KeyError: raise ValueError(f'Invalid norm value {norm}; should be "backward", ' - '"ortho" or "forward".') + '"ortho" or "forward".') from None def _fft_dispatcher(a, n=None, axis=None, norm=None): diff --git a/numpy/lib/format.py b/numpy/lib/format.py index ac5f75fba1cf..904c32cc7c48 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -594,8 +594,8 @@ def _read_array_header(fp, version): try: d = safe_eval(header) except SyntaxError as e: - msg = "Cannot parse header: {!r}\nException: {!r}" - raise ValueError(msg.format(header, e)) from None + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e if not isinstance(d, dict): msg = "Header is not a dictionary: {!r}" raise ValueError(msg.format(d)) diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index 126bdd9dc8b0..9ea4e4e3627d 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -660,8 +660,8 @@ def openfile(fname): # Try to open the file and guess its type try: f = open(fname) - except IOError: - raise IOError(f"No such file: '{fname}'") + except IOError as e: + raise IOError(f"No such file: '{fname}'") from e if f.readline()[:2] != "\\x": f.seek(0, 0) return f From 75bb1a80d104561c7a1870c86e6e5f0972cde5df Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Mon, 15 Mar 2021 08:27:59 -0400 Subject: [PATCH 0785/1270] BUG/ENH: fix array2string rounding bug by adding min_digits option Fixes #18609 --- numpy/core/arrayprint.py | 64 ++++++--- numpy/core/arrayprint.pyi | 2 + numpy/core/src/multiarray/dragon4.c | 131 ++++++++++++------- numpy/core/src/multiarray/dragon4.h | 15 ++- numpy/core/src/multiarray/multiarraymodule.c | 14 +- numpy/core/src/multiarray/scalartypes.c.src | 4 +- numpy/core/tests/test_arrayprint.py | 4 + numpy/core/tests/test_scalarprint.py | 69 +++++++++- 8 files changed, 215 insertions(+), 88 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 5c1d6cb63c6c..523820c33c11 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -914,6 +914,7 @@ def fillFormat(self, data): self.trim = '.' 
self.exp_size = -1 self.unique = True + self.min_digits = None elif self.exp_format: trim, unique = '.', True if self.floatmode == 'fixed' or self._legacy == '1.13': @@ -927,6 +928,8 @@ def fillFormat(self, data): self.trim = 'k' self.precision = max(len(s) for s in frac_part) + self.min_digits = self.precision + self.unique = unique # for back-compat with np 1.13, use 2 spaces & sign and full prec if self._legacy == '1.13': @@ -936,10 +939,7 @@ def fillFormat(self, data): self.pad_left = max(len(s) for s in int_part) # pad_right is only needed for nan length calculation self.pad_right = self.exp_size + 2 + self.precision - - self.unique = False else: - # first pass printing to determine sizes trim, unique = '.', True if self.floatmode == 'fixed': trim, unique = 'k', False @@ -955,14 +955,14 @@ def fillFormat(self, data): self.pad_left = max(len(s) for s in int_part) self.pad_right = max(len(s) for s in frac_part) self.exp_size = -1 + self.unique = unique if self.floatmode in ['fixed', 'maxprec_equal']: - self.precision = self.pad_right - self.unique = False + self.precision = self.min_digits = self.pad_right self.trim = 'k' else: - self.unique = True self.trim = '.' + self.min_digits = 0 if self._legacy != '1.13': # account for sign = ' ' by adding one to pad_left @@ -991,6 +991,7 @@ def __call__(self, x): if self.exp_format: return dragon4_scientific(x, precision=self.precision, + min_digits=self.min_digits, unique=self.unique, trim=self.trim, sign=self.sign == '+', @@ -999,6 +1000,7 @@ def __call__(self, x): else: return dragon4_positional(x, precision=self.precision, + min_digits=self.min_digits, unique=self.unique, fractional=True, trim=self.trim, @@ -1009,7 +1011,8 @@ def __call__(self, x): @set_module('numpy') def format_float_scientific(x, precision=None, unique=True, trim='k', - sign=False, pad_left=None, exp_digits=None): + sign=False, pad_left=None, exp_digits=None, + min_digits=None): """ Format a floating-point scalar as a decimal string in scientific notation. @@ -1027,11 +1030,12 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', If `True`, use a digit-generation strategy which gives the shortest representation which uniquely identifies the floating-point number from other values of the same type, by judicious rounding. If `precision` - was omitted, print all necessary digits, otherwise digit generation is - cut off after `precision` digits and the remaining value is rounded. + is given fewer digits than necessary can be printed, or if `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. If `False`, digits are generated as if printing an infinite-precision value and stopping after `precision` digits, rounding the remaining - value. + value with unbiased rounding trim : one of 'k', '.', '0', '-', optional Controls post-processing trimming of trailing digits, as follows: @@ -1048,6 +1052,10 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', exp_digits : non-negative integer, optional Pad the exponent with zeros until it contains at least this many digits. If omitted, the exponent will be at least 2 digits. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. Only has an effect if `unique=True` + in which case additional digits past those necessary to uniquely + identify the value may be printed, rounding the last additional digit. 
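As a quick illustration of the `min_digits` behaviour described above for scientific notation (a sketch assuming IEEE double precision and the semantics added by this patch; not part of the patch itself):

    >>> import numpy as np
    >>> np.format_float_scientific(0.1)                 # shortest unique repr
    '1.e-01'
    >>> np.format_float_scientific(0.1, min_digits=3)   # padded with the value's true digits
    '1.000e-01'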
Returns ------- @@ -1071,15 +1079,18 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', precision = _none_or_positive_arg(precision, 'precision') pad_left = _none_or_positive_arg(pad_left, 'pad_left') exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") return dragon4_scientific(x, precision=precision, unique=unique, trim=trim, sign=sign, pad_left=pad_left, - exp_digits=exp_digits) + exp_digits=exp_digits, min_digits=min_digits) @set_module('numpy') def format_float_positional(x, precision=None, unique=True, fractional=True, trim='k', sign=False, - pad_left=None, pad_right=None): + pad_left=None, pad_right=None, min_digits=None): """ Format a floating-point scalar as a decimal string in positional notation. @@ -1097,16 +1108,19 @@ def format_float_positional(x, precision=None, unique=True, If `True`, use a digit-generation strategy which gives the shortest representation which uniquely identifies the floating-point number from other values of the same type, by judicious rounding. If `precision` - was omitted, print out all necessary digits, otherwise digit generation - is cut off after `precision` digits and the remaining value is rounded. + is given fewer digits than necessary can be printed, or if `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. If `False`, digits are generated as if printing an infinite-precision value and stopping after `precision` digits, rounding the remaining - value. + value with unbiased rounding fractional : boolean, optional - If `True`, the cutoff of `precision` digits refers to the total number - of digits after the decimal point, including leading zeros. - If `False`, `precision` refers to the total number of significant - digits, before or after the decimal point, ignoring leading zeros. + If `True`, the cutoffs of `precision` and `min_digits` refer to the + total number of digits after the decimal point, including leading + zeros. + If `False`, `precision` and `min_digits` refer to the total number of + significant digits, before or after the decimal point, ignoring leading + zeros. trim : one of 'k', '.', '0', '-', optional Controls post-processing trimming of trailing digits, as follows: @@ -1123,6 +1137,10 @@ def format_float_positional(x, precision=None, unique=True, pad_right : non-negative integer, optional Pad the right side of the string with whitespace until at least that many characters are to the right of the decimal point. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. Only has an effect if `unique=True` + in which case additional digits past those necessary to uniquely + identify the value may be printed, rounding the last additional digit. 
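The positional variant works the same way: `min_digits` pads the unique representation with further true digits of the value, while a short `precision` cuts it off with unbiased (round-half-even) rounding. A sketch assuming IEEE doubles, not part of the patch:

    >>> import numpy as np
    >>> np.format_float_positional(0.1)                  # shortest unique repr
    '0.1'
    >>> np.format_float_positional(0.1, min_digits=5)    # pad out to 5 fractional digits
    '0.10000'
    >>> np.format_float_positional(0.375, precision=2)   # exact tie, last digit rounds to even
    '0.38'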
Returns ------- @@ -1147,10 +1165,16 @@ def format_float_positional(x, precision=None, unique=True, precision = _none_or_positive_arg(precision, 'precision') pad_left = _none_or_positive_arg(pad_left, 'pad_left') pad_right = _none_or_positive_arg(pad_right, 'pad_right') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if not fractional and precision == 0: + raise ValueError("precision must be greater than 0 if " + "fractional=False") + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") return dragon4_positional(x, precision=precision, unique=unique, fractional=fractional, trim=trim, sign=sign, pad_left=pad_left, - pad_right=pad_right) + pad_right=pad_right, min_digits=min_digits) class IntegerFormat: diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi index d2a5fdef963f..ac2b6f5a8abb 100644 --- a/numpy/core/arrayprint.pyi +++ b/numpy/core/arrayprint.pyi @@ -103,6 +103,7 @@ def format_float_scientific( sign: bool = ..., pad_left: Optional[int] = ..., exp_digits: Optional[int] = ..., + min_digits: Optional[int] = ..., ) -> str: ... def format_float_positional( x: _FloatLike_co, @@ -113,6 +114,7 @@ def format_float_positional( sign: bool = ..., pad_left: Optional[int] = ..., pad_right: Optional[int] = ..., + min_digits: Optional[int] = ..., ) -> str: ... def array_repr( arr: ndarray[Any, Any], diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c index a7b252a77e7e..1d8c275700a2 100644 --- a/numpy/core/src/multiarray/dragon4.c +++ b/numpy/core/src/multiarray/dragon4.c @@ -1130,8 +1130,9 @@ BigInt_ShiftLeft(BigInt *result, npy_uint32 shift) * * exponent - value exponent in base 2 * * mantissaBit - index of the highest set mantissa bit * * hasUnequalMargins - is the high margin twice as large as the low margin - * * cutoffMode - how to interpret cutoffNumber: fractional or total digits? - * * cutoffNumber - cut off printing after this many digits. -1 for no cutoff + * * cutoffMode - how to interpret cutoff_*: fractional or total digits? + * * cutoff_max - cut off printing after this many digits. -1 for no cutoff + * * cutoff_min - print at least this many digits. -1 for no cutoff * * pOutBuffer - buffer to output into * * bufferSize - maximum characters that can be printed to pOutBuffer * * pOutExponent - the base 10 exponent of the first digit @@ -1142,7 +1143,7 @@ static npy_uint32 Dragon4(BigInt *bigints, const npy_int32 exponent, const npy_uint32 mantissaBit, const npy_bool hasUnequalMargins, const DigitMode digitMode, const CutoffMode cutoffMode, - npy_int32 cutoffNumber, char *pOutBuffer, + npy_int32 cutoff_max, npy_int32 cutoff_min, char *pOutBuffer, npy_uint32 bufferSize, npy_int32 *pOutExponent) { char *curDigit = pOutBuffer; @@ -1169,7 +1170,8 @@ Dragon4(BigInt *bigints, const npy_int32 exponent, BigInt *temp2 = &bigints[6]; const npy_float64 log10_2 = 0.30102999566398119521373889472449; - npy_int32 digitExponent, cutoffExponent, hiBlock; + npy_int32 digitExponent, hiBlock; + npy_int32 cutoff_max_Exponent, cutoff_min_Exponent; npy_uint32 outputDigit; /* current digit being output */ npy_uint32 outputLen; npy_bool isEven = BigInt_IsEven(mantissa); @@ -1294,9 +1296,9 @@ Dragon4(BigInt *bigints, const npy_int32 exponent, * increases the number. This will either correct digitExponent to an * accurate value or it will clamp it above the accurate value. 
*/ - if (cutoffNumber >= 0 && cutoffMode == CutoffMode_FractionLength && - digitExponent <= -cutoffNumber) { - digitExponent = -cutoffNumber + 1; + if (cutoff_max >= 0 && cutoffMode == CutoffMode_FractionLength && + digitExponent <= -cutoff_max) { + digitExponent = -cutoff_max + 1; } @@ -1347,26 +1349,44 @@ Dragon4(BigInt *bigints, const npy_int32 exponent, } /* - * Compute the cutoff exponent (the exponent of the final digit to print). - * Default to the maximum size of the output buffer. + * Compute the cutoff_max exponent (the exponent of the final digit to + * print). Default to the maximum size of the output buffer. */ - cutoffExponent = digitExponent - bufferSize; - if (cutoffNumber >= 0) { + cutoff_max_Exponent = digitExponent - bufferSize; + if (cutoff_max >= 0) { npy_int32 desiredCutoffExponent; if (cutoffMode == CutoffMode_TotalLength) { - desiredCutoffExponent = digitExponent - cutoffNumber; - if (desiredCutoffExponent > cutoffExponent) { - cutoffExponent = desiredCutoffExponent; + desiredCutoffExponent = digitExponent - cutoff_max; + if (desiredCutoffExponent > cutoff_max_Exponent) { + cutoff_max_Exponent = desiredCutoffExponent; } } - /* Otherwise it's CutoffMode_FractionLength. Print cutoffNumber digits + /* Otherwise it's CutoffMode_FractionLength. Print cutoff_max digits * past the decimal point or until we reach the buffer size */ else { - desiredCutoffExponent = -cutoffNumber; - if (desiredCutoffExponent > cutoffExponent) { - cutoffExponent = desiredCutoffExponent; + desiredCutoffExponent = -cutoff_max; + if (desiredCutoffExponent > cutoff_max_Exponent) { + cutoff_max_Exponent = desiredCutoffExponent; + } + } + } + /* Also compute the cutoff_min exponent. */ + cutoff_min_Exponent = digitExponent; + if (cutoff_min >= 0) { + npy_int32 desiredCutoffExponent; + + if (cutoffMode == CutoffMode_TotalLength) { + desiredCutoffExponent = digitExponent - cutoff_min; + if (desiredCutoffExponent < cutoff_min_Exponent) { + cutoff_min_Exponent = desiredCutoffExponent; + } + } + else { + desiredCutoffExponent = -cutoff_min; + if (desiredCutoffExponent < cutoff_min_Exponent) { + cutoff_min_Exponent = desiredCutoffExponent; } } } @@ -1432,14 +1452,17 @@ Dragon4(BigInt *bigints, const npy_int32 exponent, /* * stop looping if we are far enough away from our neighboring - * values or if we have reached the cutoff digit + * values (and we have printed at least the requested minimum + * digits) or if we have reached the cutoff digit */ cmp = BigInt_Compare(scaledValue, scaledMarginLow); low = isEven ? (cmp <= 0) : (cmp < 0); cmp = BigInt_Compare(scaledValueHigh, scale); high = isEven ? 
(cmp >= 0) : (cmp > 0); - if (low | high | (digitExponent == cutoffExponent)) + if (((low | high) & (digitExponent <= cutoff_min_Exponent)) | + (digitExponent == cutoff_max_Exponent)) { break; + } /* store the output digit */ *curDigit = (char)('0' + outputDigit); @@ -1471,7 +1494,7 @@ Dragon4(BigInt *bigints, const npy_int32 exponent, DEBUG_ASSERT(outputDigit < 10); if ((scaledValue->length == 0) | - (digitExponent == cutoffExponent)) { + (digitExponent == cutoff_max_Exponent)) { break; } @@ -1589,6 +1612,7 @@ typedef struct Dragon4_Options { DigitMode digit_mode; CutoffMode cutoff_mode; npy_int32 precision; + npy_int32 min_digits; npy_bool sign; TrimMode trim_mode; npy_int32 digits_left; @@ -1617,11 +1641,12 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, DigitMode digit_mode, CutoffMode cutoff_mode, npy_int32 precision, - TrimMode trim_mode, npy_int32 digits_left, - npy_int32 digits_right) + npy_int32 min_digits, TrimMode trim_mode, + npy_int32 digits_left, npy_int32 digits_right) { npy_int32 printExponent; npy_int32 numDigits, numWholeDigits=0, has_sign=0; + npy_int32 add_digits; npy_int32 maxPrintLen = (npy_int32)bufferSize - 1, pos = 0; @@ -1644,8 +1669,9 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, } numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins, - digit_mode, cutoff_mode, precision, buffer + has_sign, - maxPrintLen - has_sign, &printExponent); + digit_mode, cutoff_mode, precision, min_digits, + buffer + has_sign, maxPrintLen - has_sign, + &printExponent); DEBUG_ASSERT(numDigits > 0); DEBUG_ASSERT(numDigits <= bufferSize); @@ -1744,9 +1770,10 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, buffer[pos++] = '.'; } - desiredFractionalDigits = precision; - if (cutoff_mode == CutoffMode_TotalLength && precision >= 0) { - desiredFractionalDigits = precision - numWholeDigits; + add_digits = digit_mode == DigitMode_Unique ? min_digits : precision; + desiredFractionalDigits = add_digits < 0 ? 0 : add_digits; + if (cutoff_mode == CutoffMode_TotalLength) { + desiredFractionalDigits = add_digits - numWholeDigits; } if (trim_mode == TrimMode_LeaveOneZero) { @@ -1757,10 +1784,9 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, } } else if (trim_mode == TrimMode_None && - digit_mode != DigitMode_Unique && desiredFractionalDigits > numFractionDigits && pos < maxPrintLen) { - /* add trailing zeros up to precision length */ + /* add trailing zeros up to add_digits length */ /* compute the number of trailing zeros needed */ npy_int32 count = desiredFractionalDigits - numFractionDigits; if (pos + count > maxPrintLen) { @@ -1778,7 +1804,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, * when rounding, we may still end up with trailing zeros. Remove them * depending on trim settings. 
*/ - if (precision >= 0 && trim_mode != TrimMode_None && numFractionDigits > 0) { + if (trim_mode != TrimMode_None && numFractionDigits > 0) { while (buffer[pos-1] == '0') { pos--; numFractionDigits--; @@ -1852,7 +1878,7 @@ static npy_uint32 FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, DigitMode digit_mode, - npy_int32 precision, TrimMode trim_mode, + npy_int32 precision, npy_int32 min_digits, TrimMode trim_mode, npy_int32 digits_left, npy_int32 exp_digits) { npy_int32 printExponent; @@ -1860,12 +1886,12 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, char *pCurOut; npy_int32 numFractionDigits; npy_int32 leftchars; + npy_int32 add_digits; if (digit_mode != DigitMode_Unique) { DEBUG_ASSERT(precision >= 0); } - DEBUG_ASSERT(bufferSize > 0); pCurOut = buffer; @@ -1893,7 +1919,9 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, } numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins, - digit_mode, CutoffMode_TotalLength, precision + 1, + digit_mode, CutoffMode_TotalLength, + precision < 0 ? -1 : precision + 1, + min_digits < 0 ? -1 : min_digits + 1, pCurOut, bufferSize, &printExponent); DEBUG_ASSERT(numDigits > 0); @@ -1928,6 +1956,8 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, --bufferSize; } + add_digits = digit_mode == DigitMode_Unique ? min_digits : precision; + add_digits = add_digits < 0 ? 0 : add_digits; if (trim_mode == TrimMode_LeaveOneZero) { /* if we didn't print any fractional digits, add the 0 */ if (numFractionDigits == 0 && bufferSize > 1) { @@ -1937,13 +1967,12 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, ++numFractionDigits; } } - else if (trim_mode == TrimMode_None && - digit_mode != DigitMode_Unique) { - /* add trailing zeros up to precision length */ - if (precision > (npy_int32)numFractionDigits) { + else if (trim_mode == TrimMode_None) { + /* add trailing zeros up to add_digits length */ + if (add_digits > (npy_int32)numFractionDigits) { char *pEnd; /* compute the number of trailing zeros needed */ - npy_int32 numZeros = (precision - numFractionDigits); + npy_int32 numZeros = (add_digits - numFractionDigits); if (numZeros > (npy_int32)bufferSize - 1) { numZeros = (npy_int32)bufferSize - 1; @@ -1961,7 +1990,7 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, * when rounding, we may still end up with trailing zeros. Remove them * depending on trim settings. 
*/ - if (precision >= 0 && trim_mode != TrimMode_None && numFractionDigits > 0) { + if (trim_mode != TrimMode_None && numFractionDigits > 0) { --pCurOut; while (*pCurOut == '0') { --pCurOut; @@ -2153,14 +2182,14 @@ Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit, mantissaBit, hasUnequalMargins, opt->digit_mode, opt->precision, - opt->trim_mode, opt->digits_left, - opt->exp_digits); + opt->min_digits, opt->trim_mode, + opt->digits_left, opt->exp_digits); } else { return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit, mantissaBit, hasUnequalMargins, opt->digit_mode, opt->cutoff_mode, - opt->precision, opt->trim_mode, + opt->precision, opt->min_digits, opt->trim_mode, opt->digits_left, opt->digits_right); } } @@ -3100,7 +3129,7 @@ Dragon4_Positional_##Type##_opt(npy_type *val, Dragon4_Options *opt)\ \ PyObject *\ Dragon4_Positional_##Type(npy_type *val, DigitMode digit_mode,\ - CutoffMode cutoff_mode, int precision,\ + CutoffMode cutoff_mode, int precision, int min_digits, \ int sign, TrimMode trim, int pad_left, int pad_right)\ {\ Dragon4_Options opt;\ @@ -3109,6 +3138,7 @@ Dragon4_Positional_##Type(npy_type *val, DigitMode digit_mode,\ opt.digit_mode = digit_mode;\ opt.cutoff_mode = cutoff_mode;\ opt.precision = precision;\ + opt.min_digits = min_digits;\ opt.sign = sign;\ opt.trim_mode = trim;\ opt.digits_left = pad_left;\ @@ -3136,7 +3166,8 @@ Dragon4_Scientific_##Type##_opt(npy_type *val, Dragon4_Options *opt)\ }\ PyObject *\ Dragon4_Scientific_##Type(npy_type *val, DigitMode digit_mode, int precision,\ - int sign, TrimMode trim, int pad_left, int exp_digits)\ + int min_digits, int sign, TrimMode trim, int pad_left, \ + int exp_digits)\ {\ Dragon4_Options opt;\ \ @@ -3144,6 +3175,7 @@ Dragon4_Scientific_##Type(npy_type *val, DigitMode digit_mode, int precision,\ opt.digit_mode = digit_mode;\ opt.cutoff_mode = CutoffMode_TotalLength;\ opt.precision = precision;\ + opt.min_digits = min_digits;\ opt.sign = sign;\ opt.trim_mode = trim;\ opt.digits_left = pad_left;\ @@ -3166,8 +3198,8 @@ make_dragon4_typefuncs(LongDouble, npy_longdouble, NPY_LONGDOUBLE_BINFMT_NAME) PyObject * Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode, - int precision, int sign, TrimMode trim, int pad_left, - int pad_right) + int precision, int min_digits, int sign, TrimMode trim, + int pad_left, int pad_right) { npy_double val; Dragon4_Options opt; @@ -3176,6 +3208,7 @@ Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode, opt.digit_mode = digit_mode; opt.cutoff_mode = cutoff_mode; opt.precision = precision; + opt.min_digits = min_digits; opt.sign = sign; opt.trim_mode = trim; opt.digits_left = pad_left; @@ -3208,7 +3241,8 @@ Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode, PyObject * Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision, - int sign, TrimMode trim, int pad_left, int exp_digits) + int min_digits, int sign, TrimMode trim, int pad_left, + int exp_digits) { npy_double val; Dragon4_Options opt; @@ -3217,6 +3251,7 @@ Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision, opt.digit_mode = digit_mode; opt.cutoff_mode = CutoffMode_TotalLength; opt.precision = precision; + opt.min_digits = min_digits; opt.sign = sign; opt.trim_mode = trim; opt.digits_left = pad_left; diff --git a/numpy/core/src/multiarray/dragon4.h b/numpy/core/src/multiarray/dragon4.h index 
3a99bde6c53b..4b76bf9e582c 100644 --- a/numpy/core/src/multiarray/dragon4.h +++ b/numpy/core/src/multiarray/dragon4.h @@ -112,12 +112,12 @@ typedef enum TrimMode PyObject *\ Dragon4_Positional_##Type(npy_type *val, DigitMode digit_mode,\ CutoffMode cutoff_mode, int precision,\ - int sign, TrimMode trim, int pad_left,\ - int pad_right);\ + int min_digits, int sign, TrimMode trim, \ + int pad_left, int pad_right);\ PyObject *\ Dragon4_Scientific_##Type(npy_type *val, DigitMode digit_mode,\ - int precision, int sign, TrimMode trim,\ - int pad_left, int exp_digits); + int precision, int min_digits, int sign, \ + TrimMode trim, int pad_left, int exp_digits); make_dragon4_typedecl(Half, npy_half) make_dragon4_typedecl(Float, npy_float) @@ -128,12 +128,13 @@ make_dragon4_typedecl(LongDouble, npy_longdouble) PyObject * Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode, - int precision, int sign, TrimMode trim, int pad_left, - int pad_right); + int precision, int min_digits, int sign, TrimMode trim, + int pad_left, int pad_right); PyObject * Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision, - int sign, TrimMode trim, int pad_left, int exp_digits); + int min_digits, int sign, TrimMode trim, int pad_left, + int exp_digits); #endif diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index a0f7afeb5938..415aa7dacbbd 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -3481,7 +3481,7 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *obj; - int precision=-1, pad_left=-1, exp_digits=-1; + int precision=-1, pad_left=-1, exp_digits=-1, min_digits=-1; DigitMode digit_mode; TrimMode trim = TrimMode_None; int sign=0, unique=1; @@ -3495,6 +3495,7 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), "|trim", &trimmode_converter, &trim, "|pad_left", &PyArray_PythonPyIntFromInt, &pad_left, "|exp_digits", &PyArray_PythonPyIntFromInt, &exp_digits, + "|min_digits", &PyArray_PythonPyIntFromInt, &min_digits, NULL, NULL, NULL) < 0) { return NULL; } @@ -3507,7 +3508,7 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), return NULL; } - return Dragon4_Scientific(obj, digit_mode, precision, sign, trim, + return Dragon4_Scientific(obj, digit_mode, precision, min_digits, sign, trim, pad_left, exp_digits); } @@ -3522,7 +3523,7 @@ dragon4_positional(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *obj; - int precision=-1, pad_left=-1, pad_right=-1; + int precision=-1, pad_left=-1, pad_right=-1, min_digits=-1; CutoffMode cutoff_mode; DigitMode digit_mode; TrimMode trim = TrimMode_None; @@ -3538,6 +3539,7 @@ dragon4_positional(PyObject *NPY_UNUSED(dummy), "|trim", &trimmode_converter, &trim, "|pad_left", &PyArray_PythonPyIntFromInt, &pad_left, "|pad_right", &PyArray_PythonPyIntFromInt, &pad_right, + "|min_digits", &PyArray_PythonPyIntFromInt, &min_digits, NULL, NULL, NULL) < 0) { return NULL; } @@ -3552,8 +3554,8 @@ dragon4_positional(PyObject *NPY_UNUSED(dummy), return NULL; } - return Dragon4_Positional(obj, digit_mode, cutoff_mode, precision, sign, - trim, pad_left, pad_right); + return Dragon4_Positional(obj, digit_mode, cutoff_mode, precision, + min_digits, sign, trim, pad_left, pad_right); } static PyObject * @@ -3572,7 +3574,7 @@ format_longfloat(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) "not a longfloat"); return 
NULL; } - return Dragon4_Scientific(obj, DigitMode_Unique, precision, 0, + return Dragon4_Scientific(obj, DigitMode_Unique, precision, -1, 0, TrimMode_LeaveOneZero, -1, -1); } diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 10f304fe7133..a001500b0a97 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -331,13 +331,13 @@ format_@name@(@type@ val, npy_bool scientific, { if (scientific) { return Dragon4_Scientific_@Name@(&val, - DigitMode_Unique, precision, + DigitMode_Unique, precision, -1, sign, trim, pad_left, exp_digits); } else { return Dragon4_Positional_@Name@(&val, DigitMode_Unique, CutoffMode_TotalLength, precision, - sign, trim, pad_left, pad_right); + -1, sign, trim, pad_left, pad_right); } } diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 2c5f1577ddab..03eca2f7438e 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -759,6 +759,10 @@ def test_floatmode(self): assert_equal(repr(c), "array([1.00000000+1.00000000j, 1.12345679+1.12345679j])") + # test unique special case (gh-18609) + a = np.float64.fromhex('-1p-97') + assert_equal(np.float64(np.array2string(a, floatmode='unique')), a) + def test_legacy_mode_scalars(self): # in legacy mode, str of floats get truncated, and complex scalars # use * for non-finite imaginary part diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py index 6502ec4c1486..620472683391 100644 --- a/numpy/core/tests/test_scalarprint.py +++ b/numpy/core/tests/test_scalarprint.py @@ -9,7 +9,7 @@ from tempfile import TemporaryFile import numpy as np -from numpy.testing import assert_, assert_equal +from numpy.testing import assert_, assert_equal, assert_raises class TestRealScalars: def test_str(self): @@ -176,7 +176,8 @@ def test_dragon4(self): "87538682506419718265533447265625") # largest numbers - assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)), + f32x = np.finfo(np.float32).max + assert_equal(fpos32(f32x, **preckwd(0)), "340282346638528859811704183484516925440.") assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), "1797693134862315708145274237317043567980705675258449965989" @@ -186,10 +187,66 @@ def test_dragon4(self): "6580855933212334827479782620414472316873817718091929988125" "0404026184124858368.") # Warning: In unique mode only the integer digits necessary for - # uniqueness are computed, the rest are 0. Should we change this? - assert_equal(fpos32(np.finfo(np.float32).max, precision=0), + # uniqueness are computed, the rest are 0. + assert_equal(fpos32(f32x), "340282350000000000000000000000000000000.") + # Further tests of zero-padding vs rounding in different combinations + # of unique, fractional, precision, min_digits + # precision can only reduce digits, not add them. + # min_digits can only extend digits, not reduce them. 
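The two rules in the comments above are easy to check through the public API once this series is applied; a minimal sketch, assuming a NumPy build that already has the `min_digits` keyword introduced by these patches (the expected strings are taken from the assertions that follow):

import numpy as np

f32x = np.finfo(np.float32).max

# precision can only shorten the shortest unique repr, never extend it
np.format_float_positional(f32x, unique=True, fractional=True, precision=4)
# -> '340282350000000000000000000000000000000.'

# min_digits forces extra digits (trailing zeros here) past the unique repr
np.format_float_positional(f32x, unique=True, fractional=True, min_digits=4)
# -> '340282346638528859811704183484516925440.0000'

# the new validation in format_float_positional rejects this combination
np.format_float_positional(f32x, unique=True, fractional=False, precision=0)
# -> ValueError: precision must be greater than 0 if fractional=False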
+ assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0), + "340282346638528859811704183484516925440.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4), + "340282346638528859811704183484516925440.0000") + assert_equal(fpos32(f32x, unique=True, fractional=True, + min_digits=4, precision=4), + "340282346638528859811704183484516925440.0000") + assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False, + precision=0) + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=20), + "340282346638528859810000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=15), + "340282346638529000000000000000000000000.") + assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + # test that unique rounding is preserved when precision is supplied + # but no extra digits need to be printed (gh-18609) + a = np.float64.fromhex('-1p-97') + assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=False, precision=15), + '-6.310887241768094e-30') + assert_equal(fsci64(a, unique=True, precision=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, precision=15, min_digits=15), + '-6.310887241768095e-30') + # adds/remove digits in unique mode with unbiased rnding + assert_equal(fsci64(a, unique=True, precision=14), + '-6.31088724176809e-30') + assert_equal(fsci64(a, unique=True, min_digits=16), + '-6.3108872417680944e-30') + assert_equal(fsci64(a, unique=True, precision=16), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=14), + '-6.310887241768095e-30') + # test min_digits in unique mode with different rounding cases + assert_equal(fsci64('1e120', min_digits=3), '1.000e+120') + assert_equal(fsci64('1e100', min_digits=3), '1.000e+100') + # test trailing zeros assert_equal(fpos32('1.0', unique=False, precision=3), "1.000") assert_equal(fpos64('1.0', unique=False, precision=3), "1.000") @@ -200,7 +257,9 @@ def test_dragon4(self): assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00") assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00") # gh-10713 - assert_equal(fpos64('324', unique=False, precision=5, fractional=False), "324.00") + assert_equal(fpos64('324', unique=False, precision=5, + fractional=False), "324.00") + def test_dragon4_interface(self): tps = [np.float16, np.float32, np.float64] From 6b840a49345fc3c90e1c49e10b1ddc946abbf2b8 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 18 Mar 2021 19:08:23 -0500 Subject: [PATCH 0786/1270] DOC: Add TODO comment that `asarray` stubs will need moving --- numpy/core/_asarray.pyi | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/core/_asarray.pyi b/numpy/core/_asarray.pyi index 8c200ba22ae2..ee21fc0f1492 100644 --- 
a/numpy/core/_asarray.pyi +++ b/numpy/core/_asarray.pyi @@ -11,6 +11,10 @@ else: _ArrayType = TypeVar("_ArrayType", bound=ndarray) +# TODO: The following functions are now defined in C, so should be defined +# in a (not yet existing) `multiarray.pyi`. +# (with the exception of `require`) + def asarray( a: object, dtype: DTypeLike = ..., From c9e7ff76780480ed8fdba30df5dde07ac6736323 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 18 Mar 2021 20:32:20 -0500 Subject: [PATCH 0787/1270] BUG,TST: Fix error path and argument name (and add test coverage) The argument name for the python side defined function was always `a` and not `object` so fix it and add a test. Also add a test to ensure the error paths are taken for all of the function. The missing NULL returns cannot be tested easily (but thanks to code coverage its nice to look at the uncovered stuff closer...) --- .../src/multiarray/arrayfunction_override.c | 2 ++ numpy/core/src/multiarray/multiarraymodule.c | 8 +++--- numpy/core/tests/test_multiarray.py | 27 +++++++++++++++++++ 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c index 899919ff70c6..31415e4f2117 100644 --- a/numpy/core/src/multiarray/arrayfunction_override.c +++ b/numpy/core/src/multiarray/arrayfunction_override.c @@ -408,6 +408,7 @@ array_implement_c_array_function_creation( kwargs = PyDict_New(); if (kwargs == NULL) { Py_DECREF(args); + return NULL; } Py_ssize_t nkwargs = PyTuple_GET_SIZE(kwnames); for (Py_ssize_t i = 0; i < nkwargs; i++) { @@ -416,6 +417,7 @@ array_implement_c_array_function_creation( if (PyDict_SetItem(kwargs, key, value) < 0) { Py_DECREF(args); Py_DECREF(kwargs); + return NULL; } } } diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 0da27ac171bd..7514c4d5da12 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1704,7 +1704,7 @@ array_asarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asarray", args, len_args, kwnames, - "object", NULL, &op, + "a", NULL, &op, "|dtype", &PyArray_DescrConverter2, &type, "|order", &PyArray_OrderConverter, &order, "$like", NULL, &like, @@ -1743,7 +1743,7 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asanyarray", args, len_args, kwnames, - "object", NULL, &op, + "a", NULL, &op, "|dtype", &PyArray_DescrConverter2, &type, "|order", &PyArray_OrderConverter, &order, "$like", NULL, &like, @@ -1782,7 +1782,7 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("ascontiguousarray", args, len_args, kwnames, - "object", NULL, &op, + "a", NULL, &op, "|dtype", &PyArray_DescrConverter2, &type, "$like", NULL, &like, NULL, NULL, NULL) < 0) { @@ -1820,7 +1820,7 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asfortranarray", args, len_args, kwnames, - "object", NULL, &op, + "a", NULL, &op, "|dtype", &PyArray_DescrConverter2, &type, "$like", NULL, &like, NULL, NULL, NULL) < 0) { diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index cffb1af99c31..b2f948d3007e 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -484,6 +484,33 @@ def test_array_cont(self): 
assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def test_bad_arguments_error(self, func): + with pytest.raises(TypeError): + func(3, dtype="bad dtype") + with pytest.raises(TypeError): + func() # missing arguments + with pytest.raises(TypeError): + func(1, 2, 3, 4, 5, 6, 7, 8) # too many arguments + + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def test_array_as_keyword(self, func): + # This should likely be made positional only, but do not change + # the name accidentally. + if func is np.array: + func(object=3) + else: + func(a=3) class TestAssignment: def test_assignment_broadcasting(self): From 40b8ba3f43e28cb543bbb1d5aff095da264bafd8 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 18 Mar 2021 20:48:51 -0500 Subject: [PATCH 0788/1270] DOC: Update the docs to match how they changed in master I had forgotten that the docs for these functions where updated when rebasing. --- numpy/core/_add_newdocs.py | 59 +++++++++++++++++++++++++++++--------- 1 file changed, 45 insertions(+), 14 deletions(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 1acf4e7d6280..bba9f95fc3f5 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -906,7 +906,7 @@ add_newdoc('numpy.core.multiarray', 'asarray', """ - asarray(a, dtype=None, order=None) + asarray(a, dtype=None, order=None, *, like=None) Convert the input to an array. @@ -918,10 +918,16 @@ of lists and ndarrays. dtype : data-type, optional By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major (C-style) or - column-major (Fortran-style) memory representation. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order Defaults to 'C'. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 Returns ------- @@ -974,11 +980,14 @@ >>> np.asanyarray(a) is a True - """) + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) add_newdoc('numpy.core.multiarray', 'asanyarray', """ - asanyarray(a, dtype=None, order=None) + asanyarray(a, dtype=None, order=None, *, like=None) Convert the input to an ndarray, but pass ndarray subclasses through. @@ -990,9 +999,16 @@ tuples of lists, and ndarrays. dtype : data-type, optional By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major (C-style) or column-major - (Fortran-style) memory representation. Defaults to 'C'. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'C'. + ${ARRAY_FUNCTION_LIKE} + + .. 
versionadded:: 1.20.0 Returns ------- @@ -1027,11 +1043,14 @@ >>> np.asanyarray(a) is a True - """) + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) add_newdoc('numpy.core.multiarray', 'ascontiguousarray', """ - ascontiguousarray(a, dtype=None) + ascontiguousarray(a, dtype=None, *, like=None) Return a contiguous array (ndim >= 1) in memory (C order). @@ -1041,6 +1060,9 @@ Input array. dtype : str or dtype object, optional Data-type of returned array. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 Returns ------- @@ -1067,11 +1089,14 @@ Note: This function returns an array with at least one-dimension (1-d) so it will not preserve 0-d arrays. - """) + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) add_newdoc('numpy.core.multiarray', 'asfortranarray', """ - asfortranarray(a, dtype=None) + asfortranarray(a, dtype=None, *, like=None) Return an array (ndim >= 1) laid out in Fortran order in memory. @@ -1081,6 +1106,9 @@ Input array. dtype : str or dtype object, optional By default, the data-type is inferred from the input data. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 Returns ------- @@ -1107,7 +1135,10 @@ Note: This function returns an array with at least one-dimension (1-d) so it will not preserve 0-d arrays. - """) + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) add_newdoc('numpy.core.multiarray', 'empty', """ From c1aa1af62f6e9fcdda92d6d0991b15051a565814 Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Fri, 19 Mar 2021 22:03:06 +1300 Subject: [PATCH 0789/1270] MAINT: use super() as described by PEP 3135 --- doc/source/user/basics.subclassing.rst | 16 +++++----- numpy/_globals.py | 2 +- numpy/core/_exceptions.py | 2 +- numpy/core/arrayprint.py | 4 +-- numpy/core/memmap.py | 4 +-- numpy/core/records.py | 6 ++-- numpy/core/tests/test_arrayprint.py | 6 ++-- numpy/core/tests/test_regression.py | 2 +- numpy/core/tests/test_umath.py | 4 +-- numpy/f2py/tests/test_assumed_shape.py | 2 +- numpy/lib/index_tricks.py | 4 +-- numpy/lib/tests/test_format.py | 2 +- numpy/ma/core.py | 41 +++++++++++++------------- numpy/ma/extras.py | 4 +-- numpy/ma/tests/test_subclassing.py | 18 +++++------ numpy/testing/_private/utils.py | 6 ++-- numpy/testing/tests/test_utils.py | 8 ++--- 17 files changed, 63 insertions(+), 68 deletions(-) diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 8ffa31688531..1b78809865aa 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -223,8 +223,8 @@ where our object creation housekeeping usually goes. new ndarray instance of its own class. In practice this means that we, the authors of the code, will need to make a call to ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to - ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an - existing array (see below) + ``super().__new__(cls, ...)``, or do view casting of an existing array + (see below) * For view casting and new-from-template, the equivalent of ``ndarray.__new__(MySubClass,...`` is called, at the C level. 
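The theme of this patch, the zero-argument super() of PEP 3135, is worth one compact, self-contained illustration in the ndarray-subclass setting that the documentation hunks above and below keep returning to. This is only a sketch of the pattern, a stripped-down stand-in for the docs' InfoArray example rather than the full version:

import numpy as np

class InfoArray(np.ndarray):
    # zero-argument super() replaces super(InfoArray, cls) / super(InfoArray, self)
    def __new__(cls, shape, info=None):
        obj = super().__new__(cls, shape)   # same call the old spelling made
        obj.info = info
        return obj

    def __array_finalize__(self, obj):
        # runs for explicit construction, view casting and new-from-template
        self.info = getattr(obj, 'info', None)

a = InfoArray((3,), info='metadata')
print(a.info)        # metadata
print(a[1:].info)    # view casting carries the attribute along: metadata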
@@ -240,7 +240,7 @@ The following code allows us to look at the call sequences and arguments: class C(np.ndarray): def __new__(cls, *args, **kwargs): print('In __new__ with class %s' % cls) - return super(C, cls).__new__(cls, *args, **kwargs) + return super().__new__(cls, *args, **kwargs) def __init__(self, *args, **kwargs): # in practice you probably will not need or want an __init__ @@ -312,9 +312,8 @@ Simple example - adding an extra attribute to ndarray # ndarray input arguments. This will call the standard # ndarray constructor, but return an object of our type. # It also triggers a call to InfoArray.__array_finalize__ - obj = super(InfoArray, subtype).__new__(subtype, shape, dtype, - buffer, offset, strides, - order) + obj = super().__new__(subtype, shape, dtype, + buffer, offset, strides, order) # set the new 'info' attribute to the value passed obj.info = info # Finally, we must return the newly created object: @@ -486,8 +485,7 @@ following. if out_no: info['outputs'] = out_no - results = super(A, self).__array_ufunc__(ufunc, method, - *args, **kwargs) + results = super().__array_ufunc__(ufunc, method, *args, **kwargs) if results is NotImplemented: return NotImplemented @@ -600,7 +598,7 @@ some print statements: print(' self is %s' % repr(self)) print(' arr is %s' % repr(out_arr)) # then just call the parent - return super(MySubClass, self).__array_wrap__(self, out_arr, context) + return super().__array_wrap__(self, out_arr, context) We run a ufunc on an instance of our new array: diff --git a/numpy/_globals.py b/numpy/_globals.py index 4a8c266d30db..0b715c870870 100644 --- a/numpy/_globals.py +++ b/numpy/_globals.py @@ -77,7 +77,7 @@ class _NoValueType: def __new__(cls): # ensure that only one instance exists if not cls.__instance: - cls.__instance = super(_NoValueType, cls).__new__(cls) + cls.__instance = super().__new__(cls) return cls.__instance # needed for python 2 to preserve identity through a pickle diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py index 5e17ed3b21e0..77aa2f6e1926 100644 --- a/numpy/core/_exceptions.py +++ b/numpy/core/_exceptions.py @@ -135,7 +135,7 @@ def __init__(self, axis, ndim=None, msg_prefix=None): if msg_prefix is not None: msg = "{}: {}".format(msg_prefix, msg) - super(AxisError, self).__init__(msg) + super().__init__(msg) @_display_as_base diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 5c1d6cb63c6c..3251c51e3587 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -1253,12 +1253,12 @@ def __init__(self, x, unit=None, timezone=None, casting='same_kind', self.legacy = legacy # must be called after the above are configured - super(DatetimeFormat, self).__init__(x) + super().__init__(x) def __call__(self, x): if self.legacy == '1.13': return self._format_non_nat(x) - return super(DatetimeFormat, self).__call__(x) + return super().__call__(x) def _format_non_nat(self, x): return "'%s'" % datetime_as_string(x, diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py index 892ad2540a2a..b0d9cb3af7bf 100644 --- a/numpy/core/memmap.py +++ b/numpy/core/memmap.py @@ -316,7 +316,7 @@ def flush(self): self.base.flush() def __array_wrap__(self, arr, context=None): - arr = super(memmap, self).__array_wrap__(arr, context) + arr = super().__array_wrap__(arr, context) # Return a memmap if a memmap was given as the output of the # ufunc. 
Leave the arr class unchanged if self is not a memmap @@ -331,7 +331,7 @@ def __array_wrap__(self, arr, context=None): return arr.view(np.ndarray) def __getitem__(self, index): - res = super(memmap, self).__getitem__(index) + res = super().__getitem__(index) if type(res) is memmap and res._mmap is None: return res.view(type=ndarray) return res diff --git a/numpy/core/records.py b/numpy/core/records.py index a626a05893d8..0efc951a3d57 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -244,12 +244,12 @@ class record(nt.void): def __repr__(self): if get_printoptions()['legacy'] == '1.13': return self.__str__() - return super(record, self).__repr__() + return super().__repr__() def __str__(self): if get_printoptions()['legacy'] == '1.13': return str(self.item()) - return super(record, self).__str__() + return super().__str__() def __getattribute__(self, attr): if attr in ('setfield', 'getfield', 'dtype'): @@ -518,7 +518,7 @@ def __setattr__(self, attr, val): return self.setfield(val, *res) def __getitem__(self, indx): - obj = super(recarray, self).__getitem__(indx) + obj = super().__getitem__(indx) # copy behavior of getattr, except that here # we might also be returning a single element diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 2c5f1577ddab..8f63b5b709b5 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -45,7 +45,7 @@ def __new__(cls, inp): return obj def __getitem__(self, ind): - ret = super(sub, self).__getitem__(ind) + ret = super().__getitem__(ind) return sub(ret) # test that object + subclass is OK: @@ -67,7 +67,7 @@ def __new__(cls, inp): return obj def __getitem__(self, ind): - ret = super(sub, self).__getitem__(ind) + ret = super().__getitem__(ind) return sub(ret) x = sub(1) @@ -101,7 +101,7 @@ def __getitem__(self, ind): # gh-10663 class DuckCounter(np.ndarray): def __getitem__(self, item): - result = super(DuckCounter, self).__getitem__(item) + result = super().__getitem__(item) if not isinstance(result, DuckCounter): result = result[...].view(DuckCounter) return result diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 09929330788a..66b5d5516529 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -1750,7 +1750,7 @@ def __new__(cls, # it is designed to simulate an old API # expectation to guard against regression def squeeze(self): - return super(OldSqueeze, self).squeeze() + return super().squeeze() oldsqueeze = OldSqueeze(np.array([[1],[2],[3]])) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 2249c866caf5..556856faf32e 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -2703,8 +2703,8 @@ def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs): if out_no: info['outputs'] = out_no - results = super(A, self).__array_ufunc__(ufunc, method, - *args, **kwargs) + results = super().__array_ufunc__(ufunc, method, + *args, **kwargs) if results is NotImplemented: return NotImplemented diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py index dfc252660c59..79e3ad138426 100644 --- a/numpy/f2py/tests/test_assumed_shape.py +++ b/numpy/f2py/tests/test_assumed_shape.py @@ -47,7 +47,7 @@ def setup(self): self.sources.append(self.f2cmap_file.name) self.options = ["--f2cmap", self.f2cmap_file.name] - super(TestF2cmapOption, self).setup() + super().setup() def teardown(self): 
os.unlink(self.f2cmap_file.name) diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index fa7c518326ae..72d8e9de4f75 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -251,7 +251,7 @@ class MGridClass(nd_grid): """ def __init__(self): - super(MGridClass, self).__init__(sparse=False) + super().__init__(sparse=False) mgrid = MGridClass() @@ -298,7 +298,7 @@ class OGridClass(nd_grid): """ def __init__(self): - super(OGridClass, self).__init__(sparse=True) + super().__init__(sparse=True) ogrid = OGridClass() diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index bac42fad3ec2..10656a23322b 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -402,7 +402,7 @@ class BytesIOSRandomSize(BytesIO): def read(self, size=None): import random size = random.randint(1, size) - return super(BytesIOSRandomSize, self).read(size) + return super().read(size) def roundtrip(arr): diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 10ee0fb06f56..cebacf5e1e2e 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -914,7 +914,7 @@ class _MaskedUnaryOperation(_MaskedUFunc): """ def __init__(self, mufunc, fill=0, domain=None): - super(_MaskedUnaryOperation, self).__init__(mufunc) + super().__init__(mufunc) self.fill = fill self.domain = domain ufunc_domain[mufunc] = domain @@ -996,7 +996,7 @@ def __init__(self, mbfunc, fillx=0, filly=0): abfunc(x, filly) = x for all x to enable reduce. """ - super(_MaskedBinaryOperation, self).__init__(mbfunc) + super().__init__(mbfunc) self.fillx = fillx self.filly = filly ufunc_domain[mbfunc] = None @@ -1142,7 +1142,7 @@ def __init__(self, dbfunc, domain, fillx=0, filly=0): """abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce. """ - super(_DomainedBinaryOperation, self).__init__(dbfunc) + super().__init__(dbfunc) self.domain = domain self.fillx = fillx self.filly = filly @@ -3400,7 +3400,7 @@ def __setitem__(self, indx, value): # Define so that we can overwrite the setter. @property def dtype(self): - return super(MaskedArray, self).dtype + return super().dtype @dtype.setter def dtype(self, dtype): @@ -3416,7 +3416,7 @@ def dtype(self, dtype): @property def shape(self): - return super(MaskedArray, self).shape + return super().shape @shape.setter def shape(self, shape): @@ -4985,8 +4985,8 @@ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): #!!!: implement out + test! 
m = self._mask if m is nomask: - result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, - axis2=axis2, out=out) + result = super().trace(offset=offset, axis1=axis1, axis2=axis2, + out=out) return result.astype(dtype) else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) @@ -5237,8 +5237,7 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} if self._mask is nomask: - result = super(MaskedArray, self).mean(axis=axis, - dtype=dtype, **kwargs)[()] + result = super().mean(axis=axis, dtype=dtype, **kwargs)[()] else: dsum = self.sum(axis=axis, dtype=dtype, **kwargs) cnt = self.count(axis=axis, **kwargs) @@ -5315,8 +5314,8 @@ def var(self, axis=None, dtype=None, out=None, ddof=0, # Easy case: nomask, business as usual if self._mask is nomask: - ret = super(MaskedArray, self).var(axis=axis, dtype=dtype, out=out, - ddof=ddof, **kwargs)[()] + ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs)[()] if out is not None: if isinstance(out, MaskedArray): out.__setmask__(nomask) @@ -5947,13 +5946,13 @@ def partition(self, *args, **kwargs): warnings.warn("Warning: 'partition' will ignore the 'mask' " f"of the {self.__class__.__name__}.", stacklevel=2) - return super(MaskedArray, self).partition(*args, **kwargs) + return super().partition(*args, **kwargs) def argpartition(self, *args, **kwargs): warnings.warn("Warning: 'argpartition' will ignore the 'mask' " f"of the {self.__class__.__name__}.", stacklevel=2) - return super(MaskedArray, self).argpartition(*args, **kwargs) + return super().argpartition(*args, **kwargs) def take(self, indices, axis=None, out=None, mode='raise'): """ @@ -6179,7 +6178,7 @@ def __getstate__(self): """ cf = 'CF'[self.flags.fnc] - data_state = super(MaskedArray, self).__reduce__()[2] + data_state = super().__reduce__()[2] return data_state + (getmaskarray(self).tobytes(cf), self._fill_value) def __setstate__(self, state): @@ -6195,7 +6194,7 @@ def __setstate__(self, state): """ (_, shp, typ, isf, raw, msk, flv) = state - super(MaskedArray, self).__setstate__((shp, typ, isf, raw)) + super().__setstate__((shp, typ, isf, raw)) self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) self.fill_value = flv @@ -6256,7 +6255,7 @@ def __new__(self, data, mask=nomask, dtype=None, fill_value=None, @property def _data(self): # Make sure that the _data part is a np.void - return super(mvoid, self)._data[()] + return super()._data[()] def __getitem__(self, indx): """ @@ -6293,7 +6292,7 @@ def __str__(self): return str(self._data) rdtype = _replace_dtype_fields(self._data.dtype, "O") - data_arr = super(mvoid, self)._data + data_arr = super()._data res = data_arr.astype(rdtype) _recursive_printoption(res, self._mask, masked_print_option) return str(res) @@ -6455,7 +6454,7 @@ def __array_finalize__(self, obj): if not self.__has_singleton(): # this handles the `.view` in __new__, which we want to copy across # properties normally - return super(MaskedConstant, self).__array_finalize__(obj) + return super().__array_finalize__(obj) elif self is self.__singleton: # not clear how this can happen, play it safe pass @@ -6529,14 +6528,14 @@ def __deepcopy__(self, memo): def __setattr__(self, attr, value): if not self.__has_singleton(): # allow the singleton to be initialized - return super(MaskedConstant, self).__setattr__(attr, value) + return super().__setattr__(attr, value) elif self is self.__singleton: raise AttributeError( f"attributes of {self!r} are not 
writeable") else: # duplicate instance - we can end up here from __array_finalize__, # where we set the __class__ attribute - return super(MaskedConstant, self).__setattr__(attr, value) + return super().__setattr__(attr, value) masked = masked_singleton = MaskedConstant() @@ -6628,7 +6627,7 @@ class _extrema_operation(_MaskedUFunc): """ def __init__(self, ufunc, compare, fill_value): - super(_extrema_operation, self).__init__(ufunc) + super().__init__(ufunc) self.compare = compare self.fill_value_func = fill_value diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index a775a15bfcfc..8c123bc3bcc4 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1483,7 +1483,7 @@ def makemat(cls, arr): # deprecate that class. In preparation, we use the unmasked version # to construct the matrix (with copy=False for backwards compatibility # with the .view) - data = super(MAxisConcatenator, cls).makemat(arr.data, copy=False) + data = super().makemat(arr.data, copy=False) return array(data, mask=arr.mask) def __getitem__(self, key): @@ -1491,7 +1491,7 @@ def __getitem__(self, key): if isinstance(key, str): raise MAError("Unavailable for masked array.") - return super(MAxisConcatenator, self).__getitem__(key) + return super().__getitem__(key) class mr_class(MAxisConcatenator): diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py index f2623406d183..1af5396252e9 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -28,19 +28,18 @@ def __new__(cls,arr,info={}): return x def __array_finalize__(self, obj): - if callable(getattr(super(SubArray, self), - '__array_finalize__', None)): - super(SubArray, self).__array_finalize__(obj) + if callable(getattr(super(), '__array_finalize__', None)): + super().__array_finalize__(obj) self.info = getattr(obj, 'info', {}).copy() return def __add__(self, other): - result = super(SubArray, self).__add__(other) + result = super().__add__(other) result.info['added'] = result.info.get('added', 0) + 1 return result def __iadd__(self, other): - result = super(SubArray, self).__iadd__(other) + result = super().__iadd__(other) result.info['iadded'] = result.info.get('iadded', 0) + 1 return result @@ -51,7 +50,7 @@ def __iadd__(self, other): class SubMaskedArray(MaskedArray): """Pure subclass of MaskedArray, keeping some info on subclass.""" def __new__(cls, info=None, **kwargs): - obj = super(SubMaskedArray, cls).__new__(cls, **kwargs) + obj = super().__new__(cls, **kwargs) obj._optinfo['info'] = info return obj @@ -123,12 +122,11 @@ def _validate_input(self, value): def __setitem__(self, item, value): # validation ensures direct assignment with ndarray or # masked_print_option will fail - super(ComplicatedSubArray, self).__setitem__( - item, self._validate_input(value)) + super().__setitem__(item, self._validate_input(value)) def __getitem__(self, item): # ensure getter returns our own class also for scalars - value = super(ComplicatedSubArray, self).__getitem__(item) + value = super().__getitem__(item) if not isinstance(value, np.ndarray): # scalar value = value.__array__().view(ComplicatedSubArray) return value @@ -143,7 +141,7 @@ def flat(self, value): y[:] = value def __array_wrap__(self, obj, context=None): - obj = super(ComplicatedSubArray, self).__array_wrap__(obj, context) + obj = super().__array_wrap__(obj, context) if context is not None and context[0] is np.multiply: obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 diff --git a/numpy/testing/_private/utils.py 
b/numpy/testing/_private/utils.py index b4d42728e3cd..5b87d0a06457 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2006,7 +2006,7 @@ class clear_and_catch_warnings(warnings.catch_warnings): def __init__(self, record=False, modules=()): self.modules = set(modules).union(self.class_modules) self._warnreg_copies = {} - super(clear_and_catch_warnings, self).__init__(record=record) + super().__init__(record=record) def __enter__(self): for mod in self.modules: @@ -2014,10 +2014,10 @@ def __enter__(self): mod_reg = mod.__warningregistry__ self._warnreg_copies[mod] = mod_reg.copy() mod_reg.clear() - return super(clear_and_catch_warnings, self).__enter__() + return super().__enter__() def __exit__(self, *exc_info): - super(clear_and_catch_warnings, self).__exit__(*exc_info) + super().__exit__(*exc_info) for mod in self.modules: if hasattr(mod, '__warningregistry__'): mod.__warningregistry__.clear() diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 261ed9705947..31d2cdc76b3e 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -434,10 +434,10 @@ def test_subclass_that_cannot_be_bool(self): # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. class MyArray(np.ndarray): def __eq__(self, other): - return super(MyArray, self).__eq__(other).view(np.ndarray) + return super().__eq__(other).view(np.ndarray) def __lt__(self, other): - return super(MyArray, self).__lt__(other).view(np.ndarray) + return super().__lt__(other).view(np.ndarray) def all(self, *args, **kwargs): raise NotImplementedError @@ -585,10 +585,10 @@ def test_subclass_that_cannot_be_bool(self): # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. class MyArray(np.ndarray): def __eq__(self, other): - return super(MyArray, self).__eq__(other).view(np.ndarray) + return super().__eq__(other).view(np.ndarray) def __lt__(self, other): - return super(MyArray, self).__lt__(other).view(np.ndarray) + return super().__lt__(other).view(np.ndarray) def all(self, *args, **kwargs): raise NotImplementedError From 9aacb3a2a736ed5e0ec885e3b7cae9629c683ef2 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 19 Mar 2021 13:13:28 +0000 Subject: [PATCH 0790/1270] MAINT: Add mising type to cdef statement Add missing type to cdef statement Small spelling fix --- numpy/random/_common.pyx | 4 ++-- numpy/random/_generator.pyx | 2 +- numpy/random/mtrand.pyx | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index 19fb34d4daa9..719647c3e8c5 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -219,8 +219,8 @@ cdef np.ndarray int_to_array(object value, object name, object bits, object uint cdef validate_output_shape(iter_shape, np.ndarray output): - cdef np.npy_intp *shape - cdef ndim, i + cdef np.npy_intp *dims + cdef np.npy_intp ndim, i cdef bint error dims = np.PyArray_DIMS(output) ndim = np.PyArray_NDIM(output) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index bf83c4a0c428..17a52a8d5261 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -1745,7 +1745,7 @@ cdef class Generator: either positive or negative, hence making our test 2-tailed. Because we are estimating the mean and we have N=11 values in our sample, - we have N-1=10 degrees of freedom. We set our signifance level to 95% and + we have N-1=10 degrees of freedom. 
We set our significance level to 95% and compute the t statistic using the empirical mean and empirical standard deviation of our intake. We use a ddof of 1 to base the computation of our empirical standard deviation on an unbiased estimate of the variance (note: diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index e5083bdf10e6..23cb5ea319b2 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -2156,7 +2156,7 @@ cdef class RandomState: either positive or negative, hence making our test 2-tailed. Because we are estimating the mean and we have N=11 values in our sample, - we have N-1=10 degrees of freedom. We set our signifance level to 95% and + we have N-1=10 degrees of freedom. We set our significance level to 95% and compute the t statistic using the empirical mean and empirical standard deviation of our intake. We use a ddof of 1 to base the computation of our empirical standard deviation on an unbiased estimate of the variance (note: From f14a028404e0457b7ca049fdd4968675cbc9cf54 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Fri, 19 Mar 2021 12:42:57 -0400 Subject: [PATCH 0791/1270] Update numpy/core/arrayprint.py Co-authored-by: Sebastian Berg --- numpy/core/arrayprint.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 523820c33c11..e8698651895f 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -1053,9 +1053,9 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', Pad the exponent with zeros until it contains at least this many digits. If omitted, the exponent will be at least 2 digits. min_digits : non-negative integer or None, optional - Minimum number of digits to print. Only has an effect if `unique=True` - in which case additional digits past those necessary to uniquely - identify the value may be printed, rounding the last additional digit. + Minimum number of digits to print. This only has an effect for `unique=True`. + In that case more digits than necessary to uniquely identify the value may + be printed. The last additional digit is rounded unbiased. Returns ------- From 5abc3946bd951211df4d0b492bd1781394dddcde Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Fri, 19 Mar 2021 12:45:28 -0400 Subject: [PATCH 0792/1270] Update numpy/core/arrayprint.py Co-authored-by: Sebastian Berg --- numpy/core/arrayprint.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index e8698651895f..8a93705ca0f8 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -1030,7 +1030,7 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', If `True`, use a digit-generation strategy which gives the shortest representation which uniquely identifies the floating-point number from other values of the same type, by judicious rounding. If `precision` - is given fewer digits than necessary can be printed, or if `min_digits` + is given fewer digits than necessary can be printed. If `min_digits` is given more can be printed, in which cases the last digit is rounded with unbiased rounding. 
If `False`, digits are generated as if printing an infinite-precision From d177382065ca135b921175b94a79119c61a1dbd5 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Fri, 19 Mar 2021 12:58:15 -0400 Subject: [PATCH 0793/1270] Update arrayprint.py --- numpy/core/arrayprint.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 8a93705ca0f8..d9d2dbe68fac 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -1053,9 +1053,10 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', Pad the exponent with zeros until it contains at least this many digits. If omitted, the exponent will be at least 2 digits. min_digits : non-negative integer or None, optional - Minimum number of digits to print. This only has an effect for `unique=True`. - In that case more digits than necessary to uniquely identify the value may - be printed. The last additional digit is rounded unbiased. + Minimum number of digits to print. This only has an effect for + `unique=True`. In that case more digits than necessary to uniquely + identify the value may be printed and rounded unbiased. + Returns ------- From 2ccc7942f072b7fdbe54f148cdcb6a7d79388a89 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 19 Mar 2021 23:20:57 -0500 Subject: [PATCH 0794/1270] BUG: Fix small valgrind-found issues (#18651) * BUG: Fix small valgrind-found issues This should be backportable. There was at least one that I could not reproduce when running the tests again. And the new random-shuffle tests give false-positives (which is just slightly annoying, considering that we are very close to almost only "longdouble" related false-positives) * BUG: Add missing decref in user-dtype fallback paths The missing decref here only leaks references and can never leak actual memory fortunately. * MAINT,TST: Simplify the "refcount logic" in the dispatch tests again Using SETREF can be nice, but was just overcomplicating thing here... 
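The wording refined in patches 0791-0793 above (how `min_digits` interacts with unique-mode, unbiased rounding) is easier to follow next to a concrete call; a small sketch, again assuming a build containing this series, reusing the gh-18609 value from the tests added earlier in the series:

import numpy as np

a = np.float64.fromhex('-1p-97')

np.format_float_scientific(a, unique=True)
# -> '-6.310887241768095e-30'   (shortest repr that round-trips)
np.format_float_scientific(a, unique=True, precision=14)
# -> '-6.31088724176809e-30'    (precision trims a digit, rounding the last one)
np.format_float_scientific(a, unique=True, min_digits=16)
# -> '-6.3108872417680944e-30'  (min_digits adds a digit, rounded unbiased)
np.format_float_scientific(np.float64('1e120'), min_digits=3)
# -> '1.000e+120'               (min_digits pads the short unique repr out to
#                                three fractional digits)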
--- numpy/core/src/multiarray/array_coercion.c | 1 + numpy/core/src/multiarray/buffer.c | 1 + numpy/core/src/multiarray/legacy_dtype_implementation.c | 2 ++ numpy/core/src/umath/_umath_tests.c.src | 1 + numpy/core/tests/test_multiarray.py | 2 +- 5 files changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index 63fa5f511f97..5be4ea2b270f 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -748,6 +748,7 @@ find_descriptor_from_array( NULL, DType, &flags, item_DType) < 0) { Py_DECREF(iter); Py_DECREF(elem); + Py_XDECREF(*out_descr); Py_XDECREF(item_DType); return -1; } diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c index 813850224714..5458c81cccec 100644 --- a/numpy/core/src/multiarray/buffer.c +++ b/numpy/core/src/multiarray/buffer.c @@ -878,6 +878,7 @@ void_getbuffer(PyObject *self, Py_buffer *view, int flags) */ _buffer_info_t *info = _buffer_get_info(&scalar->_buffer_info, self, flags); if (info == NULL) { + Py_DECREF(self); return -1; } view->format = info->format; diff --git a/numpy/core/src/multiarray/legacy_dtype_implementation.c b/numpy/core/src/multiarray/legacy_dtype_implementation.c index 3ce4710fddb4..d2e95348dd70 100644 --- a/numpy/core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/core/src/multiarray/legacy_dtype_implementation.c @@ -161,10 +161,12 @@ PyArray_LegacyCanCastSafely(int fromtype, int totype) while (*curtype != NPY_NOTYPE) { if (*curtype++ == totype) { + Py_DECREF(from); return 1; } } } + Py_DECREF(from); return 0; } diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src index 750fbeb92a7b..4e250e43b650 100644 --- a/numpy/core/src/umath/_umath_tests.c.src +++ b/numpy/core/src/umath/_umath_tests.c.src @@ -614,6 +614,7 @@ UMath_Tests_test_dispatch(PyObject *NPY_UNUSED(dummy), PyObject *NPY_UNUSED(dumm if (item == NULL || PyDict_SetItemString(dict, "@str@", item) < 0) { goto err; } + Py_DECREF(item); /**end repeat**/ item = PyList_New(0); if (item == NULL || PyDict_SetItemString(dict, "all", item) < 0) { diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index cffb1af99c31..ddd0e31c562b 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -7486,7 +7486,7 @@ def test_out_of_order_fields(self): memoryview(arr) def test_max_dims(self): - a = np.empty((1,) * 32) + a = np.ones((1,) * 32) self._check_roundtrip(a) @pytest.mark.slow From 50752aa920be32b74c1a7d0e4242e84b15ffa73c Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 8 Mar 2021 14:17:19 +0530 Subject: [PATCH 0795/1270] ENH, SIMD: Added integer dispatch --- .../src/umath/loops_arithmetic.dispatch.c.src | 131 ++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100644 numpy/core/src/umath/loops_arithmetic.dispatch.c.src diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src new file mode 100644 index 000000000000..0e68f1b7b26e --- /dev/null +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -0,0 +1,131 @@ +/*@targets + ** $maxopt baseline + ** sse2 sse41 avx2 avx512_skx + ** vsx2 + ** neon + **/ +#define _UMATHMODULE +#define _MULTIARRAYMODULE +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#include "simd/simd.h" +#include "loops_utils.h" +#include "loops.h" +#include "lowlevel_strided_loops.h" +#include +// 
Provides the various *_LOOP macros +#include "fast_loop_macros.h" + +//############################################################################### +//## Unsigned Integers +//############################################################################### +/******************************************************************************** + ** Defining the SIMD kernels + ********************************************************************************/ +#ifdef NPY_SIMD +/**begin repeat + * #sfx = u8, u16, u32, u64# + */ + +static void simd_divide_by_scalar_contig_contig_@sfx@ +(npyv_lanetype_@sfx@ *src, const npyv_lanetype_@sfx@ scalar, npyv_lanetype_@sfx@ *dst, + int len) +{ + const int vstep = npyv_nlanes_@sfx@; + const npyv_@sfx@x3 divisor = npyv_divisor_@sfx@(scalar); + + for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { + npyv_@sfx@ a = npyv_load_@sfx@(src); + npyv_@sfx@ c = npyv_divc_@sfx@(a, divisor); + npyv_store_@sfx@(dst, c); + } + for (; len > 0; --len, ++src, ++dst) { + const npyv_lanetype_@sfx@ a = *src; + *dst = a / scalar; + } + npyv_cleanup(); +} + +/**end repeat**/ +#endif + + + +// XXX Need to see what can be done for 64 bits +/**begin repeat + * Unsigned types + * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# + * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# + * #SIGNED_TYPE = BYTE, SHORT, INT, LONG, LONGLONG# + */ +#if NPY_BITSOF_@SIGNED_TYPE@ <= 8 + #define simd_divide_by_scalar_@type@ simd_divide_by_scalar_contig_contig_u8 +#elif NPY_BITSOF_@SIGNED_TYPE@ <= 16 + #define simd_divide_by_scalar_@type@ simd_divide_by_scalar_contig_contig_u16 +#elif NPY_BITSOF_@SIGNED_TYPE@ <= 32 + #define simd_divide_by_scalar_@type@ simd_divide_by_scalar_contig_contig_u32 +#else + #define simd_divide_by_scalar_@type@ simd_divide_by_scalar_contig_contig_u64 +#endif +static NPY_INLINE int +run_binary_simd_divide_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) +{ + BINARY_DEFS + + if (n == 0) { + return 1; + } + + const @type@ in2 = *(@type@ *)ip2; + if (in2 == 0) { + npy_set_floatstatus_divbyzero(); + BINARY_LOOP_SLIDING { + *((@type@ *)op1) = 0; + } + return 1; + } +#if defined NPY_SIMD + #ifdef NPY_HAVE_AVX512F + const npy_intp vector_size_bytes = 64; + #elif defined NPY_HAVE_AVX2 + const npy_intp vector_size_bytes = 32; + #else + const npy_intp vector_size_bytes = 16; + #endif + // XXX Implement other loops + if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), (npy_uintp)vector_size_bytes)) { + simd_divide_by_scalar_@type@(ip1, in2, op1, n); + return 1; + } +#endif + return 0; +} +/**end repeat**/ + +/**begin repeat + * Unsigned types + * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# + * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + if (IS_BINARY_REDUCE) { + BINARY_REDUCE_LOOP(@type@) { + io1 /= *(@type@ *)ip2; + } + *((@type@ *)iop1) = io1; + } + else if (!run_binary_simd_divide_@TYPE@(args, dimensions, steps)) { + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + if (in2 == 0) { + npy_set_floatstatus_divbyzero(); + *((@type@ *)op1) = 0; + } + *((@type@ *)op1) = in1 / in2; + } + } +} +/**end repeat**/ From 6b2fb9e6a567e24a8940d0c8d78410a310c531a1 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 8 Mar 2021 14:18:23 +0530 Subject: [PATCH 0796/1270] ENH, SIMD: Use integer dispatch --- 
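The dispatch source added above (and wired into the build in the following patches) changes only how unsigned integer division is computed, not what it returns: a non-zero scalar divisor takes the `npyv_divisor_*`/`npyv_divc_*` fast path, and a zero divisor sets the divide-by-zero floating-point flag and produces 0. A small sketch of that observable behaviour is below; whether the SIMD path is actually taken depends on build flags and CPU features, so treat it purely as an illustration of the semantics.

```python
import numpy as np

a = np.array([0, 7, 254, 255], dtype=np.uint8)

# Contiguous unsigned data divided by a non-zero scalar: the case the
# SIMD kernel targets.
print(a // np.uint8(3))

# Division by zero: the result is 0 and the divide-by-zero flag is set,
# which NumPy reports as a RuntimeWarning by default.
with np.errstate(divide="warn"):
    print(a // np.uint8(0))
```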
numpy/core/src/umath/loops.c.src | 16 ---------------- numpy/core/src/umath/loops.h.src | 13 ++++++++++++- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 68e209fe9312..04665dc5296e 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1014,22 +1014,6 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : 0); } -NPY_NO_EXPORT void -@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - if (in2 == 0) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else { - *((@type@ *)op1)= in1/in2; - } - } -} - NPY_NO_EXPORT void @TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index b3a19be12d62..0301aa5ed7b8 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -53,6 +53,17 @@ BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void ***************************************************************************** */ +#ifndef NPY_DISABLE_OPTIMIZATION + #include "loops_arithmetic.dispatch.h" +#endif + +/**begin repeat + * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# + */ + NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_divide, + (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))) +/**end repeat**/ + /**begin repeat * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# */ @@ -141,7 +152,7 @@ NPY_NO_EXPORT void @S@@TYPE@_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); NPY_NO_EXPORT void -@S@@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); NPY_NO_EXPORT void @S@@TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); From f2cb33bcf60e72924b46dd652af64d0af8da2508 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 8 Mar 2021 14:19:18 +0530 Subject: [PATCH 0797/1270] ENH, SIMD: Add dispatch to build process --- numpy/core/code_generators/generate_umath.py | 2 +- numpy/core/setup.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index b5305fbfce98..2e5548b6924a 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -325,7 +325,7 @@ def english_upper(s): Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy.core.umath.floor_divide'), 'PyUFunc_DivisionTypeResolver', - TD(intfltcmplx), + TD(intfltcmplx, cfunc_alias='divide', dispatch=[('loops_arithmetic', 'BHILQ')]), [TypeDescription('m', FullTypeDescr, 'mq', 'm'), TypeDescription('m', FullTypeDescr, 'md', 'm'), TypeDescription('m', FullTypeDescr, 'mm', 'q'), diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 8c34a3286d72..df405bcaf487 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -931,6 +931,7 @@ def generate_umath_c(ext, build_dir): join('src', 'umath', 'loops.c.src'), join('src', 'umath', 'loops_unary_fp.dispatch.c.src'), join('src', 'umath', 
'loops_arithm_fp.dispatch.c.src'), + join('src', 'umath', 'loops_arithmetic.dispatch.c.src'), join('src', 'umath', 'loops_trigonometric.dispatch.c.src'), join('src', 'umath', 'loops_exponent_log.dispatch.c.src'), join('src', 'umath', 'matmul.h.src'), From 453043c10a1d343a0ecc16c9a88bcfd0dfdfd4ce Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 8 Mar 2021 14:19:40 +0530 Subject: [PATCH 0798/1270] MAINT, SIMD: Add loops_arithmetic.dispatch.c.src --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 05df19335be2..736597b6b7af 100644 --- a/.gitignore +++ b/.gitignore @@ -215,5 +215,6 @@ numpy/core/src/_simd/_simd_inc.h # umath module numpy/core/src/umath/loops_unary_fp.dispatch.c numpy/core/src/umath/loops_arithm_fp.dispatch.c +numpy/core/src/umath/loops_arithmetic.dispatch.c numpy/core/src/umath/loops_trigonometric.dispatch.c numpy/core/src/umath/loops_exponent_log.dispatch.c From 71e84dcd2ec1a59b6426f05b9095a3a2fd51c01d Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 10 Mar 2021 20:05:34 +0530 Subject: [PATCH 0799/1270] MAINT: Fixed dispatch in generate_umath --- numpy/core/code_generators/generate_umath.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 2e5548b6924a..57c811ff3306 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -245,6 +245,8 @@ def english_upper(s): O = 'O' P = 'P' ints = 'bBhHiIlLqQ' +sints = 'bhilq' +uints = 'BHILQ' times = 'Mm' timedeltaonly = 'm' intsO = ints + O @@ -325,7 +327,9 @@ def english_upper(s): Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy.core.umath.floor_divide'), 'PyUFunc_DivisionTypeResolver', - TD(intfltcmplx, cfunc_alias='divide', dispatch=[('loops_arithmetic', 'BHILQ')]), + TD(uints, cfunc_alias='divide', + dispatch=[('loops_arithmetic', 'BHILQ')]), + TD(sints + flts + cmplx), [TypeDescription('m', FullTypeDescr, 'mq', 'm'), TypeDescription('m', FullTypeDescr, 'md', 'm'), TypeDescription('m', FullTypeDescr, 'mm', 'q'), From bbb143646cbaad2866ed401ca3c795f083285f78 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sat, 20 Mar 2021 16:22:06 +0530 Subject: [PATCH 0800/1270] SIMD, MAINT: Refined kernel and inner ufunc functions --- .../src/umath/loops_arithmetic.dispatch.c.src | 109 +++++++----------- 1 file changed, 43 insertions(+), 66 deletions(-) diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index 0e68f1b7b26e..a012d50dd72c 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -1,6 +1,6 @@ /*@targets ** $maxopt baseline - ** sse2 sse41 avx2 avx512_skx + ** sse2 sse41 avx2 avx512f avx512_skx ** vsx2 ** neon **/ @@ -12,26 +12,26 @@ #include "loops_utils.h" #include "loops.h" #include "lowlevel_strided_loops.h" -#include // Provides the various *_LOOP macros #include "fast_loop_macros.h" //############################################################################### -//## Unsigned Integers +//## Division //############################################################################### /******************************************************************************** ** Defining the SIMD kernels ********************************************************************************/ -#ifdef NPY_SIMD +#if NPY_SIMD /**begin 
repeat * #sfx = u8, u16, u32, u64# */ - -static void simd_divide_by_scalar_contig_contig_@sfx@ -(npyv_lanetype_@sfx@ *src, const npyv_lanetype_@sfx@ scalar, npyv_lanetype_@sfx@ *dst, - int len) +static NPY_INLINE void +simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) { - const int vstep = npyv_nlanes_@sfx@; + npyv_lanetype_@sfx@ *src = (npyv_lanetype_@sfx@ *) args[0]; + npyv_lanetype_@sfx@ scalar = *(npyv_lanetype_@sfx@ *) args[1]; + npyv_lanetype_@sfx@ *dst = (npyv_lanetype_@sfx@ *) args[2]; + const int vstep = npyv_nlanes_@sfx@; const npyv_@sfx@x3 divisor = npyv_divisor_@sfx@(scalar); for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { @@ -39,92 +39,69 @@ static void simd_divide_by_scalar_contig_contig_@sfx@ npyv_@sfx@ c = npyv_divc_@sfx@(a, divisor); npyv_store_@sfx@(dst, c); } + for (; len > 0; --len, ++src, ++dst) { const npyv_lanetype_@sfx@ a = *src; *dst = a / scalar; } + npyv_cleanup(); } - /**end repeat**/ #endif +/******************************************************************************** + ** Defining ufunc inner functions + ********************************************************************************/ - -// XXX Need to see what can be done for 64 bits /**begin repeat * Unsigned types - * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# - * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# - * #SIGNED_TYPE = BYTE, SHORT, INT, LONG, LONGLONG# + * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# + * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# + * #STYPE = BYTE, SHORT, INT, LONG, LONGLONG# */ -#if NPY_BITSOF_@SIGNED_TYPE@ <= 8 - #define simd_divide_by_scalar_@type@ simd_divide_by_scalar_contig_contig_u8 -#elif NPY_BITSOF_@SIGNED_TYPE@ <= 16 - #define simd_divide_by_scalar_@type@ simd_divide_by_scalar_contig_contig_u16 -#elif NPY_BITSOF_@SIGNED_TYPE@ <= 32 - #define simd_divide_by_scalar_@type@ simd_divide_by_scalar_contig_contig_u32 -#else - #define simd_divide_by_scalar_@type@ simd_divide_by_scalar_contig_contig_u64 -#endif -static NPY_INLINE int -run_binary_simd_divide_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ - BINARY_DEFS - - if (n == 0) { - return 1; - } - - const @type@ in2 = *(@type@ *)ip2; - if (in2 == 0) { - npy_set_floatstatus_divbyzero(); - BINARY_LOOP_SLIDING { - *((@type@ *)op1) = 0; - } - return 1; - } -#if defined NPY_SIMD - #ifdef NPY_HAVE_AVX512F - const npy_intp vector_size_bytes = 64; - #elif defined NPY_HAVE_AVX2 - const npy_intp vector_size_bytes = 32; - #else - const npy_intp vector_size_bytes = 16; - #endif - // XXX Implement other loops - if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), (npy_uintp)vector_size_bytes)) { - simd_divide_by_scalar_@type@(ip1, in2, op1, n); - return 1; - } +#undef TO_SIMD_SFX +#if 0 +/**begin repeat1 + * #len = 8, 16, 32, 64# + */ +#elif NPY_BITSOF_@STYPE@ == @len@ + #define TO_SIMD_SFX(X) X##_u@len@ +/**end repeat1**/ #endif - return 0; -} -/**end repeat**/ -/**begin repeat - * Unsigned types - * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# - * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# - */ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { if (IS_BINARY_REDUCE) { BINARY_REDUCE_LOOP(@type@) { - io1 /= *(@type@ *)ip2; + const @type@ d = *(@type@ *)ip2; + if (NPY_UNLIKELY(d == 0)) { + npy_set_floatstatus_divbyzero(); + io1 = 0; + } else { + io1 /= d; + } } *((@type@ *)iop1) = io1; } - else if 
(!run_binary_simd_divide_@TYPE@(args, dimensions, steps)) { +#if NPY_SIMD && defined(TO_SIMD_SFX) + // for contiguous block of memory, divisor is a scalar and not 0 + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH) && + (*(@type@ *)args[1]) != 0) { + TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); + } +#endif + else { BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - if (in2 == 0) { + if (NPY_UNLIKELY(in2 == 0)) { npy_set_floatstatus_divbyzero(); *((@type@ *)op1) = 0; + } else{ + *((@type@ *)op1) = in1 / in2; } - *((@type@ *)op1) = in1 / in2; } } } From c78d9a0bb1429f3c4d56d8687ae54cbbe7158838 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sat, 20 Mar 2021 17:01:43 +0530 Subject: [PATCH 0801/1270] TST: Division tests for unsigned ints --- numpy/core/tests/test_umath.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 2249c866caf5..b31b84d0cc2b 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -250,13 +250,22 @@ def test_division_int(self): assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) @pytest.mark.parametrize("input_dtype", - [np.int8, np.int16, np.int32, np.int64]) + np.sctypes['int'] + np.sctypes['uint']) def test_division_int_boundary(self, input_dtype): iinfo = np.iinfo(input_dtype) + # Unsigned: + # Create list with 0, 25th, 50th, 75th percentile and max + if iinfo.min == 0: + lst = [0, iinfo.max//4, iinfo.max//2, + int(iinfo.max/1.33), iinfo.max] + divisors = [iinfo.max//4, iinfo.max//2, + int(iinfo.max/1.33), iinfo.max] + # Signed: # Create list with min, 25th percentile, 0, 75th percentile, max - lst = [iinfo.min, iinfo.min//2, 0, iinfo.max//2, iinfo.max] - divisors = [iinfo.min, iinfo.min//2, iinfo.max//2, iinfo.max] + else: + lst = [iinfo.min, iinfo.min//2, 0, iinfo.max//2, iinfo.max] + divisors = [iinfo.min, iinfo.min//2, iinfo.max//2, iinfo.max] a = np.array(lst, dtype=input_dtype) for divisor in divisors: @@ -926,7 +935,7 @@ def test_log_values(self): assert_raises(FloatingPointError, np.log, np.float32(-np.inf)) assert_raises(FloatingPointError, np.log, np.float32(-1.0)) - # See https://github.com/numpy/numpy/issues/18005 + # See https://github.com/numpy/numpy/issues/18005 with assert_no_warnings(): a = np.array(1e9, dtype='float32') np.log(a) From 6115fc52703baf07d4cfc629efb58866f0d16856 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 21 Mar 2021 17:45:23 +0100 Subject: [PATCH 0802/1270] BLD: remove /usr/include from default include dirs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Including this directory is painful for cross-compiling, see gh-14980 and gh-13280. Removing this directory fixes the following build failure: ``` gcc: numpy/core/src/common/numpyos.c In file included from numpy/core/src/common/numpyos.c:23: /usr/include/xlocale.h:27:16: error: redefinition of ‘struct __locale_struct’ 27 | typedef struct __locale_struct ``` This error also shows up in various other build issues outside of the NumPy issue tracker. Compilers normally always include this path, so this shouldn't break anything. The default include paths for the compiler can be checked, e.g. for gcc with `cpp -v`. That will typically have /usr/include last. 
In case this breaks something for a nonstandard compiler, that can be worked around via a site.cfg file in the root of the repo (or equivalently, `~/numpy-site.cfg`) containing: ``` [DEFAULT] include_dirs = /usr/include ``` The same principle should apply to `/usr/lib`. I will make that change in a separate commit, because the failure mode for that will be different (and I'm not running into it right now). --- numpy/distutils/system_info.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 2e355a34aa49..ae6f83e12fbd 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -314,7 +314,7 @@ def add_system_root(library_root): '/opt/local/lib', '/sw/lib'], platform_bits) default_runtime_dirs = [] default_include_dirs = ['/usr/local/include', - '/opt/include', '/usr/include', + '/opt/include', # path of umfpack under macports '/opt/local/include/ufsparse', '/opt/local/include', '/sw/include', @@ -323,8 +323,7 @@ def add_system_root(library_root): default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'], platform_bits) - default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include', - '/usr/include'] + default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] if os.path.exists('/usr/lib/X11'): globbed_x11_dir = glob('/usr/lib/*/libX11.so') From 326a447e2997918975aa8cb2578ea13855497b76 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 21 Mar 2021 18:08:26 +0100 Subject: [PATCH 0803/1270] DEV: add a conda environment.yml with all development dependencies There are enough dependencies now that it's too hard to remember. This set is comprehensive, so a single `conda env create -f environment.yml` gives you everything in a fresh environment named numpy-dev. [ci skip] --- environment.yml | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 environment.yml diff --git a/environment.yml b/environment.yml new file mode 100644 index 000000000000..ecfebee3e35e --- /dev/null +++ b/environment.yml @@ -0,0 +1,33 @@ +# To use: +# $ conda env create -f environment.yml # `mamba` works too for this command +# $ conda activate numpy-dev +name: numpy-dev +channels: + - conda-forge +dependencies: + - python + - cython + - compilers + - openblas + # For testing + - pytest + - pytest-cov + - pytest-xdist + - hypothesis + # For type annotations + - mypy=0.812 + - typing_extensions + # For building docs + - sphinx=3.5.2 + - numpydoc=1.1.0 + - ipython + - scipy + - pandas + - matplotlib + - pydata-sphinx-theme + # For linting + - pycodestyle=2.7.0 + - gitpython + # Used in some tests + - cffi + - pytz From 4f5be827dbdbe5337845a5dea1bd9342a5eeba60 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 21 Mar 2021 20:29:19 +0100 Subject: [PATCH 0804/1270] DOC: update development environment docs for conda virtual environments --- doc/source/dev/development_environment.rst | 23 ++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index fb1b8cd6ae45..665198c69de6 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -57,7 +57,8 @@ When using pytest as a target (the default), you can Using ``runtests.py`` is the recommended approach to running tests. There are also a number of alternatives to it, for example in-place -build or installing to a virtualenv. 
See the FAQ below for details. +build or installing to a virtualenv or a conda environment. See the FAQ below +for details. .. note:: @@ -130,17 +131,27 @@ to see this output, you can run the ``build_src`` stage verbosely:: $ python build build_src -v -Using virtualenvs ------------------ +Using virtual environments +-------------------------- A frequently asked question is "How do I set up a development version of NumPy in parallel to a released version that I use to do my job/research?". One simple way to achieve this is to install the released version in -site-packages, by using a binary installer or pip for example, and set -up the development version in a virtualenv. First install +site-packages, by using pip or conda for example, and set +up the development version in a virtual environment. + +If you use conda, we recommend creating a separate virtual environment for +numpy development using the ``environment.yml`` file in the root of the repo +(this will create the environment and install all development dependencies at +once):: + + $ conda env create -f environment.yml # `mamba` works too for this command + $ conda activate numpy-dev + +If you installed Python some other way than conda, first install `virtualenv`_ (optionally use `virtualenvwrapper`_), then create your -virtualenv (named numpy-dev here) with:: +virtualenv (named ``numpy-dev`` here) with:: $ virtualenv numpy-dev From a2f7cd1e8510a26edc22297d67729ea6fbf922e4 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 21 Mar 2021 20:52:29 +0100 Subject: [PATCH 0805/1270] DOC: add release note for removal of /usr/include from include paths Follow-up to gh-18658 --- doc/release/upcoming_changes/18658.compatibility.rst | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 doc/release/upcoming_changes/18658.compatibility.rst diff --git a/doc/release/upcoming_changes/18658.compatibility.rst b/doc/release/upcoming_changes/18658.compatibility.rst new file mode 100644 index 000000000000..bb4052b9daef --- /dev/null +++ b/doc/release/upcoming_changes/18658.compatibility.rst @@ -0,0 +1,6 @@ +``/usr/include`` removed from default include paths +--------------------------------------------------- +The default include paths when building a package with ``numpy.distutils`` no +longer include ``/usr/include``. This path is normally added by the compiler, +and hardcoding it can be problematic. In case this causes a problem, please +open an issue. A workaround is documented in PR 18658. From bfb5e9f3450b5ecc19c1093b3f09e61037588ede Mon Sep 17 00:00:00 2001 From: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Date: Sat, 20 Mar 2021 03:30:24 +0100 Subject: [PATCH 0806/1270] DOC: Update some plotting code to current Matplotlib idioms - 3D Axes are created via add_subplot(projection='3d') - There is now a `stairs()` function that's specifically designed for showing histogram curves - Labels should be passed as keyword arguments to the plot functions instead of to `legend()`, which reduces the risk of mixing them up. 
- ensure equal axis scaling in the meshgrid example --- doc/source/reference/routines.polynomials.classes.rst | 6 ++++-- doc/source/user/absolute_beginners.rst | 3 +-- doc/source/user/plots/matplotlib3.py | 3 +-- doc/source/user/quickstart.rst | 4 +++- doc/source/user/tutorial-ma.rst | 9 +++++---- numpy/fft/_pocketfft.py | 8 +++++--- numpy/lib/function_base.py | 3 ++- 7 files changed, 21 insertions(+), 15 deletions(-) diff --git a/doc/source/reference/routines.polynomials.classes.rst b/doc/source/reference/routines.polynomials.classes.rst index 10331e9c17a4..5f575bed13d4 100644 --- a/doc/source/reference/routines.polynomials.classes.rst +++ b/doc/source/reference/routines.polynomials.classes.rst @@ -290,7 +290,8 @@ polynomials up to degree 5 are plotted below. >>> import matplotlib.pyplot as plt >>> from numpy.polynomial import Chebyshev as T >>> x = np.linspace(-1, 1, 100) - >>> for i in range(6): ax = plt.plot(x, T.basis(i)(x), lw=2, label="$T_%d$"%i) + >>> for i in range(6): + ... ax = plt.plot(x, T.basis(i)(x), lw=2, label=f"$T_{i}$") ... >>> plt.legend(loc="upper left") @@ -304,7 +305,8 @@ The same plots over the range -2 <= `x` <= 2 look very different: >>> import matplotlib.pyplot as plt >>> from numpy.polynomial import Chebyshev as T >>> x = np.linspace(-2, 2, 100) - >>> for i in range(6): ax = plt.plot(x, T.basis(i)(x), lw=2, label="$T_%d$"%i) + >>> for i in range(6): + ... ax = plt.plot(x, T.basis(i)(x), lw=2, label=f"$T_{i}$") ... >>> plt.legend(loc="lower right") diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 126f5f2a395e..fda73c5fb5de 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -1672,9 +1672,8 @@ For example, you can plot a 1D array like this:: With Matplotlib, you have access to an enormous number of visualization options. :: - >>> from mpl_toolkits.mplot3d import Axes3D >>> fig = plt.figure() - >>> ax = Axes3D(fig) + >>> ax = fig.add_subplot(projection='3d') >>> X = np.arange(-5, 5, 0.15) >>> Y = np.arange(-5, 5, 0.15) >>> X, Y = np.meshgrid(X, Y) diff --git a/doc/source/user/plots/matplotlib3.py b/doc/source/user/plots/matplotlib3.py index 20a8c0767321..7b56067ef463 100644 --- a/doc/source/user/plots/matplotlib3.py +++ b/doc/source/user/plots/matplotlib3.py @@ -1,9 +1,8 @@ import numpy as np import matplotlib.pyplot as plt -from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() -ax = Axes3D(fig) +ax = fig.add_subplot(projection='3d') X = np.arange(-5, 5, 0.15) Y = np.arange(-5, 5, 0.15) X, Y = np.meshgrid(X, Y) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index b50d8a5ba888..28262c89ee90 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -1464,11 +1464,13 @@ that ``pylab.hist`` plots the histogram automatically, while >>> mu, sigma = 2, 0.5 >>> v = rg.normal(mu, sigma, 10000) >>> # Plot a normalized histogram with 50 bins - >>> plt.hist(v, bins=50, density=1) # matplotlib version (plot) + >>> plt.hist(v, bins=50, density=True) # matplotlib version (plot) >>> # Compute the histogram with numpy and then plot it >>> (n, bins) = np.histogram(v, bins=50, density=True) # NumPy version (no plot) >>> plt.plot(.5 * (bins[1:] + bins[:-1]), n) +With Matplotlib >=3.4 you can also use ``plt.stairs(n, bins)``. 
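The quickstart change above points readers to `plt.stairs`. A short sketch of the NumPy-then-Matplotlib idiom it refers to is given here; it assumes Matplotlib >= 3.4 is installed and uses an arbitrary seed for reproducibility.

```python
import numpy as np
import matplotlib.pyplot as plt

rg = np.random.default_rng(0)
v = rg.normal(2, 0.5, 10000)

# Compute the histogram with NumPy, then draw the resulting step curve
# with the stairs() helper added in Matplotlib 3.4.
n, bins = np.histogram(v, bins=50, density=True)
plt.stairs(n, bins)
plt.show()
```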
+ Further reading =============== diff --git a/doc/source/user/tutorial-ma.rst b/doc/source/user/tutorial-ma.rst index 88bad3cbeed9..a21c4aae12f4 100644 --- a/doc/source/user/tutorial-ma.rst +++ b/doc/source/user/tutorial-ma.rst @@ -366,12 +366,13 @@ after the beginning of the records: .. ipython:: python - plt.plot(t, china_total); - plt.plot(t[china_total.mask], cubic_fit[china_total.mask], '--', color='orange'); - plt.plot(7, np.polyval(params, 7), 'r*'); + plt.plot(t, china_total, label='Mainland China'); + plt.plot(t[china_total.mask], cubic_fit[china_total.mask], '--', + color='orange', label='Cubic estimate'); + plt.plot(7, np.polyval(params, 7), 'r*', label='7 days after start'); plt.xticks([0, 7, 13], dates[[0, 7, 13]]); plt.yticks([0, np.polyval(params, 7), 10000, 17500]); - plt.legend(['Mainland China', 'Cubic estimate', '7 days after start']); + plt.legend(); @savefig plot_covid_5.png plt.title("COVID-19 cumulative cases from Jan 21 to Feb 3 2020 - Mainland China\n" "Cubic estimate for 7 days after start"); diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 4ed3042a63e5..ad69f7c837bb 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -300,9 +300,11 @@ def ifft(a, n=None, axis=-1, norm=None): >>> n = np.zeros((400,), dtype=complex) >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) - >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') - [, ] - >>> plt.legend(('real', 'imaginary')) + >>> plt.plot(t, s.real, label='real') + [] + >>> plt.plot(t, s.imag, '--', label='imaginary') + [] + >>> plt.legend() >>> plt.show() diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index c6db42ce4905..2586e3f6281f 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -4277,7 +4277,8 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): >>> y = np.arange(-5, 5, 0.1) >>> xx, yy = np.meshgrid(x, y, sparse=True) >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) - >>> h = plt.contourf(x,y,z) + >>> h = plt.contourf(x, y, z) + >>> plt.axis('scaled') >>> plt.show() """ From d482ba810eaa4f18921c638aaaa89bb8983ba210 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 22 Mar 2021 07:27:04 +0000 Subject: [PATCH 0807/1270] MAINT: Bump sphinx from 3.5.2 to 3.5.3 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 3.5.2 to 3.5.3. 
- [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/3.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/commits) Signed-off-by: dependabot-preview[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index e366020205f2..ba9d3028e78d 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx==3.5.2 +sphinx==3.5.3 numpydoc==1.1.0 ipython scipy From a2c5af9c4f170cd452645a5d938d93ed24f246fa Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 22 Mar 2021 20:57:54 +0530 Subject: [PATCH 0808/1270] BENCH: Benchmarks for unsigned ints (#18075) --- benchmarks/benchmarks/bench_ufunc.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 13b7382a1708..b036581e1aae 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -135,18 +135,19 @@ def time_less_than_scalar2(self, dtype): class CustomScalarFloorDivideInt(Benchmark): - params = ([np.int8, np.int16, np.int32, np.int64], [8, -8, 43, -43, 0]) + params = (np.sctypes['int'] + np.sctypes['uint'], [8, -8, 43, -43]) param_names = ['dtype', 'divisors'] - max_value = 10**7 - min_value = -10**7 def setup(self, dtype, divisor): + if dtype in np.sctypes['uint'] and divisor < 0: + raise NotImplementedError( + "Skipping test for negative divisor with unsigned type") + iinfo = np.iinfo(dtype) - self.x = np.arange( - max(iinfo.min, self.min_value), - min(iinfo.max, self.max_value), dtype=dtype) + self.x = np.random.randint( + iinfo.min, iinfo.max, size=10000, dtype=dtype) - def time_floor_divide_int(self, dtpye, divisor): + def time_floor_divide_int(self, dtype, divisor): self.x // divisor From d4a270640b451636c76cb7cff52cc5a1a8a12b6c Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Mon, 22 Mar 2021 12:28:45 +0000 Subject: [PATCH 0809/1270] PERF: Use exponentials in place of inversion Use exponentials rather than generating exponentials via inversion --- .../upcoming_changes/18666.improvement.rst | 9 +++++++++ numpy/random/include/legacy-distributions.h | 1 + numpy/random/mtrand.pyx | 3 ++- numpy/random/src/distributions/distributions.c | 4 ++-- numpy/random/src/legacy/legacy-distributions.c | 4 ++++ numpy/random/tests/test_generator_mt19937.py | 18 +++++++++++------- 6 files changed, 29 insertions(+), 10 deletions(-) create mode 100644 doc/release/upcoming_changes/18666.improvement.rst diff --git a/doc/release/upcoming_changes/18666.improvement.rst b/doc/release/upcoming_changes/18666.improvement.rst new file mode 100644 index 000000000000..70b87ecf4e35 --- /dev/null +++ b/doc/release/upcoming_changes/18666.improvement.rst @@ -0,0 +1,9 @@ +``Generator.rayleigh`` and ``Generator.geometric`` performance improved +----------------------------------------------------------------------- +The performance of Rayleigh and geometric random variate generation +in ``Generator`` has improved. These are both transformation of exponential +random variables and the slow log-based inverse cdf transformation has +been replaced with the Ziggurat-based exponential variate generator. + +This change breaks the stream of variates generated when variates from +either of these distributions are produced. 
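The release note above describes both distributions as transformations of exponential variates, which is what the C changes below implement: `mode * sqrt(2 * E)` for Rayleigh and `ceil(-E / log1p(-p))` for the geometric inversion path. The sketch below replays those transformations at the Python level purely for illustration; it is not the library implementation and makes no promise about matching any particular bit stream.

```python
import numpy as np

rng = np.random.default_rng(12345)
scale, p, n = 2.0, 0.25, 100_000

# Rayleigh(scale) as a transform of a standard exponential draw E.
rayleigh_like = scale * np.sqrt(2.0 * rng.standard_exponential(n))

# Geometric(p) (inversion form) as a transform of another exponential draw.
geometric_like = np.ceil(-rng.standard_exponential(n) / np.log1p(-p))

print(rayleigh_like.mean(), scale * np.sqrt(np.pi / 2))  # close for large n
print(geometric_like.mean(), 1.0 / p)                    # close for large n
```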
diff --git a/numpy/random/include/legacy-distributions.h b/numpy/random/include/legacy-distributions.h index 3d882b73ba00..f6c5cf0532d1 100644 --- a/numpy/random/include/legacy-distributions.h +++ b/numpy/random/include/legacy-distributions.h @@ -17,6 +17,7 @@ extern double legacy_weibull(aug_bitgen_t *aug_state, double a); extern double legacy_power(aug_bitgen_t *aug_state, double a); extern double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale); extern double legacy_chisquare(aug_bitgen_t *aug_state, double df); +extern double legacy_rayleigh(bitgen_t *bitgen_state, double mode); extern double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df, double nonc); extern double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 23cb5ea319b2..e166634be1df 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -79,6 +79,7 @@ cdef extern from "include/legacy-distributions.h": double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale) nogil double legacy_power(aug_bitgen_t *aug_state, double a) nogil double legacy_chisquare(aug_bitgen_t *aug_state, double df) nogil + double legacy_rayleigh(aug_bitgen_t *aug_state, double mode) nogil double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df, double nonc) nogil double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden, @@ -3086,7 +3087,7 @@ cdef class RandomState: 0.087300000000000003 # random """ - return cont(&random_rayleigh, &self._bitgen, size, self.lock, 1, + return cont(&legacy_rayleigh, &self._bitgen, size, self.lock, 1, scale, 'scale', CONS_NON_NEGATIVE, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, None) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 6b4deb925ff5..9bdfa9bead82 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -508,7 +508,7 @@ double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) { } double random_rayleigh(bitgen_t *bitgen_state, double mode) { - return mode * sqrt(-2.0 * npy_log1p(-next_double(bitgen_state))); + return mode * sqrt(2.0 * random_standard_exponential(bitgen_state)); } double random_standard_t(bitgen_t *bitgen_state, double df) { @@ -960,7 +960,7 @@ RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p) { } int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) { - return (int64_t)ceil(npy_log1p(-next_double(bitgen_state)) / npy_log1p(-p)); + return (int64_t)ceil(-random_standard_exponential(bitgen_state) / npy_log1p(-p)); } int64_t random_geometric(bitgen_t *bitgen_state, double p) { diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index bfea15e40361..443c1a4bf781 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -112,6 +112,10 @@ double legacy_chisquare(aug_bitgen_t *aug_state, double df) { return 2.0 * legacy_standard_gamma(aug_state, df / 2.0); } +double legacy_rayleigh(bitgen_t *bitgen_state, double mode) { + return mode * sqrt(-2.0 * npy_log1p(-next_double(bitgen_state))); +} + double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df, double nonc) { double out; diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 310545e0d8ea..0108d84b3ab8 100644 --- 
a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1252,9 +1252,9 @@ def test_gamma_0(self): def test_geometric(self): random = Generator(MT19937(self.seed)) actual = random.geometric(.123456789, size=(3, 2)) - desired = np.array([[ 1, 10], - [ 1, 12], - [ 9, 10]]) + desired = np.array([[1, 11], + [1, 12], + [11, 17]]) assert_array_equal(actual, desired) def test_geometric_exceptions(self): @@ -1557,9 +1557,9 @@ def test_power(self): def test_rayleigh(self): random = Generator(MT19937(self.seed)) actual = random.rayleigh(scale=10, size=(3, 2)) - desired = np.array([[ 4.51734079831581, 15.6802442485758 ], - [ 4.19850651287094, 17.08718809823704], - [14.7907457708776 , 15.85545333419775]]) + desired = np.array([[4.19494429102666, 16.66920198906598], + [3.67184544902662, 17.74695521962917], + [16.27935397855501, 21.08355560691792]]) assert_array_almost_equal(actual, desired, decimal=14) def test_rayleigh_0(self): @@ -2114,7 +2114,11 @@ def test_lognormal(self): def test_rayleigh(self): scale = [1] bad_scale = [-1] - desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499]) + desired = np.array( + [1.1597068009872629, + 0.6539188836253857, + 1.1981526554349398] + ) random = Generator(MT19937(self.seed)) actual = random.rayleigh(scale * 3) From bbc07e76c1ac0395e48500dd943dd89231e5b35b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 23 Mar 2021 14:11:52 -0500 Subject: [PATCH 0810/1270] BUG: Fix small issues found with pytest-leaks None of these are particularly worrying as they either usually only leak reference (and not memory) or appear in rare or almost impossible error-paths, or are limited to the tests. Unfortunately, this PR will not apply to 1.20.x, due to small changes in the overrides. --- .../src/multiarray/arrayfunction_override.c | 26 ++++++++++++------- numpy/core/src/multiarray/convert_datatype.c | 3 +-- numpy/core/src/umath/_umath_tests.c.src | 1 + numpy/core/src/umath/_umath_tests.dispatch.c | 1 + numpy/core/src/umath/ufunc_object.c | 4 ++- numpy/core/tests/test_overrides.py | 26 ++++++++++--------- 6 files changed, 37 insertions(+), 24 deletions(-) diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c index 31415e4f2117..463a2d4d8724 100644 --- a/numpy/core/src/multiarray/arrayfunction_override.c +++ b/numpy/core/src/multiarray/arrayfunction_override.c @@ -341,18 +341,23 @@ array_implement_array_function( return NULL; } - /* Remove `like=` kwarg, which is NumPy-exclusive and thus not present + /* + * Remove `like=` kwarg, which is NumPy-exclusive and thus not present * in downstream libraries. If `like=` is specified but doesn't * implement `__array_function__`, raise a `TypeError`. 
*/ if (kwargs != NULL && PyDict_Contains(kwargs, npy_ma_str_like)) { PyObject *like_arg = PyDict_GetItem(kwargs, npy_ma_str_like); - if (like_arg && !get_array_function(like_arg)) { - return PyErr_Format(PyExc_TypeError, - "The `like` argument must be an array-like that implements " - "the `__array_function__` protocol."); + if (like_arg != NULL) { + PyObject *tmp_has_override = get_array_function(like_arg); + if (tmp_has_override == NULL) { + return PyErr_Format(PyExc_TypeError, + "The `like` argument must be an array-like that " + "implements the `__array_function__` protocol."); + } + Py_DECREF(tmp_has_override); + PyDict_DelItem(kwargs, npy_ma_str_like); } - PyDict_DelItem(kwargs, npy_ma_str_like); } PyObject *res = array_implement_array_function_internal( @@ -382,11 +387,14 @@ array_implement_c_array_function_creation( PyObject *public_api = NULL; PyObject *result = NULL; - if (!get_array_function(like)) { + /* If `like` doesn't implement `__array_function__`, raise a `TypeError` */ + PyObject *tmp_has_override = get_array_function(like); + if (tmp_has_override == NULL) { return PyErr_Format(PyExc_TypeError, - "The `like` argument must be an array-like that implements " - "the `__array_function__` protocol."); + "The `like` argument must be an array-like that " + "implements the `__array_function__` protocol."); } + Py_DECREF(tmp_has_override); if (fast_args != NULL) { /* diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index d3c9690345b4..18179f253172 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -916,8 +916,7 @@ PyArray_FindConcatenationDescriptor( "The dtype `%R` is not a valid dtype for concatenation " "since it is a subarray dtype (the subarray dimensions " "would be added as array dimensions).", result); - Py_DECREF(result); - return NULL; + Py_SETREF(result, NULL); } goto finish; } diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src index 4e250e43b650..7cc74a4f35ed 100644 --- a/numpy/core/src/umath/_umath_tests.c.src +++ b/numpy/core/src/umath/_umath_tests.c.src @@ -621,6 +621,7 @@ UMath_Tests_test_dispatch(PyObject *NPY_UNUSED(dummy), PyObject *NPY_UNUSED(dumm goto err; } NPY_CPU_DISPATCH_CALL_ALL(_umath_tests_dispatch_attach, (item)); + Py_SETREF(item, NULL); if (PyErr_Occurred()) { goto err; } diff --git a/numpy/core/src/umath/_umath_tests.dispatch.c b/numpy/core/src/umath/_umath_tests.dispatch.c index d86a54411367..85f3650106ea 100644 --- a/numpy/core/src/umath/_umath_tests.dispatch.c +++ b/numpy/core/src/umath/_umath_tests.dispatch.c @@ -29,5 +29,6 @@ void NPY_CPU_DISPATCH_CURFX(_umath_tests_dispatch_attach)(PyObject *list) PyObject *item = PyUnicode_FromString(NPY_TOSTRING(NPY_CPU_DISPATCH_CURFX(func))); if (item) { PyList_Append(list, item); + Py_DECREF(item); } } diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 653e0b5befd7..d70d15c50301 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5495,6 +5495,7 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { + Py_DECREF(tmp); return NULL; } ap1 = (PyArrayObject *) PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0); @@ -5514,6 +5515,7 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) /* DEPRECATED 2020-05-13, 
NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { + Py_DECREF(tmp); Py_DECREF(ap1); return NULL; } @@ -5538,7 +5540,7 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) "maximum supported dimension for an ndarray is %d, but " "`%s.outer()` result would have %d.", NPY_MAXDIMS, ufunc->name, newdims.len); - return NPY_FAIL; + goto fail; } if (newdims.ptr == NULL) { goto fail; diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py index 0809e1e92da8..9216a3f5fdfa 100644 --- a/numpy/core/tests/test_overrides.py +++ b/numpy/core/tests/test_overrides.py @@ -1,5 +1,6 @@ import inspect import sys +import os import tempfile from io import StringIO from unittest import mock @@ -558,18 +559,19 @@ def test_array_like_fromfile(self, numpy_ref): data = np.random.random(5) - fname = tempfile.mkstemp()[1] - data.tofile(fname) - - array_like = np.fromfile(fname, like=ref) - if numpy_ref is True: - assert type(array_like) is np.ndarray - np_res = np.fromfile(fname, like=ref) - assert_equal(np_res, data) - assert_equal(array_like, np_res) - else: - assert type(array_like) is self.MyArray - assert array_like.function is self.MyArray.fromfile + with tempfile.TemporaryDirectory() as tmpdir: + fname = os.path.join(tmpdir, "testfile") + data.tofile(fname) + + array_like = np.fromfile(fname, like=ref) + if numpy_ref is True: + assert type(array_like) is np.ndarray + np_res = np.fromfile(fname, like=ref) + assert_equal(np_res, data) + assert_equal(array_like, np_res) + else: + assert type(array_like) is self.MyArray + assert array_like.function is self.MyArray.fromfile @requires_array_function def test_exception_handling(self): From 797c3aaf6108699e46407096c5fecd77f62a535c Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 11 Mar 2021 16:09:50 -0600 Subject: [PATCH 0811/1270] ENH: Optimize and cleanup ufunc calls and ufunc CheckOverrides This is a rather large commit which clean up the ufunc override code for argument parsing. (I may attempt to split it up) There are two main things, argument parsing, especially for reductions is now much faster since `METH_FASTCALL` is used (when keyword arguments are being used). By moving the argument parsing and using more generic code this also simplifies the ufunc override checking especially when keyword arguments are present. Both of this decreases the argument parsing overhead quite a lot for ufunc calls, especially for small reductions. (Without double checking, I believe the speedup was up to around 30% for small reductions.) The downside is some added/annoyance due to the use of marcos to support both FASTCALL and no FASTCALL semantics. As a side note: vectorcall is likely not a huge factor for ufuncs since it is very common not to use any keyword arguments. OTOH, code that uses ``out=...`` a lot on small arrays should see a nice difference. 
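The speedups described in this commit message are all in argument parsing, so they are most visible on inputs small enough that parsing dominates: tiny reductions and calls that pass `out=` or other keywords. Below is a hedged micro-benchmark sketch one could use to look for the effect; the array size and loop count are arbitrary, the quoted ~30% figure is not reproduced here, and absolute numbers will vary by machine and NumPy version.

```python
import timeit
import numpy as np

a = np.ones(4)
out = np.empty(())

# For tiny arrays the per-call overhead (argument parsing, dispatch)
# dominates the arithmetic itself, which is where METH_FASTCALL helps.
print(timeit.timeit(lambda: np.add.reduce(a), number=100_000))
print(timeit.timeit(lambda: np.add.reduce(a, axis=0, out=out), number=100_000))
```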
--- numpy/core/include/numpy/ufuncobject.h | 6 +- numpy/core/src/umath/override.c | 520 ++------- numpy/core/src/umath/override.h | 7 +- numpy/core/src/umath/ufunc_object.c | 1419 +++++++++++++----------- numpy/core/src/umath/ufunc_object.h | 14 - numpy/core/src/umath/umathmodule.c | 28 - numpy/core/tests/test_multiarray.py | 2 +- numpy/core/tests/test_ufunc.py | 2 +- numpy/core/tests/test_umath.py | 40 +- 9 files changed, 883 insertions(+), 1155 deletions(-) diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h index e5d845842976..a20d827e34f2 100644 --- a/numpy/core/include/numpy/ufuncobject.h +++ b/numpy/core/include/numpy/ufuncobject.h @@ -41,7 +41,7 @@ struct _tagPyUFuncObject; * * For backwards compatibility, the regular type resolution function does not * support auxiliary data with object semantics. The type resolution call - * which returns a masked generic function returns a standard NpyAuxData + * which returns a masked generic function returns 5015a standard NpyAuxData * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros * work. * @@ -194,7 +194,11 @@ typedef struct _tagPyUFuncObject { * but this was never implemented. (This is also why the above * selector is called the "legacy" selector.) */ + #if PY_VERSION_HEX >= 0x03080000 + vectorcallfunc vectorcall; + #else void *reserved2; + #endif /* * A function which returns a masked inner loop for the ufunc. */ diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c index a0090e302324..f02e03933eef 100644 --- a/numpy/core/src/umath/override.c +++ b/numpy/core/src/umath/override.c @@ -1,6 +1,7 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define NO_IMPORT_ARRAY +#include "npy_argparse.h" #include "npy_pycompat.h" #include "numpy/ufuncobject.h" #include "npy_import.h" @@ -8,6 +9,7 @@ #include "override.h" #include "ufunc_override.h" + /* * For each positional argument and each argument in a possible "out" * keyword, look for overrides of the standard ufunc behaviour, i.e., @@ -22,25 +24,16 @@ * Returns -1 on failure. */ static int -get_array_ufunc_overrides(PyObject *args, PyObject *kwds, +get_array_ufunc_overrides(PyObject *in_args, PyObject *out_args, PyObject **with_override, PyObject **methods) { int i; int num_override_args = 0; int narg, nout = 0; - PyObject *out_kwd_obj; - PyObject **arg_objs, **out_objs; - - narg = PyTuple_Size(args); - if (narg < 0) { - return -1; - } - arg_objs = PySequence_Fast_ITEMS(args); - nout = PyUFuncOverride_GetOutObjects(kwds, &out_kwd_obj, &out_objs); - if (nout < 0) { - return -1; - } + narg = (int)PyTuple_GET_SIZE(in_args); + /* It is valid for out_args to be NULL: */ + nout = (out_args != NULL) ? (int)PyTuple_GET_SIZE(out_args) : 0; for (i = 0; i < narg + nout; ++i) { PyObject *obj; @@ -48,10 +41,10 @@ get_array_ufunc_overrides(PyObject *args, PyObject *kwds, int new_class = 1; if (i < narg) { - obj = arg_objs[i]; + obj = PyTuple_GET_ITEM(in_args, i); } else { - obj = out_objs[i - narg]; + obj = PyTuple_GET_ITEM(out_args, i - narg); } /* * Have we seen this class before? If so, ignore. @@ -86,7 +79,6 @@ get_array_ufunc_overrides(PyObject *args, PyObject *kwds, ++num_override_args; } } - Py_DECREF(out_kwd_obj); return num_override_args; fail: @@ -94,359 +86,117 @@ get_array_ufunc_overrides(PyObject *args, PyObject *kwds, Py_DECREF(with_override[i]); Py_DECREF(methods[i]); } - Py_DECREF(out_kwd_obj); return -1; } -/* - * The following functions normalize ufunc arguments. 
The work done is similar - * to what is done inside ufunc_object by get_ufunc_arguments for __call__ and - * generalized ufuncs, and by PyUFunc_GenericReduction for the other methods. - * It would be good to unify (see gh-8892). - */ /* - * ufunc() and ufunc.outer() accept 'sig' or 'signature'; - * normalize to 'signature' + * Build a dictionary from the keyword arguments, but replace out with the + * normalized version (and always pass it even if it was passed by position). */ static int -normalize_signature_keyword(PyObject *normal_kwds) +initialize_normal_kwds(PyObject *out_args, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, + PyObject *normal_kwds) { - PyObject *obj = _PyDict_GetItemStringWithError(normal_kwds, "sig"); - if (obj == NULL && PyErr_Occurred()){ - return -1; - } - if (obj != NULL) { - PyObject *sig = _PyDict_GetItemStringWithError(normal_kwds, "signature"); - if (sig == NULL && PyErr_Occurred()) { - return -1; - } - if (sig) { - PyErr_SetString(PyExc_TypeError, - "cannot specify both 'sig' and 'signature'"); - return -1; - } - /* - * No INCREF or DECREF needed: got a borrowed reference above, - * and, unlike e.g. PyList_SetItem, PyDict_SetItem INCREF's it. - */ - PyDict_SetItemString(normal_kwds, "signature", obj); - PyDict_DelItemString(normal_kwds, "sig"); - } - return 0; -} - -static int -normalize___call___args(PyUFuncObject *ufunc, PyObject *args, - PyObject **normal_args, PyObject **normal_kwds) -{ - /* - * ufunc.__call__(*args, **kwds) - */ - npy_intp i; - int not_all_none; - npy_intp nin = ufunc->nin; - npy_intp nout = ufunc->nout; - npy_intp nargs = PyTuple_GET_SIZE(args); - npy_intp nkwds = PyDict_Size(*normal_kwds); - PyObject *obj; - - if (nargs < nin) { - PyErr_Format(PyExc_TypeError, - "ufunc() missing %"NPY_INTP_FMT" of %"NPY_INTP_FMT - "required positional argument(s)", nin - nargs, nin); - return -1; - } - if (nargs > nin+nout) { - PyErr_Format(PyExc_TypeError, - "ufunc() takes from %"NPY_INTP_FMT" to %"NPY_INTP_FMT - "arguments but %"NPY_INTP_FMT" were given", - nin, nin+nout, nargs); - return -1; - } - - *normal_args = PyTuple_GetSlice(args, 0, nin); - if (*normal_args == NULL) { - return -1; - } - - /* If we have more args than nin, they must be the output variables.*/ - if (nargs > nin) { - if (nkwds > 0) { - PyObject *out_kwd = _PyDict_GetItemStringWithError(*normal_kwds, "out"); - if (out_kwd == NULL && PyErr_Occurred()) { + if (kwnames != NULL) { + for (Py_ssize_t i = 0; i < PyTuple_GET_SIZE(kwnames); i++) { + if (PyDict_SetItem(normal_kwds, + PyTuple_GET_ITEM(kwnames, i), args[i + len_args]) < 0) { return -1; } - else if (out_kwd) { - PyErr_Format(PyExc_TypeError, - "argument given by name ('out') and position " - "(%"NPY_INTP_FMT")", nin); - return -1; - } - } - for (i = nin; i < nargs; i++) { - not_all_none = (PyTuple_GET_ITEM(args, i) != Py_None); - if (not_all_none) { - break; - } - } - if (not_all_none) { - if (nargs - nin == nout) { - obj = PyTuple_GetSlice(args, nin, nargs); - } - else { - PyObject *item; - - obj = PyTuple_New(nout); - if (obj == NULL) { - return -1; - } - for (i = 0; i < nout; i++) { - if (i + nin < nargs) { - item = PyTuple_GET_ITEM(args, nin+i); - } - else { - item = Py_None; - } - Py_INCREF(item); - PyTuple_SET_ITEM(obj, i, item); - } - } - PyDict_SetItemString(*normal_kwds, "out", obj); - Py_DECREF(obj); } } - /* gufuncs accept either 'axes' or 'axis', but not both */ - if (nkwds >= 2) { - PyObject *axis_kwd = _PyDict_GetItemStringWithError(*normal_kwds, "axis"); - if (axis_kwd == NULL && 
PyErr_Occurred()) { - return -1; - } - PyObject *axes_kwd = _PyDict_GetItemStringWithError(*normal_kwds, "axes"); - if (axes_kwd == NULL && PyErr_Occurred()) { - return -1; - } - if (axis_kwd && axes_kwd) { - PyErr_SetString(PyExc_TypeError, - "cannot specify both 'axis' and 'axes'"); + static PyObject *out_str = NULL; + if (out_str == NULL) { + out_str = PyUnicode_InternFromString("out"); + if (out_str == NULL) { return -1; } } - /* finally, ufuncs accept 'sig' or 'signature' normalize to 'signature' */ - return nkwds == 0 ? 0 : normalize_signature_keyword(*normal_kwds); -} - -static int -normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args, - PyObject **normal_args, PyObject **normal_kwds) -{ - /* - * ufunc.reduce(a[, axis, dtype, out, keepdims]) - */ - npy_intp nargs = PyTuple_GET_SIZE(args); - npy_intp i; - PyObject *obj; - static PyObject *NoValue = NULL; - static char *kwlist[] = {"array", "axis", "dtype", "out", "keepdims", - "initial", "where"}; - - npy_cache_import("numpy", "_NoValue", &NoValue); - if (NoValue == NULL) return -1; - - if (nargs < 1 || nargs > 7) { - PyErr_Format(PyExc_TypeError, - "ufunc.reduce() takes from 1 to 7 positional " - "arguments but %"NPY_INTP_FMT" were given", nargs); - return -1; - } - *normal_args = PyTuple_GetSlice(args, 0, 1); - if (*normal_args == NULL) { - return -1; - } - for (i = 1; i < nargs; i++) { - PyObject *kwd = _PyDict_GetItemStringWithError(*normal_kwds, kwlist[i]); - if (kwd == NULL && PyErr_Occurred()) { + if (out_args != NULL) { + /* Replace `out` argument with the normalized version */ + int res = PyDict_SetItem(normal_kwds, out_str, out_args); + if (res < 0) { return -1; } - else if (kwd) { - PyErr_Format(PyExc_TypeError, - "argument given by name ('%s') and position " - "(%"NPY_INTP_FMT")", kwlist[i], i); + } + else { + /* Ensure that `out` is not present. */ + int res = PyDict_Contains(normal_kwds, out_str); + if (res < 0) { return -1; } - obj = PyTuple_GET_ITEM(args, i); - if (i == 3) { - /* remove out=None */ - if (obj == Py_None) { - continue; - } - obj = PyTuple_GetSlice(args, 3, 4); - } - /* Remove initial=np._NoValue */ - if (i == 5 && obj == NoValue) { - continue; - } - PyDict_SetItemString(*normal_kwds, kwlist[i], obj); - if (i == 3) { - Py_DECREF(obj); + if (res) { + return PyDict_DelItem(normal_kwds, out_str); } } return 0; } +/* + * ufunc() and ufunc.outer() accept 'sig' or 'signature'; + * normalize to 'signature' + */ static int -normalize_accumulate_args(PyUFuncObject *ufunc, PyObject *args, - PyObject **normal_args, PyObject **normal_kwds) +normalize_signature_keyword(PyObject *normal_kwds) { /* - * ufunc.accumulate(a[, axis, dtype, out]) + * If the keywords include sign rename to signature. An error + * will have been raised if both were given. 
*/ - npy_intp nargs = PyTuple_GET_SIZE(args); - npy_intp i; - PyObject *obj; - static char *kwlist[] = {"array", "axis", "dtype", "out", "keepdims"}; - - if (nargs < 1 || nargs > 4) { - PyErr_Format(PyExc_TypeError, - "ufunc.accumulate() takes from 1 to 4 positional " - "arguments but %"NPY_INTP_FMT" were given", nargs); - return -1; - } - *normal_args = PyTuple_GetSlice(args, 0, 1); - if (*normal_args == NULL) { + PyObject* obj = _PyDict_GetItemStringWithError(normal_kwds, "sig"); + if (obj == NULL && PyErr_Occurred()) { return -1; } - - for (i = 1; i < nargs; i++) { - PyObject *kwd = _PyDict_GetItemStringWithError(*normal_kwds, kwlist[i]); - if (kwd == NULL && PyErr_Occurred()) { + if (obj != NULL) { + /* + * No INCREF or DECREF needed: got a borrowed reference above, + * and, unlike e.g. PyList_SetItem, PyDict_SetItem INCREF's it. + */ + if (PyDict_SetItemString(normal_kwds, "signature", obj) < 0) { return -1; } - else if (kwd) { - PyErr_Format(PyExc_TypeError, - "argument given by name ('%s') and position " - "(%"NPY_INTP_FMT")", kwlist[i], i); + if (PyDict_DelItemString(normal_kwds, "sig") < 0) { return -1; } - obj = PyTuple_GET_ITEM(args, i); - if (i == 3) { - /* remove out=None */ - if (obj == Py_None) { - continue; - } - obj = PyTuple_GetSlice(args, 3, 4); - } - PyDict_SetItemString(*normal_kwds, kwlist[i], obj); - if (i == 3) { - Py_DECREF(obj); - } } return 0; } + static int -normalize_reduceat_args(PyUFuncObject *ufunc, PyObject *args, - PyObject **normal_args, PyObject **normal_kwds) +copy_positional_args_to_kwargs(const char **keywords, + PyObject *const *args, Py_ssize_t len_args, + PyObject *normal_kwds) { - /* - * ufunc.reduceat(a, indices[, axis, dtype, out]) - * the number of arguments has been checked in PyUFunc_GenericReduction. - */ - npy_intp i; - npy_intp nargs = PyTuple_GET_SIZE(args); - PyObject *obj; - static char *kwlist[] = {"array", "indices", "axis", "dtype", "out"}; - - if (nargs < 2 || nargs > 5) { - PyErr_Format(PyExc_TypeError, - "ufunc.reduceat() takes from 2 to 4 positional " - "arguments but %"NPY_INTP_FMT" were given", nargs); - return -1; - } - /* a and indices */ - *normal_args = PyTuple_GetSlice(args, 0, 2); - if (*normal_args == NULL) { - return -1; - } - - for (i = 2; i < nargs; i++) { - PyObject *kwd = _PyDict_GetItemStringWithError(*normal_kwds, kwlist[i]); - if (kwd == NULL && PyErr_Occurred()) { - return -1; - } - else if (kwd) { - PyErr_Format(PyExc_TypeError, - "argument given by name ('%s') and position " - "(%"NPY_INTP_FMT")", kwlist[i], i); - return -1; + for (Py_ssize_t i = 0; i < len_args; i++) { + if (keywords[i] == NULL) { + /* keyword argument is either input or output and not set here */ + continue; } - obj = PyTuple_GET_ITEM(args, i); - if (i == 4) { - /* remove out=None */ - if (obj == Py_None) { + if (NPY_UNLIKELY(i == 5)) { + /* + * This is only relevant for reduce, which is the only one with + * 5 keyword arguments. + */ + PyObject *NoValue = NULL; + assert(strcmp(keywords[i], "initial") == 0); + npy_cache_import("numpy", "_NoValue", &NoValue); + if (args[i] == NoValue) { continue; } - obj = PyTuple_GetSlice(args, 4, 5); } - PyDict_SetItemString(*normal_kwds, kwlist[i], obj); - if (i == 4) { - Py_DECREF(obj); + + int res = PyDict_SetItemString(normal_kwds, keywords[i], args[i]); + if (res < 0) { + return -1; } } return 0; } -static int -normalize_outer_args(PyUFuncObject *ufunc, PyObject *args, - PyObject **normal_args, PyObject **normal_kwds) -{ - /* - * ufunc.outer(*args, **kwds) - * all positional arguments should be inputs. 
- * for the keywords, we only need to check 'sig' vs 'signature'. - */ - npy_intp nin = ufunc->nin; - npy_intp nargs = PyTuple_GET_SIZE(args); - - if (nargs < nin) { - PyErr_Format(PyExc_TypeError, - "ufunc.outer() missing %"NPY_INTP_FMT" of %"NPY_INTP_FMT - "required positional " "argument(s)", nin - nargs, nin); - return -1; - } - if (nargs > nin) { - PyErr_Format(PyExc_TypeError, - "ufunc.outer() takes %"NPY_INTP_FMT" arguments but" - "%"NPY_INTP_FMT" were given", nin, nargs); - return -1; - } - - *normal_args = PyTuple_GetSlice(args, 0, nin); - if (*normal_args == NULL) { - return -1; - } - /* ufuncs accept 'sig' or 'signature' normalize to 'signature' */ - return normalize_signature_keyword(*normal_kwds); -} - -static int -normalize_at_args(PyUFuncObject *ufunc, PyObject *args, - PyObject **normal_args, PyObject **normal_kwds) -{ - /* ufunc.at(a, indices[, b]) */ - npy_intp nargs = PyTuple_GET_SIZE(args); - - if (nargs < 2 || nargs > 3) { - PyErr_Format(PyExc_TypeError, - "ufunc.at() takes from 2 to 3 positional " - "arguments but %"NPY_INTP_FMT" were given", nargs); - return -1; - } - *normal_args = PyTuple_GetSlice(args, 0, nargs); - return (*normal_args == NULL); -} - /* * Check a set of args for the `__array_ufunc__` method. If more than one of * the input arguments implements `__array_ufunc__`, they are tried in the @@ -460,31 +210,26 @@ normalize_at_args(PyUFuncObject *ufunc, PyObject *args, */ NPY_NO_EXPORT int PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, - PyObject *args, PyObject *kwds, - PyObject **result) + PyObject *in_args, PyObject *out_args, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, + PyObject **result) { - int i; - int j; int status; int num_override_args; PyObject *with_override[NPY_MAXARGS]; PyObject *array_ufunc_methods[NPY_MAXARGS]; - PyObject *out; - PyObject *method_name = NULL; - PyObject *normal_args = NULL; /* normal_* holds normalized arguments. */ PyObject *normal_kwds = NULL; PyObject *override_args = NULL; - Py_ssize_t len; /* * Check inputs for overrides */ num_override_args = get_array_ufunc_overrides( - args, kwds, with_override, array_ufunc_methods); + in_args, out_args, with_override, array_ufunc_methods); if (num_override_args == -1) { goto fail; } @@ -495,104 +240,58 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, } /* - * Normalize ufunc arguments. + * Normalize ufunc arguments, note that args does not hold any positional + * arguments. 
(len_args is 0) */ - - /* Build new kwds */ - if (kwds && PyDict_CheckExact(kwds)) { - - /* ensure out is always a tuple */ - normal_kwds = PyDict_Copy(kwds); - out = _PyDict_GetItemStringWithError(normal_kwds, "out"); - if (out == NULL && PyErr_Occurred()) { - goto fail; - } - else if (out) { - int nout = ufunc->nout; - - if (PyTuple_CheckExact(out)) { - int all_none = 1; - - if (PyTuple_GET_SIZE(out) != nout) { - PyErr_Format(PyExc_ValueError, - "The 'out' tuple must have exactly " - "%d entries: one per ufunc output", nout); - goto fail; - } - for (i = 0; i < PyTuple_GET_SIZE(out); i++) { - all_none = (PyTuple_GET_ITEM(out, i) == Py_None); - if (!all_none) { - break; - } - } - if (all_none) { - PyDict_DelItemString(normal_kwds, "out"); - } - } - else { - /* not a tuple */ - if (nout > 1) { - PyErr_SetString(PyExc_TypeError, - "'out' must be a tuple of arguments"); - goto fail; - } - if (out != Py_None) { - /* not already a tuple and not None */ - PyObject *out_tuple = PyTuple_New(1); - - if (out_tuple == NULL) { - goto fail; - } - /* out was borrowed ref; make it permanent */ - Py_INCREF(out); - /* steals reference */ - PyTuple_SET_ITEM(out_tuple, 0, out); - PyDict_SetItemString(normal_kwds, "out", out_tuple); - Py_DECREF(out_tuple); - } - else { - /* out=None; remove it */ - PyDict_DelItemString(normal_kwds, "out"); - } - } - } - } - else { - normal_kwds = PyDict_New(); - } + normal_kwds = PyDict_New(); if (normal_kwds == NULL) { goto fail; } + if (initialize_normal_kwds(out_args, + args, len_args, kwnames, normal_kwds) < 0) { + goto fail; + } - /* decide what to do based on the method. */ + /* + * Reduce-like methods can pass keyword arguments also by position, + * in which case the additional positional arguments have to be copied + * into the keyword argument dictionary. The __call__ method has to + * normalize sig and signature away. 
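As a concrete illustration of this normalization, an `__array_ufunc__` implementation receives 'signature' even when the caller spelled it 'sig'. Logger below is a hypothetical helper written only for this sketch, and the expected output assumes the normalization shown above:

>>> import numpy as np
>>> class Logger:
...     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
...         return (method, sorted(kwargs))
...
>>> np.add(Logger(), 1, sig='ll->l')
('__call__', ['signature'])
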
+ */ /* ufunc.__call__ */ if (strcmp(method, "__call__") == 0) { - status = normalize___call___args(ufunc, args, &normal_args, - &normal_kwds); + status = normalize_signature_keyword(normal_kwds); } /* ufunc.reduce */ else if (strcmp(method, "reduce") == 0) { - status = normalize_reduce_args(ufunc, args, &normal_args, - &normal_kwds); + static const char *keywords[] = { + NULL, "axis", "dtype", NULL, "keepdims", + "initial", "where"}; + status = copy_positional_args_to_kwargs(keywords, + args, len_args, normal_kwds); } /* ufunc.accumulate */ else if (strcmp(method, "accumulate") == 0) { - status = normalize_accumulate_args(ufunc, args, &normal_args, - &normal_kwds); + static const char *keywords[] = { + NULL, "axis", "dtype", NULL}; + status = copy_positional_args_to_kwargs(keywords, + args, len_args, normal_kwds); } /* ufunc.reduceat */ else if (strcmp(method, "reduceat") == 0) { - status = normalize_reduceat_args(ufunc, args, &normal_args, - &normal_kwds); + static const char *keywords[] = { + NULL, NULL, "axis", "dtype", NULL}; + status = copy_positional_args_to_kwargs(keywords, + args, len_args, normal_kwds); } - /* ufunc.outer */ + /* ufunc.outer (identical to call) */ else if (strcmp(method, "outer") == 0) { - status = normalize_outer_args(ufunc, args, &normal_args, &normal_kwds); + status = normalize_signature_keyword(normal_kwds); } /* ufunc.at */ else if (strcmp(method, "at") == 0) { - status = normalize_at_args(ufunc, args, &normal_args, &normal_kwds); + status = 0; } /* unknown method */ else { @@ -610,7 +309,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, goto fail; } - len = PyTuple_GET_SIZE(normal_args); + int len = (int)PyTuple_GET_SIZE(in_args); /* Call __array_ufunc__ functions in correct order */ while (1) { @@ -621,14 +320,14 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, *result = NULL; /* Choose an overriding argument */ - for (i = 0; i < num_override_args; i++) { + for (int i = 0; i < num_override_args; i++) { override_obj = with_override[i]; if (override_obj == NULL) { continue; } /* Check for sub-types to the right of obj. 
*/ - for (j = i + 1; j < num_override_args; j++) { + for (int j = i + 1; j < num_override_args; j++) { PyObject *other_obj = with_override[j]; if (other_obj != NULL && Py_TYPE(other_obj) != Py_TYPE(override_obj) && @@ -662,8 +361,8 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, PyTuple_SET_ITEM(override_args, 1, (PyObject *)ufunc); Py_INCREF(method_name); PyTuple_SET_ITEM(override_args, 2, method_name); - for (i = 0; i < len; i++) { - PyObject *item = PyTuple_GET_ITEM(normal_args, i); + for (int i = 0; i < len; i++) { + PyObject *item = PyTuple_GET_ITEM(in_args, i); Py_INCREF(item); PyTuple_SET_ITEM(override_args, i + 3, item); @@ -724,11 +423,10 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, fail: status = -1; cleanup: - for (i = 0; i < num_override_args; i++) { + for (int i = 0; i < num_override_args; i++) { Py_XDECREF(with_override[i]); Py_XDECREF(array_ufunc_methods[i]); } - Py_XDECREF(normal_args); Py_XDECREF(method_name); Py_XDECREF(normal_kwds); return status; diff --git a/numpy/core/src/umath/override.h b/numpy/core/src/umath/override.h index 68f3c6ef0814..4e9a323ca629 100644 --- a/numpy/core/src/umath/override.h +++ b/numpy/core/src/umath/override.h @@ -6,6 +6,9 @@ NPY_NO_EXPORT int PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, - PyObject *args, PyObject *kwds, - PyObject **result); + PyObject *in_args, PyObject *out_args, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, + PyObject **result); + + #endif diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index d70d15c50301..98e97a112a59 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -28,10 +28,11 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" +#include "stddef.h" #include "npy_config.h" - #include "npy_pycompat.h" +#include "npy_argparse.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" @@ -95,6 +96,13 @@ _get_wrap_prepare_args(ufunc_full_args full_args) { /* ---------------------------------------------------------------- */ +static PyObject * +ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, + PyArrayObject **mps_in); + +static PyObject * +prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc); + static int _does_loop_use_arrays(void *data); @@ -270,7 +278,7 @@ _get_output_array_method(PyObject *obj, PyObject *method, */ static void _find_array_prepare(ufunc_full_args args, - PyObject **output_prep, int nin, int nout) + PyObject **output_prep, int nout) { int i; PyObject *prep; @@ -397,27 +405,19 @@ _ufunc_setup_flags(PyUFuncObject *ufunc, npy_uint32 op_in_flags, * A NULL is placed in output_wrap for outputs that * should just have PyArray_Return called. 
*/ -static int -_find_array_wrap(ufunc_full_args args, PyObject *kwds, - PyObject **output_wrap, int nin, int nout) +static void +_find_array_wrap(ufunc_full_args args, npy_bool subok, + PyObject **output_wrap, int nin, int nout) { int i; - PyObject *obj; PyObject *wrap = NULL; /* * If a 'subok' parameter is passed and isn't True, don't wrap but put None * into slots with out arguments which means return the out argument */ - if (kwds != NULL) { - obj = PyDict_GetItemWithError(kwds, npy_um_str_subok); - if (obj == NULL && PyErr_Occurred()) { - return -1; - } - else if (obj != NULL && obj != Py_True) { - /* skip search for wrap members */ - goto handle_out; - } + if (!subok) { + goto handle_out; } /* @@ -453,7 +453,6 @@ _find_array_wrap(ufunc_full_args args, PyObject *kwds, } Py_XDECREF(wrap); - return 0; } @@ -849,103 +848,12 @@ ufunc_get_name_cstr(PyUFuncObject *ufunc) { return ufunc->name ? ufunc->name : ""; } -/* - * Helpers for keyword parsing - */ - -/* - * Find key in a list of pointers to keyword names. - * The list should end with NULL. - * - * Returns either the index into the list (pointing to the final key with NULL - * if no match was found), or -1 on failure. - */ -static npy_intp -locate_key(PyObject **kwnames, PyObject *key) -{ - PyObject **kwname = kwnames; - while (*kwname != NULL && *kwname != key) { - kwname++; - } - /* Slow fallback, just in case */ - if (*kwname == NULL) { - int cmp = 0; - kwname = kwnames; - while (*kwname != NULL && - (cmp = PyObject_RichCompareBool(key, *kwname, - Py_EQ)) == 0) { - kwname++; - } - if (cmp < 0) { - return -1; - } - } - return kwname - kwnames; -} - -/* - * Parse keyword arguments, matching against kwnames - * - * Arguments beyond kwnames (the va_list) should contain converters and outputs - * for each keyword name (where an output can be NULL to indicate the particular - * keyword should be ignored). - * - * Returns 0 on success, -1 on failure with an error set. - * - * Note that the parser does not clean up on failure, i.e., already parsed keyword - * values may hold new references, which the caller has to remove. - * - * TODO: ufunc is only used for the name in error messages; passing on the - * name instead might be an option. - * - * TODO: instead of having this function ignore of keywords for which the - * corresponding output is NULL, the calling routine should prepare the - * correct list. - */ -static int -parse_ufunc_keywords(PyUFuncObject *ufunc, PyObject *kwds, PyObject **kwnames, ...) -{ - va_list va; - PyObject *key, *value; - Py_ssize_t pos = 0; - typedef int converter(PyObject *, void *); - - while (PyDict_Next(kwds, &pos, &key, &value)) { - npy_intp i; - converter *convert; - void *output = NULL; - npy_intp index = locate_key(kwnames, key); - if (index < 0) { - return -1; - } - if (kwnames[index]) { - va_start(va, kwnames); - for (i = 0; i <= index; i++) { - convert = va_arg(va, converter *); - output = va_arg(va, void *); - } - va_end(va); - } - if (output) { - if (!convert(value, output)) { - return -1; - } - } - else { - PyErr_Format(PyExc_TypeError, - "'%S' is an invalid keyword to ufunc '%s'", - key, ufunc_get_name_cstr(ufunc)); - return -1; - } - } - return 0; -} /* * Converters for use in parsing of keywords arguments. 
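The subok flag that _find_array_wrap now receives directly controls whether outputs are wrapped back into the input's subclass. A short sketch of the user-visible effect (MyArray is a hypothetical subclass defined only for this example, assumed to live in an interactive __main__ session):

>>> import numpy as np
>>> class MyArray(np.ndarray):
...     pass
...
>>> m = np.arange(3).view(MyArray)
>>> type(np.add(m, 1))
<class '__main__.MyArray'>
>>> type(np.add(m, 1, subok=False))
<class 'numpy.ndarray'>
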
*/ NPY_NO_EXPORT int -_subok_converter(PyObject *obj, int *subok) +_subok_converter(PyObject *obj, npy_bool *subok) { if (PyBool_Check(obj)) { *subok = (obj == Py_True); @@ -1012,59 +920,33 @@ _borrowed_reference(PyObject *obj, PyObject **out) } /* - * Parses the positional and keyword arguments for a generic ufunc call. - * All returned arguments are new references (with optional ones NULL - * if not present) + * Due to the array override, do the actual parameter conversion + * only in this step. This function takes the reference objects and + * parses them into the desired values. + * This function cleans up after itself and NULLs references on error, + * the caller has to ensure that out_op[0:nargs] is NULLed. */ static int -get_ufunc_arguments(PyUFuncObject *ufunc, - PyObject *args, PyObject *kwds, - PyArrayObject **out_op, - NPY_ORDER *out_order, - NPY_CASTING *out_casting, - PyObject **out_extobj, - PyObject **out_typetup, /* type: Tuple[np.dtype] */ - int *out_subok, /* bool */ - PyArrayObject **out_wheremask, /* PyArray of bool */ - PyObject **out_axes, /* type: List[Tuple[T]] */ - PyObject **out_axis, /* type: T */ - int *out_keepdims) /* bool */ +convert_ufunc_arguments(PyUFuncObject *ufunc, + ufunc_full_args full_args, PyArrayObject **out_op, + PyObject *order_obj, NPY_ORDER *out_order, + PyObject *casting_obj, NPY_CASTING *out_casting, + PyObject *subok_obj, npy_bool *out_subok, + PyObject *where_obj, PyArrayObject **out_wheremask, /* PyArray of bool */ + PyObject *keepdims_obj, int *out_keepdims) { - int i, nargs; int nin = ufunc->nin; int nout = ufunc->nout; int nop = ufunc->nargs; PyObject *obj; - PyArray_Descr *dtype = NULL; - /* - * Initialize output objects so caller knows when outputs and optional - * arguments are set (also means we can safely XDECREF on failure). - */ - for (i = 0; i < nop; i++) { - out_op[i] = NULL; - } - *out_extobj = NULL; - *out_typetup = NULL; - if (out_axes != NULL) { - *out_axes = NULL; - } - if (out_axis != NULL) { - *out_axis = NULL; - } + if (out_wheremask != NULL) { *out_wheremask = NULL; } - /* Check number of arguments */ - nargs = PyTuple_Size(args); - if ((nargs < nin) || (nargs > nop)) { - PyErr_SetString(PyExc_ValueError, "invalid number of arguments"); - return -1; - } - - /* Get input arguments */ - for (i = 0; i < nin; ++i) { - obj = PyTuple_GET_ITEM(args, i); + /* Convert and fill in input arguments */ + for (int i = 0; i < nin; i++) { + obj = PyTuple_GET_ITEM(full_args.in, i); if (PyArray_Check(obj)) { PyArrayObject *obj_a = (PyArrayObject *)obj; @@ -1080,148 +962,43 @@ get_ufunc_arguments(PyUFuncObject *ufunc, } } - /* Get positional output arguments */ - for (i = nin; i < nargs; ++i) { - obj = PyTuple_GET_ITEM(args, i); - if (_set_out_array(obj, out_op + i) < 0) { - goto fail; + /* Convert and fill in output arguments */ + if (full_args.out != NULL) { + for (int i = 0; i < nout; i++) { + obj = PyTuple_GET_ITEM(full_args.out, i); + if (_set_out_array(obj, out_op + i + nin) < 0) { + goto fail; + } } } /* - * If keywords are present, get keyword output and other arguments. - * Raise an error if anything else is present in the keyword dictionary. + * Convert most arguments manually here, since it is easier to handle + * the ufunc override if we first parse only to objects. 
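Because conversion is deferred until after the override check, an `__array_ufunc__` implementation sees keyword values exactly as passed, before any validation. A sketch of that behaviour under the assumption of the deferred conversion described above (Logger is again a hypothetical helper):

>>> import numpy as np
>>> class Logger:
...     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
...         return kwargs
...
>>> np.add(Logger(), 1, casting='not-a-real-casting-mode')
{'casting': 'not-a-real-casting-mode'}
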
*/ - if (kwds) { - PyObject *out_kwd = NULL; - PyObject *sig = NULL; - static PyObject *kwnames[13] = {NULL}; - if (kwnames[0] == NULL) { - kwnames[0] = npy_um_str_out; - kwnames[1] = npy_um_str_where; - kwnames[2] = npy_um_str_axes; - kwnames[3] = npy_um_str_axis; - kwnames[4] = npy_um_str_keepdims; - kwnames[5] = npy_um_str_casting; - kwnames[6] = npy_um_str_order; - kwnames[7] = npy_um_str_dtype; - kwnames[8] = npy_um_str_subok; - kwnames[9] = npy_um_str_signature; - kwnames[10] = npy_um_str_sig; - kwnames[11] = npy_um_str_extobj; - kwnames[12] = NULL; /* sentinel */ - } - /* - * Parse using converters to calculate outputs - * (NULL outputs are treated as indicating a keyword is not allowed). - */ - if (parse_ufunc_keywords( - ufunc, kwds, kwnames, - _borrowed_reference, &out_kwd, - _wheremask_converter, out_wheremask, /* new reference */ - _new_reference, out_axes, - _new_reference, out_axis, - _keepdims_converter, out_keepdims, - PyArray_CastingConverter, out_casting, - PyArray_OrderConverter, out_order, - PyArray_DescrConverter2, &dtype, /* new reference */ - _subok_converter, out_subok, - _new_reference, out_typetup, - _borrowed_reference, &sig, - _new_reference, out_extobj) < 0) { - goto fail; - } - /* - * Check that outputs were not passed as positional as well, - * and that they are either None or an array. - */ - if (out_kwd) { /* borrowed reference */ - /* - * Output arrays are generally specified as a tuple of arrays - * and None, but may be a single array or None for ufuncs - * with a single output. - */ - if (nargs > nin) { - PyErr_SetString(PyExc_ValueError, - "cannot specify 'out' as both a " - "positional and keyword argument"); - goto fail; - } - if (PyTuple_CheckExact(out_kwd)) { - if (PyTuple_GET_SIZE(out_kwd) != nout) { - PyErr_SetString(PyExc_ValueError, - "The 'out' tuple must have exactly " - "one entry per ufunc output"); - goto fail; - } - /* 'out' must be a tuple of arrays and Nones */ - for(i = 0; i < nout; ++i) { - PyObject *val = PyTuple_GET_ITEM(out_kwd, i); - if (_set_out_array(val, out_op+nin+i) < 0) { - goto fail; - } - } - } - else if (nout == 1) { - /* Can be an array if it only has one output */ - if (_set_out_array(out_kwd, out_op + nin) < 0) { - goto fail; - } - } - else { - PyErr_SetString(PyExc_TypeError, - nout > 1 ? "'out' must be a tuple of arrays" : - "'out' must be an array or a tuple with " - "a single array"); - goto fail; - } - } - /* - * Check we did not get both axis and axes, or multiple ways - * to define a signature. 
- */ - if (out_axes != NULL && out_axis != NULL && - *out_axes != NULL && *out_axis != NULL) { - PyErr_SetString(PyExc_TypeError, - "cannot specify both 'axis' and 'axes'"); - goto fail; - } - if (sig) { /* borrowed reference */ - if (*out_typetup != NULL) { - PyErr_SetString(PyExc_ValueError, - "cannot specify both 'sig' and 'signature'"); - goto fail; - } - Py_INCREF(sig); - *out_typetup = sig; - } - if (dtype) { /* new reference */ - if (*out_typetup != NULL) { - PyErr_SetString(PyExc_RuntimeError, - "cannot specify both 'signature' and 'dtype'"); - goto fail; - } - /* Note: "N" uses the reference */ - *out_typetup = Py_BuildValue("(N)", dtype); - } + if (where_obj && !_wheremask_converter(where_obj, out_wheremask)) { + goto fail; + } + if (keepdims_obj && !_keepdims_converter(keepdims_obj, out_keepdims)) { + goto fail; + } + if (casting_obj && !PyArray_CastingConverter(casting_obj, out_casting)) { + goto fail; + } + if (order_obj && !PyArray_OrderConverter(order_obj, out_order)) { + goto fail; + } + if (subok_obj && !_subok_converter(subok_obj, out_subok)) { + goto fail; } return 0; fail: - Py_XDECREF(dtype); - Py_XDECREF(*out_typetup); - Py_XDECREF(*out_extobj); if (out_wheremask != NULL) { - Py_XDECREF(*out_wheremask); - } - if (out_axes != NULL) { - Py_XDECREF(*out_axes); + Py_XSETREF(*out_wheremask, NULL); } - if (out_axis != NULL) { - Py_XDECREF(*out_axis); - } - for (i = 0; i < nop; i++) { - Py_XDECREF(out_op[i]); + for (int i = 0; i < nop; i++) { + Py_XSETREF(out_op[i], NULL); } return -1; } @@ -1882,122 +1659,6 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc, return NpyIter_Deallocate(iter); } -static npy_bool -tuple_all_none(PyObject *tup) { - npy_intp i; - for (i = 0; i < PyTuple_GET_SIZE(tup); ++i) { - if (PyTuple_GET_ITEM(tup, i) != Py_None) { - return NPY_FALSE; - } - } - return NPY_TRUE; -} - -/* - * Convert positional args and the out kwarg into an input and output tuple. - * - * If the output tuple would be all None, return NULL instead. - * - * This duplicates logic in many places, so further refactoring is needed: - * - get_ufunc_arguments - * - PyUFunc_WithOverride - * - normalize___call___args - */ -static int -make_full_arg_tuple( - ufunc_full_args *full_args, - npy_intp nin, npy_intp nout, - PyObject *args, PyObject *kwds) -{ - PyObject *out_kwd = NULL; - npy_intp nargs = PyTuple_GET_SIZE(args); - npy_intp i; - - /* This should have been checked by the caller */ - assert(nin <= nargs && nargs <= nin + nout); - - /* Initialize so we can XDECREF safely */ - full_args->in = NULL; - full_args->out = NULL; - - /* Get the input arguments*/ - full_args->in = PyTuple_GetSlice(args, 0, nin); - if (full_args->in == NULL) { - goto fail; - } - - /* Look for output keyword arguments */ - if (kwds) { - out_kwd = PyDict_GetItemWithError(kwds, npy_um_str_out); - if (out_kwd == NULL && PyErr_Occurred()) { - goto fail; - } - } - else { - out_kwd = NULL; - } - - if (out_kwd != NULL) { - assert(nargs == nin); - if (out_kwd == Py_None) { - return 0; - } - else if (PyTuple_Check(out_kwd)) { - assert(PyTuple_GET_SIZE(out_kwd) == nout); - if (tuple_all_none(out_kwd)) { - return 0; - } - Py_INCREF(out_kwd); - full_args->out = out_kwd; - return 0; - } - else { - /* A single argument x is promoted to (x, None, None ...) 
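The out handling above boils down to the familiar rule that multi-output ufuncs take a tuple with one entry per output. Illustrative only; reprs as in NumPy of this era:

>>> import numpy as np
>>> x = np.array([1.5, 2.25])
>>> frac, whole = np.empty_like(x), np.empty_like(x)
>>> np.modf(x, out=(frac, whole))
(array([0.5 , 0.25]), array([1., 2.]))
>>> frac
array([0.5 , 0.25])
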
*/ - full_args->out = PyTuple_New(nout); - if (full_args->out == NULL) { - goto fail; - } - Py_INCREF(out_kwd); - PyTuple_SET_ITEM(full_args->out, 0, out_kwd); - for (i = 1; i < nout; ++i) { - Py_INCREF(Py_None); - PyTuple_SET_ITEM(full_args->out, i, Py_None); - } - return 0; - } - } - - /* No outputs in kwargs; if also none in args, we're done */ - if (nargs == nin) { - return 0; - } - /* copy across positional output arguments, adding trailing Nones */ - full_args->out = PyTuple_New(nout); - if (full_args->out == NULL) { - goto fail; - } - for (i = nin; i < nargs; ++i) { - PyObject *item = PyTuple_GET_ITEM(args, i); - Py_INCREF(item); - PyTuple_SET_ITEM(full_args->out, i - nin, item); - } - for (i = nargs; i < nin + nout; ++i) { - Py_INCREF(Py_None); - PyTuple_SET_ITEM(full_args->out, i - nin, Py_None); - } - - /* don't return a tuple full of None */ - if (tuple_all_none(full_args->out)) { - Py_DECREF(full_args->out); - full_args->out = NULL; - } - return 0; - -fail: - Py_XDECREF(full_args->in); - Py_XDECREF(full_args->out); - return -1; -} /* * Validate that operands have enough dimensions, accounting for @@ -2487,14 +2148,15 @@ _initialize_variable_parts(PyUFuncObject *ufunc, } static int -PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, - PyObject *args, PyObject *kwds, - PyArrayObject **op) +PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op, + ufunc_full_args full_args, PyObject *type_tup, PyObject *extobj, + NPY_CASTING casting, NPY_ORDER order, npy_bool subok, + PyObject *axis, PyObject *axes, int keepdims) { int nin, nout; int i, j, idim, nop; const char *ufunc_name; - int retval, subok = 1; + int retval; int needs_api = 0; PyArray_Descr *dtypes[NPY_MAXARGS]; @@ -2531,20 +2193,6 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, int **remap_axis = NULL; /* The __array_prepare__ function to call for each output */ PyObject *arr_prep[NPY_MAXARGS]; - /* The separated input and output arguments, parsed from args and kwds */ - ufunc_full_args full_args = {NULL, NULL}; - - NPY_ORDER order = NPY_KEEPORDER; - /* Use the default assignment casting rule */ - NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING; - /* other possible keyword arguments */ - PyObject *extobj, *type_tup, *axes, *axis; - int keepdims = -1; - - if (ufunc == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return -1; - } nin = ufunc->nin; nout = ufunc->nout; @@ -2566,18 +2214,6 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, goto fail; } - NPY_UF_DBG_PRINT("Getting arguments\n"); - - /* - * Get all the arguments. - */ - retval = get_ufunc_arguments(ufunc, args, kwds, - op, &order, &casting, &extobj, - &type_tup, &subok, NULL, &axes, &axis, &keepdims); - if (retval < 0) { - NPY_UF_DBG_PRINT("Failure in getting arguments\n"); - return retval; - } /* * If keepdims was passed in (and thus changed from the initial value * on top), check the gufunc is suitable, i.e., that its inputs share @@ -2637,6 +2273,8 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, /* Possibly remap axes. 
*/ if (axes != NULL || axis != NULL) { + assert((axes == NULL) || (axis == NULL)); + remap_axis = PyArray_malloc(sizeof(remap_axis[0]) * nop); remap_axis_memory = PyArray_malloc(sizeof(remap_axis_memory[0]) * nop * NPY_MAXDIMS); @@ -2814,15 +2452,11 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, #endif if (subok) { - if (make_full_arg_tuple(&full_args, nin, nout, args, kwds) < 0) { - goto fail; - } - /* * Get the appropriate __array_prepare__ function to call * for each output */ - _find_array_prepare(full_args, arr_prep, nin, nout); + _find_array_prepare(full_args, arr_prep, nout); } /* If the loop wants the arrays, provide them */ @@ -3036,12 +2670,6 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, Py_XDECREF(dtypes[i]); Py_XDECREF(arr_prep[i]); } - Py_XDECREF(type_tup); - Py_XDECREF(extobj); - Py_XDECREF(axes); - Py_XDECREF(axis); - Py_XDECREF(full_args.in); - Py_XDECREF(full_args.out); PyArray_free(remap_axis_memory); PyArray_free(remap_axis); @@ -3054,36 +2682,25 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, PyArray_free(inner_strides); NpyIter_Deallocate(iter); for (i = 0; i < nop; ++i) { - Py_XDECREF(op[i]); - op[i] = NULL; Py_XDECREF(dtypes[i]); Py_XDECREF(arr_prep[i]); } - Py_XDECREF(type_tup); - Py_XDECREF(extobj); - Py_XDECREF(axes); - Py_XDECREF(axis); - Py_XDECREF(full_args.in); - Py_XDECREF(full_args.out); PyArray_free(remap_axis_memory); PyArray_free(remap_axis); return retval; } -/* - * This generic function is called with the ufunc object, the arguments to it, - * and an array of (pointers to) PyArrayObjects which are NULL. - * - * 'op' is an array of at least NPY_MAXARGS PyArrayObject *. - */ + static int -PyUFunc_GenericFunction_int(PyUFuncObject *ufunc, - PyObject *args, PyObject *kwds, PyArrayObject **op) +PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op, + ufunc_full_args full_args, PyObject *type_tup, PyObject *extobj, + NPY_CASTING casting, NPY_ORDER order, npy_bool subok, + PyArrayObject *wheremask) { int nin, nout; int i, nop; const char *ufunc_name; - int retval = -1, subok = 1; + int retval = -1; npy_uint32 op_flags[NPY_MAXARGS]; npy_intp default_op_out_flags; @@ -3092,32 +2709,11 @@ PyUFunc_GenericFunction_int(PyUFuncObject *ufunc, /* These parameters come from extobj= or from a TLS global */ int buffersize = 0, errormask = 0; - /* The mask provided in the 'where=' parameter */ - PyArrayObject *wheremask = NULL; - /* The __array_prepare__ function to call for each output */ PyObject *arr_prep[NPY_MAXARGS]; - /* - * This is either args, or args with the out= parameter from - * kwds added appropriately. 
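The axis/axes remapping exercised in this hunk is what backs generalized-ufunc calls such as the following (np.matmul is used here only as a convenient gufunc; the explicit axes simply restate its defaults):

>>> import numpy as np
>>> a = np.random.rand(4, 2, 3)
>>> b = np.random.rand(4, 3, 5)
>>> np.matmul(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)]).shape
(4, 2, 5)
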
- */ - ufunc_full_args full_args = {NULL, NULL}; int trivial_loop_ok = 0; - NPY_ORDER order = NPY_KEEPORDER; - /* Use the default assignment casting rule */ - NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING; - PyObject *extobj, *type_tup; - - if (ufunc == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return -1; - } - - if (ufunc->core_enabled) { - return PyUFunc_GeneralizedFunction(ufunc, args, kwds, op); - } nin = ufunc->nin; nout = ufunc->nout; @@ -3133,17 +2729,6 @@ PyUFunc_GenericFunction_int(PyUFuncObject *ufunc, arr_prep[i] = NULL; } - NPY_UF_DBG_PRINT("Getting arguments\n"); - - /* Get all the arguments */ - retval = get_ufunc_arguments(ufunc, args, kwds, - op, &order, &casting, &extobj, - &type_tup, &subok, &wheremask, NULL, NULL, NULL); - if (retval < 0) { - NPY_UF_DBG_PRINT("Failure in getting arguments\n"); - return retval; - } - /* Get the buffersize and errormask */ if (_get_bufsize_errmask(extobj, ufunc_name, &buffersize, &errormask) < 0) { retval = -1; @@ -3189,17 +2774,13 @@ PyUFunc_GenericFunction_int(PyUFuncObject *ufunc, #endif if (subok) { - if (make_full_arg_tuple(&full_args, nin, nout, args, kwds) < 0) { - goto fail; - } /* * Get the appropriate __array_prepare__ function to call * for each output */ - _find_array_prepare(full_args, arr_prep, nin, nout); + _find_array_prepare(full_args, arr_prep, nout); } - /* Do the ufunc loop */ if (wheremask != NULL) { NPY_UF_DBG_PRINT("Executing fancy inner loop\n"); @@ -3261,11 +2842,6 @@ PyUFunc_GenericFunction_int(PyUFuncObject *ufunc, Py_XDECREF(dtypes[i]); Py_XDECREF(arr_prep[i]); } - Py_XDECREF(type_tup); - Py_XDECREF(extobj); - Py_XDECREF(full_args.in); - Py_XDECREF(full_args.out); - Py_XDECREF(wheremask); NPY_UF_DBG_PRINT("Returning success code 0\n"); @@ -3274,38 +2850,52 @@ PyUFunc_GenericFunction_int(PyUFuncObject *ufunc, fail: NPY_UF_DBG_PRINT1("Returning failure code %d\n", retval); for (i = 0; i < nop; ++i) { - Py_XDECREF(op[i]); - op[i] = NULL; Py_XDECREF(dtypes[i]); Py_XDECREF(arr_prep[i]); } - Py_XDECREF(type_tup); - Py_XDECREF(extobj); - Py_XDECREF(full_args.in); - Py_XDECREF(full_args.out); - Py_XDECREF(wheremask); return retval; } -/*UFUNC_API*/ +/*UFUNC_API + * This generic function is called with the ufunc object, the arguments to it, + * and an array of (pointers to) PyArrayObjects which are NULL. + * + * 'op' is an array of at least NPY_MAXARGS PyArrayObject *. + */ NPY_NO_EXPORT int PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, PyArrayObject **op) { /* NumPy 1.19, 2020-01-24 */ if (DEPRECATE( - "PyUFunc_GenericFunction() C-API function is deprecated " - "and expected to be removed rapidly. If you are using it (i.e. see " - "this warning/error), please notify the NumPy developers. " - "As of now it is expected that any use case is served better by " - "the direct use of `PyObject_Call(ufunc, args, kwargs)`. " - "PyUFunc_GenericFunction function has slightly different " - "untested behaviour.") < 0) { + "PyUFunc_GenericFunction() C-API function is deprecated " + "and expected to be removed rapidly. If you are using it (i.e. see " + "this warning/error), please notify the NumPy developers. " + "As of now it is expected that any use case is served better by " + "the direct use of `PyObject_Call(ufunc, args, kwargs)`. 
" + "PyUFunc_GenericFunction function has slightly different " + "untested behaviour.") < 0) { + return -1; + } + if (ufunc == NULL) { + PyErr_SetString(PyExc_ValueError, "function not supported"); return -1; } - return PyUFunc_GenericFunction_int(ufunc, args, kwds, op); + if (op == NULL) { + PyErr_SetString(PyExc_ValueError, + "PyUFunc_GenericFunction() op must not be NULL."); + return -1; + } + + PyObject *res = ufunc_generic_call_mps(ufunc, args, kwds, op); + if (res == NULL) { + return -1; + } + assert(res == Py_None); + Py_DECREF(res); + return 0; } @@ -3621,13 +3211,9 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, const char *ufunc_name = ufunc_get_name_cstr(ufunc); /* These parameters come from a TLS global */ int buffersize = 0, errormask = 0; - static PyObject *NoValue = NULL; NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s.reduce\n", ufunc_name); - npy_cache_import("numpy", "_NoValue", &NoValue); - if (NoValue == NULL) return NULL; - ndim = PyArray_NDIM(arr); /* Create an array of flags for reduction */ @@ -3653,7 +3239,7 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, } /* Get the initial value */ - if (initial == NULL || initial == NoValue) { + if (initial == NULL) { initial = identity; /* @@ -4432,32 +4018,101 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, } +static npy_bool +tuple_all_none(PyObject *tup) { + npy_intp i; + for (i = 0; i < PyTuple_GET_SIZE(tup); ++i) { + if (PyTuple_GET_ITEM(tup, i) != Py_None) { + return NPY_FALSE; + } + } + return NPY_TRUE; +} + + +static int +_set_full_args_out(int nout, PyObject *out_obj, ufunc_full_args *full_args) +{ + if (PyTuple_CheckExact(out_obj)) { + if (PyTuple_GET_SIZE(out_obj) != nout) { + // TODO: Was ValueError (and error order changed) + PyErr_SetString(PyExc_TypeError, + "The 'out' tuple must have exactly " + "one entry per ufunc output"); + return -1; + } + if (tuple_all_none(out_obj)) { + return 0; + } + else { + Py_INCREF(out_obj); + full_args->out = out_obj; + } + } + else if (nout == 1) { + if (out_obj == Py_None) { + return 0; + } + /* Can be an array if it only has one output */ + full_args->out = PyTuple_Pack(1, out_obj); + if (full_args->out == NULL) { + return -1; + } + } + else { + PyErr_SetString(PyExc_TypeError, + nout > 1 ? "'out' must be a tuple of arrays" : + "'out' must be an array or a tuple with " + "a single array"); + return -1; + } + return 0; +} + + +/* + * Parser function which replaces np._NoValue with NULL. 
+ */ +static int +_not_NoValue(PyObject *obj, PyObject **out) +{ + static PyObject *NoValue = NULL; + npy_cache_import("numpy", "_NoValue", &NoValue); + if (NoValue == NULL) { + return 0; + } + if (obj == NoValue) { + *out = NULL; + } + else { + *out = obj; + } + return 1; +} + /* * This code handles reduce, reduceat, and accumulate * (accumulate and reduce are special cases of the more general reduceat * but they are handled separately for speed) */ static PyObject * -PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, - PyObject *kwds, int operation) +PyUFunc_GenericReduction(PyUFuncObject *ufunc, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, int operation) { int i, naxes=0, ndim; int axes[NPY_MAXDIMS]; + + ufunc_full_args full_args = {NULL, NULL}; PyObject *axes_in = NULL; PyArrayObject *mp = NULL, *wheremask = NULL, *ret = NULL; - PyObject *op; - PyObject *obj_ind; + PyObject *op = NULL; PyArrayObject *indices = NULL; PyArray_Descr *otype = NULL; PyArrayObject *out = NULL; int keepdims = 0; PyObject *initial = NULL; - static char *reduce_kwlist[] = { - "array", "axis", "dtype", "out", "keepdims", "initial", "where", NULL}; - static char *accumulate_kwlist[] = { - "array", "axis", "dtype", "out", NULL}; - static char *reduceat_kwlist[] = { - "array", "indices", "axis", "dtype", "out", NULL}; + npy_bool out_is_passed_by_position; + static char *_reduce_type[] = {"reduce", "accumulate", "reduceat", NULL}; @@ -4483,62 +4138,130 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, _reduce_type[operation]); return NULL; } - /* if there is a tuple of 1 for `out` in kwds, unpack it */ - if (kwds != NULL) { - PyObject *out_obj = PyDict_GetItemWithError(kwds, npy_um_str_out); - if (out_obj == NULL && PyErr_Occurred()){ - return NULL; - } - else if (out_obj != NULL && PyTuple_CheckExact(out_obj)) { - if (PyTuple_GET_SIZE(out_obj) != 1) { - PyErr_SetString(PyExc_ValueError, - "The 'out' tuple must have exactly one entry"); - return NULL; - } - out_obj = PyTuple_GET_ITEM(out_obj, 0); - PyDict_SetItem(kwds, npy_um_str_out, out_obj); - } - } + /* + * Perform argument parsing, but start by only extracting. This is + * just to preserve the behaviour that __array_ufunc__ did not perform + * any checks on arguments, and we could change this or change it for + * certain parameters. 
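The _not_NoValue converter above keeps np._NoValue meaning "no initial value was given", so reduce only uses initial when the caller actually supplies one. Behaviour-wise (illustrative; scalar reprs as in NumPy of this era):

>>> import numpy as np
>>> np.add.reduce([1, 2, 3], initial=10)
16
>>> np.minimum.reduce([], initial=np.inf)   # initial makes reducing an empty array well defined
inf
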
+ */ + PyObject *otype_obj = NULL, *out_obj = NULL, *indices_obj = NULL; + PyObject *keepdims_obj = NULL, *wheremask_obj = NULL; if (operation == UFUNC_REDUCEAT) { - PyArray_Descr *indtype; - indtype = PyArray_DescrFromType(NPY_INTP); - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|OO&O&:reduceat", reduceat_kwlist, - &op, - &obj_ind, - &axes_in, - PyArray_DescrConverter2, &otype, - PyArray_OutputConverter, &out)) { + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments("reduceat", args, len_args, kwnames, + "array", NULL, &op, + "indices", NULL, &indices_obj, + "|axis", NULL, &axes_in, + "|dtype", NULL, &otype_obj, + "|out", NULL, &out_obj, + NULL, NULL, NULL) < 0) { goto fail; } - indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype, - 1, 1, NPY_ARRAY_CARRAY, NULL); - if (indices == NULL) { + /* Prepare inputs for PyUfunc_CheckOverride */ + full_args.in = PyTuple_Pack(2, op, indices_obj); + if (full_args.in == NULL) { goto fail; } + out_is_passed_by_position = len_args >= 5; } else if (operation == UFUNC_ACCUMULATE) { - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&:accumulate", - accumulate_kwlist, - &op, - &axes_in, - PyArray_DescrConverter2, &otype, - PyArray_OutputConverter, &out)) { + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments("accumulate", args, len_args, kwnames, + "array", NULL, &op, + "|axis", NULL, &axes_in, + "|dtype", NULL, &otype_obj, + "|out", NULL, &out_obj, + NULL, NULL, NULL) < 0) { + goto fail; + } + /* Prepare input for PyUfunc_CheckOverride */ + full_args.in = PyTuple_Pack(1, op); + if (full_args.in == NULL) { goto fail; } + out_is_passed_by_position = len_args >= 4; } else { - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&iOO&:reduce", - reduce_kwlist, - &op, - &axes_in, - PyArray_DescrConverter2, &otype, - PyArray_OutputConverter, &out, - &keepdims, &initial, - _wheremask_converter, &wheremask)) { + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments("reduce", args, len_args, kwnames, + "array", NULL, &op, + "|axis", NULL, &axes_in, + "|dtype", NULL, &otype_obj, + "|out", NULL, &out_obj, + "|keepdims", NULL, &keepdims_obj, + "|initial", &_not_NoValue, &initial, + "|where", NULL, &wheremask_obj, + NULL, NULL, NULL) < 0) { + goto fail; + } + /* Prepare input for PyUfunc_CheckOverride */ + full_args.in = PyTuple_Pack(1, op); + if (full_args.in == NULL) { + goto fail; + } + out_is_passed_by_position = len_args >= 4; + } + + /* Normalize output for PyUFunc_CheckOverride and conversion. */ + if (out_is_passed_by_position) { + /* in this branch, out is always wrapped in a tuple. 
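In other words, out can reach reduce either positionally or by keyword, and a length-one tuple is also accepted; all spellings end up in the same normalized form. A small sketch (nothing here is part of the patch):

>>> import numpy as np
>>> out = np.empty(3)
>>> np.add.reduce(np.ones((2, 3)), 0, None, out)        # out in the positional slot
array([2., 2., 2.])
>>> np.add.reduce(np.ones((2, 3)), axis=0, out=(out,))  # out as a 1-tuple keyword
array([2., 2., 2.])
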
*/ + if (out_obj != Py_None) { + full_args.out = PyTuple_Pack(1, out_obj); + if (full_args.out == NULL) { + goto fail; + } + } + } + else if (out_obj) { + if (_set_full_args_out(1, out_obj, &full_args) < 0) { + goto fail; + } + /* Ensure that out_obj is the array, not the tuple: */ + if (full_args.out != NULL) { + out_obj = PyTuple_GET_ITEM(full_args.out, 0); + } + } + + /* We now have all the information required to check for Overrides */ + PyObject *override = NULL; + int errval = PyUFunc_CheckOverride(ufunc, _reduce_type[operation], + full_args.in, full_args.out, args, len_args, kwnames, &override); + if (errval) { + return NULL; + } + else if (override) { + Py_XDECREF(full_args.in); + Py_XDECREF(full_args.out); + return override; + } + + /* Finish parsing of all parameters (no matter which reduce-like) */ + if (indices_obj) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + + indices = (PyArrayObject *)PyArray_FromAny(indices_obj, + indtype, 1, 1, NPY_ARRAY_CARRAY, NULL); + if (indices == NULL) { goto fail; } } + if (otype_obj && !PyArray_DescrConverter2(otype_obj, &otype)) { + goto fail; + } + if (out_obj && !PyArray_OutputConverter(out_obj, &out)) { + goto fail; + } + if (keepdims_obj && !PyArray_PythonPyIntFromInt(keepdims_obj, &keepdims)) { + goto fail; + } + if (wheremask_obj && !_wheremask_converter(wheremask_obj, &wheremask)) { + goto fail; + } + /* Ensure input is an array */ mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, NULL); if (mp == NULL) { @@ -4686,6 +4409,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, } Py_DECREF(mp); Py_DECREF(otype); + Py_XDECREF(full_args.in); + Py_XDECREF(full_args.out); if (ret == NULL) { return NULL; @@ -4721,37 +4446,319 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, Py_XDECREF(otype); Py_XDECREF(mp); Py_XDECREF(wheremask); + Py_XDECREF(full_args.in); + Py_XDECREF(full_args.out); return NULL; } +/* + * Sets typetup to a new reference to the passed in dtype information + * tuple or NULL. Returns -1 on failure. + */ +static int +_get_typetup(PyObject *sig_obj, PyObject *signature_obj, PyObject *dtype, + PyObject **out_typetup) +{ + *out_typetup = NULL; + if (signature_obj != NULL) { + Py_INCREF(signature_obj); + *out_typetup = signature_obj; + } + + if (sig_obj != NULL) { + if (*out_typetup != NULL) { + PyErr_SetString(PyExc_TypeError, + "cannot specify both 'sig' and 'signature'"); + Py_SETREF(*out_typetup, NULL); + return -1; + } + Py_INCREF(sig_obj); + *out_typetup = sig_obj; + } + + if (dtype != NULL) { + if (*out_typetup != NULL) { + // TODO: change to typeerreor + PyErr_SetString(PyExc_RuntimeError, + "cannot specify both 'signature' and 'dtype'"); + Py_SETREF(*out_typetup, NULL); + return -1; + } + /* dtype needs to be converted, delay after the override check */ + } + return 0; +} + +/* + * Finish conversion parsing of the type tuple. This is currenlty only + * conversion of the `dtype` argument, but should do more in the future. + * + * TODO: The parsing of the typetup should be moved here (followup cleanup). + */ +static int +_convert_typetup(PyObject *dtype_obj, PyObject **out_typetup) +{ + if (dtype_obj != NULL) { + PyArray_Descr *dtype = NULL; + if (!PyArray_DescrConverter2(dtype_obj, &dtype)) { + return -1; + } + if (dtype == NULL) { + /* dtype=None, no need to set typetup. */ + return 0; + } + *out_typetup = PyTuple_Pack(1, (PyObject *)dtype); + if (*out_typetup == NULL) { + return -1; + } + } + /* sig and signature are not converted here right now. 
*/ + return 0; +} + + +/* + * Make use of fastcall. If Python has tp_vectorcall we can use this + * directly, otherwise we have to unpack it. + */ static PyObject * -ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) +ufunc_generic_fastcall(PyUFuncObject *ufunc, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, + npy_bool outer, PyArrayObject **mps_in) { - int i; - PyArrayObject *mps[NPY_MAXARGS]; + PyArrayObject *mps_buffer[NPY_MAXARGS] = {NULL}; + PyArrayObject **mps; PyObject *retobj[NPY_MAXARGS]; PyObject *wraparr[NPY_MAXARGS]; PyObject *override = NULL; ufunc_full_args full_args = {NULL, NULL}; + PyObject *typetup = NULL; + int errval; + int nin = ufunc->nin, nout = ufunc->nout, nop = ufunc->nargs; - errval = PyUFunc_CheckOverride(ufunc, "__call__", args, kwds, &override); - if (errval) { + /* Deprecated PyUfunc_GenericFunction support only */ + if (mps_in != NULL) { + mps = mps_in; + } + else { + mps = mps_buffer; + } + + /* + * Note that the input (and possibly output) arguments are passed in as + * positional arguments. We extract these first and check for `out` + * passed by keyword later. + * Outputs and inputs are stored in `full_args.in` and `full_args.out` + * as tuples (or NULL when no outputs are passed). + */ + + /* Check number of arguments */ + if ((len_args < nin) || (len_args > nop)) { + PyErr_Format(PyExc_TypeError, + "%s() takes from %d to %d positional arguments but " + "%zd were given", + ufunc_get_name_cstr(ufunc) , nin, nop, len_args); return NULL; } - else if (override) { - return override; + + /* Fetch input arguments. */ + full_args.in = PyTuple_New(ufunc->nin); + if (full_args.in == NULL) { + return NULL; + } + for (int i = 0; i < ufunc->nin; i++) { + PyObject *tmp = args[i]; + Py_INCREF(tmp); + PyTuple_SET_ITEM(full_args.in, i, tmp); + } + + /* + * If there are more arguments, they define the out args. Otherwise + * full_args.out is NULL for now, and the `out` kwarg may still be passed. + */ + npy_bool out_is_passed_by_position = NPY_FALSE; + if (len_args > nin) { + npy_bool all_none = NPY_TRUE; + out_is_passed_by_position = NPY_TRUE; + + full_args.out = PyTuple_New(nout); + if (full_args.out == NULL) { + goto fail; + } + for (int i = nin; i < nop; i++) { + PyObject *tmp; + if (i < (int)len_args) { + tmp = args[i]; + if (tmp != Py_None) { + all_none = NPY_FALSE; + } + } + else { + tmp = Py_None; + } + Py_INCREF(tmp); + PyTuple_SET_ITEM(full_args.out, i-nin, tmp); + } + if (all_none) { + Py_SETREF(full_args.out, NULL); + } + } + else { + full_args.out = NULL; + } + + /* + * We have now extracted (but not converted) the input arguments. 
+ * To simplify overrides, extract all other arguments (as objects only) + */ + PyObject *out_obj = NULL, *where_obj = NULL; + PyObject *axes_obj = NULL, *axis_obj = NULL; + PyObject *keepdims_obj = NULL, *casting_obj = NULL, *order_obj = NULL; + PyObject *subok_obj = NULL, *signature_obj = NULL, *sig_obj = NULL; + PyObject *dtype_obj = NULL, *extobj = NULL; + + /* Skip parsing if there are no keyword arguments, nothing left to do */ + if (kwnames != NULL) { + if (!ufunc->core_enabled) { + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments(ufunc->name, args + len_args, 0, kwnames, + "$out", NULL, &out_obj, + "$where", NULL, &where_obj, + "$casting", NULL, &casting_obj, + "$order", NULL, &order_obj, + "$subok", NULL, &subok_obj, + "$dtype", NULL, &dtype_obj, + "$signature", NULL, &signature_obj, + "$sig", NULL, &sig_obj, + "$extobj", NULL, &extobj, + NULL, NULL, NULL) < 0) { + goto fail; + } + } + else { + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments(ufunc->name, args + len_args, 0, kwnames, + "$out", NULL, &out_obj, + "$axes", NULL, &axes_obj, + "$axis", NULL, &axis_obj, + "$keepdims", NULL, &keepdims_obj, + "$casting", NULL, &casting_obj, + "$order", NULL, &order_obj, + "$subok", NULL, &subok_obj, + "$dtype", NULL, &dtype_obj, + "$signature", NULL, &signature_obj, + "$sig", NULL, &sig_obj, + "$extobj", NULL, &extobj, + NULL, NULL, NULL) < 0) { + goto fail; + } + if (NPY_UNLIKELY((axes_obj != NULL) && (axis_obj != NULL))) { + PyErr_SetString(PyExc_TypeError, + "cannot specify both 'axis' and 'axes'"); + goto fail; + } + } + + /* Handle `out` arguments passed by keyword */ + if (out_obj != NULL) { + if (out_is_passed_by_position) { + // TODO: Was ValueError (and error order changed) + PyErr_SetString(PyExc_TypeError, + "cannot specify 'out' as both a " + "positional and keyword argument"); + goto fail; + } + if (_set_full_args_out(nout, out_obj, &full_args) < 0) { + goto fail; + } + } + + /* Only one of signature, sig, and dtype should be passed */ + if (_get_typetup(sig_obj, signature_obj, dtype_obj, &typetup) < 0) { + goto fail; + } + } + + char *method; + if (!outer) { + method = "__call__"; + } + else { + method = "outer"; + } + /* We now have all the information required to check for Overrides */ + if (mps_in == NULL) { + /* Deprecated PyUfunc_GenericFunction path does not use overrides */ + errval = PyUFunc_CheckOverride(ufunc, method, + full_args.in, full_args.out, + args, len_args, kwnames, &override); + if (errval) { + goto fail; + } + else if (override) { + Py_XDECREF(typetup); + Py_DECREF(full_args.in); + Py_XDECREF(full_args.out); + return override; + } + } + + if (outer) { + /* Outer uses special preparation of inputs (expand dims) */ + PyObject *new_in = prepare_input_arguments_for_outer(full_args.in, ufunc); + if (new_in == NULL) { + goto fail; + } + Py_SETREF(full_args.in, new_in); + } + + /* Finish argument parsing/converting for the dtype and all others */ + if (_convert_typetup(dtype_obj, &typetup) < 0) { + goto fail; + } + + NPY_ORDER order = NPY_KEEPORDER; + NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING; + npy_bool subok = NPY_TRUE; + int keepdims = -1; /* We need to know if it was passed */ + PyArrayObject *wheremask = NULL; + if (convert_ufunc_arguments(ufunc, full_args, mps, + order_obj, &order, + casting_obj, &casting, + subok_obj, &subok, + where_obj, &wheremask, + keepdims_obj, &keepdims) < 0) { + goto fail; + } + + if (!ufunc->core_enabled) { + errval = PyUFunc_GenericFunctionInternal(ufunc, mps, + full_args, typetup, extobj, casting, order, subok, + 
wheremask); + Py_XDECREF(wheremask); + } + else { + errval = PyUFunc_GeneralizedFunctionInternal(ufunc, mps, + full_args, typetup, extobj, casting, order, subok, + axis_obj, axes_obj, keepdims); } - errval = PyUFunc_GenericFunction_int(ufunc, args, kwds, mps); if (errval < 0) { - return NULL; + goto fail; + } + + if (mps_in != NULL) { + /* Deprecated PyUfunc_GenericFunction path does not wrap. */ + Py_RETURN_NONE; } /* Free the input references */ - for (i = 0; i < ufunc->nin; i++) { - Py_XDECREF(mps[i]); + for (int i = 0; i < ufunc->nin; i++) { + Py_XSETREF(mps[i], NULL); } /* @@ -4771,15 +4778,10 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) * None --- array-object passed in don't call PyArray_Return * method --- the __array_wrap__ method to call. */ - if (make_full_arg_tuple(&full_args, ufunc->nin, ufunc->nout, args, kwds) < 0) { - goto fail; - } - if (_find_array_wrap(full_args, kwds, wraparr, ufunc->nin, ufunc->nout) < 0) { - goto fail; - } + _find_array_wrap(full_args, subok, wraparr, ufunc->nin, ufunc->nout); /* wrap outputs */ - for (i = 0; i < ufunc->nout; i++) { + for (int i = 0; i < ufunc->nout; i++) { int j = ufunc->nin+i; _ufunc_context context; PyObject *wrapped; @@ -4791,7 +4793,7 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) wrapped = _apply_array_wrap(wraparr[i], mps[j], &context); mps[j] = NULL; /* Prevent fail double-freeing this */ if (wrapped == NULL) { - for (j = 0; j < i; j++) { + for (int j = 0; j < i; j++) { Py_DECREF(retobj[j]); } goto fail; @@ -4800,6 +4802,7 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) retobj[i] = wrapped; } + Py_XDECREF(typetup); Py_XDECREF(full_args.in); Py_XDECREF(full_args.out); @@ -4810,21 +4813,111 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) PyTupleObject *ret; ret = (PyTupleObject *)PyTuple_New(ufunc->nout); - for (i = 0; i < ufunc->nout; i++) { + for (int i = 0; i < ufunc->nout; i++) { PyTuple_SET_ITEM(ret, i, retobj[i]); } return (PyObject *)ret; } fail: + Py_XDECREF(typetup); Py_XDECREF(full_args.in); Py_XDECREF(full_args.out); - for (i = ufunc->nin; i < ufunc->nargs; i++) { + for (int i = 0; i < ufunc->nargs; i++) { Py_XDECREF(mps[i]); } return NULL; } + +/* + * TODO: The implementation below can be replaced with PyVectorcall_Call + * when available (should be Python 3.8+). + */ +static PyObject * +ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, + PyArrayObject **mps_in) +{ + Py_ssize_t len_args = PyTuple_GET_SIZE(args); + /* + * Wrapper for tp_call to tp_fastcall, to support both on older versions + * of Python. (and generally simplifying support of both versions in the + * same codebase. + */ + if (kwds == NULL) { + return ufunc_generic_fastcall(ufunc, + PySequence_Fast_ITEMS(args), len_args, NULL, NPY_FALSE, mps_in); + } + + PyObject *new_args[NPY_MAXARGS]; + Py_ssize_t len_kwds = PyDict_Size(kwds); + + if (NPY_UNLIKELY(len_args + len_kwds > NPY_MAXARGS)) { + /* + * We do not have enough scratch-space, so we have to abort; + * In practice this error should not be seen by users. 
+ */ + // TODO: This was a ValueError (but TypeError is more correct) + PyErr_Format(PyExc_ValueError, + "%s() takes from %d to %d positional arguments but " + "%zd were given", + ufunc_get_name_cstr(ufunc) , ufunc->nin, ufunc->nargs, len_args); + return NULL; + } + + /* Copy args into the scratch space */ + for (Py_ssize_t i = 0; i < len_args; i++) { + new_args[i] = PyTuple_GET_ITEM(args, i); + } + + PyObject *kwnames = PyTuple_New(len_kwds); + + PyObject *key, *value; + Py_ssize_t pos = 0; + Py_ssize_t i = 0; + // TODO: do compilers optimize this all the way? + while (PyDict_Next(kwds, &pos, &key, &value)) { + Py_INCREF(key); + PyTuple_SET_ITEM(kwnames, i, key); + new_args[i + len_args] = value; + i++; + } + + PyObject *res = ufunc_generic_fastcall(ufunc, + new_args, len_args, kwnames, NPY_FALSE, mps_in); + Py_DECREF(kwnames); + return res; +} + + +static PyObject * +ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) +{ + return ufunc_generic_call_mps(ufunc, args, kwds, NULL); +} + + +#if PY_VERSION_HEX >= 0x03080000 +/* + * Implement vectorcallfunc which should be defined with Python 3.8+. + * In principle this could be backported, but the speed gain seems moderate + * since ufunc calls often do not have keyword arguments and always have + * a large overhead. The only user would potentially be cython probably. + */ +static PyObject * +ufunc_generic_vectorcall(PyObject *ufunc, + PyObject *const *args, size_t len_args, PyObject *kwnames) +{ + /* + * Unlike METH_FASTCALL, `len_args` may have a flag to signal that + * args[-1] may be (temporarily) used. So normalize it here. + */ + return ufunc_generic_fastcall((PyUFuncObject *)ufunc, + args, PyVectorcall_NARGS(len_args), kwnames, NPY_FALSE, NULL); +} +#endif /* PY_VERSION_HEX >= 0x03080000 */ + + NPY_NO_EXPORT PyObject * ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args) { @@ -4857,6 +4950,7 @@ ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args) return res; } + NPY_NO_EXPORT PyObject * ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args) { @@ -4998,7 +5092,11 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi ufunc->core_dim_flags = NULL; ufunc->userloops = NULL; ufunc->ptr = NULL; +#if PY_VERSION_HEX >= 0x03080000 + ufunc->vectorcall = &ufunc_generic_vectorcall; +#else ufunc->reserved2 = NULL; +#endif ufunc->reserved1 = 0; ufunc->iter_flags = 0; @@ -5437,24 +5535,9 @@ ufunc_traverse(PyUFuncObject *self, visitproc visit, void *arg) * The result has dimensions a.ndim + b.ndim */ static PyObject * -ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) +ufunc_outer(PyUFuncObject *ufunc, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - int errval; - PyObject *override = NULL; - PyObject *ret; - PyArrayObject *ap1 = NULL, *ap2 = NULL, *ap_new = NULL; - PyObject *new_args, *tmp; - static PyObject *_numpy_matrix; - - - errval = PyUFunc_CheckOverride(ufunc, "outer", args, kwds, &override); - if (errval) { - return NULL; - } - else if (override) { - return override; - } - if (ufunc->core_enabled) { PyErr_Format(PyExc_TypeError, "method outer is not allowed in ufunc with non-trivial"\ @@ -5469,20 +5552,22 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) return NULL; } - if (PySequence_Length(args) != 2) { + if (len_args != 2) { PyErr_SetString(PyExc_TypeError, "exactly two arguments expected"); return NULL; } - tmp = PySequence_GetItem(args, 0); - if (tmp == NULL) { - return NULL; - } + return ufunc_generic_fastcall(ufunc, 
args, len_args, kwnames, NPY_TRUE, NULL); +} - npy_cache_import( - "numpy", - "matrix", - &_numpy_matrix); + +static PyObject * +prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) +{ + PyArrayObject *ap1 = NULL; + PyObject *tmp; + static PyObject *_numpy_matrix; + npy_cache_import("numpy", "matrix", &_numpy_matrix); const char *matrix_deprecation_msg = ( "%s.outer() was passed a numpy matrix as %s argument. " @@ -5491,6 +5576,8 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) "array to retain the old behaviour. You can use `matrix.A` " "to achieve this."); + tmp = PyTuple_GET_ITEM(args, 0); + if (PyObject_IsInstance(tmp, _numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, @@ -5503,14 +5590,12 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) else { ap1 = (PyArrayObject *) PyArray_FROM_O(tmp); } - Py_DECREF(tmp); if (ap1 == NULL) { return NULL; } - tmp = PySequence_GetItem(args, 1); - if (tmp == NULL) { - return NULL; - } + + PyArrayObject *ap2 = NULL; + tmp = PyTuple_GET_ITEM(args, 1); if (PyObject_IsInstance(tmp, _numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, @@ -5524,7 +5609,6 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) else { ap2 = (PyArrayObject *) PyArray_FROM_O(tmp); } - Py_DECREF(tmp); if (ap2 == NULL) { Py_DECREF(ap1); return NULL; @@ -5550,6 +5634,7 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) newshape[i] = 1; } + PyArrayObject *ap_new; ap_new = (PyArrayObject *)PyArray_Newshape(ap1, &newdims, NPY_CORDER); if (ap_new == NULL) { goto fail; @@ -5568,68 +5653,38 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) goto fail; } - new_args = Py_BuildValue("(OO)", ap_new, ap2); Py_DECREF(ap1); - Py_DECREF(ap2); - Py_DECREF(ap_new); - ret = ufunc_generic_call(ufunc, new_args, kwds); - Py_DECREF(new_args); - return ret; + return Py_BuildValue("(NN)", ap_new, ap2); fail: Py_XDECREF(ap1); Py_XDECREF(ap2); - Py_XDECREF(ap_new); return NULL; } static PyObject * -ufunc_reduce(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) +ufunc_reduce(PyUFuncObject *ufunc, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - int errval; - PyObject *override = NULL; - - errval = PyUFunc_CheckOverride(ufunc, "reduce", args, kwds, &override); - if (errval) { - return NULL; - } - else if (override) { - return override; - } - return PyUFunc_GenericReduction(ufunc, args, kwds, UFUNC_REDUCE); + return PyUFunc_GenericReduction( + ufunc, args, len_args, kwnames, UFUNC_REDUCE); } static PyObject * -ufunc_accumulate(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) +ufunc_accumulate(PyUFuncObject *ufunc, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - int errval; - PyObject *override = NULL; - - errval = PyUFunc_CheckOverride(ufunc, "accumulate", args, kwds, &override); - if (errval) { - return NULL; - } - else if (override) { - return override; - } - return PyUFunc_GenericReduction(ufunc, args, kwds, UFUNC_ACCUMULATE); + return PyUFunc_GenericReduction( + ufunc, args, len_args, kwnames, UFUNC_ACCUMULATE); } static PyObject * -ufunc_reduceat(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) +ufunc_reduceat(PyUFuncObject *ufunc, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - int errval; - PyObject *override = NULL; - - errval = PyUFunc_CheckOverride(ufunc, "reduceat", args, kwds, &override); - if (errval) { - 
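The reworked outer path (input reshaping now lives in prepare_input_arguments_for_outer and the call funnels through the fastcall entry point) leaves the user-visible result unchanged, e.g.:

>>> import numpy as np
>>> np.multiply.outer([1, 2, 3], [10, 100])
array([[ 10, 100],
       [ 20, 200],
       [ 30, 300]])
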
return NULL; - } - else if (override) { - return override; - } - return PyUFunc_GenericReduction(ufunc, args, kwds, UFUNC_REDUCEAT); + return PyUFunc_GenericReduction( + ufunc, args, len_args, kwnames, UFUNC_REDUCEAT); } /* Helper for ufunc_at, below */ @@ -5686,14 +5741,6 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) char * err_msg = NULL; NPY_BEGIN_THREADS_DEF; - errval = PyUFunc_CheckOverride(ufunc, "at", args, NULL, &override); - if (errval) { - return NULL; - } - else if (override) { - return override; - } - if (ufunc->nin > 2) { PyErr_SetString(PyExc_ValueError, "Only unary and binary ufuncs supported at this time"); @@ -5715,6 +5762,15 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) "second operand needed for ufunc"); return NULL; } + errval = PyUFunc_CheckOverride(ufunc, "at", + args, NULL, NULL, 0, NULL, &override); + + if (errval) { + return NULL; + } + else if (override) { + return override; + } if (!PyArray_Check(op1)) { PyErr_SetString(PyExc_TypeError, @@ -5967,16 +6023,16 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) static struct PyMethodDef ufunc_methods[] = { {"reduce", (PyCFunction)ufunc_reduce, - METH_VARARGS | METH_KEYWORDS, NULL }, + METH_FASTCALL | METH_KEYWORDS, NULL }, {"accumulate", (PyCFunction)ufunc_accumulate, - METH_VARARGS | METH_KEYWORDS, NULL }, + METH_FASTCALL | METH_KEYWORDS, NULL }, {"reduceat", (PyCFunction)ufunc_reduceat, - METH_VARARGS | METH_KEYWORDS, NULL }, + METH_FASTCALL | METH_KEYWORDS, NULL }, {"outer", (PyCFunction)ufunc_outer, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"at", (PyCFunction)ufunc_at, METH_VARARGS, NULL}, @@ -6163,10 +6219,17 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = { .tp_repr = (reprfunc)ufunc_repr, .tp_call = (ternaryfunc)ufunc_generic_call, .tp_str = (reprfunc)ufunc_repr, - .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, + .tp_flags = Py_TPFLAGS_DEFAULT | +#if PY_VERSION_HEX >= 0x03080000 + _Py_TPFLAGS_HAVE_VECTORCALL | +#endif + Py_TPFLAGS_HAVE_GC, .tp_traverse = (traverseproc)ufunc_traverse, .tp_methods = ufunc_methods, .tp_getset = ufunc_getset, +#if PY_VERSION_HEX >= 0x03080000 + .tp_vectorcall_offset = offsetof(PyUFuncObject, vectorcall), +#endif }; /* End of code for ufunc objects */ diff --git a/numpy/core/src/umath/ufunc_object.h b/numpy/core/src/umath/ufunc_object.h index f5de9f9b79d1..6d4fed7c02d2 100644 --- a/numpy/core/src/umath/ufunc_object.h +++ b/numpy/core/src/umath/ufunc_object.h @@ -13,22 +13,8 @@ NPY_NO_EXPORT const char* ufunc_get_name_cstr(PyUFuncObject *ufunc); /* strings from umathmodule.c that are interned on umath import */ -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_out; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_where; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_axes; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_axis; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_keepdims; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_casting; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_order; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_dtype; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_subok; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_signature; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_sig; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_extobj; NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_prepare; NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_wrap; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_finalize; -NPY_VISIBILITY_HIDDEN extern PyObject 
*npy_um_str_ufunc; NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_pyvals_name; #endif diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index 474db02453b5..b4b7db760c64 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -216,44 +216,16 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) ***************************************************************************** */ -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_out = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_where = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_axes = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_axis = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_keepdims = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_casting = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_order = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_dtype = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_subok = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_signature = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_sig = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_extobj = NULL; NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_prepare = NULL; NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_wrap = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_finalize = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_ufunc = NULL; NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_pyvals_name = NULL; /* intern some strings used in ufuncs, returns 0 on success */ static int intern_strings(void) { - if (!(npy_um_str_out = PyUnicode_InternFromString("out"))) return -1; - if (!(npy_um_str_where = PyUnicode_InternFromString("where"))) return -1; - if (!(npy_um_str_axes = PyUnicode_InternFromString("axes"))) return -1; - if (!(npy_um_str_axis = PyUnicode_InternFromString("axis"))) return -1; - if (!(npy_um_str_keepdims = PyUnicode_InternFromString("keepdims"))) return -1; - if (!(npy_um_str_casting = PyUnicode_InternFromString("casting"))) return -1; - if (!(npy_um_str_order = PyUnicode_InternFromString("order"))) return -1; - if (!(npy_um_str_dtype = PyUnicode_InternFromString("dtype"))) return -1; - if (!(npy_um_str_subok = PyUnicode_InternFromString("subok"))) return -1; - if (!(npy_um_str_signature = PyUnicode_InternFromString("signature"))) return -1; - if (!(npy_um_str_sig = PyUnicode_InternFromString("sig"))) return -1; - if (!(npy_um_str_extobj = PyUnicode_InternFromString("extobj"))) return -1; if (!(npy_um_str_array_prepare = PyUnicode_InternFromString("__array_prepare__"))) return -1; if (!(npy_um_str_array_wrap = PyUnicode_InternFromString("__array_wrap__"))) return -1; - if (!(npy_um_str_array_finalize = PyUnicode_InternFromString("__array_finalize__"))) return -1; - if (!(npy_um_str_ufunc = PyUnicode_InternFromString("__array_ufunc__"))) return -1; if (!(npy_um_str_pyvals_name = PyUnicode_InternFromString(UFUNC_PYVALS_NAME))) return -1; return 0; } diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 7656b4d0a385..c2358f785946 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -3746,7 +3746,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kw): # Out argument must be tuple, since there are multiple outputs np.modf(dummy, out=a) - assert_raises(ValueError, np.modf, dummy, out=(a,)) + assert_raises(TypeError, np.modf, dummy, out=(a,)) # 2 inputs, 1 output assert_equal(np.add(a, dummy), 0) diff --git a/numpy/core/tests/test_ufunc.py 
b/numpy/core/tests/test_ufunc.py index aa17d6b089a7..e9b9890c234f 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -34,7 +34,7 @@ def test_kwarg_exact(self): assert_raises(TypeError, np.add, 1, 2, wherex=[True]) def test_sig_signature(self): - assert_raises(ValueError, np.add, 1, 2, sig='ii->i', + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', signature='ii->i') def test_sig_dtype(self): diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 556856faf32e..0844ddf05cb9 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -99,11 +99,11 @@ def test_out_subok(self): # Out argument must be tuple, since there are multiple outputs. r1, r2 = np.frexp(d, out=o1, subok=subok) - assert_raises(ValueError, np.add, a, 2, o, o, subok=subok) - assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok) - assert_raises(ValueError, np.add, a, 2, None, out=o, subok=subok) - assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok) - assert_raises(ValueError, np.add, a, 2, out=(), subok=subok) + assert_raises(TypeError, np.add, a, 2, o, o, subok=subok) + assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok) + assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok) + assert_raises(TypeError, np.add, a, 2, out=(o, o), subok=subok) + assert_raises(TypeError, np.add, a, 2, out=(), subok=subok) assert_raises(TypeError, np.add, a, 2, [], subok=subok) assert_raises(TypeError, np.add, a, 2, out=[], subok=subok) assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok) @@ -2364,12 +2364,14 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # __call__ a = A() - res = np.multiply.__call__(1, a, foo='bar', answer=42) + with assert_raises(TypeError): + np.multiply.__call__(1, a, foo='bar', answer=42) + res = np.multiply.__call__(1, a, subok='bar', where=42) assert_equal(res[0], a) assert_equal(res[1], np.multiply) assert_equal(res[2], '__call__') assert_equal(res[3], (1, a)) - assert_equal(res[4], {'foo': 'bar', 'answer': 42}) + assert_equal(res[4], {'subok': 'bar', 'where': 42}) # __call__, wrong args assert_raises(TypeError, np.multiply, a) @@ -2425,8 +2427,8 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): 'initial': None, 'where': True}) # reduce, wrong args - assert_raises(ValueError, np.multiply.reduce, a, out=()) - assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, out=()) + assert_raises(TypeError, np.multiply.reduce, a, out=('out0', 'out1')) assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') # accumulate, pos args @@ -2459,8 +2461,8 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[4], {'axis': None, 'dtype': None}) # accumulate, wrong args - assert_raises(ValueError, np.multiply.accumulate, a, out=()) - assert_raises(ValueError, np.multiply.accumulate, a, + assert_raises(TypeError, np.multiply.accumulate, a, out=()) + assert_raises(TypeError, np.multiply.accumulate, a, out=('out0', 'out1')) assert_raises(TypeError, np.multiply.accumulate, a, 'axis0', axis='axis0') @@ -2495,8 +2497,8 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[4], {'axis': None, 'dtype': None}) # reduceat, wrong args - assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=()) - assert_raises(ValueError, np.multiply.reduce, a, [4, 2], + assert_raises(TypeError, np.multiply.reduce, a, [4, 2], out=()) + assert_raises(TypeError, 
np.multiply.reduce, a, [4, 2], out=('out0', 'out1')) assert_raises(TypeError, np.multiply.reduce, a, [4, 2], 'axis0', axis='axis0') @@ -2574,12 +2576,12 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # wrong number of arguments in the tuple is an error too. assert_raises(TypeError, np.multiply, a, b, 'one', out='two') assert_raises(TypeError, np.multiply, a, b, 'one', 'two') - assert_raises(ValueError, np.multiply, a, b, out=('one', 'two')) - assert_raises(ValueError, np.multiply, a, out=()) + assert_raises(TypeError, np.multiply, a, b, out=('one', 'two')) + assert_raises(TypeError, np.multiply, a, out=()) assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three')) assert_raises(TypeError, np.modf, a, 'one', 'two', 'three') - assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three')) - assert_raises(ValueError, np.modf, a, out=('one',)) + assert_raises(TypeError, np.modf, a, out=('one', 'two', 'three')) + assert_raises(TypeError, np.modf, a, out=('one',)) def test_ufunc_override_exception(self): @@ -2666,8 +2668,8 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_raises(TypeError, inner1d, a, out='two') assert_raises(TypeError, inner1d, a, a, 'one', out='two') assert_raises(TypeError, inner1d, a, a, 'one', 'two') - assert_raises(ValueError, inner1d, a, a, out=('one', 'two')) - assert_raises(ValueError, inner1d, a, a, out=()) + assert_raises(TypeError, inner1d, a, a, out=('one', 'two')) + assert_raises(TypeError, inner1d, a, a, out=()) def test_ufunc_override_with_super(self): # NOTE: this class is used in doc/source/user/basics.subclassing.rst From ac100c80b3c521bc1d8b3c3539ce989d22602eda Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 18 Mar 2021 16:13:19 -0500 Subject: [PATCH 0812/1270] DOC: Fixup comments based on review and remove TODO The TypeError TODO is also reflected in the tests I think, the other TODO seems pretty unnecessary (most users will go through `tp_vectorcall` anyway, so micro-optimizing that is not all that relevant). --- numpy/core/include/numpy/ufuncobject.h | 2 +- numpy/core/src/umath/override.c | 2 +- numpy/core/src/umath/ufunc_object.c | 14 +++++++++----- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h index a20d827e34f2..333a326ee60e 100644 --- a/numpy/core/include/numpy/ufuncobject.h +++ b/numpy/core/include/numpy/ufuncobject.h @@ -41,7 +41,7 @@ struct _tagPyUFuncObject; * * For backwards compatibility, the regular type resolution function does not * support auxiliary data with object semantics. The type resolution call - * which returns a masked generic function returns 5015a standard NpyAuxData + * which returns a masked generic function returns a standard NpyAuxData * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros * work. * diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c index f02e03933eef..cbe280e82d2d 100644 --- a/numpy/core/src/umath/override.c +++ b/numpy/core/src/umath/override.c @@ -143,7 +143,7 @@ static int normalize_signature_keyword(PyObject *normal_kwds) { /* - * If the keywords include sign rename to signature. An error + * If the keywords include `sig` rename to `signature`. An error * will have been raised if both were given. 
*/ PyObject* obj = _PyDict_GetItemStringWithError(normal_kwds, "sig"); diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 98e97a112a59..8b79eab9b23a 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -4519,8 +4519,13 @@ _convert_typetup(PyObject *dtype_obj, PyObject **out_typetup) /* - * Make use of fastcall. If Python has tp_vectorcall we can use this - * directly, otherwise we have to unpack it. + * Main ufunc call implementation. + * + * This implementation makes use of the "fastcall" way of passing keyword + * arguments and is called directly from `ufunc_generic_vectorcall` when + * Python has `tp_vectorcall` (Python 3.8+). + * If `tp_vectorcall` is not available, the dictionary `kwargs` are unpacked in + * `ufunc_generic_call`/`ufunc_generic_call_mps` with fairly little overhead. */ static PyObject * ufunc_generic_fastcall(PyUFuncObject *ufunc, @@ -4832,7 +4837,8 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* * TODO: The implementation below can be replaced with PyVectorcall_Call - * when available (should be Python 3.8+). + * when available (should be Python 3.8+). And removed entirely if + * `PyUFunc_GenericFunction` is disabled as well. */ static PyObject * ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, @@ -4857,7 +4863,6 @@ ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, * We do not have enough scratch-space, so we have to abort; * In practice this error should not be seen by users. */ - // TODO: This was a ValueError (but TypeError is more correct) PyErr_Format(PyExc_ValueError, "%s() takes from %d to %d positional arguments but " "%zd were given", @@ -4875,7 +4880,6 @@ ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, PyObject *key, *value; Py_ssize_t pos = 0; Py_ssize_t i = 0; - // TODO: do compilers optimize this all the way? while (PyDict_Next(kwds, &pos, &key, &value)) { Py_INCREF(key); PyTuple_SET_ITEM(kwnames, i, key); From 3731478f3ea3d4f6031c545667ddb900bd3b6918 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 19 Mar 2021 14:23:49 -0500 Subject: [PATCH 0813/1270] DOC: Fixup comment style for now --- numpy/core/src/umath/ufunc_object.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 8b79eab9b23a..0e3ad2fe3128 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -4479,7 +4479,7 @@ _get_typetup(PyObject *sig_obj, PyObject *signature_obj, PyObject *dtype, if (dtype != NULL) { if (*out_typetup != NULL) { - // TODO: change to typeerreor + /* TODO: This should be changed to a TypeError */ PyErr_SetString(PyExc_RuntimeError, "cannot specify both 'signature' and 'dtype'"); Py_SETREF(*out_typetup, NULL); From b33cf6606a31981adfbc872363969627982bdf6a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 22 Mar 2021 18:00:11 -0500 Subject: [PATCH 0814/1270] MAINT: Address comments by Marten This fixes the review comments by Marten, mainly small cleanups. The largest change is that it is now back to a ValueError if the out tuple has the wrong length. 
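For reference, a minimal sketch of the error split this settles on, assuming a build with the
full patch series applied; it mirrors the updated tests below rather than adding anything new:

    import numpy as np

    o = np.empty(3)

    # A wrong-length ``out`` tuple is (again) a ValueError:
    try:
        np.add(np.arange(3), 2, out=(o, o))
    except ValueError:
        pass

    # Conflicting ``signature``/``dtype`` keywords raise a TypeError:
    try:
        np.add(1, 2, signature='ii->i', dtype=int)
    except TypeError:
        pass
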
Co-Authored-By: Marten van Kerkwijk --- numpy/core/src/umath/override.c | 22 +++-- numpy/core/src/umath/ufunc_object.c | 123 +++++++++++++--------------- numpy/core/tests/test_multiarray.py | 2 +- numpy/core/tests/test_ufunc.py | 4 +- numpy/core/tests/test_umath.py | 26 +++--- 5 files changed, 83 insertions(+), 94 deletions(-) diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c index cbe280e82d2d..84d39567d3ed 100644 --- a/numpy/core/src/umath/override.c +++ b/numpy/core/src/umath/override.c @@ -1,7 +1,6 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define NO_IMPORT_ARRAY -#include "npy_argparse.h" #include "npy_pycompat.h" #include "numpy/ufuncobject.h" #include "npy_import.h" @@ -29,7 +28,7 @@ get_array_ufunc_overrides(PyObject *in_args, PyObject *out_args, { int i; int num_override_args = 0; - int narg, nout = 0; + int narg, nout; narg = (int)PyTuple_GET_SIZE(in_args); /* It is valid for out_args to be NULL: */ @@ -136,16 +135,15 @@ initialize_normal_kwds(PyObject *out_args, } /* - * ufunc() and ufunc.outer() accept 'sig' or 'signature'; - * normalize to 'signature' + * ufunc() and ufunc.outer() accept 'sig' or 'signature'. We guarantee + * that it is passed as 'signature' by renaming 'sig' if present. + * Note that we have already validated that only one of them was passed + * before checking for checking for overrides. */ static int normalize_signature_keyword(PyObject *normal_kwds) { - /* - * If the keywords include `sig` rename to `signature`. An error - * will have been raised if both were given. - */ + /* If the keywords include `sig` rename to `signature`. */ PyObject* obj = _PyDict_GetItemStringWithError(normal_kwds, "sig"); if (obj == NULL && PyErr_Occurred()) { return -1; @@ -240,8 +238,8 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, } /* - * Normalize ufunc arguments, note that args does not hold any positional - * arguments. (len_args is 0) + * Normalize ufunc arguments, note that any input and output arguments + * have already been stored in `in_args` and `out_args`. */ normal_kwds = PyDict_New(); if (normal_kwds == NULL) { @@ -255,8 +253,8 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, /* * Reduce-like methods can pass keyword arguments also by position, * in which case the additional positional arguments have to be copied - * into the keyword argument dictionary. The __call__ method has to - * normalize sig and signature away. + * into the keyword argument dictionary. The `__call__` and `__outer__` + * method has to normalize sig and signature. 
*/ /* ufunc.__call__ */ diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 0e3ad2fe3128..57788450319d 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -97,8 +97,9 @@ _get_wrap_prepare_args(ufunc_full_args full_args) { /* ---------------------------------------------------------------- */ static PyObject * -ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, - PyArrayObject **mps_in); +ufunc_generic_call_with_operands( + PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, + PyArrayObject **operands_in); static PyObject * prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc); @@ -904,27 +905,14 @@ _wheremask_converter(PyObject *obj, PyArrayObject **wheremask) } } -NPY_NO_EXPORT int -_new_reference(PyObject *obj, PyObject **out) -{ - Py_INCREF(obj); - *out = obj; - return NPY_SUCCEED; -} - -NPY_NO_EXPORT int -_borrowed_reference(PyObject *obj, PyObject **out) -{ - *out = obj; - return NPY_SUCCEED; -} /* * Due to the array override, do the actual parameter conversion * only in this step. This function takes the reference objects and * parses them into the desired values. * This function cleans up after itself and NULLs references on error, - * the caller has to ensure that out_op[0:nargs] is NULLed. + * however, the caller has to ensure that `out_op[0:nargs]` and `out_whermeask` + * are NULL initialized. */ static int convert_ufunc_arguments(PyUFuncObject *ufunc, @@ -940,10 +928,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, int nop = ufunc->nargs; PyObject *obj; - if (out_wheremask != NULL) { - *out_wheremask = NULL; - } - /* Convert and fill in input arguments */ for (int i = 0; i < nin; i++) { obj = PyTuple_GET_ITEM(full_args.in, i); @@ -2273,7 +2257,7 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op, /* Possibly remap axes. */ if (axes != NULL || axis != NULL) { - assert((axes == NULL) || (axis == NULL)); + assert(!(axes != NULL && axis != NULL)); remap_axis = PyArray_malloc(sizeof(remap_axis[0]) * nop); remap_axis_memory = PyArray_malloc(sizeof(remap_axis_memory[0]) * @@ -2889,7 +2873,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc, return -1; } - PyObject *res = ufunc_generic_call_mps(ufunc, args, kwds, op); + PyObject *res = ufunc_generic_call_with_operands(ufunc, args, kwds, op); if (res == NULL) { return -1; } @@ -4035,8 +4019,7 @@ _set_full_args_out(int nout, PyObject *out_obj, ufunc_full_args *full_args) { if (PyTuple_CheckExact(out_obj)) { if (PyTuple_GET_SIZE(out_obj) != nout) { - // TODO: Was ValueError (and error order changed) - PyErr_SetString(PyExc_TypeError, + PyErr_SetString(PyExc_ValueError, "The 'out' tuple must have exactly " "one entry per ufunc output"); return -1; @@ -4071,7 +4054,8 @@ _set_full_args_out(int nout, PyObject *out_obj, ufunc_full_args *full_args) /* - * Parser function which replaces np._NoValue with NULL. + * Convert function which replaces np._NoValue with NULL. + * As a converter returns 0 on error and 1 on success. 
*/ static int _not_NoValue(PyObject *obj, PyObject **out) @@ -4103,7 +4087,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, int axes[NPY_MAXDIMS]; ufunc_full_args full_args = {NULL, NULL}; - PyObject *axes_in = NULL; + PyObject *axes_obj = NULL; PyArrayObject *mp = NULL, *wheremask = NULL, *ret = NULL; PyObject *op = NULL; PyArrayObject *indices = NULL; @@ -4153,7 +4137,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, if (npy_parse_arguments("reduceat", args, len_args, kwnames, "array", NULL, &op, "indices", NULL, &indices_obj, - "|axis", NULL, &axes_in, + "|axis", NULL, &axes_obj, "|dtype", NULL, &otype_obj, "|out", NULL, &out_obj, NULL, NULL, NULL) < 0) { @@ -4171,7 +4155,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, if (npy_parse_arguments("accumulate", args, len_args, kwnames, "array", NULL, &op, - "|axis", NULL, &axes_in, + "|axis", NULL, &axes_obj, "|dtype", NULL, &otype_obj, "|out", NULL, &out_obj, NULL, NULL, NULL) < 0) { @@ -4189,7 +4173,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, if (npy_parse_arguments("reduce", args, len_args, kwnames, "array", NULL, &op, - "|axis", NULL, &axes_in, + "|axis", NULL, &axes_obj, "|dtype", NULL, &otype_obj, "|out", NULL, &out_obj, "|keepdims", NULL, &keepdims_obj, @@ -4280,7 +4264,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, } /* Convert the 'axis' parameter into a list of axes */ - if (axes_in == NULL) { + if (axes_obj == NULL) { /* apply defaults */ if (ndim == 0) { naxes = 0; @@ -4290,22 +4274,22 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, axes[0] = 0; } } - else if (axes_in == Py_None) { + else if (axes_obj == Py_None) { /* Convert 'None' into all the axes */ naxes = ndim; for (i = 0; i < naxes; ++i) { axes[i] = i; } } - else if (PyTuple_Check(axes_in)) { - naxes = PyTuple_Size(axes_in); + else if (PyTuple_Check(axes_obj)) { + naxes = PyTuple_Size(axes_obj); if (naxes < 0 || naxes > NPY_MAXDIMS) { PyErr_SetString(PyExc_ValueError, "too many values for 'axis'"); goto fail; } for (i = 0; i < naxes; ++i) { - PyObject *tmp = PyTuple_GET_ITEM(axes_in, i); + PyObject *tmp = PyTuple_GET_ITEM(axes_obj, i); int axis = PyArray_PyIntAsInt(tmp); if (error_converting(axis)) { goto fail; @@ -4318,7 +4302,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, } else { /* Try to interpret axis as an integer */ - int axis = PyArray_PyIntAsInt(axes_in); + int axis = PyArray_PyIntAsInt(axes_obj); /* TODO: PyNumber_Index would be good to use here */ if (error_converting(axis)) { goto fail; @@ -4479,8 +4463,7 @@ _get_typetup(PyObject *sig_obj, PyObject *signature_obj, PyObject *dtype, if (dtype != NULL) { if (*out_typetup != NULL) { - /* TODO: This should be changed to a TypeError */ - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_TypeError, "cannot specify both 'signature' and 'dtype'"); Py_SETREF(*out_typetup, NULL); return -1; @@ -4525,15 +4508,16 @@ _convert_typetup(PyObject *dtype_obj, PyObject **out_typetup) * arguments and is called directly from `ufunc_generic_vectorcall` when * Python has `tp_vectorcall` (Python 3.8+). * If `tp_vectorcall` is not available, the dictionary `kwargs` are unpacked in - * `ufunc_generic_call`/`ufunc_generic_call_mps` with fairly little overhead. + * `ufunc_generic_call`/`ufunc_generic_call_with_operands` with fairly little + * overhead. 
*/ static PyObject * ufunc_generic_fastcall(PyUFuncObject *ufunc, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, - npy_bool outer, PyArrayObject **mps_in) + npy_bool outer, PyArrayObject **operands_in) { - PyArrayObject *mps_buffer[NPY_MAXARGS] = {NULL}; - PyArrayObject **mps; + PyArrayObject *operands_buffer[NPY_MAXARGS] = {NULL}; + PyArrayObject **operands; PyObject *retobj[NPY_MAXARGS]; PyObject *wraparr[NPY_MAXARGS]; PyObject *override = NULL; @@ -4543,12 +4527,16 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, int errval; int nin = ufunc->nin, nout = ufunc->nout, nop = ufunc->nargs; - /* Deprecated PyUfunc_GenericFunction support only */ - if (mps_in != NULL) { - mps = mps_in; + /* + * `PyUfunc_GenericFunction` uses `ufunc_generic_call_with_operands` + * which passes in the operands explicitly. `PyUfunc_GenericFunction` + * is deprecated and this can be simplified when the deprecation is over. + */ + if (operands_in != NULL) { + operands = operands_in; } else { - mps = mps_buffer; + operands = operands_buffer; } /* @@ -4583,10 +4571,9 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, * If there are more arguments, they define the out args. Otherwise * full_args.out is NULL for now, and the `out` kwarg may still be passed. */ - npy_bool out_is_passed_by_position = NPY_FALSE; - if (len_args > nin) { + npy_bool out_is_passed_by_position = len_args > nin; + if (out_is_passed_by_position) { npy_bool all_none = NPY_TRUE; - out_is_passed_by_position = NPY_TRUE; full_args.out = PyTuple_New(nout); if (full_args.out == NULL) { @@ -4671,7 +4658,6 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Handle `out` arguments passed by keyword */ if (out_obj != NULL) { if (out_is_passed_by_position) { - // TODO: Was ValueError (and error order changed) PyErr_SetString(PyExc_TypeError, "cannot specify 'out' as both a " "positional and keyword argument"); @@ -4696,7 +4682,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, method = "outer"; } /* We now have all the information required to check for Overrides */ - if (mps_in == NULL) { + if (operands_in == NULL) { /* Deprecated PyUfunc_GenericFunction path does not use overrides */ errval = PyUFunc_CheckOverride(ufunc, method, full_args.in, full_args.out, @@ -4731,7 +4717,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, npy_bool subok = NPY_TRUE; int keepdims = -1; /* We need to know if it was passed */ PyArrayObject *wheremask = NULL; - if (convert_ufunc_arguments(ufunc, full_args, mps, + if (convert_ufunc_arguments(ufunc, full_args, operands, order_obj, &order, casting_obj, &casting, subok_obj, &subok, @@ -4741,13 +4727,13 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, } if (!ufunc->core_enabled) { - errval = PyUFunc_GenericFunctionInternal(ufunc, mps, + errval = PyUFunc_GenericFunctionInternal(ufunc, operands, full_args, typetup, extobj, casting, order, subok, wheremask); Py_XDECREF(wheremask); } else { - errval = PyUFunc_GeneralizedFunctionInternal(ufunc, mps, + errval = PyUFunc_GeneralizedFunctionInternal(ufunc, operands, full_args, typetup, extobj, casting, order, subok, axis_obj, axes_obj, keepdims); } @@ -4756,14 +4742,14 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, goto fail; } - if (mps_in != NULL) { + if (operands_in != NULL) { /* Deprecated PyUfunc_GenericFunction path does not wrap. 
*/ Py_RETURN_NONE; } /* Free the input references */ for (int i = 0; i < ufunc->nin; i++) { - Py_XSETREF(mps[i], NULL); + Py_XSETREF(operands[i], NULL); } /* @@ -4795,8 +4781,8 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, context.args = full_args; context.out_i = i; - wrapped = _apply_array_wrap(wraparr[i], mps[j], &context); - mps[j] = NULL; /* Prevent fail double-freeing this */ + wrapped = _apply_array_wrap(wraparr[i], operands[j], &context); + operands[j] = NULL; /* Prevent fail double-freeing this */ if (wrapped == NULL) { for (int j = 0; j < i; j++) { Py_DECREF(retobj[j]); @@ -4829,7 +4815,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, Py_XDECREF(full_args.in); Py_XDECREF(full_args.out); for (int i = 0; i < ufunc->nargs; i++) { - Py_XDECREF(mps[i]); + Py_XDECREF(operands[i]); } return NULL; } @@ -4837,12 +4823,16 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* * TODO: The implementation below can be replaced with PyVectorcall_Call - * when available (should be Python 3.8+). And removed entirely if - * `PyUFunc_GenericFunction` is disabled as well. + * when available (should be Python 3.8+). + * TODO: After `PyUFunc_GenericFunction` is disabled `operands_in` becomes + * unnecessary and this function can be merged with `ufunc_generic_call`. + * The `operands_in` handling can also be removed entirely from + * `ufunc_generic_fastcall`. */ static PyObject * -ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, - PyArrayObject **mps_in) +ufunc_generic_call_with_operands( + PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, + PyArrayObject **operands_in) { Py_ssize_t len_args = PyTuple_GET_SIZE(args); /* @@ -4852,7 +4842,8 @@ ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, */ if (kwds == NULL) { return ufunc_generic_fastcall(ufunc, - PySequence_Fast_ITEMS(args), len_args, NULL, NPY_FALSE, mps_in); + PySequence_Fast_ITEMS(args), len_args, NULL, NPY_FALSE, + operands_in); } PyObject *new_args[NPY_MAXARGS]; @@ -4888,7 +4879,7 @@ ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, } PyObject *res = ufunc_generic_fastcall(ufunc, - new_args, len_args, kwnames, NPY_FALSE, mps_in); + new_args, len_args, kwnames, NPY_FALSE, operands_in); Py_DECREF(kwnames); return res; } @@ -4897,7 +4888,7 @@ ufunc_generic_call_mps(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, static PyObject * ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) { - return ufunc_generic_call_mps(ufunc, args, kwds, NULL); + return ufunc_generic_call_with_operands(ufunc, args, kwds, NULL); } diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index c2358f785946..7656b4d0a385 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -3746,7 +3746,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kw): # Out argument must be tuple, since there are multiple outputs np.modf(dummy, out=a) - assert_raises(TypeError, np.modf, dummy, out=(a,)) + assert_raises(ValueError, np.modf, dummy, out=(a,)) # 2 inputs, 1 output assert_equal(np.add(a, dummy), 0) diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index e9b9890c234f..96bfe7c33b2d 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -38,9 +38,9 @@ def test_sig_signature(self): signature='ii->i') def test_sig_dtype(self): - assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i', + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', dtype=int) - 
assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i', + assert_raises(TypeError, np.add, 1, 2, signature='ii->i', dtype=int) def test_extobj_refcount(self): diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 0844ddf05cb9..f7d248068e7f 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -102,8 +102,8 @@ def test_out_subok(self): assert_raises(TypeError, np.add, a, 2, o, o, subok=subok) assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok) assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok) - assert_raises(TypeError, np.add, a, 2, out=(o, o), subok=subok) - assert_raises(TypeError, np.add, a, 2, out=(), subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(), subok=subok) assert_raises(TypeError, np.add, a, 2, [], subok=subok) assert_raises(TypeError, np.add, a, 2, out=[], subok=subok) assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok) @@ -2427,8 +2427,8 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): 'initial': None, 'where': True}) # reduce, wrong args - assert_raises(TypeError, np.multiply.reduce, a, out=()) - assert_raises(TypeError, np.multiply.reduce, a, out=('out0', 'out1')) + assert_raises(ValueError, np.multiply.reduce, a, out=()) + assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') # accumulate, pos args @@ -2461,8 +2461,8 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[4], {'axis': None, 'dtype': None}) # accumulate, wrong args - assert_raises(TypeError, np.multiply.accumulate, a, out=()) - assert_raises(TypeError, np.multiply.accumulate, a, + assert_raises(ValueError, np.multiply.accumulate, a, out=()) + assert_raises(ValueError, np.multiply.accumulate, a, out=('out0', 'out1')) assert_raises(TypeError, np.multiply.accumulate, a, 'axis0', axis='axis0') @@ -2497,8 +2497,8 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[4], {'axis': None, 'dtype': None}) # reduceat, wrong args - assert_raises(TypeError, np.multiply.reduce, a, [4, 2], out=()) - assert_raises(TypeError, np.multiply.reduce, a, [4, 2], + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=()) + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=('out0', 'out1')) assert_raises(TypeError, np.multiply.reduce, a, [4, 2], 'axis0', axis='axis0') @@ -2576,12 +2576,12 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # wrong number of arguments in the tuple is an error too. 
assert_raises(TypeError, np.multiply, a, b, 'one', out='two') assert_raises(TypeError, np.multiply, a, b, 'one', 'two') - assert_raises(TypeError, np.multiply, a, b, out=('one', 'two')) + assert_raises(ValueError, np.multiply, a, b, out=('one', 'two')) assert_raises(TypeError, np.multiply, a, out=()) assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three')) assert_raises(TypeError, np.modf, a, 'one', 'two', 'three') - assert_raises(TypeError, np.modf, a, out=('one', 'two', 'three')) - assert_raises(TypeError, np.modf, a, out=('one',)) + assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three')) + assert_raises(ValueError, np.modf, a, out=('one',)) def test_ufunc_override_exception(self): @@ -2668,8 +2668,8 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_raises(TypeError, inner1d, a, out='two') assert_raises(TypeError, inner1d, a, a, 'one', out='two') assert_raises(TypeError, inner1d, a, a, 'one', 'two') - assert_raises(TypeError, inner1d, a, a, out=('one', 'two')) - assert_raises(TypeError, inner1d, a, a, out=()) + assert_raises(ValueError, inner1d, a, a, out=('one', 'two')) + assert_raises(ValueError, inner1d, a, a, out=()) def test_ufunc_override_with_super(self): # NOTE: this class is used in doc/source/user/basics.subclassing.rst From ed0ebd78b6e73de6550cb01ad8107f2b1b127e37 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 22 Mar 2021 18:31:39 -0500 Subject: [PATCH 0815/1270] DOC: Add release notes for ufunc (and FASTCALL) refactor --- .../upcoming_changes/15271.compatibility.rst | 33 +++++++++++++++++++ .../upcoming_changes/15271.performance.rst | 16 +++++++++ 2 files changed, 49 insertions(+) create mode 100644 doc/release/upcoming_changes/15271.compatibility.rst create mode 100644 doc/release/upcoming_changes/15271.performance.rst diff --git a/doc/release/upcoming_changes/15271.compatibility.rst b/doc/release/upcoming_changes/15271.compatibility.rst new file mode 100644 index 000000000000..b37a989775d9 --- /dev/null +++ b/doc/release/upcoming_changes/15271.compatibility.rst @@ -0,0 +1,33 @@ +Error type changes in universal functions +----------------------------------------- +The universal functions may now raise different errors +on invalid input in some cases. +The main changes should be that a ``RuntimeError`` was +replaced with a more fitting ``TypeError``. +When multiple errors were present in the same call, +NumPy may now raise a different one. + + +``__array_ufunc__`` argument validation +--------------------------------------- +NumPy will now partially validate arguments before calling +``__array_ufunc__``. Previously, it was possible to pass +on invalid arguments (such as a non-existing keyword +argument) when dispatch was known to occur. +This was always the intended behaviour, if existing +``__array_ufunc__`` users rely on certain keyword arguments +to be passed on, it may be possible to make an exception. + + +``__array_ufunc__`` and additional positional arguments +------------------------------------------------------- +Previously, all positionally passed arguments were used +where used for ``__array_ufunc__``. In the case of reduce, +accumulate, and reduceat all arguments may be passed by +position. This means that when they were passed by +position, they could previously have been asked to handle +the ufunc call via ``__array_ufunc__``. +Since this depended on the way the arguments were passed +(by position or by keyword), NumPy will now only dispatch +on the input and output array. 
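A minimal sketch of the resulting dispatch rule, assuming a build with these changes; the
``Dispatcher`` class here is purely illustrative:

    import numpy as np

    class Dispatcher:
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            return "dispatched"

    # The array operand of a reduction (an input) still triggers dispatch,
    # no matter how the remaining arguments are supplied:
    assert np.add.reduce(Dispatcher()) == "dispatched"
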
+For example, it will never dispatch on the ``where`` array. diff --git a/doc/release/upcoming_changes/15271.performance.rst b/doc/release/upcoming_changes/15271.performance.rst new file mode 100644 index 000000000000..43502fb79101 --- /dev/null +++ b/doc/release/upcoming_changes/15271.performance.rst @@ -0,0 +1,16 @@ +Reduced call overheads with keyword arguments +--------------------------------------------- +In the following cases NumPy now has a much reduced call +overhead: + +* The array creation functions ``np.asarray``, + ``np.asanyarray``, ``np.ascontiguousarray``, and + ``np.asfortranarray``. These are now always as fast + as ``np.array`` itself. +* Universal functions (partially limited to Python 3.8+). +* Many NumPy array methods when keyword arguments are used. + +This can lead to large performance improvements for some +function calls when working with small arrays. +For functions improved here, the use of keyword arguments +will only have a minimal performance impact. From ace44ff58daecd5d20d4ce9198f38681c6e5d9ee Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 23 Mar 2021 11:01:34 -0500 Subject: [PATCH 0816/1270] BUG: Add missing decref to `dtype` parsing --- numpy/core/src/umath/ufunc_object.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 57788450319d..df0a778895e6 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -4492,6 +4492,7 @@ _convert_typetup(PyObject *dtype_obj, PyObject **out_typetup) return 0; } *out_typetup = PyTuple_Pack(1, (PyObject *)dtype); + Py_DECREF(dtype); if (*out_typetup == NULL) { return -1; } From d2a608224de02dff0c62cc2eeab3882a5402cb91 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 23 Mar 2021 11:53:11 -0500 Subject: [PATCH 0817/1270] BUG: Add missing `static` leading to a (harmless) ref-leak --- numpy/core/src/umath/override.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c index 84d39567d3ed..e218c64c0bf0 100644 --- a/numpy/core/src/umath/override.c +++ b/numpy/core/src/umath/override.c @@ -179,7 +179,7 @@ copy_positional_args_to_kwargs(const char **keywords, * This is only relevant for reduce, which is the only one with * 5 keyword arguments. */ - PyObject *NoValue = NULL; + static PyObject *NoValue = NULL; assert(strcmp(keywords[i], "initial") == 0); npy_cache_import("numpy", "_NoValue", &NoValue); if (args[i] == NoValue) { From 4b4b6ef78f1174baac462a1e00c9c090c8a47b35 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 23 Mar 2021 15:41:02 -0500 Subject: [PATCH 0818/1270] MAINT: Declare functions `static` rather than `NPY_NO_EXPORT` These functions do not require `NPY_NO_EXPORT`. One of them was (incorrectly) flagged by code coverage. Maybe this helps... --- numpy/core/src/umath/ufunc_object.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index df0a778895e6..e5b182270e61 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -853,7 +853,7 @@ ufunc_get_name_cstr(PyUFuncObject *ufunc) { /* * Converters for use in parsing of keywords arguments. 
*/ -NPY_NO_EXPORT int +static int _subok_converter(PyObject *obj, npy_bool *subok) { if (PyBool_Check(obj)) { @@ -867,7 +867,7 @@ _subok_converter(PyObject *obj, npy_bool *subok) } } -NPY_NO_EXPORT int +static int _keepdims_converter(PyObject *obj, int *keepdims) { if (PyBool_Check(obj)) { @@ -881,7 +881,7 @@ _keepdims_converter(PyObject *obj, int *keepdims) } } -NPY_NO_EXPORT int +static int _wheremask_converter(PyObject *obj, PyArrayObject **wheremask) { /* From 605be4f81ffad7475e6e07d6c19fca35c6789c41 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 23 Mar 2021 15:48:53 -0500 Subject: [PATCH 0819/1270] DOC: Fixup ufunc-argparse release notes and code comments This is based on Marten's review again. --- doc/release/upcoming_changes/15271.compatibility.rst | 12 +++++------- numpy/core/src/umath/override.c | 4 ++-- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/doc/release/upcoming_changes/15271.compatibility.rst b/doc/release/upcoming_changes/15271.compatibility.rst index b37a989775d9..0c649500fb62 100644 --- a/doc/release/upcoming_changes/15271.compatibility.rst +++ b/doc/release/upcoming_changes/15271.compatibility.rst @@ -14,20 +14,18 @@ NumPy will now partially validate arguments before calling ``__array_ufunc__``. Previously, it was possible to pass on invalid arguments (such as a non-existing keyword argument) when dispatch was known to occur. -This was always the intended behaviour, if existing -``__array_ufunc__`` users rely on certain keyword arguments -to be passed on, it may be possible to make an exception. ``__array_ufunc__`` and additional positional arguments ------------------------------------------------------- -Previously, all positionally passed arguments were used -where used for ``__array_ufunc__``. In the case of reduce, -accumulate, and reduceat all arguments may be passed by +Previously, all positionally passed arguments were check for +``__array_ufunc__`` support. In the case of ``reduce``, +``accumulate``, and ``reduceat`` all arguments may be passed by position. This means that when they were passed by position, they could previously have been asked to handle the ufunc call via ``__array_ufunc__``. Since this depended on the way the arguments were passed (by position or by keyword), NumPy will now only dispatch on the input and output array. -For example, it will never dispatch on the ``where`` array. +For example, NumPy will never dispatch on the ``where`` array +in a reduction such as ``np.add.reduce``. diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c index e218c64c0bf0..d247c263986f 100644 --- a/numpy/core/src/umath/override.c +++ b/numpy/core/src/umath/override.c @@ -138,7 +138,7 @@ initialize_normal_kwds(PyObject *out_args, * ufunc() and ufunc.outer() accept 'sig' or 'signature'. We guarantee * that it is passed as 'signature' by renaming 'sig' if present. * Note that we have already validated that only one of them was passed - * before checking for checking for overrides. + * before checking for overrides. */ static int normalize_signature_keyword(PyObject *normal_kwds) @@ -254,7 +254,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, * Reduce-like methods can pass keyword arguments also by position, * in which case the additional positional arguments have to be copied * into the keyword argument dictionary. The `__call__` and `__outer__` - * method has to normalize sig and signature. + * method have to normalize sig and signature. 
*/ /* ufunc.__call__ */ From 0b61abaed5049c148c30766db01b00a940b9b2f8 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 23 Mar 2021 18:15:34 -0500 Subject: [PATCH 0820/1270] BUG: Fix bug(s) introduced due to rebase/merge conflict --- numpy/core/src/umath/ufunc_object.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index e5b182270e61..6510709db91c 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5578,7 +5578,6 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { - Py_DECREF(tmp); return NULL; } ap1 = (PyArrayObject *) PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0); @@ -5596,7 +5595,6 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { - Py_DECREF(tmp); Py_DECREF(ap1); return NULL; } @@ -5646,6 +5644,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) "To work around this issue, please convert the inputs to " "numpy arrays.", ufunc->name, Py_TYPE(ap_new)->tp_name); + Py_DECREF(ap_new); goto fail; } From 10ee4e04091cccfb90832071b04b84dc8866d6ff Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 23 Mar 2021 18:33:15 -0500 Subject: [PATCH 0821/1270] DOC: Link all three PRs and use the first PR as the "base" (filename) --- .../upcoming_changes/15271.performance.rst | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 doc/release/upcoming_changes/15271.performance.rst diff --git a/doc/release/upcoming_changes/15271.performance.rst b/doc/release/upcoming_changes/15271.performance.rst deleted file mode 100644 index 43502fb79101..000000000000 --- a/doc/release/upcoming_changes/15271.performance.rst +++ /dev/null @@ -1,16 +0,0 @@ -Reduced call overheads with keyword arguments ---------------------------------------------- -In the following cases NumPy now has a much reduced call -overhead: - -* The array creation functions ``np.asarray``, - ``np.asanyarray``, ``np.ascontiguousarray``, and - ``np.asfortranarray``. These are now always as fast - as ``np.array`` itself. -* Universal functions (partially limited to Python 3.8+). -* Many NumPy array methods when keyword arguments are used. - -This can lead to large performance improvements for some -function calls when working with small arrays. -For functions improved here, the use of keyword arguments -will only have a minimal performance impact. From bebf5c891cb1c547b6de97ecd48af66853841028 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 23 Mar 2021 19:04:26 -0500 Subject: [PATCH 0822/1270] DOC: Fix spelling in release note [skip CI] --- doc/release/upcoming_changes/15271.compatibility.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/15271.compatibility.rst b/doc/release/upcoming_changes/15271.compatibility.rst index 0c649500fb62..7deefe2566a2 100644 --- a/doc/release/upcoming_changes/15271.compatibility.rst +++ b/doc/release/upcoming_changes/15271.compatibility.rst @@ -18,7 +18,7 @@ argument) when dispatch was known to occur. 
``__array_ufunc__`` and additional positional arguments ------------------------------------------------------- -Previously, all positionally passed arguments were check for +Previously, all positionally passed arguments were checked for ``__array_ufunc__`` support. In the case of ``reduce``, ``accumulate``, and ``reduceat`` all arguments may be passed by position. This means that when they were passed by From badbf70324274bdb4299d8c64d3d83a26be2d4c0 Mon Sep 17 00:00:00 2001 From: Ohad Ravid Date: Wed, 24 Mar 2021 15:45:16 +0200 Subject: [PATCH 0823/1270] ENH: Improve performance of `np.save` for small arrays (gh-18657) * ENH: Remove call to `_filter_header` from `_write_array_header` Improve performance of `np.save` by removing the call when writing the header, as it is known to be done in Python 3. * ENH: Only call `_filter_header` from `_read_array_header` for old vers Improve performance of `np.load` for arrays with version >= (3,0) by removing the call, as it is known to be done in Python 3. * ENH: Use a set of keys when checking `read_array` Improve performance of `np.load`. * DOC: Improve performance of `np.{save,load}` for small arrays --- .../upcoming_changes/18657.performance.rst | 10 ++++++++++ numpy/lib/format.py | 16 ++++++++++------ 2 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 doc/release/upcoming_changes/18657.performance.rst diff --git a/doc/release/upcoming_changes/18657.performance.rst b/doc/release/upcoming_changes/18657.performance.rst new file mode 100644 index 000000000000..b9d436725620 --- /dev/null +++ b/doc/release/upcoming_changes/18657.performance.rst @@ -0,0 +1,10 @@ +Improve performance of ``np.save`` and ``np.load`` for small arrays +------------------------------------------------------------------- +``np.save`` is now a lot faster for small arrays. + +``np.load`` is also faster for small arrays, +but only when serializing with a version >= `(3, 0)`. + +Both are done by removing checks that are only relevant for Python 2, +while still maintaining compatibility with arrays +which might have been created by Python 2. diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 904c32cc7c48..ead6a0420251 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -173,6 +173,7 @@ __all__ = [] +EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} MAGIC_PREFIX = b'\x93NUMPY' MAGIC_LEN = len(MAGIC_PREFIX) + 2 ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 @@ -432,7 +433,6 @@ def _write_array_header(fp, d, version=None): header.append("'%s': %s, " % (key, repr(value))) header.append("}") header = "".join(header) - header = _filter_header(header) if version is None: header = _wrap_header_guess_version(header) else: @@ -590,7 +590,10 @@ def _read_array_header(fp, version): # "shape" : tuple of int # "fortran_order" : bool # "descr" : dtype.descr - header = _filter_header(header) + # Versions (2, 0) and (1, 0) could have been created by a Python 2 + # implementation before header filtering was implemented. 
+ if version <= (2, 0): + header = _filter_header(header) try: d = safe_eval(header) except SyntaxError as e: @@ -599,14 +602,15 @@ def _read_array_header(fp, version): if not isinstance(d, dict): msg = "Header is not a dictionary: {!r}" raise ValueError(msg.format(d)) - keys = sorted(d.keys()) - if keys != ['descr', 'fortran_order', 'shape']: + + if EXPECTED_KEYS != d.keys(): + keys = sorted(d.keys()) msg = "Header does not contain the correct keys: {!r}" - raise ValueError(msg.format(keys)) + raise ValueError(msg.format(d.keys())) # Sanity-check the values. if (not isinstance(d['shape'], tuple) or - not numpy.all([isinstance(x, int) for x in d['shape']])): + not all(isinstance(x, int) for x in d['shape'])): msg = "shape is not valid: {!r}" raise ValueError(msg.format(d['shape'])) if not isinstance(d['fortran_order'], bool): From 0d5eefb1ebd05f3d4aeaf2572460afe9d027aa8f Mon Sep 17 00:00:00 2001 From: Filip Ter Date: Tue, 23 Mar 2021 15:52:45 -0700 Subject: [PATCH 0824/1270] MAINT: Adding exception chaining and switching to fstring --- numpy/lib/function_base.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index ddc919e4f5dc..6921e3df1b78 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -675,8 +675,8 @@ def select(condlist, choicelist, default=0): try: intermediate_dtype = np.result_type(*choicelist) except TypeError as e: - raise TypeError('Choicelist elements do not have a common dtype: {}' - .format(e)) + msg = f'Choicelist elements do not have a common dtype: {e}' + raise TypeError(msg) from None default_array = np.asarray(default) choicelist.append(default_array) @@ -685,9 +685,8 @@ def select(condlist, choicelist, default=0): try: dtype = np.result_type(intermediate_dtype, default_array) except TypeError as e: - raise TypeError( - 'Choicelists and default do not have a common dtype: {}' - .format(e)) + msg = f'Choicelists and default value do not have a common dtype: {e}' + raise TypeError(msg) from None # Convert conditions to arrays and broadcast conditions and choices # as the shape is needed for the result. 
Doing it separately optimizes From 6eacf152e1e41e92d6a6e8b4d3ccfa01a8e275ef Mon Sep 17 00:00:00 2001 From: mrios Date: Thu, 25 Mar 2021 16:48:03 -0400 Subject: [PATCH 0825/1270] BUG: Changed METH_VARARGS to METH_NOARGS --- numpy/core/src/multiarray/nditer_pywrap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index e0ec9d5cff87..5ac0f8442e01 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -2369,7 +2369,7 @@ static PyMethodDef npyiter_methods[] = { {"__exit__", (PyCFunction)npyiter_exit, METH_VARARGS, NULL}, {"close", (PyCFunction)npyiter_close, - METH_VARARGS, NULL}, + METH_NOARGS, NULL}, {NULL, NULL, 0, NULL}, }; From 14e4812e08348da47dec76940498813870f3b1ba Mon Sep 17 00:00:00 2001 From: mrios Date: Thu, 25 Mar 2021 17:23:36 -0400 Subject: [PATCH 0826/1270] BUG: Changed METH_VARARGS to METH_NOARGS --- numpy/core/tests/test_nditer.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 82ba5a01b619..ddcc8f2834b1 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -3065,6 +3065,10 @@ def test_close_raises(): assert_raises(StopIteration, next, it) assert_raises(ValueError, getattr, it, 'operands') +def test_close_parameters(): + it = np.nditer(np.arange(3)) + assert_raises(TypeError, it.close, 1) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_warn_noclose(): a = np.arange(6, dtype='f4') From 6af7320a7b8883337cd0c8f4233ba79b9b0885e8 Mon Sep 17 00:00:00 2001 From: Maxim Belkin Date: Thu, 25 Mar 2021 19:31:39 -0500 Subject: [PATCH 0827/1270] Docs: simd-optimizations.rst: fix basline typo (#18680) --- doc/source/reference/simd/simd-optimizations.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/simd/simd-optimizations.rst b/doc/source/reference/simd/simd-optimizations.rst index 59a4892b24d8..956824321023 100644 --- a/doc/source/reference/simd/simd-optimizations.rst +++ b/doc/source/reference/simd/simd-optimizations.rst @@ -96,8 +96,8 @@ NOTES arguments must be enclosed in quotes. - The operand ``+`` is only added for nominal reasons, For example: - ``--cpu-basline= "min avx2"`` is equivalent to ``--cpu-basline="min + avx2"``. - ``--cpu-basline="min,avx2"`` is equivalent to ``--cpu-basline`="min,+avx2"`` + ``--cpu-baseline= "min avx2"`` is equivalent to ``--cpu-baseline="min + avx2"``. + ``--cpu-baseline="min,avx2"`` is equivalent to ``--cpu-baseline`="min,+avx2"`` - If the CPU feature is not supported by the user platform or compiler, it will be skipped rather than raising a fatal error. From 56ca9b580ac244ad6b7f01c987b740b654c08153 Mon Sep 17 00:00:00 2001 From: Alexander Hunt Date: Sat, 27 Mar 2021 07:53:51 -0400 Subject: [PATCH 0828/1270] Added _DatetimeScalar to replace dt.Datetime in np.datetime64 --- numpy/__init__.pyi | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 68a14ced6865..e2a99bdbeb33 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2537,11 +2537,24 @@ class object_(generic): object0 = object_ +#Added Protocol to typecheck for day, month, year property to be used in +#datetime64 constructor per Issue #18640 and BvB93s recommendation +class _DatetimeScalar(Protocol): + @property + def day(self) -> int: ... 
+ @property + def month(self) -> int: ... + @property + def year(self) -> int: ... + + class datetime64(generic): - @overload + #Replaced dt.datetime with _DatetimeScalar per issue #18640 and + #BvB93s recommendation + @overload def __init__( self, - __value: Union[None, datetime64, _CharLike_co, dt.datetime] = ..., + __value: Union[None, datetime64, _CharLike_co, _DatetimeScalar] = ..., __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ..., ) -> None: ... @overload From f29ee7be2f1d1b7377753eb3d36334902b7f280d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 27 Mar 2021 18:45:47 -0600 Subject: [PATCH 0829/1270] REL: Update main after 1.20.2 release. --- doc/changelog/1.20.2-changelog.rst | 40 +++++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/1.20.2-notes.rst | 49 +++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+) create mode 100644 doc/changelog/1.20.2-changelog.rst create mode 100644 doc/source/release/1.20.2-notes.rst diff --git a/doc/changelog/1.20.2-changelog.rst b/doc/changelog/1.20.2-changelog.rst new file mode 100644 index 000000000000..831cf03324de --- /dev/null +++ b/doc/changelog/1.20.2-changelog.rst @@ -0,0 +1,40 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Allan Haldane +* Bas van Beek +* Charles Harris +* Christoph Gohlke +* Mateusz Sokół + +* Michael Lamparski +* Sebastian Berg + +Pull requests merged +==================== + +A total of 20 pull requests were merged for this release. + +* `#18382 `__: MAINT: Update f2py from master. +* `#18459 `__: BUG: ``diagflat`` could overflow on windows or 32-bit platforms +* `#18460 `__: BUG: Fix refcount leak in f2py ``complex_double_from_pyobj``. +* `#18461 `__: BUG: Fix tiny memory leaks when ``like=`` overrides are used +* `#18462 `__: BUG: Remove temporary change of descr/flags in VOID functions +* `#18469 `__: BUG: Segfault in nditer buffer dealloc for Object arrays +* `#18485 `__: BUG: Remove suspicious type casting +* `#18486 `__: BUG: remove nonsensical comparison of pointer < 0 +* `#18487 `__: BUG: verify pointer against NULL before using it +* `#18488 `__: BUG: check if PyArray_malloc succeeded +* `#18546 `__: BUG: incorrect error fallthrough in nditer +* `#18559 `__: CI: Backport CI fixes from main. +* `#18599 `__: MAINT: Add annotations for ``dtype.__getitem__``, ``__mul__`` and... +* `#18611 `__: BUG: NameError in numpy.distutils.fcompiler.compaq +* `#18612 `__: BUG: Fixed ``where`` keyword for ``np.mean`` & ``np.var`` methods +* `#18617 `__: CI: Update apt package list before Python install +* `#18636 `__: MAINT: Ensure that re-exported sub-modules are properly annotated +* `#18638 `__: BUG: Fix ma coercion list-of-ma-arrays if they do not cast to... +* `#18661 `__: BUG: Fix small valgrind-found issues +* `#18671 `__: BUG: Fix small issues found with pytest-leaks diff --git a/doc/source/release.rst b/doc/source/release.rst index 1ea46e232999..8cc4a57507ad 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release Notes :maxdepth: 3 1.21.0 + 1.20.2 1.20.1 1.20.0 1.19.5 diff --git a/doc/source/release/1.20.2-notes.rst b/doc/source/release/1.20.2-notes.rst new file mode 100644 index 000000000000..cdf45b65ea26 --- /dev/null +++ b/doc/source/release/1.20.2-notes.rst @@ -0,0 +1,49 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 1.20.2 Release Notes +========================== + +NumPy 1,20.2 is a bugfix release containing several fixes merged to the main +branch after the NumPy 1.20.1 release. + + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Allan Haldane +* Bas van Beek +* Charles Harris +* Christoph Gohlke +* Mateusz Sokół + +* Michael Lamparski +* Sebastian Berg + +Pull requests merged +==================== + +A total of 20 pull requests were merged for this release. + +* `#18382 `__: MAINT: Update f2py from master. +* `#18459 `__: BUG: ``diagflat`` could overflow on windows or 32-bit platforms +* `#18460 `__: BUG: Fix refcount leak in f2py ``complex_double_from_pyobj``. +* `#18461 `__: BUG: Fix tiny memory leaks when ``like=`` overrides are used +* `#18462 `__: BUG: Remove temporary change of descr/flags in VOID functions +* `#18469 `__: BUG: Segfault in nditer buffer dealloc for Object arrays +* `#18485 `__: BUG: Remove suspicious type casting +* `#18486 `__: BUG: remove nonsensical comparison of pointer < 0 +* `#18487 `__: BUG: verify pointer against NULL before using it +* `#18488 `__: BUG: check if PyArray_malloc succeeded +* `#18546 `__: BUG: incorrect error fallthrough in nditer +* `#18559 `__: CI: Backport CI fixes from main. +* `#18599 `__: MAINT: Add annotations for `dtype.__getitem__`, `__mul__` and... +* `#18611 `__: BUG: NameError in numpy.distutils.fcompiler.compaq +* `#18612 `__: BUG: Fixed ``where`` keyword for ``np.mean`` & ``np.var`` methods +* `#18617 `__: CI: Update apt package list before Python install +* `#18636 `__: MAINT: Ensure that re-exported sub-modules are properly annotated +* `#18638 `__: BUG: Fix ma coercion list-of-ma-arrays if they do not cast to... +* `#18661 `__: BUG: Fix small valgrind-found issues +* `#18671 `__: BUG: Fix small issues found with pytest-leaks From 6f2f26e08c6e0d476593c82ad31d13847f30cbf4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Sun, 28 Mar 2021 10:00:14 +0200 Subject: [PATCH 0830/1270] BUG: Fix test_ccompiler_opt when path contains dots Fix test_ccompiler_opt not to be confused by dots occurring on the path to the temporary directory, by using only the source file's basename when grabbing options. Otherwise, the test can fail with mismatches such as: E AssertionError: 'sources_status' returns different targets than the compiled targets E ['AVX512F', 'AVX2'] != ['(20 2/TEMP/TMPB0YHSCAI/TEST_TARGETS AVX512F)', '(20 2/TEMP/TMPB0YHSCAI/TEST_TARGETS AVX2)'] This is because our TMPDIR value includes numpy version, i.e. 1.20.2. The splitting happens on the first dot that is part of the directory path rather than test filename. 
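For example, with a made-up temporary path that happens to contain dots, the
old and the fixed extraction of the dispatched target names compare as
follows (the path below is only illustrative, not the exact one from the CI
run):

    >>> import os
    >>> source = "/tmp/numpy 1.20.2/temp/tmpb0yhscai/test_targets.avx512f.c"
    >>> source.split('.')[1:-1]            # old: directory parts leak in
    ['20', '2/temp/tmpb0yhscai/test_targets', 'avx512f']
    >>> os.path.basename(source).split('.')[1:-1]   # fixed: only the target
    ['avx512f']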
--- numpy/distutils/tests/test_ccompiler_opt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py index 287a683c8d63..d2b0a4c581ad 100644 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ b/numpy/distutils/tests/test_ccompiler_opt.py @@ -112,7 +112,7 @@ def get_targets(self, targets, groups, **kwargs): gflags = {} fake_objects = opt.try_dispatch([file]) for source, flags in fake_objects: - gtar = source.split('.')[1:-1] + gtar = path.basename(source).split('.')[1:-1] glen = len(gtar) if glen == 0: gtar = "baseline" From 18db7880daf6cd20bd7539ab65a3f06663f10d90 Mon Sep 17 00:00:00 2001 From: Christopher Dahlin Date: Sun, 28 Mar 2021 11:57:21 +0200 Subject: [PATCH 0831/1270] Changed matrix size in absolute beginners doc. --- doc/source/user/absolute_beginners.rst | 5 +++-- doc/source/user/images/np_create_matrix.png | Bin 36296 -> 10556 bytes 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index fda73c5fb5de..53be9aca8f8d 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -871,10 +871,11 @@ Creating matrices You can pass Python lists of lists to create a 2-D array (or "matrix") to represent them in NumPy. :: - >>> data = np.array([[1, 2], [3, 4]]) + >>> data = np.array([[1, 2], [3, 4], [5, 6]]) >>> data array([[1, 2], - [3, 4]]) + [3, 4], + [5, 6]]) .. image:: images/np_create_matrix.png diff --git a/doc/source/user/images/np_create_matrix.png b/doc/source/user/images/np_create_matrix.png index cd685c4f5a00dcaf26df97423ed9d93652b8b77e..65e4535e5708df8f4e9bab35020fba6d000e2422 100644 GIT binary patch literal 10556 zcmeHtcU)81_HXQ?SWr<0r8uLA0R=>)Bnntjz@Qw8N)?ERFn~yi2m!2!1r!e=!XZ)* zy$VPlK}15Yp+~?#5=sQ5gaG-S1e|&E-o0<;-hbcAAMi=`-m839+3T!z{LkaGH>}&S z4uwK(ID6)#0SdJqMxmBp{AmU93&tOuL;kI_J9h3E3Y8HkGjl!4ei0-nDC2U0t1(l@&@3>zmU; zCX6OO{&u=>agOfOa}=b(f(Noe8D7xU8Ehg~XI#GnYPrdgDU_n!R@ycFcct)X6TDp?W<-@WF1+hN-H0w>@KBB~ zw;KI+K7Rn6%M7ffE2j3+MkZ04#q?mx`P$jt=gd*tB^0rm=M!6f>v`;{S=6rmdm<<+ z)(dL|B(2{eg*tYT{P_#&=tY!!R9!zijX8fWGS%>^^JW=^z%O&CEB8^lc0Wt(LSOz3 zuIU!P9Y$%RDZ4#IMRv6_*sFh*5+H@{hbTfTbGF^xJjInvP+UPkwt&h2u;wH)m}YSd)ZQa7S}&ceq3|V zY*OMh>5v#d4*vh=ZxHa66b4MijQ@ZSo{9Lutuq%)Qto zU9G(^*?6C01T9Ch(J91?zPJW721!D0k8#UTC@2bWLsAz*WNDotfw3$pgHn>lB~~si1l^)n}yGu_0UEEDcd}S1dF>uglxV%j40?OMuY@_nc4q zseH_ym*O}J>=2Fb6(xW-)|A~bB%qW6$OZ>h1I+!xx=k|CiJ>uIm~S&0vv|JT!iCAvH~a1p|EVMj?M?&*pn32%K@t zOI#k7jJ7o8i=+jAi~)+@)PXXuSOTv+$OMNba~IY@GIH@~z1KV%6zxcU+Jt z`&!T|cGhrX1-^N32)uKLOIXaRACrOA@~nwtvI5P7OK@UsZM1uai+mqKje}c7h6MEX zWAR^JX%tWEE)r}zggYY9tjk^NMg2VC^^RV~i;k&Ph>2zI-Pg*$8hlr}QGS;o9bTfn zClZ)-SUPj&BywgrdhKIG!)2&lVvv%eO9>(Y|I}*)yc}<_n8h2%p9vO+zP%`rG^h!^ zV#OxbqU%1o1b?qsSuTL&c{~D1>zW^8XCkonuUJwKft95ALxfZWNFC0uZ34m1SRg{^ zP5=021;=>(5YHA~-xbv7S;B|TH`P(1GW6QaLWqVjJ$P1IGb@UcSdQ&jRXqA8*r;`X z&lRZz?{4`VT4p{apd33I$)a{QjvpP5on50n;hby_HAehKZW4HhV1yZ=L$j-x$j+iS$g_$q(sQQ}^OhJg?9xezo^Wuc$^P zPO3#N7Qxvsio{elZD=vBXR)Xf*Ey1$Yh}@8oZMDpsFA${L3i78o}$>dv>qWQxPo?T z{&Am;ghcg+w5|!;7H`e@HfsVUVmEA&>dl~6OuG>=)Rh%yY#~<*-%-rfYm*)G8%!1A zfz@Hs>A1C1d@n|>AW&~mQ#Zh_bOsAw(gWl zb8c0bUn{y?NcLsI?1r1XQzIR1H{ck;-M(*FdbX6;U;>Flqyo-`FOzYcJ{i|yTN}eDEzi9|KK7}BM(`1jy2oi=_;T2^yL+2CTxdq}Ywed9ogc$L=|-Or z8UzNt1j=-X@2GS-gdbr^oG@J;(M*8B z<7%fw4WroLoo96{uoZIUZnBW`A*3osc-#Gop(|t|5!?a19+tyybBE2wW>q&PGva^< zujDc5R|=Xxli)5F>V_6ef@8{+23v~b!K}I~VvDy1l{-lG^cgSGSP!$@l&-_Zz@%U+eG;(_# 
zs~EUkQtqa6h!}JIZr>xSex4FmIxsleQpaOWrQj+XUpGh)ufa`!YXSuh%@%Lmvk`)C^iHy$XCy7F?b|H4}j}+pm z1A5AkQWGl(!Z8EaKHfbF!V}IJ)IJ*8=MeF==IskGs=l~y$ENlV{uIB3%1_$bepP`1 z**@p+IajRT^iJMUu$Vn!;;G7WLGM|ebT8&xTkF`Yd0?>dvG$H{HnyjAV@1A+xn72w zO`A@^BFpIy4i_=NNclP!`)<%Yg`DGyLl>9X5p>{th6c*VZZr=y_-SfM2jM&p5~);L zI8xg(3V|HsSS|OMU-mpqy7zYEynVP%;!13{Y=d>%O@>p`!`7T2Sae7 zLt7C;3_=E;9@eDp=(6mThEX`x_f>pe%!Llpxj1Dv%EJs5taL&4R0gG6C9D0D*dwZH zvtBhOR!xKcxZ0XXc_BX_tzq63MT9c*`Y)@S4TA?ZyHi|humJ3)bnM3E)d?rN(ApF1 z^ynLGP=6p`{0U~2uM;M=^-PuaJ<-#SSwJE?Yh4eC1UGl9w?+Y<9M-;FlOW&fVRk7W)r1tQ6S1m76GCYu`YlY5xOdOg?$b_%`2FvJhjyn|uUP}z4avt>w@M5cs#vK_93k)W>_G>rNJMNgf}2yc zOXB9|x7#TdM`}X`cpt6}OZQ28U3n#|QAxIdy*>{ZNVozuHr~VFEgpFImu#KJRnjZj zh4F}OGXYnsIz`p$@R9s0sgkEA`^Y@rOh2k=}2(r zb?e)OiNMv|S2q)s>%I5B*S&gg)MTgAeDwExKnHtExhV{%PCBuMYi`mr(&tWL`O+E0 z?1Coj6MF+OeVXmW12>A*P=OFZM@sL!xIH{vZAU4NKF9{i^nh{CK_s*14}qnFlVcqY zJLuXKttLKSoTtft6%O)NA|XC$`H2^pV6W@(r?o2Xl#BSsy-WjGueDlS1@JVpIUMrf zbk?z|k!RT!a)lL;fdzd`OCLl3PTA-*OF3fEws-3joI2Yj3~O%Q!R+-=u{;AmjdR>J z|0@xxM??^D&LZM?3{uhtY35vu0>uGne9Sc*sh}`yu&?{#?sFJ?{uE%6lSJK-TGDh~ zai_A2?Yl@|?_J51jO?lRSFg)=>*4PO?df#QPm?54?#R8$z4hQT7SA@f!?X>B76o%Z zTGyjTa6BWkuQw(u^K01avjC;`R`q0{QS;c1J&*xOVYuKIJDnM5Jf!^Ccf>bSG5ENt zs(c)pmZPLz2XnQCo+N#8jap}-H!Wam!Es*=K!tLuPu(h_o3;qZC@LD z4SBzlw|<-qq%>&A6*aWjm;CLuzb-uNgs&Jgyyn3Rf zJNz{S=ds|0gkcDz)SU@X><_l|YHkX|)dZ-Dt+mYN?#+F%AvgJT^_Oawrh_wOMpXOL zD{k5;eu7-n)uhM9^`(X{!TUQqM^VA0tVwwKdn(}0zFcHe!LpICIFELxbadb3spq)+ zObi_qVu9c}y{>yWvG@CW?-~9&3X+_UNKE56`msM$Pp5}$5Yo(1tz$9to`@9;&^le} z3D1K*tzsr&@rkVbIzy{bq@W$B0^>1wa|>z1j)G|i{od3xKc%A#P{LtwSa#LN`h|eg z@t}5{R27YxjVEMnmc8Ppb9wLH(NpmHBX5DrJ=582vqCKWC0)SwCrqpo+8Dtt)IftwsY{)0K3j% z@IwF=zXz)i>%B@Vf4cfW#Fjx^o?Wq?|Lvo*M-}$VAu5%W0#cWOZ@28ng4Lh&py{Cn z7#2urd|q%@l}IyDz)Ewf8nrPBV@(glN_zZnz(*{_P9n+skuI!vb@vcB+0Mb>qrCd5 zZuNcWV407pfKI4u`3H`>q!A1{(cesa7(rWX$FTVLC%A;miu76rrd(~-TjG=y=l1tp zzY<@acxb%b5ieYXTej;iuD#&kNgZ;Qi^EMS!IF&NCUC2=8x%Z+=m>K)10d?Yi?uG+P8TG{CG+st+cTbsPv{>Lgx z(z7_UUT^j={T5PrMHU_iW;>!EetI>E!RxvV+;2b+5p>9@<3D@AW-}0yfD_)k>v#*# zPmuR>le(yyb@Q}3qyUa|qu9i!K?XLq<=V`Qn~u#jx0^KTT$n-(uy=c=;`I_DMIy}LMs3t62yuuPld;nEG6K>% z9AH83{3WpUhPe0!7C0Uo(op{i2C07gee5!C=x3MN>`*|}{N;mt6Lpv3DaS~INV)5{tK|{s%M)c6~3OITF`sdFvvA2rvX|mT4DXiIoLKU|2 zzL3O}Azd5ovUAo2W#o`sF_nyckP^Edv}XqZ+Ko;^_$;VvlrR3X!iPj~va6+ZKKDm{{eb)QmntP)+PM zCh=Vh+LWcDpsy=FfCDM#Lp8=&awNf2h^<{>>05^ZKa|(d7gCyz`=-;nZDDjTNM&#gBzDu=w`O zCSpoEFvqG+FbG60DBwDwsMbalDJYty3W$!ORRJM7RSj%fmoaSHv;QbB_T^1%=Z)mb zrtQsWn^p(pMb>GGz{3dXQFNp*UAfjLq)BDY}5sEvsW z3@)J6$!fd%BU2~T46>DVjWk+D>@%mXpD#*ybz;qII>3S`ACWR|coXZXO{fYs_Uz#L z2W{wb47mXf%?wW&&Wr7If``5GM|V1ZrCDp%qfy@-;HNF_l-3*!eoy`+L%U+YG_W`) zbGhZ)_b(4ZTe{Gm(;tff%8A~j5n7`=46b=+=!xEH4Ohnv=cJutf}ozZ_yyQ3l$3b; zTtzg8&gd_c<2N=-FmmD*z6R$TlN@hR(ob3v$M_FWwi zGI&FE;N1C{Na@+Bl)(ml&#SvDHMzqpA5d6&k07NtnV0-UV}au5IhrQvOpSIqow4J;))Ijy4e6^~Q!!#Z6nW{u;SI@^Z zuXicuuq;p^1Visz()=v=)<{OziEPuIIcwVe{_gJs!a z?2V6KjAHTmBSsepI`-EnD-yVeROWE@F5Cymy}mF4R$DPUK3?oEeYE?WbJ~ssGe0p4 z@>--GxEK7>j3r zmhTox*INl0u#$Yp%w#|*RKnRe5Dsg4;?P(}8vV5l=(geZu zof-x^16d+IAuD1Uu8kOs6D*BMb5f9`WZlL(+#0Webryle=_Lu3c{M8Q1HX>ellL8Q zh-!aAb-3l-UADWdtUaT*G9k|MjbjmL!>kBfmjP&G$zH6v`FNSDzGs-^a15}J;L=ty zts?^k-7I?%Jk**Zr#DO3lYUb2a_cAA7oK-tB|Aq08m%m)^HsRYqlGq;;gJVE1UhM? 
zO}2&}IFVOFR33Vc;1}hz>K7!XxP1&Wn6Yzw54k#7l46IkqEjN)2#kAP58l=8+Ue+6 z-0Ac%Y?IW*FPOj-uju#T{U4ls(zip6*WGluLk(;4lhi!NBAhlA7{Ole=(2kz+;kj> zD#=({WGWV~LcjAyz#VV(*2<9+i(k2pG9DmLD{PWi?F7Qf8z}+juh~p2c-^bf zR8vg|HfmA5O1LbSMwC*YC>kvB;3Tske2aZUj3cJ+Ge&xl2@R%(Pt?|)d1dRL2}DH#C^wx_JzG=x=RvBS zZY%ZfLk1U#3zbgtaP@S4c_&;9t}Q_xBDGW9CUg%v!*i#4-}98W9)pKBn#Fi;#^Cqj z9gdvtLW};U!wh|N`{-?cE6hzNXd~9{s$;~3@9IMpZ|1!k2$Vae#({*1mo?M*KCP_L zga-u5i&A5UUzwohfv1Tca>y0m=vLhO2#a^y|AG3eC^FK}9|;w-KOuuxzh!smro6a{ zbP9&RZOd+M@oPpqXNwzVrQ_5Ov%^0MF+fcR4Vf{d&w~^LR zOYC)Fk{guyo9E-Y`}}q$1NJZ@^u?BK zlp?>5Ib|G$CeaQ zbEg~#o8>I2upUeQu%NNTrvL%AsO|^36d>m}*7OoRr1Q*$1g1efGJ+io8~hjB>=y}? zoRQjli=FF41jVmQP4;9A{g}+L#qKsezz$l$H|x?o^6#dS|7_Udo6eu0s`E!N5*$t5 zx3nz-QVX<_Gp~86{Q25$sfA}kTLx4YMuP5hRQV$YfgAi^#;8k;^~IUv-%lR@Ah)R2 zA0weZr!)VAsQ|_$(f*)_#NWS~^ZsiA5&Z>o4L;w$OlK~dakijPs}J(u1z4P~Ez$kY zE#W@`>h6$^0-W+WFHEv-YD|8vspIoHV*=amlzt01VJ?tNCy`6My8;u+8Tnu*V4n#d;2SV!XM!(>!v23@+Y+Kh?GGE^#1S}&U8RU zF_2m-am`7-FH9=$cCEi>9e*Bq#s)Ub`+BPy8>@DNK9EH5^XlUZN4)1|RA~h%Ga0;lNMS_h zaEo1e9+UO-5SnL{3zwQ?CqN_~a4Hz>9p`D9>N%gU2tntW zA1%P@klO_8^R~-a`dw$b<|I#T6mkVx za>qptd#LXYFnDMj`buun+0<})$oZn zHln4((>UXP&VD7;Hf@mW&C#DMkD77`HOnXY>sz|fR6Nal)Q_GPQDu3q(tF{q8j;eT zblv#2*Fa*upCB?AH{h>M6Svv3O;sNnM0xWWMj$$#dcSV?_0c3^<^f!T< zJx1@|BI;bZucN|ENYXbmj|Ydjy8Hl&c@H=VVS!RMm_<&+4s2cg5e6cX>cDBq;$2fv z=GWofhbz3U`7fG^vPyIAi)l!gaX#o;salo>?!(|KgQ}`xU0eLaUI{cef<+|ATU*}j zzu-m*lD1ROR303tif$9UKcMEO14c2yeSeOx|B?s({p6MZ`p7>Y4I}ya&$RQe6UyHu z5yTFEq_cms7g_&vH2jBv2&(^4zWvW$TbUmc^_j2RGLXg_($s&=8y8|9`kVK4{vTfU b6`u3aDYex)uz_`&55w70xRa^Jt?vF0w<3?O literal 36296 zcmeFZX*kqx^gph&DWyar(WXqc5Ftz@$sV#~o2-?6Nwyi&CfWCpWy&^L$CB)WY}vDu zEe3G4G+b7#7{+c_N*q0K>9x-@&g8=6$D}$win46ooKY47QQ{L+8D*IQ4Wfu|D zy;arNN+fQ5$uB;fv%W9o4@*AgU;*Yp-qMf0sq8xbOAx%_z;>;a`8(K8A!mcW|`K>5k~9Sys&1)-u$7(8tHe&prjKMU-Y6wG% z%+-RM9~}!LI?r+^$)tyVCLH+hc6nk_bH{?Fm7bSz7@5JbcOUnSFB&4`1lr#L*@}@V zk=wmb{oZ3Olm{Z!Xcp5^yN2EH0G-;q9V-A45uL(+N-bh2OWSF6AY7MOa_du)SC5QYs1b5;G$)y6Pkbl-W{5R%`$Sz%f zipiaP+=hfJdm(NX8u*FRzO|d8mQc=|Eqg|4uK_KK6Vx6L4!ZjPmk#_lJsXQ79knFC zYxg(b?MChy@SAeaWLdU(^xcKJWAfJ!T>mUHSf`{hrAs-IP@9QU=bHZ+4pJ&vuRSDN z(vi^F{-m=>z@cYv#~O+Ew8swyM{a{gpzr_7$$%#?)o4_4^60aGe|DLvF6%bEKJ@rk zNdC79Om4jO^gj$@1)$Fcu(}x~_Mheedn{PNd9%z?<*eaG*G2v(8Pp-E^QMbpywYwk zsN?y)wKJ0MHVx&M7E>!LF?@SBm;JkKUV^hi6Z{>@i zc@0wjPgXCEXslc$5YwLRr8!6`_rY=nt?Ixh%9@a)E%$#kc#>~R5P^9E0{jE_3hQoU zs!>h|U$JB2a<2#>*?uq2D*unv|Buw$#QA@u{(nmSPlI}V|9_;^6yWBnMS1v@V=T{k zpO-A@zh%A0|KUK2RyJ&e!GN#$$s80t6U!}M|MhwK2>g#*{p<|#^8-cywSTP${59cY zO2@LbJT}NWG{r-Nz2n?JnUvT}d-5XZ6FMTkVEA3Q)zg115IJCwInc@@cwV$X+zA6n zjCuDjE5m+*2hXhN2%4HdxFa3X_^(HRi{ZaAs*xK<>@nJ6?yG>r)Bl-A>&JhTIx`%3 zGCQoT<$tXI`7;sYc|F=ABIeQ$xwOCkdPj4>0qI(2q4^kc|M-6bAF8ApcXMn-?UYGc zTf(^ge`0R!+Mp3Z??$7H{JM|sjVFO=_T?JM2R_-e$5NK!eO{IBjVPCNYp;t6l0J=E zfnf4`-6v0xm`3qaQf+c72X-Ce`PU#pXh3*c{6L9FfxnO{nBQh^Z~+%%JwoC%?>Du6 zw0xke8RjhVUp)Oz#-n5!&8hoT)-%&S@aumYB=~9o+D%MgI2g7|$ko4T^fkgZ#aYDzO=N zSY$pB3iI8Z= z;F~}0J!p@+l!_3to*i*M5H-X@U)Ms;%8KlP&*Oj0 z$YaA>S+Km%)9CD8U2WW~HvDs?=%!8LxG|!sGs>vckTfMnyVy^ z4`TqA?5k-nWGnVktDyU0BR}(##fu3MzzpiFW9J?{AqfVV=9exhKiH7+GqrXy+hbf- zntgTq5ElE)GeTIYKW3hFFI$}k8hR~1$uE}YT4#Hz1WEhOu(1U&mxj{taYzXhR7XsK$GZpzn@OYrfdjqCrJ zuYG1@nmbo%V}~I;wK@AAT?kwN0CZh2 zU*V$#eOKRP@ixvP{e;pcM7Obko@P*brWnX!7?9DzD8&k*CGZ1zKi8vOlg@7vl6F`& z)x*HAnQoBRXeeq~ntu0~i#`NGxUURqZ%}h*|Nd#SSAV zb4ZXx0?n`1(glm2;+O(OpbY|)V$ams zYyAdN$+QpjEhNOk1vHE)m=_Z#enxCLBq01ym;nDhXLJFoyvH#8IfBAM5x=f(=a_C! 
zM@yD)_q6Boa)CEFuxJ^W$0+Kx(*CU~$jWs6972h6q0CrD|M{R1c3uoN!*s?`@qcMv zq-0VgJr0!dV+l`0Fr1J2ia^0rSfUbN#b9o(zer&ffmkpES^@r-R3)87|HZLyGsT6< z=T&y#rd!gXXb7a6GoOg>X(bNNZL*&v=UtVg=O&Ol#z@9p@a=>uV3P&QZ@MOMoieNI zl-^;(IfjJ4cj?%J!uM8*2W-nezc{<^Zb-+WnnXLn!_xJ(uV?A5m~SDORd(_^DQwvy z^^Ujcy_b2PxJ}QHYhnGB58iH`I=$s0PKz_B zJ}J9#O|XzHnh+=#fs%_jw>}ooj@3hGd0-u#tru?odXnHc_b8qH;{ne?8dtYq1a_8i zFA?Jy62x}^z`_2xMrvmJIhRh?O^CC-3bpFwNLr6(ToNJiUf7C9ykgXh{BTm#^PV!D zY37yzlhT;O>bbzhYDe!Toy*%BqFQPQ1~rTa+3B$5%2i5;jH%=`+Ew(Udr?rdET?ss z888qZgz~RA*D~#QeEH_Ux?KhMxg^T(o6^qH_S6>vPcBjq0Xu3XcnyH)lLq1iFTR$& zX{k?m+LN`jsg9+@d&uwbr*L|w$Oh=`_MR2zU41tKY8~dVhwkTBZWzSq@ex=ei3yP<;`^XBLUd8iyRvf7y?Gr->i;-Jd&c6t{Tf8bo%s25|)G^LHM z{G!`N!&i*ev?~3T^u3RxDezqytPYuGpC}lBv+EpQ4RS7cw-Y5B);}2Q?Kl|oRE(FM z8EKr!J>_euCb~kcQL*pq;ZavuCet)`pt)Vyf0v@S`T9IYI;gv>uwlpHL|6O*+;#JY z6X~WzWn_NqNsWOpPO)Pl?C;{vSXsCA?`9V(A6XcI7?_#6YY1OcV+P9K8czD5Q&|n2 z6P)rH>qQG?#~2hG(ic0T&tCj~ZrS9aX(2Xxxc#s4=dRD ziJh9P8~JJX6YdM}LtIbu6xQd@Oa}<*somFSA58=8WQ#uKzmoqv&#ucvet`1SnfL4t zo3g1dHYGyej){M`wSd>p^uMTb;UZcTbdwF~@+anPLz-slPbNUAv_^!%nGhARH?RKRU}_gnLhz~-s0VoEY!LxgTgg53OPeP-FKYP`#LU(32CYWnYK zl?)%el0}<`?Y+*S@n|$E}?5oZRF@pVaS+G3n#kr};6s>E~hp zLeGtDUbm*+UdVvo%{Cgp5hi5{J&Qop?_xb15HfYOL3hfu+OB18()ehB+`}02^mohh zX)2%Q12<&y{}of+8EG#kyXA`n4y7J}ZLr;K=o-vt$E(*7hm)6=J+LCPQ3&U-FT}i< z)d>YOpNVy+qks~cqt)A8-Tx(I9BHxtoFCrqw}sWV;}Oa0JYMa|ZJ|cs&k087-e2m+ zTwD_PYszn)aVETSR?%a-fQPN^&SF~KYLsV({ylYW-4d=Pfq0@_F7pnM5TFtAgvVi! zU@FNi#u;KCzra#037U`ImYhmV|Me=iP6D6(w_XvHLf5WxoWgpAK%+}WX1Hl9n_}n0 zDab3zdY3g!qoeHJTO4IQZYH{Ez6xkQoh0F@7r~1Ctm-690OSll%{PR zOiW(=AT(Ut#F>4zJBC)M`Nm833dT+($>J8e3LCn;Kl#zK)w#UoLm1R12c-Vsj)B{g zWI+E!K*8AN2LNn7t6(H%GcfylcHfzOn41rOZGw~3wk3M5yDW(tKEa)8S*f@&T{U!e zH`$X!P6`hW8<1>e-G$CF-d~IzIx4Awx|gmI)A=#+A43A7gL;owu`a8CIEGqGP5&I- zEe!LexGu3g?e}e;ZLPcPri_Ct57gRlTZdfKcyUrC`t99@6ilte_TH$%$UOcfR03M# z5iRGHsX27tjsbgcGQL9nEs=JZ@%%Sh2p(k+_Jn-q>p*3ce(Yb(heBg}^iDaaD^@hA zG;l3N2US+WyUh|NyLIs!>-L5cNB_`7{4MbYB~7uJDf(g4<_6q0VHX#Tp8Ef5PN+i5 z6X)h~8b?}}gpii5AI=~CSDRG{CP0^1O{#W%`gvG_zQ)J9u~UxO)?EQTl}%9+SZ3z7 zB{X?hA#1)5lRI<97+a|MbEmY*=Ooq_T(d`MW^B0I{JJ_Hi=e?1%@5Ce5|Wq@ zS2J6W&dR-!-!O2+)%!y{yRGz>D+N%eR(pvAu=}45Y~6SEv}2lS_hn`^5ZC-Jb`oTo zRgS-nJA6pnaxY1@C`j4Tivbo#oO&DuAYp)}5pm--RzR9(DqHZTIcBGOqm@bnqB6;` z$m&;W%Qye^ZZlvrrMj^B73P!JFa%WmKx961YL;K)$U1iX!M%K64BU-LXAbX1IdjCD z)DNae_5=cs1hGR!jUdqIfO(AMizE%__+q&PDwLkVQR&8K-6 zMP+`>cMWuZ+0S0s4jszp9D#P^?#I;^6G(AY>$g6{HOl`;tsm|sJl)IdIWvUaOEGT! 
z!ng8m9~8r!Lw3tYlYVomHBKw}QUmZaUVYubIzloo&QU@|xtY(xgY>JIDY63FJV>Z^ zegI+UT<1%*laJWZ8Z*yIVp%YhNRR*YK|VpA+-E;1v}u9N=Jfbob*C)_!@n(4MECOJ zuLoRp3v~+??%mjBo-Y<&BU`;H2nv-Rox0ZLs|7&V-O)52aCvYL@ViDq#xe8=N!yIPcBlV{f@sL@#c(MG?2~tqHCm=pxoQLEdi%X#Gxh zxN@^cjp7T%hf}H&ACB+Ut|?p#A@q~NV^$H+#GsbEx|Rl=AxsfoScXjIg`oLExDMw$ z*H9lg70sm*5SgIs?bY6jUdIGL3A?B;{1K_#&Sm+$B0+wF`C_b%J6$Ud*hRO~a~o@3 z!)^fU#;LW8)nUfR9?j_0K=wE9Q%b?TSGKBNXdeCG?Mp!fkSKU`9p!itqHc|t_kTSF z5LD>U&DH1S&M9n-MF-?QH6MMT5;dfE;mf{Fpuw#Ljw_+s#SgKJ^dadhcBAf`X1$*j zd?voDQ%&w${W{F*G4J@HD>$g%6I7Zn(W4Fx8J4eiKAXcGvj2R^TxxIjrX!dEg!pK= z*rX2jlq0KVdp#M`K36$1>K!0g*~KQC-d+_ru@fxVO597MRLOgGi5i^Gp)>^Ofij@q z4D2oH*hv8ACH-6W=rF2ewbAX70_hg>ISvk88>rT~>z)OON$+u}Tv}IXRmcP(`B6ju zJp;|!6J|GV?bL)sCkF*Em#4hB{_R0UXJbG+K5|8N05EI`4ZbCnFhY*fzjXPv8Q^$L zOG7!p3U?H$us-wwNhS>$V{fkl`FrMr9>rHpHvJ`={B)YJwb`i`p1G;2`R4*|(nqfY zP}0Vv#^J*``ueFJ1VVyOiso{-`;ex!I^O}&i&r_1xhl_Dsbs;tAuAbZdLvHJl1l{g z%0PWsL(j16`w(Jj`sl97c`INR|Fwo&uHh3^^&Rs)=(sOZOS(zUw7(v!9Z4qqa_Zu} z4_>)?w{*-0E<1vM`WG0a&0ExAD;REy$sEmE&&_fS9@4biOLz1RY!z^oX)m?lzwU47u0yXMti-4@E@Yv3}KW1w6Yu%H`U_U1*F|Aa{@D}Ttliw?Nc0z@WOAP<4c zZHF9-^#`Z(OgVJ9gP!cpK@z_VQu|T_N*y8`KjhIeb&|Xwqq^=xm^%eGEIX=FK?RxKH0eUmU}$#@vYRr) zgfYc3FwLJNUfQ5vP1AO8+Yv#qJTnEKjqzm><+lonCU7kz{k&_Ha>Pi?$}w%jm(KhL z@+U5|KwDrxO=gmYvDKfMK6(1;HKoPh^EUqOZ^sx&u*#R*4G@1bp+YejD$&Y&H5P0R z@2m+JhHKPkkD9iaj#fl{SZRX=<76x}+!W z)xHonDExCa%2uk2g9h+KF!N-JG0S+C9~M7zPlMIb6mXX7YBeKg0`O{;Kp5vC3PzxD zm&)?;f8r^_%=zf*@^`Mq{ktSZp;B7)IKfE)fN}$x572IiP(67Ym0D)GDW-LfeD6=x zfhoRW@}j`c1(-ePA2)d@pj(nf$O<-z5vQfEB{@6Y5tInUeD04m%n|;GMr8ZtGP4ex zh5H0wbDxxV>=MtsZt57`pHm;JWZMhusY9;9-I%H&An|j$$kftwFsHzB21BqNsrtEX zjLG#x&3sU6o6wwI{Y-p-LV`~Vp&IFIW)pt3E=a36indytQYSU3MZo?^S%)V)A#Wwc>hTzf@&~WuWV#%r3E7T4?~+PyDgq8kiKGY~h+8 z%~r1f>v+LZPxa38(FK{s|MgTVEyKhPJF2APli|8Z%ms{g1} z;Z%P>JG2OyLWz+jq~01L$-0=nH}VNLyLnnM9$0J88h10}(${gFjf;^1n}l<1 zy2X>G{R)DcQnP~R(ls|hlRt*Z1|Pq|z_iv$Z<*~{TenWpg9XHdJn(-qB(qcfnx67R znGmQ%hqM`AKh77*b`vJvt{&hj4NM6&|o#PiA znC`*A9LDyD2%>v$xvpB7ua2!m0)MI)oEDssfcBIH@U9zx9iAlU)f-7`d>4J5#hV#^ z>S{tIR&WDK-rCyu21N~nqzeSc3Fl$D%svf37av&>`RY@tHz-c!+r|}7>X@Ldcu&F% z1cw2LZczM~w}2~25Y(fXhBgB(!z+fWA04lFbW+aI$=tt3x)Z>E7qk_^{v+*FuySHcYk*wCXwpU?$dLeSrj?`QK(W9Jr7I zC@&{^GxB|*C;rc|4MJ(dNZ<++QjeXpney)bC0OcM_HIJKrjvVh+_#)*v=+O?v?(jG zbp6$^>%D`eN43QW3Qi5<+q1rpGPE+57Dq#^W=+$0sYNh-TblB`hvbR$`AbmtVo(|1I-vU-X^BfKMCBb67* ze?B}O_Ew{Pt8+%NTWGUI4kJ{2<@o+jCr<|l-sY2kjF`D0_C&p57CB7Fa ze42P9ifL~YENp*oOe6K)lB<5&&N(pJvrRXePgOI1EJ30Mxb$q@puM*;Zyj8uZlB-@ zYB>jVvN@{+ntQoA)E!hG6X?qv$(VMYnD+^U4!8(%yfJ)%&+6M9r_wq=-<9k}^hjlM zv}i${rMUHXpSDze`U%CdHws=N&gFT&R&l>^UrfAH2p!#XE#jG{m{G0**bpiEYI{~6Pe}m%oF~V_TeZ-iT9zJ;LGYf0S*g8d02 zz+xz|THtCS4(xnspPip$poZSe+k6e{zK?RP!}8mu2sc_&A_Ayh8QRq8l68DJJi-BqeDaL$7fy~HVF*L%@@A;$Vy`$ zD=S)V7-9u^`)+JHEjlmE7D_Xh%7s15ZkA@PZ`i(^*Ieo(R<;t|SB57Hdvu4<#i#?! 
zPkgAi5Bnl@W^M$>YY4lZ%1%F~-pr*nS-o(D~N-sp#Ykai>KfVS=F ze;%R^$7=TKMDimZtqe;d=SjYuaCp4oRpH+R<5hQ@uKKUED>^eKId0;GA-Mr(J&m6d z<)SA~)|IWGU#6^$1LA`db>Qt|zpLi1)* zl}%V)(W*wg*XQi7M@LB(irmc=$ItKI>}1dk6{icx z9PfBZ`^+2PZsl8s_nc9iGAN}?d3-GY1t%$F>(pPp6BQnF7XQd;S_LgfQnY3!&un&v zD!1P0vSiL5Hk<5wV%M|d0l()g^S|~qsW-o5Nj{(FmFUMK&Lac@;M5E5vem z`zmkcU=sHd9BmJ>{c7zYI;vdNPFf^TmY%Cne_Sl`H>WPdhp1$|bmw0AIH-6^`=dvs zXUf}cdy(gQ$ddAdx?3InGS;yy{M*;Kz%59y_MyOcabbZ3YdvV9C5l-KAy|%*GZTeo zF7B(utR_>*VA9=~N2%PjWJmRnj^?{yI=i{@hb2oDwaNYVqde_w2z#crvKy-N+PhWk z;8X=I;mnX=o=6I;m3s4@&tag!1W(%lTK(J1l|l7m5ywXkDIcFealxN%Cx1r&Bnz&nVav0O=yxv6@WH_voeqQ>I?bw* z>TSo;Qn?M5%I27rY6AQFcc_AAd~!U*wE3_4>FKqe-J&1by89l_nZ`vHv@?-9UAu6o zc~}PMtZYXU ztTBPYDAsSYq`9_Gt=9McspF1MTv)dy&dn{9dcnC?OIFo!sX0yAQGxr}z?ZGh^Fd2U zg-t9HdNpgB|Jr#-ZrPG8MQ6$;7co=uI@&YEYhyd$R~5rDuV;f!_k1&|xOjG#yGlCb zxO*at(iy2Rub*6v#9o=M$ToONe<8&iy{xs22T&AS-1|Q4`(qJZ%1eOE@JIerrq#0I zu#ZNw0Gr5zM3hTD>yFCN^L)p!BGPFasRJP{q-LQ0yP3Qsrqub2m}%a7D{JfaF;+Q{ z&p`;E$c=IT{Ov|?+X3YhHd3#>BMp=fVovjqa}rIS3qBGoGV`4uc^d4vo5U5Iip|nU zu%;=d^@$BR(XMsLZyt_O145*Orq96UF-;GLa=44EBRNkT)8~Ab`R8YokiecgZ7KR) z0!9Cw`vAyW`tR1%`u)2;cC0Q+1#=kdPV4qA+^jDC)iIH zJ`GyXn1_cOWTW|~X|dwwI%_YGI+cYQU9mT^{$9`-&AMNF|B4Z_Yq-AL!KN-v(>~BZ ztf001;Znm(=^JitYaAUJcF(HMuO)x0rnO^2b-Ka0t$D-2$aIigUHJqr;gI5zO$`wG zTCmC^F5-P!NbOOZ$5U%FKHlf?_(Tf)?pTQCK8P|=9 zR)9{!Hocl^>+zWo_l&30RYlROCuI*EJ`80jlBqe9$`Xgc#K`U+0$~zZU;p&#a54-8 zx@m!ICW42q$wR$xIJG&4GRQYqj=Z3i#*QRhuLl{L#Fz-_shYzZv(gc(@9R<`pqZe4 zIPh`}w3_6=#t~!Q&`}A`kHU| zyOhK?T@3w`ikel6?BQ{9(=<1IbFfD3^BLWjcQQg7nY5S!GJZlC%y$X_DN=vu`f1tp z^f6j3p8zyhV>Fw`67)DebShFe8IE!dg@EVDME);Mv(&*kL>>6Ab9FkUq%}dJdKiuH zx={=Xn27IyrA~!_whF{fJ>)3AKn*x z^Z0dC=0SdbDg^(Ko=ore_@%oJxTYv9|M++Ek|0B8B~l@7#3@&^(Dg|q_|JkZ+j$1j z!Hq@{M~FjdUMVFI6Qz9N)DpiJzK@mh^nz)ScL(hA)Yvl!)gscL3x2Z|aR|k<2sdYB z3jpbq($9>wSDjT3T)D*d?{*>LTYT${>nH?P7B<82aydZafm@JnjzqCWQ`UP^7C0B* z6{o{6z97|MW`9B-V@@8EpK}j1+&?#rIccAM`y8`r)_Kz5nFif!v2t^93EEMY+%-Bn zP}w<0GTQSs3nk{P&ha_7NUZ9#V5@7YsDuPA6(@|zoAeI}C!iMUpR?vb9k8USRNgua z-h#L__>@{G2xqUj@|aHE!uHe_jylxvmHm9b$)##PuK%Z#Ey4dq12|en2IT72|7D96 zfo%o+DVRSD-DErinq{JvO@Pfdz^GeE~dgl1ha}GDzmiSo2 z@_bFy_W-V&m$%s>FG%~!hz=e!OBQYoHOi+%Cjr9;n^lf6ElU?@u1Qx$ zGMF$UhhV!^zgh;7mG?!rI7-H4li8V;9NffxpZ6+b)+F=PA(nx}p zZW_b^0(sAA8phv_9ze+9aG=fZqFe43Y>ju*kH0v%l>VsjdwR~tGlg5fqq5y}HBLH_ zelr-6E{dpP=b7*CH$R?CQc@e+Y_NIOf6}r=+BJ3S9Y<$lQInSaXW%CqQVTT*ZkI&E z^H%C$w>3hvGNq4JwX5~oreD!UHe`J18osp{N5{$LAq#o=nx7~OosIbN^~{f9zC$)A z*=*LXzuCX}0e25RLJ6Jw1PZzRxL|*3J%8T~aZ`j($>(N|4zW6oRoqgX@#zG?<>b~@ zVb!-DRA3axXgJ76Ijfu85{a!=-q?5wz7u%;?u(j6v@S*vTJzQjl*~C*XSaC#+|-Jn zC6q%y|{F@^tA$hzh1d8=$@Ke2?sy{!*7fD;O*oUI#-G5*^CPAX*)O+I%7FLD7 z${z(Po%3|8WJaPifesJi#c2}p2K(#!-aJ|HN&{?R$9w_Qg0uWH@-$&k^V z6vtFoSS8ZB{NCwR(vEXin7EK+@BnB<8nCOp!%jK*I=DLrq==7_Na$GUMBHXv?0oXq z+DSPXGY%j1KM6h7gA=;eIddARhjJ{Qquz8W%;(^kyOHXMM~zKpxzj924an$YsgGtX z!g{>g6=*FaUM|$I$rq2N)Fo2g$Eon77?V57s^|#i01hd;GQ?>pz2PnN4ghk|ogGj4 zeJ1HgYM-!@+$kGI;q!YI!O*vLaAybvMT!f^sr!^R`J4+I z)Kk-ZMicZp&-%uE`0KBHvn~UQmJ3||(6aG$P50Jme--{;g^#NFK##$uE~G(EI?@%> z>FY49oTT1et~#Mx%L<*LK};?qfgQ2_gkq)VDTgimG^kg2#=}THDo&qaJgrmccJfxl z#i0SCZdm`tq#<2W96AJrq66;E;k@0)cx)BBm4K^!`ce|r?WBG(8ff_@WCSb&{ZLz1 zqJWcrZeP>(<%nRAKx<@!s{U55pl253Zc^QPxZes=**h0eQ>1w}x#gixI+Z2zgBS#o zfow{>Od&=rS@qo%P>$t1tM6KH?|f0y!P4t&CgBfJTb|Fch)-q#l9o})GxEP#JK>v8 zYw`oD3z%5Od-ehFS>Q&-^@`ts!>XK4x{-)uYy#%fSMF1j*2C_*eKok**?P_fMXeci zJEdM288CZp9>zC}02jH=7H>ScDF}_kj%qM_=FoxyP}CjWK@vfg27t8SU>SzMk07xb zKHROMMMtdN^ST(`84B*`ylxl9pwGl^Qt2bRNhlMr^OXweHBsH+4vo{W>EB-}AP842 zxNvno*Rj)!wtY9d$&m1%K+e}nM&2qvIzrUnO;04U9bySb6>Zm&SUFW{{HsU2IK9S4 
zTsjwPmEHVxA2ZhGl$Y&BkUHoE9rE8V!EdRvTZhE-KC-pFcx4zge(i0nPOqe(YeYC6 zy?$qleBw&U0HG`EhExH_S<&wAh$}|un<8D@ZS^d4p0t=zO4Brzhng~|rQybT*H&Tr zN{qkn&Pk;SK&sU<(PGqtA-l*>R9@Vwpx4<}_n@o*g!tvH6$u>WLtk$P8%v$I;jW8DFAUy*Ur9so}F0CzM4D4k=6PkC43d!C!?qRUMs# zZgr;}acNOR%FDu_0F`O-^Ra{<$dq=IP7ItJ$1E$(OW&A(XdRt$wnrIAzwK8UBM2A|f0y_FFbl}%@24%8qEvkf#9xoWwY4Qthwn=MZ)_%XUA*g=7zLdDiNBV5%2HcVyw z(b7R%$JPV3_=|Mvubf1v^63W+Uv>eX(5$S z`=whvdxeh)>ceN9YJ?5ibi-g*NRQoe-ymUZg-4>3B2mxOXKr5&!|}FkiT*&5p?omJLG<^-ZnHr-z`qKwCaAyW~1{TZ_C}F3M5{AX_H~9d$P~4(3toFtd zN6yJJ^t%rD3g{jeDmTA@)159OL?`B$K#d8$H`mS@K78a$RbET*@>?Y>)-`Um66)(y zO>)2Z;i`XQ9rqCccHQfMU&eyCFD=*v;MP+>+^r$`-mehQ2VB0g-5Uxr3@<|BVokVK zP&Nf(CQDOxp!nmBED}cZN{Xp9}hUJiz4>-&v<*K8N|Gk3)IYR!5uv_ zer?jLjO6*X=yeA*Bx7K(=lj-rhA)b2@dRc)>oH~Aj0=T;o~6M^uKWy6sO+{}w(2S} zf46Ae+z-er7cIb1%7SR-0JnB{;(VlJ08=Wv(8ZmDS4_+hJ*gw9s;jsfeu2+klE>m; z^S!p{@K_+UXFK zwoq$I)=E3^gueBhHJjGkkGz*@@ad-V<5$6{3XGR$kn?K~??c6mc`jA@*3`%2I*Nm@ zWd@IsJv3NzP_%b{K>7Tj4)|HQmFir@0MHgZ#VddctT_Soo@dHsLu5cR5fblv)PuFG z$vT<@Z)u&dJS%AMI!cZS>Lp#9cO`jTmf=1i71~G8JRA2#=Zj*iQOlWu#{I^-6Vb!~}5e}Wu zDaO-yyrAbSZ$yt4zV$4yz(kN%YMc+em^D6w@jSdzdDQ_bPQxMAhJhjYHy} zzn5*1X9G6sF3r2|KUaDKCz-@=_?apkIdi5KdHyG~5R01{W>`$1Wd2pZcg|EFX@N4Lk|bHip!^^rkgiN6*84>EYJO~5Ir zR#=fGj(Cozn_ikdgJ$Mu9ry8S6g}RH%S8bnNe>;G%0F>da4KBXfX`w zr==-WLk3qX{nS;DAQyZD4Kcay(++h9S3IQ43RDl>^6>=2p}*me&J@cBP}3Le8UozT z*RBA6p6v=*TL~136ti6D7$;j%K2ZiFfo%rM6@BnHB6*ysD*I^x5UCgGDE_XF4(7jp zI$?o+NYRcf;%Cw64^~=>`-E6Gv}@1*xSbKCPy-N9_SF$)d+r6`{$l=xZT1V>o8Oq0DdTFy3cDaB^+Y_>4CY3_RR>>zU+df2v~$z4%zih7bmJAx&Za*@t3}3OS(; zFeF`B)&4n-Y_5{!%XkSEjK|Pr6TeR{WMkhO&!TD3w?bRf~B6Ta$ z=JODBO41T+>l5Q)Sq#+n{Gy$Vq&)J6iMuZc|GgKq*P% z65nryCWJvJfpf}bJ-UlbAaga%|6op&;osj3C*54;qw#cUfiRwP`-Q|v!)>adt%UCz5eu+Uvnvet8c&bMnwGV< zBm{P3)_%G8E{SvWEdAi9dqL!<*S3vp?-fm8szH#ZwJ~3EFUqsDNqHkITW*-xg-fNs z5!Nb(w&U<9yLPGXziv}c3urZ%u;C+R8ng5Ap>KVb5@EeSp&*XOuvTV?QoibGCzi7J z8dx3+2cMwd7Mq|s-^gjTsy(f*|A^yuhK^ExIpz+#!io-mOPny*$-`-Vx!?V|EXxTQ z`-|Dhh3df#kw$^*tY>&cb zCjY~<67J3PkfBXyM)X2iN$gf^JLR5#IDBLt9@AM;u`bpE?VABm8Rwgj!Seul#@k;b z!)mlvzWcr7`JF%h{SjtxrrHiaq)8PG?N+ z9~|q#9fdm_14fD|iwmQN82X$nr$SvKrQRBqs!kDO{txbkDo5^XH;$?STL~LX4>Lh6 zZ7uT&nLH`k9~p3US=Bh^b|d4>a3An2-Mi#l@mXkgMN>SlZgG!pvq4G48clN*HUJl< zsHs@hd6JBzJ z>ZmWDZB0S?@de*%t(C077Q(Jcu&)`n+!oE2VH*Ww=dwaMd>LTZ@(6;Y*M@w`mmwvu zLf57>c}B3P&Li8pE4-8z*NriOI(KSBcn_S8jUTjJT!@4Th98vmzj8>YLO*Q*pBH0n zWA;=Vbr4e7PDZbo>9^9`?8;|u_twKZhtWvaA9h@Zm4$i^>1EHU24Fd=F#zlOu1)1g zuwiONcO%b=-!%cHFq0Qzzv`}EQ1+z<%e?mQ$|OiH-nBMFw5goc>a|ofUs|By2T=kH z2I}k+tFLDUZL&g0PixL5GUJ}F&`U7EaYbmRt#k1@{Wa@aL*TjQpdvE>YwEQpw@tzj z`>zg>)T5y%rgAGztisj{MqNF_4Ye4DFBX+YB9J%C+<)F*uRqPddFspwq;`Sa-DsMa zRj@AvaT?dln z`99#mDv{OHbhw$Q0{6upgYf3uS;UY1vv7%|Th>MD^D%;~}%uT4qh(K1X5- zo*uN{B-{jIZ@=C0gycRBcUBMtU5G;`&1n`heGoA;Tj%lG48kgSXXIAoZbvlVbVaPY z=uVkuk6}vk##nflTWdogQg;y$$DkyQAWM~ICC=7b>GZeFW<_m@Oa=Z6vEL0gnL@yS zFNZ6@C459+8?`NsohsIULEpyymCeo8ro=bSCJ$O;T~ z+E?E}D!c{&@Y?vEYxpw<6;Xy#_1Ev(&q>gR-@XFuR8Qi5#eW|&bxf!=o^$L`6e#x1Hg9R~U=`sFtd_@$Z|7h+{y%SV{Kh zqik4Op`pJoF75HhK*)#O8VUQq8}E-2OuBAUlctRO?8b&*mjZ>}+s_Ve&qbbn z6ZxfhCYJ>k|6%KWkRb2!&2pRREtwW$38rgFh+vUsm%X!oe-7G#VC>-Z_g|EQa~V6m z)_g?EjYi&eWBl3H)Iqx<-M4^Oe`=u8G*D%DRnriP@U)zb7e2T%Lu;)QR1pAF{=IQ_mxi!OUgwHJ3tFchB?|P3)RoU*%%NA%ux^W-sx@i%;GM` z`-du5O8dDVl41^9E>|sHG9NP$vNcI?pJQL`6q^sU?UK>hibbSsJ&7xwd{MXjx$9W`ujNqku0wXMA^p69vD#@0RtUbtV4V7U{YW@A7r| zwh^AF)bX9qwr-+{`l19-&!J8qq-sAzZ;{?%xgc+yr2D+^h3nD| z>57tX4LgN-efgcJ!u}zY&WCYdm%htyMi;l8Wx_!%(2Uud+Vl4l3j6fVs&M-Cp7#5+ z_w82Dw_mEKtp`DAZ8vH>C2Ghf1Fl*cG_bvxT*D?n5iaA?4;odJ=%h8Brd{Op@|_F~ zfV_8F`-*u_^VPJ^u)(e$gl0q&Xps#3l0iIQz8JWNX3SV;SQ;Fp#9TbxzH+cLza!16 
z*X%qsC~E<+e0v+36Lw|zi=Moo95tTY2x7Pr<%-(LTG?3?VS&AT=_LL5@d#!*f^J@; z@G9#w)xx@mZNry?!tSEL|M5}7;hflIfpi(*`H^#XWPn?t9T)c(rY%FcA*M+vlxuze z{34|J2dUM#UV-|nyNtZaQfklKHpbtC>XeUo{?N$PeC@%$#aN>8@qUEPx>b7W_EC!Y zoh~SkWPqc-X`E$sK5YUiz0Y;Dqmju^R_H`UPkCf81hlJ<1sweB#48fYuhSGgTD+v> z3iQ30u-$MG9pNd>72SL8rdPE!wxJJ{MB%)M$8GlNmH5axl3fuAivhP@1s z=kYD^{u+E+K!$<@W~zUmhhSwHkMi4*uiI9LL0i@-F-p`?P0@5@ zFWTuxI;zkbeAG|`Vi0e~5x`)_>vOs&KTgz#AdUN<{%5$8uF(KBXfI8&bmHO&o`YknlF=5D1di3`r1FwTKifW?9H!rjn^&{ z%#f~_ib2?f&t+=2@fo0`nYOw=3CUWVZKS>ykv#ai!j^~9-RM=y<<90v6O_c|%bj++ z2M4`|jyy?G+nwR^LAeE^ z&%u?yBKjhAlXzI7@~y43N!wrrGU*Mqjkdy5kp#ChwFMU_`mt8GYd(p?Q{Xuatq7jA zArC(C8>bB1zv_8%ubE#j%Dj>x;^Gc9?1o>YF@eQ0l+(+;6IFLxNCscConwkGke1VC zNq8YrJ|An^H@>rN0mbLogzFpPEyY`h!&KyqnRU9IMXcPs1B9Aj=GAa$&QGW_8V41s zIM@RLqZ)K~%?Elc)0HwA6ssYD9ZS{1z0|4@h*-x$_~I^lvH;B_{oKB~fE`x*r;)Th zeQQ5iy=|JK%;=&>_;Uw}nolaznMx1OOa$$P1d}*~ zm+J<8#0t)cY~VR;=K`&-aEOMvW2;8yqcA` zxv^XM)x|U$x=+P0*Lun!cKi2+v?a@+Q7F|_Oapr9@0R!}X`2b0ywsQF++y8Y$F17r zy9c}=cp`QA>J#5G5V8qq_m-9n;IHR=w~~U-U5ila-X(D0Ah)JwU8Dn11*#ndJ@o2V zuG646(AKL-NqYHaxG|R7c5Ve#DQALSuZ`H~GSi|-?R&M$ z2s>b}Dn=mVUEGpcGvYCOIve}$EVTu@yLqe`4C+kCzH3zOALYgmIGpH^gpzJ&$cHM% zL1S0Bt(}AR2X4i+1TLrm4|bP-+vzEHl1yg79~XxGV|(vA!*2LYTCS0YO>cp1>VWtU zZlok`FvF0s(6)?uSOGNj%v1Y`tcCEWQBTP{uUX28h{;4>O2X{9@HBUxn-8zSM)I?W z!4a1|6)Wf;VdDC>Ecct5*M$mTTVJl~Z(qx^Nm^ddmK)KnZC{z&0qELWl{;pJ@(h~+ zo7{$8IJ-PZy8-uE$VJ;RS^x8+Mdi+C{#?AB`L}gHnoB{;SAOc~yGED~|1uTWyxZ%Z z#@J&=RSV@ap13l^&s&_LXHS*^=3g!vk{Dr9Qs`U+doZc->+B{9=;Zq4VqlC-+96FlD zHDdE54)U(unE7)$18lBKNUvso-6WPo9T~k#Zvza1jnlPs>0?=6d9`5yFd{}M>jLPEzl5yWie#m?f7|0{Zs;2@Olb^uKW>5TAU>Ef3$w1h-O)- zZ&LG@($wpQ55PyCU_S@dhHn3Gf$qP$s%#1e7!;!>k@}2%ugj#>SJnV|P$h)Mso1xR zK|48~`3`v^qrONoJ5K2$YjKpREXXO04pWHY!TIf^{)%)Jdud3GczTng8zj>CMtIn* z4xgr>4_o%yll5KR@kwtoocL227d?N`iBo54&d-<5I#X@D$*3cxWF=uDp-c9d|C*0b@9$-&dEw*!|PzX_Az8Glo0wy6(dSO zGNp|fS?)uP7$?>WW}2qAt)9*EJ z*ouFh%ZeZ4FLYe<0VeSZ9~R^89!7*9i7Ooh{}ht&m4%0NTKL$ z-EYHmGD6ADXL~-gdUz@EX+AmCM)M#{>h!gj0o?0MAN{zC?p~nuDPU}MIfgX#P^M6) zm+#gaAywLS?9SlC3NK)uPHgIdTC47FwjR?&T@0;dZ4a`RzFtXhUVyu++rT7^?>mXe z7oPdOH}v^mAlu`=Y_Y)%Wm-d6kw2fE#K~~_W@8RBk}Z^JpJAu{-_{jAXSgQVnSTQS z;_mxyIIrAOZ<~eQY_&bh#K<@=;C>c5vGO>z)}*Cyp3Cl#nDXeCqAaWrgo8SgH=Bad z1pC)0DqvVx)y=5KZJxCZGAfNDS)>ym;{Ui^RxXtYE*=(LIFZ z+F-Y2-LJ>{chT&#Ff{_8!HxM@^Z`K+R;qPzcdv@WEOo%UL?Y6{K2^azLK}zd*_X5#$s6lJJWMc|T!p$a3oo_gv~2bEQPNZ5VNfF*_@FUN5$UZB^mI zJ-1}vHjewYbDc4dj3*f6Tb&oY0uU|Nl0!3k4SK*AbVqR)g``&kH+Woh76r#x> zCgu{Q?o1{1uJY~XLiI~h*dQGU;qecD-8kPWN>Vih!i@28{ zMVP+}m2OgJ%#-@k#F37%x?NGF&apVrE01ESOO3mM@XU3_{mok2&Yt*1kQr3|jhjsX zxS=A&=kwjAzXwa+RZA~!i)`uu{|&T;_n;JKQm*@TRotKTN0iv^8Bb)SDTCed@=aOH zoi8K{5pM5kW+b8?9WITLaPkY(PO;+4m(70WBfX}t{TCO*Z8UM4+MfPqj)>&;_-Q+} zwn=+1IJl9aMv9||SH_zEyL#}?Yr8UcZ`@d1$9fgW&vo9Mjvc94{9WSLw68V!7j+ox zZlIJ!((0Wq_ZWrLe628{K(YF}IZ76g@nS$NBZ}6s62;8?Kgl@v^(4 z7j@@wrxPwMen|7iFfl(?t2`s=IBq4Oa()&7xKD_v%csf<)b75k*{pZ^xSlN@b~adm zYLv7%jQcxWAxbmP9b}IH(hYx}sDIW_q}%gW5ypRNp>-(BJ{hPcE0Ju?imD4|zfpbQ zBW)QG1!{IF^-sZh35{!{OwWA>tlm@msL=!WJ^liGk*20vc{_+O@{jGkjS}-nYD{ zm2#t1$YnA!V&A7T^v%s3@|lQF4+@#GT8Azp#DISv&=D^g*aDVRPfWewy&Jzz^8nzDXNZ?MJy6;#yItm*VkJ7r_l!?aw-E{#zs-oGat&ya1X++ zw}LXxHh8ctE`)%Eb*955lhkqh9mWW;xQU+^@{b|0rTu$%9-gqCoZE{q0AnknoLkko)y4ap1h;zxyLyj99J*r-=O=qAFB zSB6fo6gOl=E(JwI^n2p;-T{l!__l8J4Q+=ZUcg^We*s~!lVZC;`W}(=^)lqC-@n!= z>&Q$37;QrZuZoCXyjYw*7#Cu`TS*>-TQ!!}SgJdK@5T zPTxkrUXjG4GQFQek&iRhHo{jH#*Ayl;&dFRgD$C)A_qh+Rmk?`%L*4iL&H?WJ6_*V z5fH&fh0>p=9Z6BlX&30Q9hHRPFVnzg_^5LK8o&RrK1#ul5TvF7mR@smr!omIiXkYp zT!gMJ6)%|dx|+PFe2C(aZr3YW5WMZ=Ji+uQaU0RGwf$!OZcaQWN3!Krl<$M_Br7L= zdB1VTp#l~U_18)=);w&{!iORViPeqDzS2C(lfTW}IV%6y=g9fUBsUrs 
z{=PmzeJ!TMZNrv}brKkZ?hnyHH^qOGxj=K{i)9T>a<5u)SpROeFd5LF<*OD!iMbq_ z$RHm|2deJQGGl4OcxWtvn|CfBCkhi5?zYaVzw6&}aeCJ({rcUAgK%XQpt&lj0PH5c)z#^PQ3Y$%F_Kv2#u&qX zS)l#vHiF&SoK>$K263`zZ*#|@@@{$h-(70j$D%A8tXDE$TB}4kh!}K)H=ao(gY%;V zy&dZ~jBhegu|A2^6)<&{b7i!KHca-$dq%*6LAd=LWt#*#@+12gK-#Tt5K%}Yb{(e!^&J8w){VwM{zvBxKoXUuWW&}l6s#&B$p=k1@nRs{PF`g2y+80M%m@Q zEJhDs!)jl4_#L*VAGs5>Hr zW_jImO$%8ZeSVK{hS)CuKzPw#M*G)BYhSpKonl=*v>>R~s8EkF8s=A4%Q4#@)RCi{ zp4p%MqdQ9X&oVtzMyEUBLdIW$SEGS*=uTpUS24$U4W9_skim@l1R-sal8ll8OOKxR z;HQ{1Sjfmq)OxRJ+dh)&ilfWGB~6F>Mme-#Mk-fxCKIs<%gQCfQ3`6$BEaLP#HtEKK0{ZulKC!RQ zo&Cd&&96V|5|XQo@3Hi492y2_0)O5gY{z#(WwaJ#SXPLEgjZ7KSqzy27q8szhhNfR&u=%GAOH#r zm)J20fG>Grl(%!7?7#luP}fdjAft?_Y1&P7OezNZNi?de0*YW8Yd@46~LxF6Q37 z&2#xZx;yFH=8N#t6F;xKGbIZ#uGm;SlTXW#0q=Gd>tgBt5P4OcJb&_1j?aVB!X}1} zPjE43t|ZCF;N99Y{_a!ww%U&DRY*7V@mE%;wSguDj)`oIr$P)m8z;ua!oaGjp3bWG ze6*DBeTUnxm!9pwdp+^LmlN#S!wgyYNd?DjXCurwDX97qvJpC4-K}i4hc@GBX<&2V z^6GexLJ_!=fW?<^Ca*)FK*V(qe`a82ohNRs?Hn zSV_G7phk1xZHpcOwbiQ`r)Gr?1A*QanL4!Bij7jHA$m&d>hs^1g)DWB)9K68xt}OW zbGE&IE%Z{i!LQ<$or&Nv?HdcmVznS6Eu{gqi4-YK=@0Fyb06oH@-3`15^Sz(O@)v2 zpahRnnCP2;6*AA(#TZYSH4Ty-k>#3qWfPm&)o)oEC0f=y%ZuE(e7H|$U64(lM@dK~ z^G4Cd-xYFf0e-PV3y*Beeppmt7Q@k=GVy$PoLG{_HWLG5V;KBrD_@~K_oR&2-sHRc zXJuB7km1YI@&u9%q4CrlG$9l$DCU_kiwx+*&{mf8)^@pxb>VH=pUNmaOLP0(AXHi` z!lWjYmcHv<%!=6aUql#*B~4v{AN*aD;wID6b-FYB6mH{XMR{!U>`uC+?1+8VTm!pO zhD4OI9$oB^{qDsmQ5cVHl00P7UGwXdwx29Z&28?SWo}!Q$0J#bOM3E*TN$0M2;c){ zAi|QE?-jMxQ5F$(OS}z}=o5nh63-bdwadY`*c&_*72~vA+p^yggeMr@-0#p^#o$vz zxzPMn(H^I%E^j4&P-E}s=|y46qL;E&9zY8Sd* zCC=YVmnT>G1utkgh;i<@I{`iH94642v@q6`Nu5YM$>FkAJHOrfh9owwKE*yK5hwe7 z{l&`i966LeBc2x8h}-YosL&PnEOELBPo>JqZp-$2bloCF=5kF1Ovy+TxqVb0k{dJo zBvS4baxaFcXh}VfG5iqath7i&nEO)LEdqnTdtB}4wSUrE6jL0R}iA- zO!;ts(v+iv4t&||;7;;c*Gzoux?6`e)v>YpMFISIKV>$&Zmwdsi)fDXC%%);bKXA3 z7E`rq7ZWMK^;zCt_Dp$zoaGwRB0@*U7zE@#f=zQoVui2WkQ%8Hqa+;ByX&Y^DOle# z{t7KRqhutK6u-gdbWlGTA~Gp~_n7_)MDd1f z(Vf*Xm2YyTem4Bt_FjHOCPrq@DjK&($P|PsD9qit zi4f%cuyd2?=9~3%PkJe$1P!|}BUv!ULne(?dKgyllD=bqEk$44nVy@v1AIhCjr4PI zh<#vkanG1wchl+kZwws`FaDBzd=>KP^h;8&U)+ixryGVF+O`j*U`9TQTSDV2c?OeB zQ?<^;`?g=W)ZRa|jHSwn@3|>sH}sUr_h)KpCSj3{N$@RB4Y#-M5OTe){+{dW&r~@c zZHN6hw8`1NeQ6mZwu|ki*WI1B2CD7gxHa#&#UY$U(a~6Uw2k=E&yGtnvE9E0VCelr zjabSzbgGr!`%B#C77CUULCtI`z@VIW0m*O3BQ#=uJunMnw z>fHBid}1@e$};UAMRPU(+}P|{J%M8^H0<$LRV=fHh_4k*htzovTL0<=EsUoUidJ#c zUDq_^erxt&`gNssHbvh1OXs~=WU?aAA`l3jblTnf*6mBMg*JASaR$&1LalZrxKrzH zxQqK-WwLn|=HGZ?{9J~DD_WE163cl z`IM)ew9s)c`q2$Bo76AvzbOb|P&Mq{x{)GP%~^g`|MG}?3;MJr zroG?UMgG=yWaL`AJFKR*0Ois_IQyDs6I*5*)eL{5pN(OaE3JJO*yIrWq z9-O!Jqh#iFl!Qz9B5s-sOGiObbJ(R{~k$ltmcn zC?4>kiw~JbkU=N94_@%|{NmFx4Ci0CjQ9UD}RkVMXY)}+O)furo& zA$geQtW}PqwjCIQbKJJ;Zo^U5E11JGB`poIePBGVA*`;Sp|SsmOB!z3=<{(Hz9h9s zElpqq^Q3fxGE8NW&h}rSO}_Xs{92Ao^u|6)8fDC6*`{`xeJsiR$LA+?50tEFrpGZ> ze+z(yCnrhXUiS#F`QO=dbvC$ceMR@XkJ@!BCujE?K#z_U3F#1*UGR_Zoc}(&crhb= z6OZp%Mq5@gLrO8tX{$K}0ePXasP~>B`4{3e#j_u{W#Kip-4eIG@f5z63#FUmee$vA zAD55ScAn=xW|t1@#nhoug%%IjewLW}yJa?y@Gr$^dtC4pP_t0Rl(oOaKJ zUN?6otW>_@FnLhXBd}G*CNgd`xM1&C3Npm9{4gO_TnS7^rv1f@0k&uJPAqU`tx{UN z6{Hpk5))fRqlYHX)gGY5CSHV6e&d5z-P6j=8}0ikCm6$#QF!X1((@r51B# zE*iQ$5qkI=N1MEpp8E}L9sH{HrkrF<;cPrc$FW3b-Hg$fnMQt(U9~>3e7560Zh1}^%%6;zsD!bZQ-zC;7)s-AN@W4Mb(DS z4#9?gm#)qW{tV{Q{RVW-x{6X|p@uB?Sv0jPHrTPB&v^gdJLVMI_6!D_=W3$0&gCD! 
zgzFyn%R0eIOf?zM_y#MNU~z!Bv7x_NyIaS+w<3m{!b%jSm`OVVZ!a_msAY-t00(uz z8ZIrWhJKOsib(O`%@^y<_$^1M=l!t9nguG@EvU;AavsJTMKn3!b%daHO zEB&Jin{wr9wg}J?AJ%s_Dgi5Co()>_Z5Pn@B|ix@x3zQ-8N4qxl0%NE$REQVw&4Z` zrYrWRStlr7^8s4nROxyz{T_b~ z>%BJRn4K^E+Q;Hz1aN>lCXB_QTUg9pzYi5rhRBoZ>ykG>u~<&p(OhC>5AMETYP?Oh z>vT?Jb?EnPE*;XO%!c;O2;TTMsac`z-f4i?$0~O1Pu~NHWLBDaQ+S->bt=ASyBn`H(Mwb>{MAif>MMuTC8%E-Ktqd&;6uacLNN+n}|;+a#&OpKm+Yh z%5^fM2-~06-L=GBrCNTA>+?{)t&S_&?z5b=U*5-u$>k_No9lg#lvrHL=<=Yb2!HRB zatPG2PKCU*t=rn@b}{;DMl61jsOU4E^Y$o;#WU84tq1Ph+-zNyFfsZ5Si}e8*V2$o z#UCyA&0ezi5NU5)gx%zD4v2R|_1evUy~QTqtDIQ}tlU+0Q&d6#c_hnyZ=6$ceC@;V z%FJ{gnsYAhp$48)@yAVDSG-5e?nB3-<%}l+!tK>odTKQ%dtY=?%X1~Y2#mAA*HsPF z^abaVs{^`p^V2JL9)0qBy%kDAr%D#4bZxv>Cz0p4g@U3()hCrZ0P(Mce$UyHXS0p| zHmj+yE0?L^lsbIZxMWGwsdB~}bfV?0r6az}ie58d4SSxZdVjNR^u1zsyRF|5Uj%9i zNVn}wrFnWUH8?oGP&#{#*3ir zwaaS3kxXQ9@72$WvUQ)t87!Mny+nJde))S=+K3F*+zFfkQcZ9OI+rczKgs$kc;{PR z2)Q}kJRH=#mf{F(I^o@g&VjFKF8X1XE*`Q)=RxblpArd6PQ7au--|S-#3|Zi|BlFgLS*Z)=~;=V5sR{-+>d zh#YdsKc!>5uWrddC`%!J!xK^457c?LFwj7W%r?c!`=d>iE+wgOS#%A7`Q2N;8(6tNz*rUK}m@Q11FB=um!?H-fV{Dx6;!Axpjvg>#{?|5_vqCl3SKh8X}`jZS?f4?{03>mdN z6lzATeJU5nVjf&|+vi5nhj!Z|VF8{)VFCMb1pjB83vS9DeC^PV>GZv#4nyy75+(Ee z(O6l>k9m#jo~tBfa~c^R$lau@+0vd#8RG|l>E=fw1ABZ!`pw(84rn4zCGwxhKx;nk zYZ!HA&(jhylSO=)oZ)2T^C(2YC6WSq`c_oR^x|%DdP5v0^+=*bWp%N{&w|9Z)Ms7D zCG8-==y>$^7togM)I`VAJXu0#mCDjijzm;0IRu)<8&Gt<11!aCO5%1yv!-!J!%nDB zC<<{ne_wZo+tC|`L8QqIcesij&TwTYN5Qlyp8Kerk(tX2S@O4u;hi<&a81M`A|0sB zsTMfA1?RGY-%5yUxaQ*#f@oO6#yt#crap(6|_Ahdi(i;ZsE8wlYOr^s`B80(4s__%~^PBDKzmsh9j;HUo zXl+9lF6BM0 z&pb^b$X-^Z9x90`qu|HqtN3qzWW-v*dT( z16S^3`H|&TZXL=o0HCwi0zRn%B}RPP>kT@bUAg&kJypS?3g{WY!rF&@~b;N zKMK!L^7O)`-xX35_~^*T?lPD594Td}>Jv{;S3NzHPdZo)+j};IH8$)!gbbj7!vaIJ zfv-Bh)u!@#eRtBfb>JNkpf{ByA0z|0zap6>qrC2sy#Q)p;&7ZkU{0k_Zj>ZM_eiNg zxKZp_&!%OssE1_>l2C4MQ3lsDOGMe5-~S5c2R!o%);Fc+I-f$$*-j=AJoRlr&%umf zd#ER32)s5CFH?}?C!0;EgUR)}(h%v6faU2(h2T9u2b~z(5t;FJZmulq z-zi+G{uU&1&f%gXww3aS$~^XoVN90!2tdC8uSrl%I>uiBXv^s5Y~5O?o?j*BhcaWG zAEh_!vXmmxSCl^3s(s}=ZBbi_VdqjfM@F=Sdz($$gc5_D9E-USgZy$BnF?lLT)v9f zCY6CjqR@wRoK_r0x-lMIVA;lT_Vp8!v}pZ+=M>L>mv6mean-Ro9KY}Gjq-U29=d{O zAZ6omlGRx_g7K3%V33H6N3y!W_g84%nE`vQV&)b%Xz8d@=_^@<@bF6`KL*l=BM}^~ z`Sw)6F#ZBetU8({3(+SJc4?|&Je&}MPte$3x0 zExiCfICqk{pTV0g`1nzds2ma7#y8bawzw%651@E({-5H(mJzq(wWmM0@z9~X7~`{E zQ4=`>-#QRHU*85Ok_9AdX?(weNLi(zVIwu;KC*^qWBkIF5DV%mK;Wk&^qab0N3`*CMd6l$$ANjG2bG7!$>+F_YBf$& z%^FVt^oWkQs)7q(6_dn`5|`^*xa9;0O&AAu+ zJK9w_itRGxUduRzqu{VR9&~0FfL@EI7IP*YgLiPRNZt%l9v!E6Pf8{D!OQpYki(i> ztU})QFX5*Yd+j?=Q8JzR+POXQu{@=_Wz|>WRHv@pbs94mfxo*bLM~l z>s&zFwTVVzQWDCv4#*engUoInQeLa7~N}IpyHVjGx)Ip+v8M%lYCC(&3QABoCyM(tBd14K0e?-b) z03O*CYII}H-qoT?LuR)CNc(QL@TckM21mc&YILnO-pQ9&w)WzWB!}>#$lQ6A4$GEy zu5Hf3lG<-^mC=yMxtxl9uvL-Jf{co{?At*CU#Zc z9!^9TZ01#>h4yRC?IH$p>Xz4IS%=CBw?IF|c#A*L#)V@{DBl`}a zgK+vocckoe7;1vi|J@=S{tHVxaE6NA<>(xNfSz8CeKh~^5dNdJ#%J>%LMO`m0renY z%Iwh|1KdJo71LSc7WL`~lFr*X91R zbvs&A=o}ezGWTKbUYMO@3jE6Ih*z9moXff(33+hln=Mos*&2lVHaOCKA z{GO97)^J2?9Jx`xOa0HEF|Adls^1spmKw_+G4s_P%Oo9I9hi8tK}%a{+4jPB_+J6% zw14-gtur&KiTb`P2hv`+xa9KBmnYSj8qnC`BzStLn+fl<>4`$zVKAX1)t=)mde^AF zX$_};nK(f7eF^Qm{NXVzS8%`lYT^Zub7wX4at2>^ix{qf@Zdxh)4e-25x-E7Pgj0F8q>aYK#7i-_!gj2YV5Ay`-RBK|ZOoOM8eYWU7aBWlWvs(1wpI zIsqD7PS2~m&2UM;L-E7<--mh`u2?r*7w%TBJ(Y;XMe+2DeL_vGH{}!|d z#6rXtlA8_a_;darg7#$pC1?+PXne!WdY@gdCH5mxQdLrPDpvkKg7)OcPR28Bl}8^6 z+OuG{C;sUjp9tEUl5{|Dp#ner4#NS&gci@K`eXM=(zb$|@jngZ{}QwZ2K~zZQ#)pc ztx|PtBMcubgU});nIhDR2-_au;5nwPE9hHRL{V2;#v(qB5EZ&dHJ4OCXf*?33YTkb zz%!ZIyXpfS?Ir!AWkhXMNl^!_*+mpcB}OIHQbWJC?nE23RpgFot~b_t`!nxi-p7Ad 
zyV}{RL4u4{)F4KIe%UUeL~mS&*lMEOl@0UkvFkKviF&-#6LCxh!!SwZfdM8wH8Dz< zOa%i0uURp-nwxj<;zR@I|6I80`o?%=!0%VYNA+7Ty;M2Vk2T(M4}a3PbfA9<>s^`AP zT8g1lcm5;c?a3&dr${HDKlE@U2fP?(M2yHw#y@nCXh?_LFm%G1m?@~U!yEzQ3?2v? zLkj;TViYB!Z>V}Fhuz49CEIHZ3^{bs*nqEC)(62di<)78_bA`JMU86<2oj2%Iu=oC5&sTEc+0Sg( zSX!LEb&06S+gkGk_)3cjmYJOc+PY4R(wstSiz#IXWXI*nt1bV3>8)nw+=`xB77tFM z5PDKTyyF1{Qh2F&F^d}qbsPD-T_d76{oXfNc4v_ir~Drku;I){>JI;Z9D&U3%!xz?MdaY?2OkpEi9w4B z7ex^}vW2fy`Uk)d?-GCB;{qee!F~6=3RC%&JNNI`%L~gVM+al6!LUKKUj)(yYQ3s7QcGISOy&vF${wG~Tp!$kbad0B> zkqLzda11e12OGv}=c?W-l76 z@No1gnvGEr&BG2`N8}_OC>yEWVV_TuY**R?Y?b#@gZN(Tp8h5sqi!(1)h;rbv(1r;!K`ACKrSlzudRU&ZULL`0- zYw=~7o`|t;jxW`gd7DT4Z-ZzzbX^^T#{{Wgo(@0PdZibSbTn14nIm1COFkXD!!~mH z4$gQ(nn;&SJKecG+0`w3{sVU(6G2Gp`hIqd{PB;DsLpov&L*1D6Spu16X*3n5efoZ zZS-3(Vkm`RRUk+CoR5r_-j6`U@=%9qIC9Z(9D!cUejrw#$S`4y2gdzq$J8HloE_T4 z)Q?Udocf7#XiC(`%4Pi*mGwMBmk(*M_nkm@Qu|*_ZhMIOKmQ-pqeLWN0O~hlG_VQv zM0-8OviVvQ^(&W@>{TSXTMFUC(ktBVYKNQRjB>U9(3VAhCPqU6<~NURMw7}Yi)%y3 z9{nKxJa?PDFjn5hWL=G{%an$=kv1|$m{qykk=bJ3%KU+lVTdj^63zqd2}gx{#6tt~ ze?s-CjMiNlcOOZJ8wF-oE$;tMdje27Gd3yVf7_G)7nO6Mf%e4lzuFT~B9-@YU=#i? zDo-qgBK~zwy3Sjj`F@+Yqn24X*enLn6iN=5RCmUHOTrSY3$noD? z{y(@UBj#-GqJjb0U~2|#fP$r=5;48Qsd0^q0Pb5CADJmD4-4?S2nu#3qV4q3%m1q0KAx^|8;LmGuxmWow-g*2VO888- From d0794c94932d349d045e773c54c7b0d3d24eebfe Mon Sep 17 00:00:00 2001 From: Christopher Dahlin Date: Sun, 28 Mar 2021 14:52:04 +0200 Subject: [PATCH 0832/1270] Changed doc code to be consistent with the images. --- doc/source/user/absolute_beginners.rst | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index fda73c5fb5de..084bb6d223f8 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -871,10 +871,11 @@ Creating matrices You can pass Python lists of lists to create a 2-D array (or "matrix") to represent them in NumPy. :: - >>> data = np.array([[1, 2], [3, 4]]) + >>> data = np.array([[1, 2], [3, 4], [5, 6]]) >>> data array([[1, 2], - [3, 4]]) + [3, 4], + [5, 6]]) .. image:: images/np_create_matrix.png @@ -883,7 +884,8 @@ Indexing and slicing operations are useful when you're manipulating matrices:: >>> data[0, 1] 2 >>> data[1:3] - array([[3, 4]]) + array([[3, 4], + [5, 6]]) >>> data[0:2, 0] array([1, 3]) @@ -892,11 +894,11 @@ Indexing and slicing operations are useful when you're manipulating matrices:: You can aggregate matrices the same way you aggregated vectors:: >>> data.max() - 4 + 6 >>> data.min() 1 >>> data.sum() - 10 + 21 .. image:: images/np_matrix_aggregation.png @@ -904,9 +906,9 @@ You can aggregate all the values in a matrix and you can aggregate them across columns or rows using the ``axis`` parameter:: >>> data.max(axis=0) - array([3, 4]) + array([5, 6]) >>> data.max(axis=1) - array([2, 4]) + array([2, 4, 6]) .. 
image:: images/np_matrix_aggregation_row.png From 069827964d57a9f5f99566d74d739864f42cf530 Mon Sep 17 00:00:00 2001 From: Alexander Hunt Date: Sun, 28 Mar 2021 09:26:32 -0400 Subject: [PATCH 0833/1270] Add test case for np.datetime64(dt.date(2000, 5, 3)) --- numpy/typing/tests/data/pass/scalars.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index c3f4ddbccd3f..815566b6889f 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -84,6 +84,7 @@ def __float__(self) -> float: np.datetime64("2019", "D") np.datetime64(np.datetime64()) np.datetime64(dt.datetime(2000, 5, 3)) +np.datetime64(dt.date(2000, 5, 3)) np.datetime64(None) np.datetime64(None, "D") From 7a4098825a301be93f6e13e69cf12add9a940cd4 Mon Sep 17 00:00:00 2001 From: Anne Archibald Date: Sun, 28 Mar 2021 13:36:25 +0100 Subject: [PATCH 0834/1270] BUG: fix segfault in object/longdouble operations The operation None*np.longdouble(3) was causing infinite recursion as it searched for the appropriate conversion method. This resolves that, both for general operations and for remainders specifically (they fail in a subtly different way). Closes #18548 --- numpy/core/src/umath/scalarmath.c.src | 61 +++++++++++++++++++- numpy/core/tests/test_scalarmath.py | 81 +++++++++++++++++++++++++++ 2 files changed, 141 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index 86dade0f180c..32ddb58a2e59 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -737,6 +737,9 @@ _@name@_convert2_to_ctypes(PyObject *a, @type@ *arg1, { int ret; ret = _@name@_convert_to_ctype(a, arg1); + if (ret == -2) { + ret = -3; + } if (ret < 0) { return ret; } @@ -1029,7 +1032,7 @@ static PyObject * /**begin repeat * - * #name = cfloat, cdouble, clongdouble# + * #name = cfloat, cdouble# * */ @@ -1045,6 +1048,62 @@ static PyObject * /**end repeat**/ +/**begin repeat + * + * #oper = divmod, remainder# + * + */ + +/* +Complex numbers do not support remainder operations. Unfortunately, +the type inference for long doubles is complicated, and if a remainder +operation is not defined - if the relevant field is left NULL - then +operations between long doubles and objects lead to an infinite recursion +instead of a TypeError. This should ensure that once everything gets +converted to complex long doubles you correctly get a reasonably +informative TypeError. This fixes the last part of bug gh-18548. +*/ + +static PyObject * +clongdouble_@oper@(PyObject *a, PyObject *b) +{ + PyObject *ret; + npy_clongdouble arg1, arg2; + npy_clongdouble out; + + BINOP_GIVE_UP_IF_NEEDED(a, b, nb_@oper@, clongdouble_@oper@); + + switch(_clongdouble_convert2_to_ctypes(a, &arg1, b, &arg2)) { + case 0: + break; + case -1: + /* one of them can't be cast safely must be mixed-types*/ + return PyArray_Type.tp_as_number->nb_@oper@(a,b); + case -2: + /* use default handling */ + if (PyErr_Occurred()) { + return NULL; + } + return PyGenericArrType_Type.tp_as_number->nb_@oper@(a,b); + case -3: + /* + * special case for longdouble and clongdouble + * because they have a recursive getitem in their dtype + */ + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + + /* + * here we do the actual calculation with arg1 and arg2 + * as a function call. 
+ */ + PyErr_SetString(PyExc_TypeError, "complex long doubles do not support remainder"); + return NULL; +} + +/**end repeat**/ + /**begin repeat * * #name = half, float, double, longdouble, cfloat, cdouble, clongdouble# diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 0b615edfadcd..c27a732a798d 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -1,9 +1,12 @@ +import contextlib import sys import warnings import itertools import operator import platform import pytest +from hypothesis import given, settings, Verbosity, assume +from hypothesis.strategies import sampled_from import numpy as np from numpy.testing import ( @@ -707,3 +710,81 @@ def test_shift_all_bits(self, type_code, op): shift_arr = np.array([shift]*32, dtype=dt) res_arr = op(val_arr, shift_arr) assert_equal(res_arr, res_scl) + + +@contextlib.contextmanager +def recursionlimit(n): + o = sys.getrecursionlimit() + try: + sys.setrecursionlimit(n) + yield + finally: + sys.setrecursionlimit(o) + + +objecty_things = [object(), None] +reasonable_operators_for_scalars = [ + operator.lt, operator.le, operator.eq, operator.ne, operator.ge, + operator.gt, operator.add, operator.floordiv, operator.mod, + operator.mul, operator.matmul, operator.pow, operator.sub, + operator.truediv, +] + + +@given(sampled_from(objecty_things), + sampled_from(reasonable_operators_for_scalars), + sampled_from(types)) +@settings(verbosity=Verbosity.verbose) +def test_operator_object_left(o, op, type_): + try: + with recursionlimit(100): + op(o, type_(1)) + except TypeError: + pass + + +@given(sampled_from(objecty_things), + sampled_from(reasonable_operators_for_scalars), + sampled_from(types)) +def test_operator_object_right(o, op, type_): + try: + with recursionlimit(100): + op(type_(1), o) + except TypeError: + pass + + +@given(sampled_from(reasonable_operators_for_scalars), + sampled_from(types), + sampled_from(types)) +def test_operator_scalars(op, type1, type2): + try: + op(type1(1), type2(1)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +def test_longdouble_inf_loop(op): + try: + op(np.longdouble(3), None) + except TypeError: + pass + try: + op(None, np.longdouble(3)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +def test_clongdouble_inf_loop(op): + if op in {operator.mod} and False: + pytest.xfail("The modulo operator is known to be broken") + try: + op(np.clongdouble(3), None) + except TypeError: + pass + try: + op(None, np.longdouble(3)) + except TypeError: + pass From 9a81d19dfe91833ebfce9e19f17a3e670ef7039e Mon Sep 17 00:00:00 2001 From: Anne Archibald Date: Sun, 28 Mar 2021 16:22:17 +0100 Subject: [PATCH 0835/1270] Remove unused variables --- numpy/core/src/umath/scalarmath.c.src | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index 32ddb58a2e59..66f97a831431 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -1067,9 +1067,7 @@ informative TypeError. This fixes the last part of bug gh-18548. 
static PyObject * clongdouble_@oper@(PyObject *a, PyObject *b) { - PyObject *ret; npy_clongdouble arg1, arg2; - npy_clongdouble out; BINOP_GIVE_UP_IF_NEEDED(a, b, nb_@oper@, clongdouble_@oper@); From eb87d72ec797072747f2782540782ab91897b8c6 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 29 Mar 2021 08:03:34 +0000 Subject: [PATCH 0836/1270] MAINT: Bump pydata-sphinx-theme from 0.5.0 to 0.5.2 Bumps [pydata-sphinx-theme](https://github.com/pydata/pydata-sphinx-theme) from 0.5.0 to 0.5.2. - [Release notes](https://github.com/pydata/pydata-sphinx-theme/releases) - [Changelog](https://github.com/pydata/pydata-sphinx-theme/blob/master/docs/changelog.rst) - [Commits](https://github.com/pydata/pydata-sphinx-theme/compare/v0.5.0...v0.5.2) Signed-off-by: dependabot-preview[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index ba9d3028e78d..526d5cfbaf87 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -4,4 +4,4 @@ ipython scipy matplotlib pandas -pydata-sphinx-theme==0.5.0 +pydata-sphinx-theme==0.5.2 From 0265b3c0579fdbf5661a3ab7324d8375e424d1d2 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 29 Mar 2021 08:04:13 +0000 Subject: [PATCH 0837/1270] MAINT: Bump hypothesis from 6.8.1 to 6.8.3 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.8.1 to 6.8.3. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.8.1...hypothesis-python-6.8.3) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 0762d5acf586..ab73b2d359aa 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.22 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.8.1 +hypothesis==6.8.3 pytest==6.2.2 pytz==2021.1 pytest-cov==2.11.1 From d3b48edb84d40c1269b5cb37f0175270f66bb13a Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 29 Mar 2021 14:51:56 +0300 Subject: [PATCH 0838/1270] TST: pin pypy version to 7.3.4rc1 --- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 540baa25d6f0..7a6522f6a386 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -212,7 +212,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v2 with: - python-version: pypy-3.7-nightly + python-version: pypy-3.7-v7.3.4rc1 - uses: ./.github/actions sdist: From 84952e26303598c44988328a8c6b327662bdacc2 Mon Sep 17 00:00:00 2001 From: Alex Date: Mon, 29 Mar 2021 09:27:08 -0400 Subject: [PATCH 0839/1270] Clarify comments on Protocol Co-authored-by: Bas van Beek <43369155+BvB93@users.noreply.github.com> --- numpy/__init__.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e2a99bdbeb33..535fb086b626 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2537,8 +2537,8 @@ class object_(generic): object0 = object_ -#Added Protocol to typecheck for day, month, year property to be used in -#datetime64 constructor per Issue #18640 and BvB93s recommendation +# The `datetime64` constructors requires an object with the 
three attributes below, +# and thus supports datetime duck typing class _DatetimeScalar(Protocol): @property def day(self) -> int: ... From 3eabb1e37b8e88e5ea12492dadd226d8f3984266 Mon Sep 17 00:00:00 2001 From: Alex Date: Mon, 29 Mar 2021 09:27:44 -0400 Subject: [PATCH 0840/1270] Remove unnecessary comments Co-authored-by: Bas van Beek <43369155+BvB93@users.noreply.github.com> --- numpy/__init__.pyi | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 535fb086b626..0009d5fd9a17 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2549,8 +2549,6 @@ class _DatetimeScalar(Protocol): class datetime64(generic): - #Replaced dt.datetime with _DatetimeScalar per issue #18640 and - #BvB93s recommendation @overload def __init__( self, From 3d4783c4beac8d91fc103542dd186581f83440bc Mon Sep 17 00:00:00 2001 From: Alexander Hunt Date: Mon, 29 Mar 2021 10:43:03 -0400 Subject: [PATCH 0841/1270] Remove trailing whitespace --- numpy/__init__.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0009d5fd9a17..70f1138d39ca 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2537,7 +2537,7 @@ class object_(generic): object0 = object_ -# The `datetime64` constructors requires an object with the three attributes below, +# The `datetime64` constructors requires an object with the three attributes below, # and thus supports datetime duck typing class _DatetimeScalar(Protocol): @property @@ -2549,7 +2549,7 @@ class _DatetimeScalar(Protocol): class datetime64(generic): - @overload + @overload def __init__( self, __value: Union[None, datetime64, _CharLike_co, _DatetimeScalar] = ..., From a115369c630fd373acbb80464a0122722a521185 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 29 Mar 2021 13:46:39 -0500 Subject: [PATCH 0842/1270] DEP: Disable PyUFunc_GenericFunction and PyUFunc_SetUsesArraysAsData Both functions have been deprecated in 1.19.x without complaints (so far). The first function is largely identical to a Python ufunc call. The second function had served internal NumPy purposes until NumPy 1.6 (probably), and since then had a comment that it should probably be removed (it was not documented). --- doc/source/reference/c-api/ufunc.rst | 14 --- numpy/__init__.cython-30.pxd | 2 - numpy/__init__.pxd | 2 - numpy/core/src/umath/ufunc_object.c | 180 ++++++--------------------- 4 files changed, 38 insertions(+), 160 deletions(-) diff --git a/doc/source/reference/c-api/ufunc.rst b/doc/source/reference/c-api/ufunc.rst index 9eb70c3fbe34..95dc47839e4b 100644 --- a/doc/source/reference/c-api/ufunc.rst +++ b/doc/source/reference/c-api/ufunc.rst @@ -283,20 +283,6 @@ Functions signature is an array of data-type numbers indicating the inputs followed by the outputs assumed by the 1-d loop. -.. c:function:: int PyUFunc_GenericFunction( \ - PyUFuncObject* self, PyObject* args, PyObject* kwds, PyArrayObject** mps) - - .. deprecated:: NumPy 1.19 - - Unless NumPy is made aware of an issue with this, this function - is scheduled for rapid removal without replacement. - - Instead of this function ``PyObject_Call(ufunc, args, kwds)`` should be - used. The above function differs from this because it ignores support - for non-array, or array subclasses as inputs. - To ensure identical behaviour, it may be necessary to convert all inputs - using ``PyArray_FromAny(obj, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL)``. - .. 
c:function:: int PyUFunc_checkfperr(int errmask, PyObject* errobj) A simple interface to the IEEE error-flag checking support. The diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index a2c451bc1761..42a46d0b832b 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -910,8 +910,6 @@ cdef extern from "numpy/ufuncobject.h": void **, char *, int, int, int, int, char *, char *, int) int PyUFunc_RegisterLoopForType(ufunc, int, PyUFuncGenericFunction, int *, void *) - int PyUFunc_GenericFunction \ - (ufunc, PyObject *, PyObject *, PyArrayObject **) void PyUFunc_f_f_As_d_d \ (char **, npy_intp *, npy_intp *, void *) void PyUFunc_d_d \ diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index fd704b7e3db8..97f3da2e5673 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -868,8 +868,6 @@ cdef extern from "numpy/ufuncobject.h": void **, char *, int, int, int, int, char *, char *, int) int PyUFunc_RegisterLoopForType(ufunc, int, PyUFuncGenericFunction, int *, void *) - int PyUFunc_GenericFunction \ - (ufunc, PyObject *, PyObject *, PyArrayObject **) void PyUFunc_f_f_As_d_d \ (char **, npy_intp *, npy_intp *, void *) void PyUFunc_d_d \ diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 6510709db91c..128706277913 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -96,16 +96,9 @@ _get_wrap_prepare_args(ufunc_full_args full_args) { /* ---------------------------------------------------------------- */ -static PyObject * -ufunc_generic_call_with_operands( - PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, - PyArrayObject **operands_in); - static PyObject * prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc); -static int -_does_loop_use_arrays(void *data); /*UFUNC_API*/ NPY_NO_EXPORT int @@ -1340,10 +1333,6 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc, &innerloop, &innerloopdata, &needs_api) < 0) { return -1; } - /* If the loop wants the arrays, provide them. */ - if (_does_loop_use_arrays(innerloopdata)) { - innerloopdata = (void*)op; - } /* First check for the trivial cases that don't need an iterator */ if (trivial_loop_ok) { @@ -2443,11 +2432,6 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op, _find_array_prepare(full_args, arr_prep, nout); } - /* If the loop wants the arrays, provide them */ - if (_does_loop_use_arrays(innerloopdata)) { - innerloopdata = (void*)op; - } - /* * Set up the iterator per-op flags. For generalized ufuncs, we * can't do buffering, so must COPY or UPDATEIFCOPY. @@ -2842,44 +2826,19 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op, } -/*UFUNC_API - * This generic function is called with the ufunc object, the arguments to it, - * and an array of (pointers to) PyArrayObjects which are NULL. - * - * 'op' is an array of at least NPY_MAXARGS PyArrayObject *. - */ +/*UFUNC_API*/ NPY_NO_EXPORT int -PyUFunc_GenericFunction(PyUFuncObject *ufunc, - PyObject *args, PyObject *kwds, PyArrayObject **op) +PyUFunc_GenericFunction(PyUFuncObject *NPY_UNUSED(ufunc), + PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds), + PyArrayObject **NPY_UNUSED(op)) { - /* NumPy 1.19, 2020-01-24 */ - if (DEPRECATE( - "PyUFunc_GenericFunction() C-API function is deprecated " - "and expected to be removed rapidly. If you are using it (i.e. see " - "this warning/error), please notify the NumPy developers. 
" - "As of now it is expected that any use case is served better by " - "the direct use of `PyObject_Call(ufunc, args, kwargs)`. " - "PyUFunc_GenericFunction function has slightly different " - "untested behaviour.") < 0) { - return -1; - } - if (ufunc == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return -1; - } - if (op == NULL) { - PyErr_SetString(PyExc_ValueError, - "PyUFunc_GenericFunction() op must not be NULL."); - return -1; - } - - PyObject *res = ufunc_generic_call_with_operands(ufunc, args, kwds, op); - if (res == NULL) { - return -1; - } - assert(res == Py_None); - Py_DECREF(res); - return 0; + /* NumPy 1.21, 2020-03-29 */ + PyErr_SetString(PyExc_RuntimeError, + "The `PyUFunc_GenericFunction()` C-API function has been disabled. " + "Please use `PyObject_Call(ufunc, args, kwargs)`, which has " + "identical behaviour but allows subclass and `__array_ufunc__` " + "override handling and only returns the normal ufunc result."); + return -1; } @@ -4509,16 +4468,14 @@ _convert_typetup(PyObject *dtype_obj, PyObject **out_typetup) * arguments and is called directly from `ufunc_generic_vectorcall` when * Python has `tp_vectorcall` (Python 3.8+). * If `tp_vectorcall` is not available, the dictionary `kwargs` are unpacked in - * `ufunc_generic_call`/`ufunc_generic_call_with_operands` with fairly little - * overhead. + * `ufunc_generic_call` with fairly little overhead. */ static PyObject * ufunc_generic_fastcall(PyUFuncObject *ufunc, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, - npy_bool outer, PyArrayObject **operands_in) + npy_bool outer) { - PyArrayObject *operands_buffer[NPY_MAXARGS] = {NULL}; - PyArrayObject **operands; + PyArrayObject *operands[NPY_MAXARGS] = {NULL}; PyObject *retobj[NPY_MAXARGS]; PyObject *wraparr[NPY_MAXARGS]; PyObject *override = NULL; @@ -4528,18 +4485,6 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, int errval; int nin = ufunc->nin, nout = ufunc->nout, nop = ufunc->nargs; - /* - * `PyUfunc_GenericFunction` uses `ufunc_generic_call_with_operands` - * which passes in the operands explicitly. `PyUfunc_GenericFunction` - * is deprecated and this can be simplified when the deprecation is over. - */ - if (operands_in != NULL) { - operands = operands_in; - } - else { - operands = operands_buffer; - } - /* * Note that the input (and possibly output) arguments are passed in as * positional arguments. We extract these first and check for `out` @@ -4683,20 +4628,17 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, method = "outer"; } /* We now have all the information required to check for Overrides */ - if (operands_in == NULL) { - /* Deprecated PyUfunc_GenericFunction path does not use overrides */ - errval = PyUFunc_CheckOverride(ufunc, method, - full_args.in, full_args.out, - args, len_args, kwnames, &override); - if (errval) { - goto fail; - } - else if (override) { - Py_XDECREF(typetup); - Py_DECREF(full_args.in); - Py_XDECREF(full_args.out); - return override; - } + errval = PyUFunc_CheckOverride(ufunc, method, + full_args.in, full_args.out, + args, len_args, kwnames, &override); + if (errval) { + goto fail; + } + else if (override) { + Py_XDECREF(typetup); + Py_DECREF(full_args.in); + Py_XDECREF(full_args.out); + return override; } if (outer) { @@ -4743,11 +4685,6 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, goto fail; } - if (operands_in != NULL) { - /* Deprecated PyUfunc_GenericFunction path does not wrap. 
*/ - Py_RETURN_NONE; - } - /* Free the input references */ for (int i = 0; i < ufunc->nin; i++) { Py_XSETREF(operands[i], NULL); @@ -4825,15 +4762,10 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* * TODO: The implementation below can be replaced with PyVectorcall_Call * when available (should be Python 3.8+). - * TODO: After `PyUFunc_GenericFunction` is disabled `operands_in` becomes - * unnecessary and this function can be merged with `ufunc_generic_call`. - * The `operands_in` handling can also be removed entirely from - * `ufunc_generic_fastcall`. */ static PyObject * -ufunc_generic_call_with_operands( - PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, - PyArrayObject **operands_in) +ufunc_generic_call( + PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) { Py_ssize_t len_args = PyTuple_GET_SIZE(args); /* @@ -4843,8 +4775,7 @@ ufunc_generic_call_with_operands( */ if (kwds == NULL) { return ufunc_generic_fastcall(ufunc, - PySequence_Fast_ITEMS(args), len_args, NULL, NPY_FALSE, - operands_in); + PySequence_Fast_ITEMS(args), len_args, NULL, NPY_FALSE); } PyObject *new_args[NPY_MAXARGS]; @@ -4880,19 +4811,12 @@ ufunc_generic_call_with_operands( } PyObject *res = ufunc_generic_fastcall(ufunc, - new_args, len_args, kwnames, NPY_FALSE, operands_in); + new_args, len_args, kwnames, NPY_FALSE); Py_DECREF(kwnames); return res; } -static PyObject * -ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) -{ - return ufunc_generic_call_with_operands(ufunc, args, kwds, NULL); -} - - #if PY_VERSION_HEX >= 0x03080000 /* * Implement vectorcallfunc which should be defined with Python 3.8+. @@ -4909,7 +4833,7 @@ ufunc_generic_vectorcall(PyObject *ufunc, * args[-1] may be (temporarily) used. So normalize it here. */ return ufunc_generic_fastcall((PyUFuncObject *)ufunc, - args, PyVectorcall_NARGS(len_args), kwnames, NPY_FALSE, NULL); + args, PyVectorcall_NARGS(len_args), kwnames, NPY_FALSE); } #endif /* PY_VERSION_HEX >= 0x03080000 */ @@ -5125,44 +5049,16 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi return (PyObject *)ufunc; } -/* Specify that the loop specified by the given index should use the array of - * input and arrays as the data pointer to the loop. - */ + /*UFUNC_API*/ NPY_NO_EXPORT int -PyUFunc_SetUsesArraysAsData(void **data, size_t i) +PyUFunc_SetUsesArraysAsData(void **NPY_UNUSED(data), size_t NPY_UNUSED(i)) { - /* NumPy 1.19, 2020-01-24 */ - if (DEPRECATE( - "PyUFunc_SetUsesArraysAsData() C-API function is deprecated " - "and expected to be removed rapidly. If you are using it (i.e. see " - "this warning/error), please notify the NumPy developers. " - "It is currently assumed that this function is simply unused and " - "its removal will facilitate the implementation of better " - "approaches.") < 0) { - return -1; - } - data[i] = (void*)PyUFunc_SetUsesArraysAsData; - return 0; -} - -/* - * Return 1 if the given data pointer for the loop specifies that it needs the - * arrays as the data pointer. - * - * NOTE: This is easier to specify with the type_resolver - * in the ufunc object. - * - * TODO: Remove this, since this is already basically broken - * with the addition of the masked inner loops and - * not worth fixing since the new loop selection functions - * have access to the full dtypes and can dynamically allocate - * arbitrary auxiliary data. 
- */ -static int -_does_loop_use_arrays(void *data) -{ - return (data == PyUFunc_SetUsesArraysAsData); + /* NumPy 1.21, 201-03-29 */ + PyErr_SetString(PyExc_RuntimeError, + "PyUFunc_SetUsesArraysAsData() C-API function has been " + "disabled. It was initially deprecated in NumPy 1.19."); + return -1; } @@ -5553,7 +5449,7 @@ ufunc_outer(PyUFuncObject *ufunc, return NULL; } - return ufunc_generic_fastcall(ufunc, args, len_args, kwnames, NPY_TRUE, NULL); + return ufunc_generic_fastcall(ufunc, args, len_args, kwnames, NPY_TRUE); } From a6250679e794c8383cd09b6090a1aecbbac091c9 Mon Sep 17 00:00:00 2001 From: isabela-pf Date: Mon, 29 Mar 2021 13:22:45 -0700 Subject: [PATCH 0843/1270] Set logo colors explicitly to RGB --- branding/logo/logomark/numpylogoicon.svg | 2 +- branding/logo/primary/numpylogo.svg | 2 +- branding/logo/secondary/numpylogo2.svg | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/branding/logo/logomark/numpylogoicon.svg b/branding/logo/logomark/numpylogoicon.svg index 4fef2a9c8617..50810223b355 100644 --- a/branding/logo/logomark/numpylogoicon.svg +++ b/branding/logo/logomark/numpylogoicon.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/branding/logo/primary/numpylogo.svg b/branding/logo/primary/numpylogo.svg index e5791dc3e666..63d61c50f6a0 100644 --- a/branding/logo/primary/numpylogo.svg +++ b/branding/logo/primary/numpylogo.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/branding/logo/secondary/numpylogo2.svg b/branding/logo/secondary/numpylogo2.svg index 76b076beb74d..20385487c94e 100644 --- a/branding/logo/secondary/numpylogo2.svg +++ b/branding/logo/secondary/numpylogo2.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file From 2cf38000274a94c1b118f1a38e2a6e2c7401eb16 Mon Sep 17 00:00:00 2001 From: Kamil Choudhury Date: Tue, 30 Mar 2021 17:22:42 +0000 Subject: [PATCH 0844/1270] Strip extra newline when dumping gfortran version on MacOS --- numpy/distutils/fcompiler/gnu.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index 68d1501eee6a..eac4cbb4779b 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -32,7 +32,8 @@ def gnu_version_match(self, version_string): """Handle the different versions of GNU fortran compilers""" # Strip warning(s) that may be emitted by gfortran while version_string.startswith('gfortran: warning'): - version_string = version_string[version_string.find('\n') + 1:] + version_string =\ + version_string[version_string.find('\n') + 1:].strip() # Gfortran versions from after 2010 will output a simple string # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older From 9ba289b8e72324a2a361fd0889bb947545b6c7dd Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 30 Mar 2021 14:14:50 -0500 Subject: [PATCH 0845/1270] DOC: Add release note for Umath C-API deprecations [skip ci] --- doc/release/upcoming_changes/18697.expired.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/18697.expired.rst diff --git a/doc/release/upcoming_changes/18697.expired.rst b/doc/release/upcoming_changes/18697.expired.rst new file mode 100644 index 000000000000..5a45ce216924 --- /dev/null +++ b/doc/release/upcoming_changes/18697.expired.rst @@ -0,0 +1,5 @@ +* The function ``PyUFunc_GenericFunction`` has been disabled. + It was deprecated in NumPy 1.19. 
Users should call the ufunc + directly using the Python API. +* The function ``PyUFunc_SetUsesArraysAsData`` has been disabled. + It was deprecated in NumPy 1.19. From ff3cec0788fb0635b7d39b7f19949cd17c2dc584 Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Mon, 29 Mar 2021 17:58:41 +0300 Subject: [PATCH 0846/1270] ENH: Support parsing Fortran abstract interface blocks. --- numpy/f2py/crackfortran.py | 17 ++++++++++------- numpy/f2py/f90mod_rules.py | 2 +- numpy/f2py/rules.py | 2 +- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 660cdd20653f..eb61aa2f20fd 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -43,7 +43,7 @@ 'implicit','externals','interfaced','common','sortvars', 'commonvars','note']} B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | - 'program' | 'block data' | 'type' | 'pythonmodule' + 'program' | 'block data' | 'type' | 'pythonmodule' | 'abstract interface' B['body'] --- list containing `subblocks' with the same structure as `blocks' B['parent_block'] --- dictionary of a parent block: C['body'][]['parent_block'] is C @@ -138,6 +138,7 @@ The above may be solved by creating appropriate preprocessor program, for example. """ +import io import sys import string import fileinput @@ -567,7 +568,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): beginpattern77 = re.compile( beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' groupbegins90 = groupbegins77 + \ - r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()' + r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|type(?!\s*\()' beginpattern90 = re.compile( beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|' @@ -941,15 +942,17 @@ def analyzeline(m, case, line): block = block.lower() if re.match(r'block\s*data', block, re.I): block = 'block data' - if re.match(r'python\s*module', block, re.I): + elif re.match(r'python\s*module', block, re.I): block = 'python module' + elif re.match(r'abstract\s*interface', block, re.I): + block = 'abstract interface' name, args, result, bind = _resolvenameargspattern(m.group('after')) if name is None: if block == 'block data': name = '_BLOCK_DATA_' else: name = '' - if block not in ['interface', 'block data']: + if block not in ['interface', 'block data', 'abstract interface']: outmess('analyzeline: No name/args pattern found for line.\n') previous_context = (block, name, groupcounter) @@ -983,7 +986,7 @@ def analyzeline(m, case, line): if f77modulename and neededmodule == -1 and groupcounter <= 1: neededmodule = groupcounter + 2 needmodule = 1 - if block != 'interface': + if block not in ['interface', 'abstract interface']: needinterface = 1 # Create new block(s) groupcounter = groupcounter + 1 @@ -1023,7 +1026,7 @@ def analyzeline(m, case, line): groupname[groupcounter] = block groupcache[groupcounter]['block'] = block if not name: - name = 'unknown_' + block + name = 'unknown_' + block.replace(' ', '_') groupcache[groupcounter]['prefix'] = m.group('before') groupcache[groupcounter]['name'] = rmbadname1(name) groupcache[groupcounter]['result'] = result @@ -2088,7 +2091,7 @@ def analyzebody(block, args, tab=''): else: as_ = args b = postcrack(b, as_, tab=tab + '\t') - if b['block'] == 'interface' and not b['body']: + if b['block'] in ['interface', 'abstract interface'] and not b['body']: if 'f2pyenhancements' not in b: 
continue if b['block'].replace(' ', '') == 'pythonmodule': diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index 122fa89394b7..e359731ac1b5 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -192,7 +192,7 @@ def iadd(line, s=ihooks): if hasbody(m): for b in m['body']: if not isroutine(b): - print('Skipping', b['block'], b['name']) + outmess(f"f90mod_rules.buildhooks: skipping {b['block']} {b['name']}\n") continue modobjs.append('%s()' % (b['name'])) b['modulename'] = m['name'] diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index b9cbc5487278..63e47baa20ed 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1163,7 +1163,7 @@ def buildmodule(m, um): for n in m['interfaced']: nb = None for bi in m['body']: - if not bi['block'] == 'interface': + if bi['block'] not in ['interface', 'abstract interface']: errmess('buildmodule: Expected interface block. Skipping.\n') continue for b in bi['body']: From 8336e76a8655494ad9ac641651fb411d34fa844b Mon Sep 17 00:00:00 2001 From: Pearu Peterson Date: Mon, 29 Mar 2021 17:59:53 +0300 Subject: [PATCH 0847/1270] TST: Tests for parsing Fortran abstract interface and a working example. --- numpy/f2py/crackfortran.py | 6 +- numpy/f2py/f90mod_rules.py | 3 +- numpy/f2py/tests/test_abstract_interface.py | 66 +++++++++++++++++++++ 3 files changed, 72 insertions(+), 3 deletions(-) create mode 100644 numpy/f2py/tests/test_abstract_interface.py diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index eb61aa2f20fd..6453bbecb159 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -43,7 +43,8 @@ 'implicit','externals','interfaced','common','sortvars', 'commonvars','note']} B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | - 'program' | 'block data' | 'type' | 'pythonmodule' | 'abstract interface' + 'program' | 'block data' | 'type' | 'pythonmodule' | + 'abstract interface' B['body'] --- list containing `subblocks' with the same structure as `blocks' B['parent_block'] --- dictionary of a parent block: C['body'][]['parent_block'] is C @@ -568,7 +569,8 @@ def readfortrancode(ffile, dowithline=show, istop=1): beginpattern77 = re.compile( beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' groupbegins90 = groupbegins77 + \ - r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|type(?!\s*\()' + r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|' + \ + r'type(?!\s*\()' beginpattern90 = re.compile( beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|' diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index e359731ac1b5..3e1c9674f8e2 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -192,7 +192,8 @@ def iadd(line, s=ihooks): if hasbody(m): for b in m['body']: if not isroutine(b): - outmess(f"f90mod_rules.buildhooks: skipping {b['block']} {b['name']}\n") + outmess("f90mod_rules.buildhooks:" + f" skipping {b['block']} {b['name']}\n") continue modobjs.append('%s()' % (b['name'])) b['modulename'] = m['name'] diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py new file mode 100644 index 000000000000..d6c6da743949 --- /dev/null +++ b/numpy/f2py/tests/test_abstract_interface.py @@ -0,0 +1,66 @@ +import textwrap +from . 
import util +from numpy.f2py import crackfortran + + +class TestAbstractInterface(util.F2PyTest): + suffix = '.f90' + + skip = ['add1', 'add2'] + + code = textwrap.dedent(""" + module ops_module + + abstract interface + subroutine op(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + end subroutine + end interface + + contains + + subroutine foo(x, y, r1, r2) + integer, intent(in) :: x, y + integer, intent(out) :: r1, r2 + procedure (op) add1, add2 + procedure (op), pointer::p + p=>add1 + call p(x, y, r1) + p=>add2 + call p(x, y, r2) + end subroutine + end module + + subroutine add1(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + y + end subroutine + + subroutine add2(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + 2 * y + end subroutine + """) + + def test_abstract_interface(self): + assert self.module.ops_module.foo(3, 5) == (8, 13) + + def test_parse_abstract_interface(self, tmp_path): + # Test gh18403 + f_path = tmp_path / "gh18403_mod.f90" + with f_path.open('w') as ff: + ff.write(textwrap.dedent("""\ + module test + abstract interface + subroutine foo() + end subroutine + end interface + end module test + """)) + mod = crackfortran.crackfortran([str(f_path)]) + assert len(mod) == 1 + assert len(mod[0]['body']) == 1 + assert mod[0]['body'][0]['block'] == 'abstract interface' From b069eb6e645cffc39caa6f053ba7cb77207821f8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 31 Mar 2021 13:19:34 +0200 Subject: [PATCH 0848/1270] REL: do a better job of including files in repo root in sdist Not only environment.yml is added, setup.cfg was missing too. Current result of `ls -a` in the top-level directory of the unpacked sdist: ``` . LICENSE.txt pytest.ini site.cfg.example .. linter_requirements.txt README.md test_requirements.txt benchmarks MANIFEST.in release_requirements.txt THANKS.txt doc numpy runtests.py tools doc_requirements.txt pavement.py setup.cfg tox.ini environment.yml PKG-INFO setup.py versioneer.py INSTALL.rst.txt pyproject.toml ``` --- MANIFEST.in | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 57496ec71653..3e04fb1fa267 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,16 +4,17 @@ # data, etc files to distribution (*for installation*). # Avoid using MANIFEST.in for that. # -include MANIFEST.in -include pyproject.toml -include pytest.ini -include *.txt -include README.md -include site.cfg.example -include runtests.py -include tox.ini -include .coveragerc -include test_requirements.txt +# Files in top-level directory: +include *.* +# Exclude license file that we append to the main license when running +# `python setup.py sdist`. And exclude generated files in repo root. +exclude LICENSES_bundled.txt +exclude .* +exclude azure-*.yml +exclude shippable.yml + +# Sub-directories. 
Included are: numpy/, doc/, benchmarks/, tools/ +include numpy/_version.py recursive-include numpy/random *.pyx *.pxd *.pyx.in *.pxd.in include numpy/py.typed include numpy/random/include/* @@ -45,8 +46,3 @@ prune benchmarks/numpy # Exclude generated files prune */__pycache__ global-exclude *.pyc *.pyo *.pyd *.swp *.bak *~ -# Exclude license file that we append to the main license when running -# `python setup.py sdist` -exclude LICENSES_bundled.txt -include versioneer.py -include numpy/_version.py From f07079b55d1880cd24dfc7425dcbb23ef14b441f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 31 Mar 2021 07:24:12 -0600 Subject: [PATCH 0849/1270] STY: Indentation fixes. --- numpy/f2py/tests/test_abstract_interface.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py index d6c6da743949..936c1f7bc9ae 100644 --- a/numpy/f2py/tests/test_abstract_interface.py +++ b/numpy/f2py/tests/test_abstract_interface.py @@ -53,13 +53,13 @@ def test_parse_abstract_interface(self, tmp_path): f_path = tmp_path / "gh18403_mod.f90" with f_path.open('w') as ff: ff.write(textwrap.dedent("""\ - module test - abstract interface - subroutine foo() - end subroutine - end interface - end module test - """)) + module test + abstract interface + subroutine foo() + end subroutine + end interface + end module test + """)) mod = crackfortran.crackfortran([str(f_path)]) assert len(mod) == 1 assert len(mod[0]['body']) == 1 From 179d62bc18174378e220c6babf3806b838f02229 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 31 Mar 2021 10:14:34 -0600 Subject: [PATCH 0850/1270] DOC: Add versionadded for new min_digits argument. --- numpy/core/arrayprint.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index d9d2dbe68fac..73d27646379c 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -1057,7 +1057,8 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', `unique=True`. In that case more digits than necessary to uniquely identify the value may be printed and rounded unbiased. - + -- versionadded:: 1.21.0 + Returns ------- rep : string @@ -1142,6 +1143,8 @@ def format_float_positional(x, precision=None, unique=True, Minimum number of digits to print. Only has an effect if `unique=True` in which case additional digits past those necessary to uniquely identify the value may be printed, rounding the last additional digit. + + -- versionadded:: 1.21.0 Returns ------- From 09b42ebc9c46456f344cdaa91a60d8c07445a54d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 31 Mar 2021 11:31:50 -0600 Subject: [PATCH 0851/1270] DOC: Add release note for gh-18629. --- doc/release/upcoming_changes/18629.new_feature.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 doc/release/upcoming_changes/18629.new_feature.rst diff --git a/doc/release/upcoming_changes/18629.new_feature.rst b/doc/release/upcoming_changes/18629.new_feature.rst new file mode 100644 index 000000000000..7d75c323f40b --- /dev/null +++ b/doc/release/upcoming_changes/18629.new_feature.rst @@ -0,0 +1,10 @@ +New ``min_digits`` argument for printing float values +----------------------------------------------------- +A new ``min_digits`` argument has been added to the dragon4 float printing +functions `np.format_float_positional` and `np.format_float_scientific` . 
This +kwd guarantees that at least the given number of digits will be printed when +printing in unique=True mode, even if the extra digits are unnecessary to +uniquely specify the value. It is the counterpart to the precision argument +which sets the maximum number of digits to be printed. When unique=False in +fixed precision mode, it has no effect and the precision argument fixes the +number of digits. From 7f14329dc9b1b15b5de5f42cfd1d7ff30b4a871c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 31 Mar 2021 11:37:41 -0600 Subject: [PATCH 0852/1270] DOC: Add release note for gh-18695. --- doc/release/upcoming_changes/18695.new_feature.rst | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 doc/release/upcoming_changes/18695.new_feature.rst diff --git a/doc/release/upcoming_changes/18695.new_feature.rst b/doc/release/upcoming_changes/18695.new_feature.rst new file mode 100644 index 000000000000..a1f1081768f3 --- /dev/null +++ b/doc/release/upcoming_changes/18695.new_feature.rst @@ -0,0 +1,3 @@ +f2py now recognizes Fortran abstract interface blocks +----------------------------------------------------- +`np.f2py` can now parse abstract interface blocks. From ba187c1b306fdc3b341553dbceac12dc34e112d9 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 31 Mar 2021 11:46:26 +0200 Subject: [PATCH 0853/1270] DOC: update Steering Council membership and people on governance page We welcome two new SC members, Melissa and Inessa. The people moving to emeritus status have not been very active or not active at all for a while. They all still have commit rights, and if they get more active again their membership can be restored. There are also a few changes to the NumFOCUS Subcommittee and Institutional Partners members, to reflect recent changes. After this update, all listed people are currently actively contributing to NumPy. That does not only include technical contributions, but also participating in community meetings, mentoring newcomers, and many other activities across the project. 
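As a quick, illustrative aside (not part of any patch here): the ``min_digits`` argument described in the 18629 release note a few lines above can be exercised like this; the padded digits come from the underlying binary value of ``float32(0.1)``, not from zero padding::

    import numpy as np

    # Shortest digits that uniquely identify float32(0.1):
    np.format_float_positional(np.float32(0.1), unique=True)      # '0.1'
    # Ask for at least 10 digits; the extra digits are real digits of
    # the binary value, with the last one rounded.
    np.format_float_positional(np.float32(0.1), unique=True, min_digits=10)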
[ci skip] --- doc/source/dev/governance/people.rst | 38 ++++++++-------------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst index 18366402e2be..c48b801ab670 100644 --- a/doc/source/dev/governance/people.rst +++ b/doc/source/dev/governance/people.rst @@ -7,59 +7,43 @@ Steering council ---------------- * Sebastian Berg - -* Jaime Fernández del Río - * Ralf Gommers - -* Allan Haldane - * Charles Harris - * Stephan Hoyer - +* Melissa Weber Mendonça +* Inessa Pawson * Matti Picus - -* Nathaniel Smith - -* Julian Taylor - -* Pauli Virtanen - * Stéfan van der Walt - * Eric Wieser - Emeritus members ---------------- * Travis Oliphant -- project founder / emeritus leader (2005-2012) - * Alex Griffing (2015-2017) - * Marten van Kerkwijk (2017-2019) +* Allan Haldane (2015-2021) +* Nathaniel Smith (2012-2021) +* Julian Taylor (2013-2021) +* Pauli Virtanen (2008-2021) +* Jaime Fernández del Río (2014-2021) NumFOCUS Subcommittee --------------------- -* Chuck Harris - +* Charles Harris * Ralf Gommers - -* Jaime Fernández del Río - +* Melissa Weber Mendonça * Sebastian Berg - * External member: Thomas Caswell Institutional Partners ---------------------- -* UC Berkeley (Stéfan van der Walt, Sebastian Berg, Warren Weckesser, Ross Barnowski) +* UC Berkeley (Stéfan van der Walt, Sebastian Berg, Ross Barnowski) -* Quansight (Ralf Gommers, Hameer Abbasi, Melissa Weber Mendonça, Mars Lee, Matti Picus) +* Quansight (Ralf Gommers, Melissa Weber Mendonça, Mars Lee, Matti Picus, Pearu Peterson) From 3cdb33f02298c3544f7a3bc312c42422a2a7b971 Mon Sep 17 00:00:00 2001 From: DCtheTall Date: Wed, 31 Mar 2021 17:11:41 -0400 Subject: [PATCH 0854/1270] Add tests np.meshgrid for higher dimensional grids. 
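A small sketch of the behaviour the new tests below exercise (an illustrative aside, not part of the patch): with the default ``indexing='xy'`` the first two output dimensions are swapped, while ``indexing='ij'`` keeps the axes in input order::

    import numpy as np

    a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5])
    a.shape   # (2, 1, 3), and the same for b and c

    a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij')
    a.shape   # (1, 2, 3)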
--- numpy/lib/tests/test_function_base.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index afcb81effd95..4201afac3840 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2219,6 +2219,7 @@ def test_simple(self): [0.64864341, 0.79115165, 0.96098397]])) +# Run test using: python3 runtests.py -t numpy.lib.tests.test_function_base class TestMeshgrid: def test_simple(self): @@ -2307,6 +2308,20 @@ def test_writeback(self): assert_equal(x[0, :], 0) assert_equal(x[1, :], X) + def test_higher_dimensions(self): + a, b, c = np.meshgrid([0], [1, 1], [2, 2]) + assert_equal(a, [[[0, 0]], [[0, 0]]]) + assert_equal(b, [[[1, 1]], [[1, 1]]]) + assert_equal(c, [[[2, 2]], [[2, 2]]]) + + a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6))) + expected_shape = (2, 1, 3, 4, 5) + assert_equal(a.shape, expected_shape) + assert_equal(b.shape, expected_shape) + assert_equal(c.shape, expected_shape) + assert_equal(d.shape, expected_shape) + assert_equal(e.shape, expected_shape) + class TestPiecewise: From 2a880214c0ffb9c21b12ab51fbb364d71aa17cd1 Mon Sep 17 00:00:00 2001 From: DCtheTall Date: Wed, 31 Mar 2021 17:12:37 -0400 Subject: [PATCH 0855/1270] rm comment --- numpy/lib/tests/test_function_base.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4201afac3840..8dcbaa034c5e 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2219,7 +2219,6 @@ def test_simple(self): [0.64864341, 0.79115165, 0.96098397]])) -# Run test using: python3 runtests.py -t numpy.lib.tests.test_function_base class TestMeshgrid: def test_simple(self): From 8b3c0038f781863a8ed4cf5e41ec0b6dab9eb968 Mon Sep 17 00:00:00 2001 From: tech-gian Date: Sat, 3 Apr 2021 00:28:37 +0300 Subject: [PATCH 0856/1270] DOC: closing 17486 --- doc/source/user/basics.types.rst | 71 ++++---------------------------- 1 file changed, 9 insertions(+), 62 deletions(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index ec2af409ae86..2f49f052c898 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -96,70 +96,17 @@ The primitive types supported are tied closely to those in C: Since many of these have platform-dependent definitions, a set of fixed-size -aliases are provided: +aliases are provided to this site: :doc:`../reference/arrays.scalars` +under the "Sized aliases" section. -.. list-table:: - :header-rows: 1 - - * - Numpy type - - C type - - Description - - * - `numpy.int8` - - ``int8_t`` - - Byte (-128 to 127) - - * - `numpy.int16` - - ``int16_t`` - - Integer (-32768 to 32767) - - * - `numpy.int32` - - ``int32_t`` - - Integer (-2147483648 to 2147483647) - - * - `numpy.int64` - - ``int64_t`` - - Integer (-9223372036854775808 to 9223372036854775807) - - * - `numpy.uint8` - - ``uint8_t`` - - Unsigned integer (0 to 255) - - * - `numpy.uint16` - - ``uint16_t`` - - Unsigned integer (0 to 65535) +In the site above, are listed some types of the table above such as `byte`, +`short`, `intc` and `int_`. The corresponding unsigned and signed types +of these are written too. - * - `numpy.uint32` - - ``uint32_t`` - - Unsigned integer (0 to 4294967295) +'float' and 'complex' aliases are listed, as well. 
The functionality +for each of these types are descripted in the table above, as long as +examples with similarities with ``C types``. - * - `numpy.uint64` - - ``uint64_t`` - - Unsigned integer (0 to 18446744073709551615) - - * - `numpy.intp` - - ``intptr_t`` - - Integer used for indexing, typically the same as ``ssize_t`` - - * - `numpy.uintp` - - ``uintptr_t`` - - Integer large enough to hold a pointer - - * - `numpy.float32` - - ``float`` - - - - * - `numpy.float64` / `numpy.float_` - - ``double`` - - Note that this matches the precision of the builtin python `float`. - - * - `numpy.complex64` - - ``float complex`` - - Complex number, represented by two 32-bit floats (real and imaginary components) - - * - `numpy.complex128` / `numpy.complex_` - - ``double complex`` - - Note that this matches the precision of the builtin python `complex`. NumPy numerical types are instances of ``dtype`` (data-type) objects, each @@ -171,7 +118,7 @@ having unique characteristics. Once you have imported NumPy using the dtypes are available as ``np.bool_``, ``np.float32``, etc. -Advanced types, not listed in the table above, are explored in +Advanced types, not listed in the site above, are explored in section :ref:`structured_arrays`. There are 5 basic numerical types representing booleans (bool), integers (int), From 23117049e4129066a85321555363c92da72f9155 Mon Sep 17 00:00:00 2001 From: Giannis Zapantis <59338244+tech-gian@users.noreply.github.com> Date: Sat, 3 Apr 2021 10:52:34 +0300 Subject: [PATCH 0857/1270] DOC: typo fix --- doc/source/user/basics.types.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 2f49f052c898..a2f78398df43 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -104,7 +104,7 @@ In the site above, are listed some types of the table above such as `byte`, of these are written too. 'float' and 'complex' aliases are listed, as well. The functionality -for each of these types are descripted in the table above, as long as +for each of these types is descripted in the table above, as well as examples with similarities with ``C types``. From 036f6c68f849c6659fef9b91aaafa7e447286c5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Sun, 4 Apr 2021 16:24:43 -0300 Subject: [PATCH 0858/1270] DOC: Simplifies Mandelbrot set plot in Quickstart guide (#18712) Closes gh-18409 --- doc/source/user/quickstart.rst | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 28262c89ee90..9f3d6a040dd0 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -1254,19 +1254,21 @@ set `__: >>> import numpy as np >>> import matplotlib.pyplot as plt - >>> def mandelbrot(h, w, maxit=20): + >>> def mandelbrot(h, w, maxit=20, r=2): ... """Returns an image of the Mandelbrot fractal of size (h,w).""" - ... y, x = np.ogrid[-1.4:1.4:h*1j, -2:0.8:w*1j] - ... c = x + y * 1j - ... z = c + ... x = np.linspace(-2.5, 1.5, 4*h+1) + ... y = np.linspace(-1.5, 1.5, 3*w+1) + ... A, B = np.meshgrid(x, y) + ... C = A + B*1j + ... z = np.zeros_like(C) ... divtime = maxit + np.zeros(z.shape, dtype=int) ... ... for i in range(maxit): - ... z = z**2 + c - ... diverge = z * np.conj(z) > 2**2 # who is diverging + ... z = z**2 + C + ... diverge = abs(z) > r # who is diverging ... div_now = diverge & (divtime == maxit) # who is diverging now ... 
divtime[div_now] = i # note when - ... z[diverge] = 2 # avoid diverging too much + ... z[diverge] = r # avoid diverging too much ... ... return divtime >>> plt.imshow(mandelbrot(400, 400)) From 5724b508066032e52b18df76491d459f612bf076 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 5 Apr 2021 07:34:27 +0000 Subject: [PATCH 0859/1270] MAINT: Bump hypothesis from 6.8.3 to 6.8.4 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.8.3 to 6.8.4. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.8.3...hypothesis-python-6.8.4) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index ab73b2d359aa..48ab654edaf4 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.22 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.8.3 +hypothesis==6.8.4 pytest==6.2.2 pytz==2021.1 pytest-cov==2.11.1 From 2ae4e244255f598f880cb6a299c6aca575a7d435 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 5 Apr 2021 12:21:25 +0000 Subject: [PATCH 0860/1270] MAINT: Bump pytest from 6.2.2 to 6.2.3 Bumps [pytest](https://github.com/pytest-dev/pytest) from 6.2.2 to 6.2.3. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/6.2.2...6.2.3) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 48ab654edaf4..65ebc0b453b2 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -2,7 +2,7 @@ cython==0.29.22 wheel<0.36.3 setuptools<49.2.0 hypothesis==6.8.4 -pytest==6.2.2 +pytest==6.2.3 pytz==2021.1 pytest-cov==2.11.1 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' From 975378b8051d35ab36b68446966422281874d8da Mon Sep 17 00:00:00 2001 From: Giannis Zapantis <59338244+tech-gian@users.noreply.github.com> Date: Mon, 5 Apr 2021 17:53:16 +0300 Subject: [PATCH 0861/1270] Update, sized aliases --- doc/source/reference/arrays.scalars.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 13d117af2432..d0e8efc81608 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -246,6 +246,8 @@ elements the data type consists of.) convention more consistent with other Python modules such as the :mod:`struct` module. +.. 
_sized-aliases: + Sized aliases ~~~~~~~~~~~~~ From a0dc48ff70fa4f8012c86d23b8de3200af3e5ae4 Mon Sep 17 00:00:00 2001 From: Giannis Zapantis <59338244+tech-gian@users.noreply.github.com> Date: Mon, 5 Apr 2021 17:54:53 +0300 Subject: [PATCH 0862/1270] Update, removed unnecessary things --- doc/source/user/basics.types.rst | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index a2f78398df43..0dc2df9bc441 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -96,16 +96,7 @@ The primitive types supported are tied closely to those in C: Since many of these have platform-dependent definitions, a set of fixed-size -aliases are provided to this site: :doc:`../reference/arrays.scalars` -under the "Sized aliases" section. - -In the site above, are listed some types of the table above such as `byte`, -`short`, `intc` and `int_`. The corresponding unsigned and signed types -of these are written too. - -'float' and 'complex' aliases are listed, as well. The functionality -for each of these types is descripted in the table above, as well as -examples with similarities with ``C types``. +aliases are provided (See :ref:`sized-aliases`). @@ -118,7 +109,7 @@ having unique characteristics. Once you have imported NumPy using the dtypes are available as ``np.bool_``, ``np.float32``, etc. -Advanced types, not listed in the site above, are explored in +Advanced types, not listed above, are explored in section :ref:`structured_arrays`. There are 5 basic numerical types representing booleans (bool), integers (int), From 2c1410becc7fbe660426e2a946d54304fc470148 Mon Sep 17 00:00:00 2001 From: Anthony Vo Date: Mon, 5 Apr 2021 23:24:10 +0700 Subject: [PATCH 0863/1270] chain exceptions when appropriate --- benchmarks/benchmarks/bench_linalg.py | 4 ++-- benchmarks/benchmarks/bench_ufunc_strides.py | 10 +++++----- numpy/lib/format.py | 2 +- numpy/random/_common.pyx | 2 +- tools/cythonize.py | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index a72cccb5f7ed..5ed5b6eecd6d 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -91,8 +91,8 @@ def setup(self, op, typename): # check that dtype is supported at all try: self.func(self.a[:2, :2]) - except TypeError: - raise NotImplementedError() + except TypeError as e: + raise NotImplementedError() from e def time_op(self, op, typename): self.func(self.a) diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index 58f325e76174..d4e396d057fc 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -33,7 +33,7 @@ def setup(self, ufuncname, stride, stride_out, dtype): try: self.f = getattr(np, ufuncname) except AttributeError: - raise NotImplementedError() + raise NotImplementedError(f"No ufunc {ufuncname} found") from None N = 10000 self.arr = np.ones(stride*N, dtype) self.arr_out = np.empty(stride_out*N, dtype) @@ -67,8 +67,8 @@ def setup(self, ufuncname, dtype, stride): np.seterr(all='ignore') try: self.f = getattr(np, ufuncname) - except AttributeError: - raise NotImplementedError() + xcept AttributeError: + raise NotImplementedError(f"No ufunc {ufuncname} found") from None N = 10000 self.arr1 = np.array(np.random.rand(stride*N), dtype=dtype) self.arr2 = np.array(np.random.rand(stride*N), dtype=dtype) 
@@ -109,7 +109,7 @@ def setup(self, bfuncname, stride, dtype): try: self.f = getattr(np, bfuncname) except AttributeError: - raise NotImplementedError() + raise NotImplementedError(f"No bfunc {bfuncname} found") from None N = 10000 self.arr1 = np.ones(stride*N, dtype) self.arr2 = np.ones(stride*N, dtype) @@ -132,7 +132,7 @@ def setup(self, bfuncname, stride, dtype): try: self.f = getattr(np, bfuncname) except AttributeError: - raise NotImplementedError() + raise NotImplementedError(f"No bfunc {bfuncname} found") from None N = 10000 self.arr1 = np.ones(stride*N, dtype) diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 5d951e262570..60e515e77652 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -378,7 +378,7 @@ def _wrap_header(header, version): header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) except struct.error: msg = "Header length {} too big for version={}".format(hlen, version) - raise ValueError(msg) + raise ValueError(msg) from None # Pad the header with spaces and a final newline such that the magic # string, the header-length short and the header are aligned on a diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index 19fb34d4daa9..f99379e8abf9 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -122,7 +122,7 @@ cdef object prepare_cffi(bitgen_t *bitgen): try: import cffi except ImportError: - raise ImportError('cffi cannot be imported.') + raise ImportError('cffi cannot be imported.') from None ffi = cffi.FFI() _cffi = interface(bitgen.state, diff --git a/tools/cythonize.py b/tools/cythonize.py index 911b812438a0..7a9739c50cf2 100755 --- a/tools/cythonize.py +++ b/tools/cythonize.py @@ -62,7 +62,7 @@ def process_pyx(fromfile, tofile): # The `cython` command need not point to the version installed in the # Python running this script, so raise an error to avoid the chance of # using the wrong version of Cython. 
- raise OSError('Cython needs to be installed in Python as a module') + raise OSError('Cython needs to be installed in Python as a module') from None else: # check the version, and invoke through python from distutils.version import LooseVersion From 328b5a6e7974730360bb516c62af8e5eddf2926e Mon Sep 17 00:00:00 2001 From: Anthony Vo <43098273+anthonyhvo12@users.noreply.github.com> Date: Mon, 5 Apr 2021 23:59:11 +0700 Subject: [PATCH 0864/1270] Update benchmarks/benchmarks/bench_ufunc_strides.py Co-authored-by: Eric Wieser --- benchmarks/benchmarks/bench_ufunc_strides.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index d4e396d057fc..213ff0020293 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -67,7 +67,7 @@ def setup(self, ufuncname, dtype, stride): np.seterr(all='ignore') try: self.f = getattr(np, ufuncname) - xcept AttributeError: + except AttributeError: raise NotImplementedError(f"No ufunc {ufuncname} found") from None N = 10000 self.arr1 = np.array(np.random.rand(stride*N), dtype=dtype) From 876bc5d36e3993391398ce474b52179868505f92 Mon Sep 17 00:00:00 2001 From: Anthony Vo Date: Tue, 6 Apr 2021 02:03:04 +0700 Subject: [PATCH 0865/1270] change to suitable raise from and add line breaks --- numpy/random/_common.pyx | 2 +- tools/cythonize.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index 6e6e5b77ecf9..85cd6ba9d10d 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -122,7 +122,7 @@ cdef object prepare_cffi(bitgen_t *bitgen): try: import cffi except ImportError: - raise ImportError('cffi cannot be imported.') from None + raise ImportError('cffi cannot be imported.') from e ffi = cffi.FFI() _cffi = interface(bitgen.state, diff --git a/tools/cythonize.py b/tools/cythonize.py index 7a9739c50cf2..9a15584fbc73 100755 --- a/tools/cythonize.py +++ b/tools/cythonize.py @@ -62,7 +62,8 @@ def process_pyx(fromfile, tofile): # The `cython` command need not point to the version installed in the # Python running this script, so raise an error to avoid the chance of # using the wrong version of Cython. 
- raise OSError('Cython needs to be installed in Python as a module') from None + raise OSError("Cython needs to be installed in " + "Python as a module") from e else: # check the version, and invoke through python from distutils.version import LooseVersion From fdeb9794d94a44fedb32c5b0cba5713c1b5f78e6 Mon Sep 17 00:00:00 2001 From: Anthony Vo Date: Tue, 6 Apr 2021 02:14:01 +0700 Subject: [PATCH 0866/1270] change to suitable raise from and add line breaks #2 --- numpy/random/_common.pyx | 2 +- tools/cythonize.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index 85cd6ba9d10d..8e2edd865186 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -121,7 +121,7 @@ cdef object prepare_cffi(bitgen_t *bitgen): """ try: import cffi - except ImportError: + except ImportError as e: raise ImportError('cffi cannot be imported.') from e ffi = cffi.FFI() diff --git a/tools/cythonize.py b/tools/cythonize.py index 9a15584fbc73..06cf54c9a41c 100755 --- a/tools/cythonize.py +++ b/tools/cythonize.py @@ -58,12 +58,12 @@ def process_pyx(fromfile, tofile): # try the cython in the installed python first (somewhat related to scipy/scipy#2397) import Cython from Cython.Compiler.Version import version as cython_version - except ImportError: + except ImportError as e: # The `cython` command need not point to the version installed in the # Python running this script, so raise an error to avoid the chance of # using the wrong version of Cython. - raise OSError("Cython needs to be installed in " - "Python as a module") from e + msg = 'Cython needs to be installed in Python as a module' + raise OSError(msg) from e else: # check the version, and invoke through python from distutils.version import LooseVersion From 9f339758e3faeb447a629d600f7640c8735a6c4a Mon Sep 17 00:00:00 2001 From: DCtheTall Date: Mon, 5 Apr 2021 15:50:16 -0400 Subject: [PATCH 0867/1270] review comments --- numpy/lib/tests/test_function_base.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 8dcbaa034c5e..761ea83a3929 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2307,12 +2307,7 @@ def test_writeback(self): assert_equal(x[0, :], 0) assert_equal(x[1, :], X) - def test_higher_dimensions(self): - a, b, c = np.meshgrid([0], [1, 1], [2, 2]) - assert_equal(a, [[[0, 0]], [[0, 0]]]) - assert_equal(b, [[[1, 1]], [[1, 1]]]) - assert_equal(c, [[[2, 2]], [[2, 2]]]) - + def test_nd_shape(self): a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6))) expected_shape = (2, 1, 3, 4, 5) assert_equal(a.shape, expected_shape) @@ -2321,6 +2316,18 @@ def test_higher_dimensions(self): assert_equal(d.shape, expected_shape) assert_equal(e.shape, expected_shape) + def test_nd_values(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5]) + assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]]) + + def test_nd_indexing(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij') + assert_equal(a, [[[0, 0, 0], [0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1], [2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5], [3, 4, 5]]]) + class TestPiecewise: From 4d2e4847823d3d3c9b7380f8ee7bc1799bd070f9 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 6 Apr 2021 09:04:53 +0530 Subject: [PATCH 0868/1270] SIMD: Use scalar division for 
Armv7, Aarch64, and IBM/Power Co-authored-by: Sayed Adel --- numpy/core/src/umath/loops_arithmetic.dispatch.c.src | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index a012d50dd72c..7e9f464636c5 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -69,7 +69,17 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) #define TO_SIMD_SFX(X) X##_u@len@ /**end repeat1**/ #endif - +/* + * For 64-bit division on Armv7, Aarch64, and IBM/Power, NPYV fall-backs to the scalar division + * because emulating multiply-high on these architectures is going to be expensive comparing + * to the native scalar dividers. + * Therefore it's better to disable NPYV in this special case to avoid any unnecessary shuffles. + * Power10(VSX4) is an exception here since it has native support for integer vector division, + * note neither infrastructure nor NPYV has supported VSX4 yet. + */ +#if NPY_BITSOF_@STYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) + #undef TO_SIMD_SFX +#endif NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { From d2cbd17cba2132e8c02da394be5c3f93ddd76919 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Tue, 6 Apr 2021 09:35:02 +0100 Subject: [PATCH 0869/1270] BUG: Check out requirements and raise when not satisfied Check that out is C-contiguous when using user-supplied arguments closes #18704 --- numpy/random/_common.pyx | 39 +++++++++++++++----- numpy/random/tests/test_generator_mt19937.py | 23 ++++++++++++ 2 files changed, 53 insertions(+), 9 deletions(-) diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index 719647c3e8c5..c397180fbe7d 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -232,13 +232,34 @@ cdef validate_output_shape(iter_shape, np.ndarray output): ) -cdef check_output(object out, object dtype, object size): +cdef check_output(object out, object dtype, object size, bint require_c_array): + """ + Check user-supplied output array properties and shape + + Parameters + ---------- + out : {ndarray, None} + The array to check. If None, returns immediately. + dtype : dtype + The required dtype of out. + size : {None, int, tuple[int]} + The size passed. If out is an ndarray, verifies that the shape of out + matches size. + require_c_array : bool + Whether out must be a C-array. If False, out can be either C- or F- + ordered. If True, must be C-ordered. In either case, must be + contiguous, writable, aligned and in native byte-order. + """ if out is None: return cdef np.ndarray out_array = out - if not (np.PyArray_CHKFLAGS(out_array, np.NPY_CARRAY) or - np.PyArray_CHKFLAGS(out_array, np.NPY_FARRAY)): - raise ValueError('Supplied output array is not contiguous, writable or aligned.') + if not (np.PyArray_ISCARRAY(out_array) or + (np.PyArray_ISFARRAY(out_array) and not require_c_array)): + req = "C-" if require_c_array else "" + raise ValueError( + f'Supplied output array must be {req}contiguous, writable, ' + f'aligned, and in machine byte-order.' + ) if out_array.dtype != dtype: raise TypeError('Supplied output array has the wrong type. 
' 'Expected {0}, got {1}'.format(np.dtype(dtype), out_array.dtype)) @@ -264,7 +285,7 @@ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, o return out_val if out is not None: - check_output(out, np.float64, size) + check_output(out, np.float64, size, False) out_array = out else: out_array = np.empty(size, np.double) @@ -288,7 +309,7 @@ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, ob return out_val if out is not None: - check_output(out, np.float32, size) + check_output(out, np.float32, size, False) out_array = out else: out_array = np.empty(size, np.float32) @@ -310,7 +331,7 @@ cdef object float_fill_from_double(void *func, bitgen_t *state, object size, obj return random_func(state) if out is not None: - check_output(out, np.float32, size) + check_output(out, np.float32, size, False) out_array = out else: out_array = np.empty(size, np.float32) @@ -521,7 +542,7 @@ cdef object cont(void *func, void *state, object size, object lock, int narg, cdef np.ndarray a_arr, b_arr, c_arr cdef double _a = 0.0, _b = 0.0, _c = 0.0 cdef bint is_scalar = True - check_output(out, np.float64, size) + check_output(out, np.float64, size, narg > 0) if narg > 0: a_arr = np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0 @@ -971,7 +992,7 @@ cdef object cont_f(void *func, bitgen_t *state, object size, object lock, cdef float _a cdef bint is_scalar = True cdef int requirements = np.NPY_ALIGNED | np.NPY_FORCECAST - check_output(out, np.float32, size) + check_output(out, np.float32, size, True) a_arr = np.PyArray_FROMANY(a, np.NPY_FLOAT32, 0, 0, requirements) is_scalar = np.PyArray_NDIM(a_arr) == 0 diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 0108d84b3ab8..4abcf6fe4694 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -2581,3 +2581,26 @@ def test_single_arg_integer_exception(high, endpoint): gen.integers(-1, high, endpoint=endpoint) with pytest.raises(ValueError, match=msg): gen.integers([-1], high, endpoint=endpoint) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +def test_c_contig_req_out(dtype): + # GH 18704 + out = np.empty((2, 3), order="F", dtype=dtype) + shape = [1, 2, 3] + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, dtype=dtype) + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("dist", [random.standard_normal, random.random]) +def test_contig_req_out(dist, order, dtype): + # GH 18704 + out = np.empty((2, 3), dtype=dtype, order=order) + variates = dist(out=out, dtype=dtype) + assert variates is out + variates = dist(out=out, dtype=dtype, size=out.shape) + assert variates is out From d221c5e4856b5087ddd2625d705b7e3d0b012745 Mon Sep 17 00:00:00 2001 From: mdubravski Date: Wed, 7 Apr 2021 09:57:03 -0400 Subject: [PATCH 0870/1270] added exception chaining in genapi.py --- numpy/core/code_generators/genapi.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 9c3666103053..7208ffafe5de 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -281,9 +281,8 @@ def 
find_functions(filename, tag='API'): state = SCANNING else: function_args.append(line) - except Exception: - print(filename, lineno + 1) - raise + except Exception as e: + raise Exception(filename, lineno + 1) from e fo.close() return functions From 3581e89c6a47e9df0bd0dfa898ee70c6368f2199 Mon Sep 17 00:00:00 2001 From: mdubravski <41096057+mdubravski@users.noreply.github.com> Date: Wed, 7 Apr 2021 10:42:19 -0400 Subject: [PATCH 0871/1270] Update numpy/core/code_generators/genapi.py Co-authored-by: Eric Wieser --- numpy/core/code_generators/genapi.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 7208ffafe5de..b2ea0b81ff94 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -281,8 +281,10 @@ def find_functions(filename, tag='API'): state = SCANNING else: function_args.append(line) + except ParseError: + raise except Exception as e: - raise Exception(filename, lineno + 1) from e + raise ParseError(filename, lineno + 1, "see chained exception for details") from e fo.close() return functions From ee6f4f8f43d95267b19bd282fd9b6f5c3be8ea15 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 8 Apr 2021 14:02:41 +0300 Subject: [PATCH 0872/1270] remove shippable CI --- shippable.yml | 67 --------------------------------------------------- 1 file changed, 67 deletions(-) delete mode 100644 shippable.yml diff --git a/shippable.yml b/shippable.yml deleted file mode 100644 index f26055232014..000000000000 --- a/shippable.yml +++ /dev/null @@ -1,67 +0,0 @@ -branches: - only: - - main - - maintenance/* - -language: python - -python: - # use versions available for job image - # aarch64_u16pytall:v6.7.4 - # (what we currently have access to by default) - # this is a bit restrictive in terms - # of version availability / control, - # but it is convenient - - 3.7 - -runtime: - # use the free open source pool of nodes - # only for ARM platform - nodePool: shippable_shared_aarch64 - -build: - ci: - # install dependencies and newer toolchain for gfortran5 - - sudo add-apt-repository ppa:ubuntu-toolchain-r/test - - sudo apt-get update - - sudo apt-get install gcc gfortran libgfortran5 - - target=$(python tools/openblas_support.py) - - ls -lR "${target}" - - sudo cp -r "${target}"/lib/* /usr/lib - - sudo cp "${target}"/include/* /usr/include - - python -m pip install --upgrade pip - - # we will pay the ~13 minute cost of compiling Cython only when a new - # version is scraped in by pip; otherwise, use the cached - # wheel shippable places on Amazon S3 after we build it once - - python -m pip install -r test_requirements.txt --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION - # install pytest-xdist to leverage a second core - # for unit tests - - python -m pip install pytest-xdist - - # build and test numpy - - export PATH=$PATH:$SHIPPABLE_REPO_DIR - # build first and adjust PATH so f2py is found in scripts dir - # use > 1 core for build sometimes slows down a fair bit, - # other times modestly speeds up, so avoid for now - - python -m pip install . 
- - extra_directories=($SHIPPABLE_REPO_DIR/build/*scripts*) - - extra_path=$(printf "%s:" "${extra_directories[@]}") - - export PATH="${extra_path}${PATH}" - # check OpenBLAS version - - python tools/openblas_support.py --check_version - # run the test suite - - python runtests.py -n --debug-info --show-build-log -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10 - - cache: false - -# disable email notification -# of CI job result -integrations: - notifications: - - integrationName: email - type: email - on_success: never - on_failure: never - on_cancel: never - on_pull_request: never From bd64b627589e01a8ba202fb7304c0dca1b13b8de Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 8 Apr 2021 14:25:31 +0300 Subject: [PATCH 0873/1270] remove shippable from files excluded in MANIFEST.in --- MANIFEST.in | 1 - 1 file changed, 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 3e04fb1fa267..8ec62123b998 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -11,7 +11,6 @@ include *.* exclude LICENSES_bundled.txt exclude .* exclude azure-*.yml -exclude shippable.yml # Sub-directories. Included are: numpy/, doc/, benchmarks/, tools/ include numpy/_version.py From 5e310e94de468359332ea5366d6555df5bc85231 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 1 Apr 2021 18:09:28 -0500 Subject: [PATCH 0874/1270] API,DEP: Move ufunc signature parsing to the start This may have slight affects on users, see release notes. Mainly, we have to parse the type tuple up-front (because we need to replace the current type resolution). Since we _must_ de-facto replace the current type resolution, I do not see why we should duplicate code for the odd possibility of someone actually calling `ufunc.type_resolver()` with a `typetup` that is not a actually a type tuple. This commit also deprecates `signature="l"` as meaning (normally) the same as `dtype="l"`. --- doc/release/upcoming_changes/18718.c_api.rst | 13 + .../upcoming_changes/18718.compatibility.rst | 53 +++ numpy/core/src/umath/ufunc_object.c | 328 ++++++++++++++++-- numpy/core/src/umath/ufunc_type_resolution.c | 224 +++++------- numpy/core/tests/test_deprecations.py | 34 ++ numpy/core/tests/test_ufunc.py | 51 ++- numpy/typing/tests/data/pass/ufuncs.py | 2 +- 7 files changed, 528 insertions(+), 177 deletions(-) create mode 100644 doc/release/upcoming_changes/18718.c_api.rst create mode 100644 doc/release/upcoming_changes/18718.compatibility.rst diff --git a/doc/release/upcoming_changes/18718.c_api.rst b/doc/release/upcoming_changes/18718.c_api.rst new file mode 100644 index 000000000000..eb9121ab645f --- /dev/null +++ b/doc/release/upcoming_changes/18718.c_api.rst @@ -0,0 +1,13 @@ +Use of ``ufunc->type_resolver`` and "type tuple" +------------------------------------------------ +NumPy now normalizes the "type tuple" argument to the +type resolver functions before calling it. Note that in +the use of this type resolver is legacy behaviour and NumPy +will not do so when possible. +Calling ``ufunc->type_resolver`` or ``PyUFunc_DefaultTypeResolver`` +is strongly discouraged and will now enforce a normalized +type tuple if done. +Note that this does not affect providing a type resolver, which +is expected to keep working in most circumstances. +If you have an unexpected use-case for calling the type resolver, +please inform the NumPy developers so that a solution can be found. 
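For illustration only, a minimal Python-side sketch of what this normalization means for callers (``np.add`` is just a convenient example ufunc; the exact tuple layout is an internal detail)::

    import numpy as np

    a = np.arange(3.0)

    # ``dtype=...`` constrains only the output; internally it is normalized
    # to a type tuple of the form (None, None, float64).
    r1 = np.add(a, a, dtype=np.float64)
    r2 = np.add(a, a, signature=(None, None, np.float64))
    # A full signature string additionally pins the input dtypes.
    r3 = np.add(a, a, signature="dd->d")

    assert r1.dtype == r2.dtype == r3.dtype == np.dtype(np.float64)

Only such normalized tuples (containing ``None`` or descriptors) are passed on to a legacy type resolver.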
diff --git a/doc/release/upcoming_changes/18718.compatibility.rst b/doc/release/upcoming_changes/18718.compatibility.rst new file mode 100644 index 000000000000..19563a25a4c0 --- /dev/null +++ b/doc/release/upcoming_changes/18718.compatibility.rst @@ -0,0 +1,53 @@ +Changes to comparisons with ``dtype=...`` +----------------------------------------- +When the ``dtype=`` (or ``signature``) arguments to comparison +ufuncs (``equal``, ``less``, etc.) is used, this will denote +the desired output dtype in the future. +This means that: + + np.equal(2, 3, dtype=object) + +will give a ``FutureWarning`` that it will return an ``object`` +array in the future, which currently happens for: + + np.equal(None, None, dtype=object) + +due to the fact that ``np.array(None)`` is already an object +array. (This also happens for some other dtypes.) + +Since comparisons normally only return boolean arrays, providing +any other dtype will always raise an error in the future and +give a ``DeprecationWarning`` now. + + +Changes to ``dtype`` and ``signature`` arguments in ufuncs +---------------------------------------------------------- +The universal function arguments ``dtype`` and ``signature`` +which are also valid for reduction such as ``np.add.reduce`` +(which is the implementation for ``np.sum``) will now issue +a warning when the ``dtype`` provided is not a "basic" dtype. + +NumPy almost always ignored metadata, byteorder or time units +on these inputs. NumPy will now always ignore it and issue +a warning if byteorder or time unit changed. +The following are the most important examples of changes which +will issue the warning and in some cases previously returned +different results:: + + # The following will now warn on most systems (unchanged result): + np.add(3, 5, dtype=">i32") + + # The biggest impact is for timedelta or datetimes: + arr = np.arange(10, dtype="m8[s]") + # The examples always ignored the time unit "ns" (using the + # unit of `arr`. They now issue a warning: + np.add(arr, arr, dtype="m8[ns]") + np.maximum.reduce(arr, dtype="m8[ns]") + + # The following issue a warning but previously did return + # a "ns" result. + np.add(3, 5, dtype="m8[ns]") # Now return generic time units + np.maximum(arr, arr, dtype="m8[ns]") # Now returns "s" (from `arr`) + +The same applies for functions like ``np.sum`` which use these internally. +This change is necessary to achieve consistent handling within NumPy. 
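Where code previously relied on an exact ``dtype=`` to select a byte order or time unit, a minimal sketch of possible workarounds is to cast the inputs or to pass an explicit output array (shown here for ``timedelta64``, assuming the goal is an exact ``m8[ns]`` result)::

    import numpy as np

    arr = np.arange(10, dtype="m8[s]")

    # Either cast the inputs to the desired unit ...
    res = np.add(arr.astype("m8[ns]"), arr.astype("m8[ns]"))

    # ... or provide an output array with the exact dtype:
    out = np.empty(arr.shape, dtype="m8[ns]")
    np.add(arr, arr, out=out)

Both approaches avoid passing unit information through ``dtype=``/``signature=``, since that information is no longer honored there.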
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 128706277913..d975b0883424 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -47,6 +47,7 @@ #include "npy_import.h" #include "extobj.h" #include "common.h" +#include "dtypemeta.h" #include "numpyos.h" /********** PRINTF DEBUG TRACING **************/ @@ -2682,7 +2683,6 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op, int trivial_loop_ok = 0; - nin = ufunc->nin; nout = ufunc->nout; nop = nin + nout; @@ -4033,6 +4033,10 @@ _not_NoValue(PyObject *obj, PyObject **out) return 1; } + +/* forward declaration */ +static PyArray_DTypeMeta * _get_dtype(PyObject *dtype_obj); + /* * This code handles reduce, reduceat, and accumulate * (accumulate and reduce are special cases of the more general reduceat @@ -4192,8 +4196,14 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } } - if (otype_obj && !PyArray_DescrConverter2(otype_obj, &otype)) { - goto fail; + if (otype_obj && otype_obj != Py_None) { + /* Use `_get_dtype` because `dtype` is a DType and not the instance */ + PyArray_DTypeMeta *dtype = _get_dtype(otype_obj); + if (dtype == NULL) { + goto fail; + } + Py_INCREF(dtype->singleton); + otype = dtype->singleton; } if (out_obj && !PyArray_OutputConverter(out_obj, &out)) { goto fail; @@ -4396,35 +4406,42 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, /* - * Sets typetup to a new reference to the passed in dtype information - * tuple or NULL. Returns -1 on failure. + * Perform a basic check on `dtype`, `sig`, and `signature` since only one + * may be set. If `sig` is used, writes it into `out_signature` (which should + * be set to `signature_obj` so that following code only requires to handle + * `signature_obj`). + * + * Does NOT incref the output! This only copies the borrowed references + * gotten during the argument parsing. + * + * This function does not do any normalization of the input dtype tuples, + * this happens after the array-ufunc override check currently. */ static int -_get_typetup(PyObject *sig_obj, PyObject *signature_obj, PyObject *dtype, - PyObject **out_typetup) +_check_and_copy_sig_to_signature( + PyObject *sig_obj, PyObject *signature_obj, PyObject *dtype, + PyObject **out_signature) { - *out_typetup = NULL; + *out_signature = NULL; if (signature_obj != NULL) { - Py_INCREF(signature_obj); - *out_typetup = signature_obj; + *out_signature = signature_obj; } if (sig_obj != NULL) { - if (*out_typetup != NULL) { + if (*out_signature != NULL) { PyErr_SetString(PyExc_TypeError, "cannot specify both 'sig' and 'signature'"); - Py_SETREF(*out_typetup, NULL); + *out_signature = NULL; return -1; } Py_INCREF(sig_obj); - *out_typetup = sig_obj; + *out_signature = sig_obj; } if (dtype != NULL) { - if (*out_typetup != NULL) { + if (*out_signature != NULL) { PyErr_SetString(PyExc_TypeError, "cannot specify both 'signature' and 'dtype'"); - Py_SETREF(*out_typetup, NULL); return -1; } /* dtype needs to be converted, delay after the override check */ @@ -4432,32 +4449,264 @@ _get_typetup(PyObject *sig_obj, PyObject *signature_obj, PyObject *dtype, return 0; } + +/* + * Note: This function currently lets DType classes pass, but in general + * the class (not the descriptor instance) is the preferred input, so the + * parsing should eventually be adapted to prefer classes and possible + * deprecated instances. 
(Users should not notice that much, since `np.float64` + * or "float64" usually denotes the DType class rather than the instance.) + */ +static PyArray_DTypeMeta * +_get_dtype(PyObject *dtype_obj) { + if (PyObject_TypeCheck(dtype_obj, &PyArrayDTypeMeta_Type)) { + Py_INCREF(dtype_obj); + return (PyArray_DTypeMeta *)dtype_obj; + } + else { + PyArray_Descr *descr = NULL; + if (!PyArray_DescrConverter(dtype_obj, &descr)) { + return NULL; + } + PyArray_DTypeMeta *out = NPY_DTYPE(descr); + if (NPY_UNLIKELY(!out->legacy)) { + /* TODO: this path was unreachable when added. */ + PyErr_SetString(PyExc_TypeError, + "Cannot pass a new user DType instance to the `dtype` or " + "`signature` arguments of ufuncs. Pass the DType class " + "instead."); + Py_DECREF(descr); + return NULL; + } + else if (NPY_UNLIKELY(out->singleton != descr)) { + /* This does not warn about `metadata`, but units is important. */ + if (!PyArray_EquivTypes(out->singleton, descr)) { + if (PyErr_WarnFormat(PyExc_UserWarning, 1, + "The `dtype` and `signature` arguments to " + "ufuncs only select the general DType and not details " + "such as the byte order or time unit. " + "In very rare cases NumPy <1.21 may have preserved the " + "time unit for `dtype=`. The cases are mainly " + "`np.minimum(arr1, arr2, dtype='m8[ms]')` and when the " + "output is timedelta, but the input is integer. " + "(See NumPy 1.21.0 release notes for details.)\n" + "If you wish to set an exact output dtype, you must " + "currently pass `out=` instead.") < 0) { + Py_DECREF(descr); + return NULL; + } + } + } + Py_INCREF(out); + Py_DECREF(descr); + return out; + } +} + + +static int +_make_new_typetup( + int nop, PyArray_DTypeMeta *signature[], PyObject **out_typetup) { + *out_typetup = PyTuple_New(nop); + if (*out_typetup == NULL) { + return -1; + } + for (int i = 0; i < nop; i++) { + PyObject *item; + if (signature[i] == NULL) { + item = Py_None; + } + else { + if (!signature[i]->legacy || signature[i]->abstract) { + /* + * The legacy type resolution can't deal with these. + * This path will return `None` or so in the future to + * set an error later if the legacy type resolution is used. + */ + PyErr_SetString(PyExc_RuntimeError, + "Internal NumPy error: new DType in signature not yet " + "supported. (This should be unreachable code!)"); + Py_SETREF(*out_typetup, NULL); + return -1; + } + item = (PyObject *)signature[i]->singleton; + } + Py_INCREF(item); + PyTuple_SET_ITEM(*out_typetup, i, item); + } + return 0; +} + + /* - * Finish conversion parsing of the type tuple. This is currenlty only - * conversion of the `dtype` argument, but should do more in the future. + * Finish conversion parsing of the type tuple. NumPy always only honored + * the type number for passed in descriptors/dtypes. + * The `dtype` argument is interpreted as the first output DType (not + * descriptor). + * Unlike the dtype of an `out` array, it influences loop selection! * - * TODO: The parsing of the typetup should be moved here (followup cleanup). + * NOTE: This function replaces the type tuple if passed in (it steals + * the original reference and returns a new object and reference)! + * The caller must XDECREF the type tuple both on error or success. + * + * The function returns a new, normalized type-tuple. 
*/ static int -_convert_typetup(PyObject *dtype_obj, PyObject **out_typetup) +_get_normalized_typetup(PyUFuncObject *ufunc, + PyObject *dtype_obj, PyObject *signature_obj, PyObject **out_typetup) { + if (dtype_obj == NULL && signature_obj == NULL) { + return 0; + } + + int res = -1; + int nin = ufunc->nin, nout = ufunc->nout, nop = nin + nout; + /* + * TODO: `signature` will be the main result in the future and + * not the typetup. (Type tuple construction can be deffered to when + * the legacy fallback is used). + */ + PyArray_DTypeMeta *signature[NPY_MAXARGS]; + memset(signature, '\0', sizeof(*signature) * nop); + if (dtype_obj != NULL) { - PyArray_Descr *dtype = NULL; - if (!PyArray_DescrConverter2(dtype_obj, &dtype)) { - return -1; - } - if (dtype == NULL) { - /* dtype=None, no need to set typetup. */ + if (dtype_obj == Py_None) { + /* If `dtype=None` is passed, no need to do anything */ + assert(*out_typetup == NULL); return 0; } - *out_typetup = PyTuple_Pack(1, (PyObject *)dtype); - Py_DECREF(dtype); - if (*out_typetup == NULL) { + if (nout == 0) { + /* This may be allowed (NumPy does not do this)? */ + PyErr_SetString(PyExc_TypeError, + "Cannot provide `dtype` when a ufunc has no outputs"); return -1; } + signature[nin] = _get_dtype(dtype_obj); + if (signature[nin] == NULL) { + return -1; + } + res = _make_new_typetup(nop, signature, out_typetup); + goto finish; } - /* sig and signature are not converted here right now. */ - return 0; + + assert(signature_obj != NULL); + /* Fill in specified_types from the tuple or string (signature_obj) */ + if (PyTuple_Check(signature_obj)) { + int nonecount = 0; + Py_ssize_t n = PyTuple_GET_SIZE(signature_obj); + if (n == 1 && nop != 1) { + /* + * Special handling, because we deprecate this path. The path + * probably mainly existed since the `dtype=obj` was passed through + * as `(obj,)` and parsed later. + */ + if (PyTuple_GET_ITEM(signature_obj, 0) == Py_None) { + PyErr_SetString(PyExc_TypeError, + "a single item type tuple cannot contain None."); + goto finish; + } + if (DEPRECATE("The use of a length 1 tuple for the ufunc " + "`signature` is deprecated. 
Use `dtype` or fill the" + "tuple with `None`s.") < 0) { + goto finish; + } + /* Use the same logic as for `dtype=` */ + res = _get_normalized_typetup(ufunc, + PyTuple_GET_ITEM(signature_obj, 0), NULL, out_typetup); + goto finish; + } + if (n != nop) { + PyErr_Format(PyExc_ValueError, + "a type-tuple must be specified of length %d for ufunc '%s'", + nop, ufunc_get_name_cstr(ufunc)); + goto finish; + } + for (int i = 0; i < nop; ++i) { + PyObject *item = PyTuple_GET_ITEM(signature_obj, i); + if (item == Py_None) { + ++nonecount; + } + else { + signature[i] = _get_dtype(item); + if (signature[i] == NULL) { + goto finish; + } + } + } + if (nonecount == n) { + PyErr_SetString(PyExc_ValueError, + "the type-tuple provided to the ufunc " + "must specify at least one none-None dtype"); + goto finish; + } + } + else if (PyBytes_Check(signature_obj) || PyUnicode_Check(signature_obj)) { + PyObject *str_object = NULL; + + if (PyBytes_Check(signature_obj)) { + str_object = PyUnicode_FromEncodedObject(signature_obj, NULL, NULL); + if (str_object == NULL) { + goto finish; + } + } + else { + Py_INCREF(signature_obj); + str_object = signature_obj; + } + + Py_ssize_t length; + const char *str = PyUnicode_AsUTF8AndSize(str_object, &length); + if (str == NULL) { + Py_DECREF(str_object); + goto finish; + } + + if (length != 1 && (length != nin+nout + 2 || + str[nin] != '-' || str[nin+1] != '>')) { + PyErr_Format(PyExc_ValueError, + "a type-string for %s, %d typecode(s) before and %d after " + "the -> sign", ufunc_get_name_cstr(ufunc), nin, nout); + Py_DECREF(str_object); + goto finish; + } + if (length == 1 && nin+nout != 1) { + Py_DECREF(str_object); + if (DEPRECATE("The use of a length 1 string for the ufunc " + "`signature` is deprecated. Use `dtype` attribute or " + "pass a tuple with `None`s.") < 0) { + goto finish; + } + /* `signature="l"` is the same as `dtype="l"` */ + res = _get_normalized_typetup(ufunc, str_object, NULL, out_typetup); + goto finish; + } + else { + for (int i = 0; i < nin+nout; ++i) { + npy_intp istr = i < nin ? i : i+2; + PyArray_Descr *descr = PyArray_DescrFromType(str[istr]); + if (descr == NULL) { + Py_DECREF(str_object); + goto finish; + } + signature[i] = NPY_DTYPE(descr); + Py_INCREF(signature[i]); + Py_DECREF(descr); + } + Py_DECREF(str_object); + } + } + else { + PyErr_SetString(PyExc_TypeError, + "The signature object to ufunc must be a string or a tuple."); + goto finish; + } + res = _make_new_typetup(nop, signature, out_typetup); + + finish: + for (int i =0; i < nop; i++) { + Py_XDECREF(signature[i]); + } + return res; } @@ -4613,9 +4862,13 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, goto fail; } } - - /* Only one of signature, sig, and dtype should be passed */ - if (_get_typetup(sig_obj, signature_obj, dtype_obj, &typetup) < 0) { + /* + * Only one of signature, sig, and dtype should be passed. If `sig` + * was passed, this puts it into `signature_obj` instead (these + * are borrowed references). 
+ */ + if (_check_and_copy_sig_to_signature( + sig_obj, signature_obj, dtype_obj, &signature_obj) < 0) { goto fail; } } @@ -4635,7 +4888,6 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, goto fail; } else if (override) { - Py_XDECREF(typetup); Py_DECREF(full_args.in); Py_XDECREF(full_args.out); return override; @@ -4650,8 +4902,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, Py_SETREF(full_args.in, new_in); } - /* Finish argument parsing/converting for the dtype and all others */ - if (_convert_typetup(dtype_obj, &typetup) < 0) { + /* + * Parse the passed `dtype` or `signature` into an array containing + * PyArray_DTypeMeta and/or None. + */ + if (_get_normalized_typetup(ufunc, dtype_obj, signature_obj, &typetup) < 0) { goto fail; } @@ -4734,7 +4989,6 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, Py_XDECREF(typetup); Py_XDECREF(full_args.in); Py_XDECREF(full_args.out); - if (ufunc->nout == 1) { return retobj[0]; } diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index c46346118dea..4cf3b3076d8b 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -355,30 +355,43 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc, Py_INCREF(out_dtypes[1]); } else { - PyObject *item; - PyArray_Descr *dtype = NULL; - + PyArray_Descr *descr; /* - * If the type tuple isn't a single-element tuple, let the - * default type resolution handle this one. + * If the type tuple was originally a single element (probably), + * issue a deprecation warning, but otherwise accept it. Since the + * result dtype is always boolean, this is not actually valid unless it + * is `object` (but if there is an object input we already deferred). */ - if (!PyTuple_Check(type_tup) || PyTuple_GET_SIZE(type_tup) != 1) { + if (PyTuple_Check(type_tup) && PyTuple_GET_SIZE(type_tup) == 3 && + PyTuple_GET_ITEM(type_tup, 0) == Py_None && + PyTuple_GET_ITEM(type_tup, 1) == Py_None && + PyArray_DescrCheck(PyTuple_GET_ITEM(type_tup, 2))) { + descr = (PyArray_Descr *)PyTuple_GET_ITEM(type_tup, 2); + if (descr->type_num == NPY_OBJECT) { + if (DEPRECATE_FUTUREWARNING( + "using `dtype=object` (or equivalent signature) will " + "return object arrays in the future also when the " + "inputs do not already have `object` dtype.") < 0) { + return -1; + } + } + else if (descr->type_num != NPY_BOOL) { + if (DEPRECATE( + "using `dtype=` in comparisons is only useful for " + "`dtype=object` (and will do nothing for bool). " + "This operation will fail in the future.") < 0) { + return -1; + } + } + } + else { + /* Usually a failure, but let the the default version handle it */ return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); } - item = PyTuple_GET_ITEM(type_tup, 0); - - if (item == Py_None) { - PyErr_SetString(PyExc_ValueError, - "require data type in the type tuple"); - return -1; - } - else if (!PyArray_DescrConverter(item, &dtype)) { - return -1; - } - - out_dtypes[0] = ensure_dtype_nbo(dtype); + Py_INCREF(descr); + out_dtypes[0] = ensure_dtype_nbo(descr); if (out_dtypes[0] == NULL) { return -1; } @@ -536,34 +549,42 @@ PyUFunc_SimpleUniformOperationTypeResolver( } } else { - PyObject *item; - PyArray_Descr *dtype = NULL; - /* - * If the type tuple isn't a single-element tuple, let the - * default type resolution handle this one. 
+ * This is a fast-path, since all descriptors will be identical, mainly + * when only a single descriptor was passed (which would set the out + * one in the tuple), there is no need to check all loops. */ - if (!PyTuple_Check(type_tup) || PyTuple_GET_SIZE(type_tup) != 1) { + PyArray_Descr *descr = NULL; + if (PyTuple_CheckExact(type_tup) && + PyTuple_GET_SIZE(type_tup) == nop) { + for (int i = 0; i < nop; i++) { + PyObject *item = PyTuple_GET_ITEM(type_tup, i); + if (item == Py_None) { + continue; + } + if (!PyArray_DescrCheck(item)) { + /* bad type tuple (maybe not normalized correctly?) */ + descr = NULL; + break; + } + if (descr != NULL && descr != (PyArray_Descr *)item) { + /* descriptor mismatch, probably a bad signature. */ + descr = NULL; + break; + } + descr = (PyArray_Descr *)item; + } + } + if (descr == NULL) { + /* in all bad/unlikely cases, use the default type resolver: */ return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); } - - item = PyTuple_GET_ITEM(type_tup, 0); - - if (item == Py_None) { - PyErr_SetString(PyExc_ValueError, - "require data type in the type tuple"); - return -1; - } - else if (!PyArray_DescrConverter(item, &dtype)) { - return -1; - } - - out_dtypes[0] = ensure_dtype_nbo(dtype); - Py_DECREF(dtype); - if (out_dtypes[0] == NULL) { - return -1; + else if (descr->type_num == PyArray_DESCR(operands[0])->type_num) { + /* Prefer the input descriptor if it matches (preserve metadata) */ + descr = PyArray_DESCR(operands[0]); } + out_dtypes[0] = ensure_dtype_nbo(descr); } /* All types are the same - copy the first one to the rest */ @@ -2057,8 +2078,7 @@ type_tuple_type_resolver(PyUFuncObject *self, int any_object, PyArray_Descr **out_dtype) { - npy_intp i, j, n, nin = self->nin, nop = nin + self->nout; - int n_specified = 0; + int i, j, nin = self->nin, nop = nin + self->nout; int specified_types[NPY_MAXARGS], types[NPY_MAXARGS]; const char *ufunc_name; int no_castable_output = 0, use_min_scalar; @@ -2071,105 +2091,45 @@ type_tuple_type_resolver(PyUFuncObject *self, use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); /* Fill in specified_types from the tuple or string */ - if (PyTuple_Check(type_tup)) { - int nonecount = 0; - n = PyTuple_GET_SIZE(type_tup); - if (n != 1 && n != nop) { - PyErr_Format(PyExc_ValueError, - "a type-tuple must be specified " - "of length 1 or %d for ufunc '%s'", (int)nop, - ufunc_get_name_cstr(self)); + const char *bad_type_tup_msg = ( + "Only NumPy must call `ufunc->type_resolver()` explicitly. " + "NumPy ensures that a type-tuple is normalized now to be a tuple " + "only containing None or descriptors. If anything else is passed " + "(you are seeing this message), the `type_resolver()` was called " + "directly by a third party. " + "This is unexpected, please inform the NumPy developers about it. 
" + "Also note that `type_resolver` will be phased out, since it must " + "be replaced."); + + if (PyTuple_CheckExact(type_tup)) { + Py_ssize_t n = PyTuple_GET_SIZE(type_tup); + if (n != nop) { + PyErr_SetString(PyExc_RuntimeError, bad_type_tup_msg); return -1; } - - for (i = 0; i < n; ++i) { + for (i = 0; i < nop; ++i) { PyObject *item = PyTuple_GET_ITEM(type_tup, i); if (item == Py_None) { specified_types[i] = NPY_NOTYPE; - ++nonecount; } else { - PyArray_Descr *dtype = NULL; - if (!PyArray_DescrConverter(item, &dtype)) { + if (!PyArray_DescrCheck(item)) { + PyErr_SetString(PyExc_RuntimeError, bad_type_tup_msg); return -1; } - specified_types[i] = dtype->type_num; - Py_DECREF(dtype); + specified_types[i] = ((PyArray_Descr *)item)->type_num; } } - - if (nonecount == n) { - PyErr_SetString(PyExc_ValueError, - "the type-tuple provided to the ufunc " - "must specify at least one none-None dtype"); - return -1; - } - - n_specified = n; } - else if (PyBytes_Check(type_tup) || PyUnicode_Check(type_tup)) { - Py_ssize_t length; - char *str; - PyObject *str_obj = NULL; - - if (PyUnicode_Check(type_tup)) { - str_obj = PyUnicode_AsASCIIString(type_tup); - if (str_obj == NULL) { - return -1; - } - type_tup = str_obj; - } - - if (PyBytes_AsStringAndSize(type_tup, &str, &length) < 0) { - Py_XDECREF(str_obj); - return -1; - } - if (length != 1 && (length != nop + 2 || - str[nin] != '-' || str[nin+1] != '>')) { - PyErr_Format(PyExc_ValueError, - "a type-string for %s, " \ - "requires 1 typecode, or " - "%d typecode(s) before " \ - "and %d after the -> sign", - ufunc_get_name_cstr(self), - self->nin, self->nout); - Py_XDECREF(str_obj); - return -1; - } - if (length == 1) { - PyArray_Descr *dtype; - n_specified = 1; - dtype = PyArray_DescrFromType(str[0]); - if (dtype == NULL) { - Py_XDECREF(str_obj); - return -1; - } - specified_types[0] = dtype->type_num; - Py_DECREF(dtype); - } - else { - PyArray_Descr *dtype; - n_specified = (int)nop; - - for (i = 0; i < nop; ++i) { - npy_intp istr = i < nin ? i : i+2; - - dtype = PyArray_DescrFromType(str[istr]); - if (dtype == NULL) { - Py_XDECREF(str_obj); - return -1; - } - specified_types[i] = dtype->type_num; - Py_DECREF(dtype); - } - } - Py_XDECREF(str_obj); + else { + PyErr_SetString(PyExc_RuntimeError, bad_type_tup_msg); + return -1; } /* If the ufunc has userloops, search for them. 
*/ if (self->userloops) { switch (type_tuple_userloop_type_resolver(self, - n_specified, specified_types, + nop, specified_types, op, casting, any_object, use_min_scalar, out_dtype)) { @@ -2190,19 +2150,13 @@ type_tuple_type_resolver(PyUFuncObject *self, types[j] = orig_types[j]; } - if (n_specified == nop) { - for (j = 0; j < nop; ++j) { - if (types[j] != specified_types[j] && - specified_types[j] != NPY_NOTYPE) { - break; - } - } - if (j < nop) { - /* no match */ - continue; + for (j = 0; j < nop; ++j) { + if (types[j] != specified_types[j] && + specified_types[j] != NPY_NOTYPE) { + break; } } - else if (types[nin] != specified_types[0]) { + if (j < nop) { /* no match */ continue; } diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 4d840ec1ae0c..ec4112e69587 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -1141,3 +1141,37 @@ def test_not_deprecated(self, dtype, string_dt): # np.equal uses a different type resolver: with pytest.raises(TypeError): self.assert_not_deprecated(lambda: np.equal(arr1, arr2)) + + +class TestSingleElementSignature(_DeprecationTestCase): + # Deprecated 2021-04-01, NumPy 1.21 + message = r"The use of a length 1" + + def test_deprecated(self): + self.assert_deprecated(lambda: np.add(1, 2, signature="d")) + self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),))) + + +class TestComparisonBadDType(_DeprecationTestCase): + # Deprecated 2021-04-01, NumPy 1.21 + message = r"using `dtype=` in comparisons is only useful for" + + def test_deprecated(self): + self.assert_deprecated(lambda: np.equal(1, 1, dtype=np.int64)) + # Not an error only for the transition + self.assert_deprecated(lambda: np.equal(1, 1, sig=(None, None, "l"))) + + def test_not_deprecated(self): + np.equal(True, False, dtype=bool) + np.equal(3, 5, dtype=bool, casting="unsafe") + np.equal([None], [4], dtype=object) + +class TestComparisonBadObjectDType(_DeprecationTestCase): + # Deprecated 2021-04-01, NumPy 1.21 (different branch of the above one) + message = r"using `dtype=object` \(or equivalent signature\) will" + warning_cls = FutureWarning + + def test_deprecated(self): + self.assert_deprecated(lambda: np.equal(1, 1, dtype=object)) + self.assert_deprecated( + lambda: np.equal(1, 1, sig=(None, None, object))) diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 96bfe7c33b2d..7b71a4a65417 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -410,9 +410,12 @@ def test_get_signature(self): def test_forced_sig(self): a = 0.5*np.arange(3, dtype='f8') assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) - assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) + with pytest.warns(DeprecationWarning): + assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1]) + with pytest.warns(DeprecationWarning): + assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), + [0, 0, 1]) assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), casting='unsafe'), [0, 0, 1]) @@ -420,18 +423,58 @@ def test_forced_sig(self): np.add(a, 0.5, out=b) assert_equal(b, [0.5, 1, 1.5]) b[:] = 0 - np.add(a, 0.5, sig='i', out=b, casting='unsafe') + with pytest.warns(DeprecationWarning): + np.add(a, 0.5, sig='i', out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') 
assert_equal(b, [0, 0, 1]) b[:] = 0 - np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') + with pytest.warns(DeprecationWarning): + np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) + def test_forced_dtype_times(self): + # Signatures only set the type numbers (not the actual loop dtypes) + # so using `M` in a signature/dtype should generally work: + a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]') + np.maximum(a, a, dtype="M") + np.maximum.reduce(a, dtype="M") + + arr = np.arange(10, dtype="m8[s]") + np.add(arr, arr, dtype="m") + np.maximum(arr, arr, dtype="m") + + def test_forced_dtype_warning(self): + # does not warn (test relies on bad pickling behaviour, simply remove + # it if the `assert int64 is not int64_2` should start failing. + int64 = np.dtype("int64") + int64_2 = pickle.loads(pickle.dumps(int64)) + assert int64 is not int64_2 + np.add(3, 4, dtype=int64_2) + + arr = np.arange(10, dtype="m8[s]") + with pytest.warns(UserWarning, + match="The `dtype` and `signature` arguments to") as rec: + np.add(3, 5, dtype=int64.newbyteorder()) + np.add(3, 5, dtype="m8[ns]") # previously used the "ns" + np.add(arr, arr, dtype="m8[ns]") # never preserved the "ns" + np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns" + np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns" + + assert len(rec) == 5 # each of the above call should cause one + + # Also check the error paths: + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + with pytest.raises(UserWarning): + np.add(3, 5, dtype="m8[ns]") + with pytest.raises(UserWarning): + np.maximum.reduce(arr, dtype="m8[ns]") + def test_true_divide(self): a = np.array(10) b = np.array(20) diff --git a/numpy/typing/tests/data/pass/ufuncs.py b/numpy/typing/tests/data/pass/ufuncs.py index ad4d483d4a70..3c93fb2cf1d0 100644 --- a/numpy/typing/tests/data/pass/ufuncs.py +++ b/numpy/typing/tests/data/pass/ufuncs.py @@ -4,7 +4,7 @@ np.sin([1, 2, 3]) np.sin(1, out=np.empty(1)) np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)]) -np.sin(1, signature="D") +np.sin(1, signature="D->D") np.sin(1, extobj=[16, 1, lambda: None]) # NOTE: `np.generic` subclasses are not guaranteed to support addition; # re-enable this we can infer the exact return type of `np.sin(...)`. 
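A minimal sketch of how callers can migrate away from the now-deprecated length-1 ``sig``/``signature`` forms exercised in ``test_forced_sig`` above (using ``np.add`` as in the tests)::

    import numpy as np

    a = 0.5 * np.arange(3, dtype="f8")

    # Deprecated spellings:
    #   np.add(a, 0.5, sig="i", casting="unsafe")
    #   np.add(a, 0.5, sig=("i4",), casting="unsafe")

    # Preferred replacements:
    np.add(a, 0.5, dtype="i", casting="unsafe")           # constrain only the output
    np.add(a, 0.5, signature="ii->i", casting="unsafe")   # constrain all operands
    np.add(a, 0.5, signature=("i4", "i4", "i4"), casting="unsafe")

Each of these selects the same integer loop as the deprecated forms did, giving ``[0, 0, 1]`` in the tests above.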
From a26730f1082ff4e5f4819a038761ebfebfd73745 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 4 Apr 2021 12:06:24 -0500 Subject: [PATCH 0875/1270] MAINT: Implement comments from Marten's review --- numpy/core/src/umath/ufunc_object.c | 28 ++++++++-------- numpy/core/src/umath/ufunc_type_resolution.c | 4 +-- numpy/core/tests/test_ufunc.py | 35 ++++++++++++++++++++ 3 files changed, 50 insertions(+), 17 deletions(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index d975b0883424..2c82e5cb6bfc 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -4434,7 +4434,6 @@ _check_and_copy_sig_to_signature( *out_signature = NULL; return -1; } - Py_INCREF(sig_obj); *out_signature = sig_obj; } @@ -4511,10 +4510,13 @@ _make_new_typetup( if (*out_typetup == NULL) { return -1; } + + int noncount = 0; for (int i = 0; i < nop; i++) { PyObject *item; if (signature[i] == NULL) { item = Py_None; + noncount++; } else { if (!signature[i]->legacy || signature[i]->abstract) { @@ -4534,6 +4536,11 @@ _make_new_typetup( Py_INCREF(item); PyTuple_SET_ITEM(*out_typetup, i, item); } + if (noncount == nop) { + /* The whole signature was None, simply ignore type tuple */ + Py_DECREF(*out_typetup); + *out_typetup = NULL; + } return 0; } @@ -4592,7 +4599,6 @@ _get_normalized_typetup(PyUFuncObject *ufunc, assert(signature_obj != NULL); /* Fill in specified_types from the tuple or string (signature_obj) */ if (PyTuple_Check(signature_obj)) { - int nonecount = 0; Py_ssize_t n = PyTuple_GET_SIZE(signature_obj); if (n == 1 && nop != 1) { /* @@ -4624,21 +4630,13 @@ _get_normalized_typetup(PyUFuncObject *ufunc, for (int i = 0; i < nop; ++i) { PyObject *item = PyTuple_GET_ITEM(signature_obj, i); if (item == Py_None) { - ++nonecount; + continue; } - else { - signature[i] = _get_dtype(item); - if (signature[i] == NULL) { - goto finish; - } + signature[i] = _get_dtype(item); + if (signature[i] == NULL) { + goto finish; } } - if (nonecount == n) { - PyErr_SetString(PyExc_ValueError, - "the type-tuple provided to the ufunc " - "must specify at least one none-None dtype"); - goto finish; - } } else if (PyBytes_Check(signature_obj) || PyUnicode_Check(signature_obj)) { PyObject *str_object = NULL; @@ -4697,7 +4695,7 @@ _get_normalized_typetup(PyUFuncObject *ufunc, } else { PyErr_SetString(PyExc_TypeError, - "The signature object to ufunc must be a string or a tuple."); + "the signature object to ufunc must be a string or a tuple."); goto finish; } res = _make_new_typetup(nop, signature, out_typetup); diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 4cf3b3076d8b..465deb87b756 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -563,12 +563,12 @@ PyUFunc_SimpleUniformOperationTypeResolver( continue; } if (!PyArray_DescrCheck(item)) { - /* bad type tuple (maybe not normalized correctly?) */ + /* Defer to default resolver (will raise an error there) */ descr = NULL; break; } if (descr != NULL && descr != (PyArray_Descr *)item) { - /* descriptor mismatch, probably a bad signature. 
*/ + /* Descriptor mismatch: try with default (probable error) */ descr = NULL; break; } diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 7b71a4a65417..86cadccd8123 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -437,6 +437,41 @@ def test_forced_sig(self): np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) + def test_signature_all_None(self): + # signature all None, is an acceptable alternative (since 1.21) + # to not providing a signature. + res1 = np.add([3], [4], sig=(None, None, None)) + res2 = np.add([3], [4]) + assert_array_equal(res1, res2) + res1 = np.maximum([3], [4], sig=(None, None, None)) + res2 = np.maximum([3], [4]) + assert_array_equal(res1, res2) + + with pytest.raises(TypeError): + # special case, that would be deprecated anyway, so errors: + np.add(3, 4, signature=(None,)) + + def test_signature_dtype_type(self): + # Since that will be the normal behaviour (past NumPy 1.21) + # we do support the types already: + float_dtype = type(np.dtype(np.float64)) + np.add(3, 4, signature=(float_dtype, float_dtype, None)) + + def test_signature_errors(self): + with pytest.raises(TypeError, + match="the signature object to ufunc must be a string or"): + np.add(3, 4, signature=123.) # neither a string nor a tuple + + with pytest.raises(ValueError): + # bad symbols that do not translate to dtypes + np.add(3, 4, signature="%^->#") + + with pytest.raises(ValueError): + np.add(3, 4, signature=(None, "f8")) # bad length + + with pytest.raises(UnicodeDecodeError): + np.add(3, 4, signature=b"\xff\xff->i") + def test_forced_dtype_times(self): # Signatures only set the type numbers (not the actual loop dtypes) # so using `M` in a signature/dtype should generally work: From 48207204e9b76b1593e1fcb1883346f57993cc2d Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 5 Apr 2021 11:07:31 -0500 Subject: [PATCH 0876/1270] TST: Add coverage signature parsing code coverage where possible --- numpy/core/tests/test_ufunc.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 86cadccd8123..93bfd9adfbda 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -466,6 +466,12 @@ def test_signature_errors(self): # bad symbols that do not translate to dtypes np.add(3, 4, signature="%^->#") + with pytest.raises(ValueError): + np.add(3, 4, signature=b"ii-i") # incomplete and byte string + + with pytest.raises(ValueError): + np.add(3, 4, signature="ii>i") # incomplete string + with pytest.raises(ValueError): np.add(3, 4, signature=(None, "f8")) # bad length From 44dbf648aacfe85c426f0a667b7670e20ba70bad Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 8 Apr 2021 12:12:02 -0500 Subject: [PATCH 0877/1270] MAINT: Raise error instead of warning and ammend NEP on `type_resolver` --- doc/neps/nep-0043-extensible-ufuncs.rst | 5 ++++ .../upcoming_changes/18718.compatibility.rst | 24 ++++++++++++------- numpy/core/src/umath/ufunc_object.c | 23 +++++++++--------- numpy/core/tests/test_ufunc.py | 18 +++++--------- 4 files changed, 37 insertions(+), 33 deletions(-) diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst index 7dbad289b476..6442b9b03a0a 100644 --- a/doc/neps/nep-0043-extensible-ufuncs.rst +++ b/doc/neps/nep-0043-extensible-ufuncs.rst @@ -150,6 +150,11 @@ The masked type resolvers specifically will *not* remain supported, but has no known users 
(including NumPy itself, which only uses the default version). +Further, no compatibility attempt will be made for *calling* as opposed +to providing either the normal or the masked type resolver. As NumPy +will use it only as a fallback. There are no known users of this +(undocumented) possibility. + While the above changes potentially break some workflows, we believe that the long-term improvements vastly outweigh this. Further, packages such as astropy and Numba are capable of adapting so that diff --git a/doc/release/upcoming_changes/18718.compatibility.rst b/doc/release/upcoming_changes/18718.compatibility.rst index 19563a25a4c0..18bf7158da50 100644 --- a/doc/release/upcoming_changes/18718.compatibility.rst +++ b/doc/release/upcoming_changes/18718.compatibility.rst @@ -28,26 +28,32 @@ which are also valid for reduction such as ``np.add.reduce`` a warning when the ``dtype`` provided is not a "basic" dtype. NumPy almost always ignored metadata, byteorder or time units -on these inputs. NumPy will now always ignore it and issue -a warning if byteorder or time unit changed. +on these inputs. NumPy will now always ignore it and raise an +error if byteorder or time unit changed. The following are the most important examples of changes which -will issue the warning and in some cases previously returned -different results:: +will give the error. In some cases previously the information +stored was not ignored, in all of these an error is now raised:: - # The following will now warn on most systems (unchanged result): + # Previously ignored the byte-order (affect if non-native) np.add(3, 5, dtype=">i32") # The biggest impact is for timedelta or datetimes: arr = np.arange(10, dtype="m8[s]") - # The examples always ignored the time unit "ns" (using the - # unit of `arr`. They now issue a warning: + # The examples always ignored the time unit "ns": np.add(arr, arr, dtype="m8[ns]") np.maximum.reduce(arr, dtype="m8[ns]") - # The following issue a warning but previously did return - # a "ns" result. + # The following previously did use "ns" (as opposed to `arr.dtype`) np.add(3, 5, dtype="m8[ns]") # Now return generic time units np.maximum(arr, arr, dtype="m8[ns]") # Now returns "s" (from `arr`) The same applies for functions like ``np.sum`` which use these internally. This change is necessary to achieve consistent handling within NumPy. + +If you run into these, in most cases pass for example ``dtype=np.timedelta64`` +which clearly denotes a general ``timedelta64`` without any unit or byte-order +defined. If you need to specify the output dtype precisely, you may do so +by either casting the inputs or providing an output array using `out=`. + +NumPy may choose to allow providing an exact output ``dtype`` here in the +future, which would be preceded by a ``FutureWarning``. diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 2c82e5cb6bfc..f17dd1e61c1e 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -4480,20 +4480,19 @@ _get_dtype(PyObject *dtype_obj) { else if (NPY_UNLIKELY(out->singleton != descr)) { /* This does not warn about `metadata`, but units is important. */ if (!PyArray_EquivTypes(out->singleton, descr)) { - if (PyErr_WarnFormat(PyExc_UserWarning, 1, + PyErr_Format(PyExc_TypeError, "The `dtype` and `signature` arguments to " "ufuncs only select the general DType and not details " - "such as the byte order or time unit. " - "In very rare cases NumPy <1.21 may have preserved the " - "time unit for `dtype=`. 
The cases are mainly " - "`np.minimum(arr1, arr2, dtype='m8[ms]')` and when the " - "output is timedelta, but the input is integer. " - "(See NumPy 1.21.0 release notes for details.)\n" - "If you wish to set an exact output dtype, you must " - "currently pass `out=` instead.") < 0) { - Py_DECREF(descr); - return NULL; - } + "such as the byte order or time unit (with rare " + "exceptions see release notes). To avoid this warning " + "please use the scalar types `np.float64`, or string " + "notation.\n" + "In rare cases where the time unit was preserved, " + "either cast the inputs or provide an output array. " + "In the future NumPy may transition to allow providing " + "`dtype=` to denote the outputs `dtype` as well"); + Py_DECREF(descr); + return NULL; } } Py_INCREF(out); diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 93bfd9adfbda..c13865ce4c28 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -498,24 +498,18 @@ def test_forced_dtype_warning(self): np.add(3, 4, dtype=int64_2) arr = np.arange(10, dtype="m8[s]") - with pytest.warns(UserWarning, - match="The `dtype` and `signature` arguments to") as rec: + msg = "The `dtype` and `signature` arguments to ufuncs only select the" + with pytest.raises(TypeError, match=msg): np.add(3, 5, dtype=int64.newbyteorder()) + with pytest.raises(TypeError, match=msg): np.add(3, 5, dtype="m8[ns]") # previously used the "ns" + with pytest.raises(TypeError, match=msg): np.add(arr, arr, dtype="m8[ns]") # never preserved the "ns" + with pytest.raises(TypeError, match=msg): np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns" + with pytest.raises(TypeError, match=msg): np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns" - assert len(rec) == 5 # each of the above call should cause one - - # Also check the error paths: - with warnings.catch_warnings(): - warnings.simplefilter("error", UserWarning) - with pytest.raises(UserWarning): - np.add(3, 5, dtype="m8[ns]") - with pytest.raises(UserWarning): - np.maximum.reduce(arr, dtype="m8[ns]") - def test_true_divide(self): a = np.array(10) b = np.array(20) From 1dbfb0c4945729d93d78a820d298efdf288843ae Mon Sep 17 00:00:00 2001 From: mdubravski Date: Thu, 8 Apr 2021 13:32:46 -0400 Subject: [PATCH 0878/1270] shortened length of line 287 in genapi.py --- numpy/core/code_generators/genapi.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 7208ffafe5de..7c1cdb26b3aa 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -281,8 +281,11 @@ def find_functions(filename, tag='API'): state = SCANNING else: function_args.append(line) + except ParseError: + raise except Exception as e: - raise Exception(filename, lineno + 1) from e + raise ParseError(filename, lineno + 1, + "see chained exception for details") from e fo.close() return functions From bbda9ef5e5801efdceff947a3a19d019d4ef2627 Mon Sep 17 00:00:00 2001 From: mdubravski Date: Thu, 8 Apr 2021 13:42:28 -0400 Subject: [PATCH 0879/1270] shortened length of line 287 in genapi.py --- numpy/core/code_generators/genapi.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 32173cbd3b2c..7c1cdb26b3aa 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -284,12 +284,8 @@ def find_functions(filename, tag='API'): except 
ParseError: raise except Exception as e: -<<<<<<< HEAD raise ParseError(filename, lineno + 1, "see chained exception for details") from e -======= - raise ParseError(filename, lineno + 1, "see chained exception for details") from e ->>>>>>> 3581e89c6a47e9df0bd0dfa898ee70c6368f2199 fo.close() return functions From 4e4362e296c1716d34a60fb23f05735f22c8280d Mon Sep 17 00:00:00 2001 From: mdubravski Date: Thu, 8 Apr 2021 13:46:30 -0400 Subject: [PATCH 0880/1270] shortened length of line 287 in genapi.py --- numpy/core/code_generators/genapi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 7c1cdb26b3aa..4a4f3b6e7649 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -284,8 +284,8 @@ def find_functions(filename, tag='API'): except ParseError: raise except Exception as e: - raise ParseError(filename, lineno + 1, - "see chained exception for details") from e + msg = "see chained exception for details" + raise ParseError(filename, lineno + 1, msg) from e fo.close() return functions From 36eb76c9ded81c6626a86aa06d137dd800350751 Mon Sep 17 00:00:00 2001 From: Mike Boyle Date: Fri, 9 Apr 2021 12:49:07 -0400 Subject: [PATCH 0881/1270] DOC: Update ndarray.copy docstring (#18741) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * DOC: Emphasize distinctions between np.copy and ndarray.copy Co-authored-by: Melissa Weber Mendonça --- numpy/core/_add_newdocs.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index bba9f95fc3f5..b0dee66d9008 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -3113,14 +3113,20 @@ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :func:`numpy.copy` are very - similar, but have different default values for their order= - arguments.) + similar but have different default values for their order= + arguments, and this function always passes sub-classes through.) See also -------- - numpy.copy + numpy.copy : Similar function with different default behavior numpy.copyto + Notes + ----- + This function is the preferred method for creating an array copy. The + function :func:`numpy.copy` is similar, but it defaults to using order 'K', + and will not pass sub-classes through by default. + Examples -------- >>> x = np.array([[1,2,3],[4,5,6]], order='F') From 4a53ea1ce65536748e68b7ab76779b9bb72e4619 Mon Sep 17 00:00:00 2001 From: "Thomas J. Fan" Date: Sun, 11 Apr 2021 10:24:49 -0400 Subject: [PATCH 0882/1270] DEV: add Gitpod to numpy (#18733) --- .gitpod.yml | 10 ++++++ environment.yml | 1 + tools/gitpod/Dockerfile | 74 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+) create mode 100644 .gitpod.yml create mode 100644 tools/gitpod/Dockerfile diff --git a/.gitpod.yml b/.gitpod.yml new file mode 100644 index 000000000000..c1755607b626 --- /dev/null +++ b/.gitpod.yml @@ -0,0 +1,10 @@ +image: thomasjpfan/numpy-gitpod:latest +tasks: + # The base image complied numpy with ccache enabled. This second build + # should be faster since it is using the cache. 
+ - init: python setup.py build_ext -i + +github: + prebuilds: + master: true + branches: true diff --git a/environment.yml b/environment.yml index ecfebee3e35e..ea38882a2a32 100644 --- a/environment.yml +++ b/environment.yml @@ -9,6 +9,7 @@ dependencies: - cython - compilers - openblas + - nomkl # For testing - pytest - pytest-cov diff --git a/tools/gitpod/Dockerfile b/tools/gitpod/Dockerfile new file mode 100644 index 000000000000..b9c0d4449cb7 --- /dev/null +++ b/tools/gitpod/Dockerfile @@ -0,0 +1,74 @@ +# Builds a development environment for gitpod by building numpy with +# ccache enabled. When gitpod is prebuilding or starting up it clones +# a branch into `/workspace/numpy`. The gitpod clone will build numpy +# faster because it is using compliers with ccache enabled. +FROM gitpod/workspace-base as clone + +COPY --chown=gitpod . /tmp/numpy_repo + +# We use a multistage build to create a shallow clone of the repo to avoid +# having the complete git history in the build stage and reducing the image +# size. During the build stage, the shallow clone is used to install the +# dependencies and build numpy to populate the cache used by ccache. Building +# numpy with setup.py uses versioneer.py which requires a git history. +RUN git clone --depth 1 file:////tmp/numpy_repo /tmp/numpy + +FROM gitpod/workspace-base as build + +# gitpod/workspace-base needs at least one file here +RUN touch /home/gitpod/.bashrc.d/empty + +ARG MAMBAFORGE_VERSION="4.10.0-0" +ARG CONDA_ENV=numpy-dev + +ENV CONDA_DIR=/home/gitpod/mambaforge3 +ENV PATH=$CONDA_DIR/bin:$PATH + +USER root +RUN install-packages texlive-latex-extra dvisvgm +USER gitpod + +# Allows this Dockerfile to activate conda environments +SHELL ["/bin/bash", "--login", "-o", "pipefail", "-c"] + +# Install mambaforge3 +RUN wget -q -O mambaforge3.sh \ + https://github.com/conda-forge/miniforge/releases/download/$MAMBAFORGE_VERSION/Mambaforge-$MAMBAFORGE_VERSION-Linux-x86_64.sh && \ + bash mambaforge3.sh -p $CONDA_DIR -b && \ + rm mambaforge3.sh + +# makes conda activate command for this Dockerfile +RUN echo ". $CONDA_DIR/etc/profile.d/conda.sh" >> ~/.profile +# enables conda for interactive sessions +RUN conda init bash + +# Install numpy dev dependencies +COPY --from=clone --chown=gitpod /tmp/numpy /workspace/numpy +RUN mamba env create -f /workspace/numpy/environment.yml -n $CONDA_ENV && \ + conda activate $CONDA_ENV && \ + mamba install ccache -y && \ + conda clean --all -f -y + +# Set up ccache for compilers for this Dockerfile and interactino sessions +# Using `conda env config vars set` does not work with Docker +# REF: https://github.com/conda-forge/compilers-feedstock/issues/31 +RUN echo "conda activate $CONDA_ENV" >> ~/.startuprc && \ + echo "export CC=\"ccache \$CC\"" >> ~/.startuprc && \ + echo "export CXX=\"ccache \$CXX\"" >> ~/.startuprc && \ + echo "export F77=\"ccache \$F77\"" >> ~/.startuprc && \ + echo "export F90=\"ccache \$F90\"" >> ~/.startuprc && \ + echo "export GFORTRAN=\"ccache \$GFORTRAN\"" >> ~/.startuprc && \ + echo "export FC=\"ccache \$FC\"" >> ~/.startuprc && \ + echo "source ~/.startuprc" >> ~/.profile && \ + echo "source ~/.startuprc" >> ~/.bashrc + +# Build numpy to populate the cache used by ccache +RUN python /workspace/numpy/setup.py build_ext -i && \ + ccache -s + +# .gitpod.yml is configured to install numpy from /workspace/numpy +RUN echo "export PYTHONPATH=/workspace/numpy" >> ~/.bashrc + +# gitpod will load the repository into /workspace/numpy. 
We remove the +# directoy from the image to prevent conflicts +RUN sudo rm -rf /workspace/numpy From 98ec22a2e5d82da7ee5bc63e4f2445df0e100e64 Mon Sep 17 00:00:00 2001 From: drhpc Date: Sun, 11 Apr 2021 16:52:03 +0200 Subject: [PATCH 0883/1270] BLD: introduce use of BLAS_LIBS and LAPACK_LIBS in distutils/system_info (#18737) This enables the behaviour of the build honouring the environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and NPY_LAPACK_LIBS to select any standard-conforming implementation of the f77 interfaces. This is needed to sanely have automated builds provide BLAS libs in a way common to multiple packages (using some environment variable, BLAS_LIBS being canonical, but variants exist). Hacking a site.cfg is fine for a single user, but does not scale well. As example, pkgsrc, the NetBSD packages collection, offers differing builds of Netlib or OpenBLAS, the latter in variables openblas, openblas_openmp, and openblas_pthread. With this patch, differing builds can just use different variable values like NPY_BLAS_LIBS=-lblas NPY_CBLAS_LIBS=-lcblas \ NPY_LAPACK_LIBS='-lapack -lblas' \ python setup.py build NPY_BLAS_LIBS=-lopenblas_pthread NPY_CBLAS_LIBS= \ NPY_LAPACK_LIBS='-lopenblas_pthread' \ python setup.py build (NPY_LAPACK_LIBS contains reference to BLAS, too, for packages that only use LAPACK and would miss underlying BLAS otherwise for static linking.) --- .../upcoming_changes/18737.new_feature.rst | 12 +++++ doc/source/user/building.rst | 15 +++++- numpy/distutils/system_info.py | 49 +++++++++++++++++++ 3 files changed, 74 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/18737.new_feature.rst diff --git a/doc/release/upcoming_changes/18737.new_feature.rst b/doc/release/upcoming_changes/18737.new_feature.rst new file mode 100644 index 000000000000..e451ac90ab69 --- /dev/null +++ b/doc/release/upcoming_changes/18737.new_feature.rst @@ -0,0 +1,12 @@ +BLAS and LAPACK configuration via environment variables +------------------------------------------------------- +Autodetection of installed BLAS and LAPACK libraries can be bypassed by using +the ``NPY_BLAS_LIBS`` and ``NPY_LAPACK_LIBS`` environment variables. Instead, +the link flags in these environment variables will be used directly, and the +language is assumed to be F77. This is especially useful in automated builds +where the BLAS and LAPACK that are installed are known exactly. A use case is +replacing the actual implementation at runtime via stub library links. + +If ``NPY_CBLAS_LIBS`` is set (optional in addition to ``NPY_BLAS_LIBS``), this +will be used as well, by defining ``HAVE_CBLAS`` and appending the environment +variable content to the link flags. diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst index 47399139e6e8..52d7330bf753 100644 --- a/doc/source/user/building.rst +++ b/doc/source/user/building.rst @@ -6,7 +6,7 @@ Building from source A general overview of building NumPy from source is given here, with detailed instructions for specific platforms given separately. -.. +.. This page is referenced from numpy/numpy/__init__.py. Please keep its location in sync with the link there. @@ -123,6 +123,9 @@ in the ``site.cfg.example`` file. BLAS ~~~~ +Note that both BLAS and CBLAS interfaces are needed for a properly +optimized build of NumPy. + The default order for the libraries are: 1. MKL @@ -131,6 +134,12 @@ The default order for the libraries are: 4. ATLAS 5. 
BLAS (NetLIB) +The detection of BLAS libraries may be bypassed by defining the environment +variable ``NPY_BLAS_LIBS`` , which should contain the exact linker flags you +want to use (interface is assumed to be Fortran 77). Also define +``NPY_CBLAS_LIBS`` (even empty if CBLAS is contained in your BLAS library) to +trigger use of CBLAS and avoid slow fallback code for matrix calculations. + If you wish to build against OpenBLAS but you also have BLIS available one may predefine the order of searching via the environment variable ``NPY_BLAS_ORDER`` which is a comma-separated list of the above names which @@ -163,6 +172,9 @@ The default order for the libraries are: 4. ATLAS 5. LAPACK (NetLIB) +The detection of LAPACK libraries may be bypassed by defining the environment +variable ``NPY_LAPACK_LIBS``, which should contain the exact linker flags you +want to use (language is assumed to be Fortran 77). If you wish to build against OpenBLAS but you also have MKL available one may predefine the order of searching via the environment variable @@ -185,7 +197,6 @@ list is retained. One cannot mix negation and positives, nor have multiple negations, such cases will raise an error. - .. deprecated:: 1.20 The native libraries on macOS, provided by Accelerate, are not fit for use in NumPy since they have bugs that cause wrong output under easily reproducible diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index ae6f83e12fbd..9e192329f635 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -114,6 +114,19 @@ x11_info:x11 xft_info:xft +Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER +and NPY_LAPACK_ORDER environment variables to determine the order in which +specific BLAS and LAPACK libraries are searched for. + +This search (or autodetection) can be bypassed by defining the environment +variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the +exact linker flags to use (language will be set to F77). Building against +Netlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK +implementations at runtime. If using this to build NumPy itself, it is +recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a +CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized +otherwise). 
+ Example: ---------- [DEFAULT] @@ -1847,6 +1860,16 @@ def _calc_info_lapack(self): return True return False + def _calc_info_from_envvar(self): + info = {} + info['language'] = 'f77' + info['libraries'] = [] + info['include_dirs'] = [] + info['define_macros'] = [] + info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() + self.set_info(**info) + return True + def _calc_info(self, name): return getattr(self, '_calc_info_{}'.format(name))() @@ -1857,6 +1880,12 @@ def calc_info(self): "LAPACK order has unacceptable " "values: {}".format(unknown_order)) + if 'NPY_LAPACK_LIBS' in os.environ: + # Bypass autodetection, set language to F77 and use env var linker + # flags directly + self._calc_info_from_envvar() + return + for lapack in lapack_order: if self._calc_info(lapack): return @@ -1979,6 +2008,20 @@ def _calc_info_blas(self): self.set_info(**info) return True + def _calc_info_from_envvar(self): + info = {} + info['language'] = 'f77' + info['libraries'] = [] + info['include_dirs'] = [] + info['define_macros'] = [] + info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() + if 'NPY_CBLAS_LIBS' in os.environ: + info['define_macros'].append(('HAVE_CBLAS', None)) + info['extra_link_args'].extend( + os.environ['NPY_CBLAS_LIBS'].split()) + self.set_info(**info) + return True + def _calc_info(self, name): return getattr(self, '_calc_info_{}'.format(name))() @@ -1987,6 +2030,12 @@ def calc_info(self): if len(unknown_order) > 0: raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) + if 'NPY_BLAS_LIBS' in os.environ: + # Bypass autodetection, set language to F77 and use env var linker + # flags directly + self._calc_info_from_envvar() + return + for blas in blas_order: if self._calc_info(blas): return From a100272b6f8cbfad2b18dfd638a0fb2bf51563d6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 11 Apr 2021 10:50:47 -0600 Subject: [PATCH 0884/1270] MAINT: Allow more recursion depth for scalar tests. The test_operator_object_left and test_operator_object_right tests in test_scalarmath.py set a maximum recursion depth of 100 that caused an error in gitpod because it was too close to the starting depth of 75. The fix here is to raise the maximum depth to 200. 
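For reference, ``recursionlimit`` is a small context-manager helper in
test_scalarmath.py built around ``sys.setrecursionlimit``; a minimal sketch of
the pattern (illustrative, not a verbatim copy of the helper):

    import sys
    from contextlib import contextmanager

    @contextmanager
    def recursionlimit(n):
        # temporarily change the interpreter recursion limit and restore
        # the previous value even if the body raises
        old = sys.getrecursionlimit()
        try:
            sys.setrecursionlimit(n)
            yield
        finally:
            sys.setrecursionlimit(old)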
Closes #18749 --- numpy/core/tests/test_scalarmath.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 1ae8c9a65212..d91b4a39146d 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -737,7 +737,7 @@ def recursionlimit(n): @settings(verbosity=Verbosity.verbose) def test_operator_object_left(o, op, type_): try: - with recursionlimit(100): + with recursionlimit(200): op(o, type_(1)) except TypeError: pass @@ -748,7 +748,7 @@ def test_operator_object_left(o, op, type_): sampled_from(types)) def test_operator_object_right(o, op, type_): try: - with recursionlimit(100): + with recursionlimit(200): op(type_(1), o) except TypeError: pass From 6cc0a19d8092967a48e9830f1a4d5cf0b2251256 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 12 Apr 2021 00:26:20 +0530 Subject: [PATCH 0885/1270] BUG: Fixed loop order of umath generate in floor divide --- numpy/core/code_generators/generate_umath.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 57c811ff3306..6b6a0fe64ad5 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -327,9 +327,9 @@ def english_upper(s): Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy.core.umath.floor_divide'), 'PyUFunc_DivisionTypeResolver', - TD(uints, cfunc_alias='divide', + TD(ints, cfunc_alias='divide', dispatch=[('loops_arithmetic', 'BHILQ')]), - TD(sints + flts + cmplx), + TD(flts + cmplx), [TypeDescription('m', FullTypeDescr, 'mq', 'm'), TypeDescription('m', FullTypeDescr, 'md', 'm'), TypeDescription('m', FullTypeDescr, 'mm', 'q'), From 468ff05d6a3bf1daa19a3066330619bc483b30f6 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Mon, 12 Apr 2021 00:26:26 +0530 Subject: [PATCH 0886/1270] TST: Test for ufunc orders. Reg (#18075) --- numpy/core/tests/test_regression.py | 33 +++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index e1a5a8a80500..d1af7f1d8667 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -2527,3 +2527,36 @@ def cassé(x): f = np.frompyfunc(cassé, 1, 1) assert str(f) == "" + + @pytest.mark.parametrize("operation", [ + 'add', 'subtract', 'multiply', 'floor_divide', + 'conjugate', 'fmod', 'square', 'reciprocal', + 'power', 'absolute', 'negative', 'positive', + 'greater', 'greater_equal', 'less', + 'less_equal', 'equal', 'not_equal', 'logical_and', + 'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or', + 'bitwise_xor', 'invert', 'left_shift', 'right_shift', + 'gcd', 'lcm' + ] + ) + @pytest.mark.parametrize("order", [ + ('b->', 'B->'), + ('h->', 'H->'), + ('i->', 'I->'), + ('l->', 'L->'), + ('q->', 'Q->'), + ] + ) + def test_ufunc_order(self, operation, order): + # gh-18075 + # Ensure signed types before unsigned + def get_idx(string, str_lst): + for i, s in enumerate(str_lst): + if string in s: + return i + raise ValueError(f"{string} not in list") + types = getattr(np, operation).types + assert get_idx(order[0], types) < get_idx(order[1], types), ( + f"Unexpected types order of ufunc in {operation}" + f"for {order}. 
Possible fix: Use signed before unsigned" + "in generate_umath.py") From 0d3e433862105579a80b791d76f921dcc261928b Mon Sep 17 00:00:00 2001 From: h6197627 <44726212+h6197627@users.noreply.github.com> Date: Mon, 12 Apr 2021 01:02:36 +0300 Subject: [PATCH 0887/1270] BLD: Negative zero handling with ifort Fixes https://github.com/scipy/scipy/issues/11339 **What does this implement/fix?** NumPy/SciPy compiled with Intel Fortran compiler using default flags treats both -0.0 and +0.0 as 0.0, however it is not IEEE 754 compliant and some SciPy logic relies on this -0.0 and +0.0 differentiation. --- numpy/distutils/fcompiler/intel.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py index c7b3c2340df6..e97cadbb18e1 100644 --- a/numpy/distutils/fcompiler/intel.py +++ b/numpy/distutils/fcompiler/intel.py @@ -59,7 +59,7 @@ def get_flags(self): def get_flags_opt(self): # Scipy test failures with -O2 v = self.get_version() mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model', 'strict', '-O1', '-{}'.format(mpopt)] + return ['-fp-model', 'strict', '-O1', '-assume', 'minus0', '-{}'.format(mpopt)] def get_flags_arch(self): return [] @@ -119,17 +119,6 @@ class IntelEM64TFCompiler(IntelFCompiler): 'ranlib' : ["ranlib"] } - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): # Scipy test failures with -O2 - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model', 'strict', '-O1', '-{}'.format(mpopt)] - - def get_flags_arch(self): - return [] - # Is there no difference in the version string between the above compilers # and the Visual compilers? @@ -174,7 +163,7 @@ def get_flags_debug(self): return ['/4Yb', '/d2'] def get_flags_opt(self): - return ['/O1'] # Scipy test failures with /O2 + return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 def get_flags_arch(self): return ["/arch:IA32", "/QaxSSE3"] From bd7b3993c358ad880c0dc80e1661bd02e9388f4d Mon Sep 17 00:00:00 2001 From: h6197627 <44726212+h6197627@users.noreply.github.com> Date: Mon, 12 Apr 2021 01:34:35 +0300 Subject: [PATCH 0888/1270] STY: lint line length --- numpy/distutils/fcompiler/intel.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py index e97cadbb18e1..f97c5b3483e1 100644 --- a/numpy/distutils/fcompiler/intel.py +++ b/numpy/distutils/fcompiler/intel.py @@ -59,7 +59,8 @@ def get_flags(self): def get_flags_opt(self): # Scipy test failures with -O2 v = self.get_version() mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model', 'strict', '-O1', '-assume', 'minus0', '-{}'.format(mpopt)] + return ['-fp-model', 'strict', '-O1', + '-assume', 'minus0', '-{}'.format(mpopt)] def get_flags_arch(self): return [] From 5f8c540885378f5e34c676ce4a77b10d56b18e54 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 12 Apr 2021 08:04:36 +0000 Subject: [PATCH 0889/1270] MAINT: Bump sphinx from 3.5.3 to 3.5.4 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 3.5.3 to 3.5.4. 
- [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/4.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/commits/v3.5.4) Signed-off-by: dependabot-preview[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 526d5cfbaf87..97cba369d207 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx==3.5.3 +sphinx==3.5.4 numpydoc==1.1.0 ipython scipy From 9766f908347928c51bff3d03b4fab643a8ca796f Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 12 Apr 2021 08:06:32 +0000 Subject: [PATCH 0890/1270] MAINT: Bump hypothesis from 6.8.4 to 6.9.1 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.8.4 to 6.9.1. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.8.4...hypothesis-python-6.9.1) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 65ebc0b453b2..902ff3306b65 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.22 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.8.4 +hypothesis==6.9.1 pytest==6.2.3 pytz==2021.1 pytest-cov==2.11.1 From c377c04a88cedec8ed2f1655245b179d0a674515 Mon Sep 17 00:00:00 2001 From: melissawm Date: Mon, 12 Apr 2021 12:00:00 -0300 Subject: [PATCH 0891/1270] DOC: Update howto-docs with link to NumPy tutorials. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Includes a few textual fixes suggested in gh-16386 and new link to Daniele Procidas's newly renamed Diátaxis Framework (instead of the Divio blog). --- doc/source/dev/howto-docs.rst | 49 +++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst index 17194bd58a99..310756763db3 100644 --- a/doc/source/dev/howto-docs.rst +++ b/doc/source/dev/howto-docs.rst @@ -24,13 +24,16 @@ and stored in the `NumPy Archive repository ************************* What's needed ************************* -NumPy docs have the details covered. API reference -documentation is generated directly from -`docstrings `_ in the code -when the documentation is :ref:`built`. -What we lack are docs with broader scope -- tutorials, how-tos, and explanations. -Reporting defects is another way to contribute. We discuss both. +The :ref:`NumPy Documentation ` has the details covered. +API reference documentation is generated directly from +`docstrings `_ in the code when the +documentation is :ref:`built`. Although we have mostly +complete reference documentation for each function and class exposed to users, +there is a lack of usage examples for some of them. + +What we lack are docs with broader scope -- tutorials, how-tos, and +explanations. Reporting defects is another way to contribute. We discuss both. ************************* Contributing fixes @@ -48,8 +51,8 @@ a `pull request (PR) `__ with the fix, if you know how to do that; otherwise please `open an issue `__. -**Typos and misspellings** fall on a lower rung; we welcome hearing about them but -may not be able to fix them promptly. 
These too can be handled as pull +**Typos and misspellings** fall on a lower rung; we welcome hearing about them +but may not be able to fix them promptly. These too can be handled as pull requests or issues. Obvious **wording** mistakes (like leaving out a "not") fall into the typo @@ -74,28 +77,36 @@ If you're looking for subjects, our formal roadmap for documentation is a *NumPy Enhancement Proposal (NEP)*, `NEP 44 - Restructuring the NumPy Documentation `__. It identifies areas where our docs need help and lists several -additions we'd like to see, including Jupyter notebooks. - -You can find larger planned and in-progress ideas `at -our GitHub project `__. +additions we'd like to see, including :ref:`Jupyter notebooks `. .. _tutorials_howtos_explanations: - -Formula writing +Documentation framework ============================================================================== + There are formulas for writing useful documents, and four formulas cover nearly everything. There are four formulas because there are four categories of document -- ``tutorial``, ``how-to guide``, ``explanation``, and ``reference``. The insight that docs divide up this way belongs to -Daniele Procida, who goes on -`in this short article `__ to explain -the differences and reveal the formulas. When you begin a document or -propose one, have in mind which of these types it will be. +Daniele Procida and his `Diátaxis Framework `__. When you +begin a document or propose one, have in mind which of these types it will be. +.. _numpy_tutorials: -.. _contributing: +NumPy tutorials +=============== + +In addition to the documentation that is part of the NumPy source tree, you can +submit content in Jupyter Notebook format to the +`NumPy Tutorials `__ page. This +set of tutorials and educational materials is meant to provide high-quality +resources by the NumPy project, both for self-learning and for teaching classes +with. These resources are developed in a separate GitHub repository, +`numpy-tutorials `__, where you can +check out existing notebooks, open issues to suggest new topics or submit your +own tutorials as pull requests. +.. _contributing: More on contributing ============================================================================== From 3c95a4d2398bc0e472d9db2c3c7936e306f24ca1 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Mon, 12 Apr 2021 09:38:26 -0700 Subject: [PATCH 0892/1270] DOC: trim headings - purely cosmetic. --- doc/source/dev/howto-docs.rst | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst index 310756763db3..3687d7550ebe 100644 --- a/doc/source/dev/howto-docs.rst +++ b/doc/source/dev/howto-docs.rst @@ -7,9 +7,9 @@ How to contribute to the NumPy documentation This guide will help you decide what to contribute and how to submit it to the official NumPy documentation. -****************************************************************************** +*************************** Documentation team meetings -****************************************************************************** +*************************** The NumPy community has set a firm goal of improving its documentation. We hold regular documentation meetings on Zoom (dates are announced on the @@ -21,9 +21,9 @@ Minutes are taken `on hackmd.io `__ and stored in the `NumPy Archive repository `__. 
-************************* +************* What's needed -************************* +************* The :ref:`NumPy Documentation ` has the details covered. API reference documentation is generated directly from @@ -35,9 +35,9 @@ there is a lack of usage examples for some of them. What we lack are docs with broader scope -- tutorials, how-tos, and explanations. Reporting defects is another way to contribute. We discuss both. -************************* +****************** Contributing fixes -************************* +****************** We're eager to hear about and fix doc defects. But to attack the biggest problems we end up having to defer or overlook some bug reports. Here are the @@ -59,9 +59,9 @@ Obvious **wording** mistakes (like leaving out a "not") fall into the typo category, but other rewordings -- even for grammar -- require a judgment call, which raises the bar. Test the waters by first presenting the fix as an issue. -****************************************************************************** +********************** Contributing new pages -****************************************************************************** +********************** Your frustrations using our documents are our best guide to what needs fixing. @@ -82,7 +82,7 @@ additions we'd like to see, including :ref:`Jupyter notebooks ` .. _tutorials_howtos_explanations: Documentation framework -============================================================================== +======================= There are formulas for writing useful documents, and four formulas cover nearly everything. There are four formulas because there are four @@ -109,7 +109,7 @@ own tutorials as pull requests. .. _contributing: More on contributing -============================================================================== +==================== Don't worry if English is not your first language, or if you can only come up with a rough draft. Open source is a community effort. Do your best -- we'll @@ -137,9 +137,9 @@ rST, see the `Quick reStructuredText Guide `__ -************************************************************ +*********************** Contributing indirectly -************************************************************ +*********************** If you run across outside material that would be a useful addition to the NumPy docs, let us know by `opening an issue `__. @@ -149,9 +149,9 @@ if you write a tutorial on your blog, create a YouTube video, or answer question on Stack Overflow and other sites. -************************************************************ +********************* Documentation reading -************************************************************ +********************* - The leading organization of technical writers, `Write the Docs `__, From 90a40895d78ab793e087b8433298903966643816 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 12 Apr 2021 19:41:16 -0500 Subject: [PATCH 0893/1270] DOC: Small fixes (including formatting) for NEP 43 Just have been reviewing the text, I may do larger changes, but lets start with the small ones... --- doc/neps/nep-0043-extensible-ufuncs.rst | 55 +++++++++++++------------ 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst index 6442b9b03a0a..3c64077284de 100644 --- a/doc/neps/nep-0043-extensible-ufuncs.rst +++ b/doc/neps/nep-0043-extensible-ufuncs.rst @@ -337,12 +337,12 @@ steps involved in a call to a universal function in NumPy. 
A UFunc call is split into the following steps: -1. *Handle ``__array_ufunc__`` protocol:* +1. Handle ``__array_ufunc__`` protocol: * For array-likes such as a Dask arrays, NumPy can defer the operation. This step is performed first, and unaffected by this NEP (compare :ref:`NEP18`). -2. *Promotion and dispatching* +2. Promotion and dispatching * Given the DTypes of all inputs, find the correct implementation. E.g. an implementation for ``float64``, ``int64`` or a user-defined DType. @@ -351,7 +351,7 @@ A UFunc call is split into the following steps: For example, adding a ``float32`` and a ``float64`` is implemented by first casting the ``float32`` to ``float64``. -3. *Parametric ``dtype`` resolution:* +3. Parametric ``dtype`` resolution: * In general, whenever an output DType is parametric the parameters have to be found (resolved). @@ -362,15 +362,16 @@ A UFunc call is split into the following steps: which fills in the default dtype instances (ensuring for example native byte order). -4. *Preparing the iteration:* +4. Preparing the iteration: * This step is largely handled by ``NpyIter`` internally (the iterator). * Allocate all outputs and temporary buffers necessary to perform casts. + This *requires* the dtypes as resolved in step 3. * Find the best iteration order, which includes information to efficiently implement broadcasting. For example, adding a single value to an array repeats the same value. -5. *Setup and fetch the C-level function:* +5. Setup and fetch the C-level function: * If necessary, allocate temporary working space. * Find the C-implemented, light weight, inner-loop function. @@ -383,33 +384,35 @@ A UFunc call is split into the following steps: the GIL may be released (to allow threading). * Clear floating point exception flags. -6. *Perform the actual calculation:* +6. Perform the actual calculation: * Run the DType specific inner-loop function. * The inner-loop may require access to additional data, such as dtypes or additional data set in the previous step. * The inner-loop function may be called an undefined number of times. -7. *Finalize:* +7. Finalize: - * Free any temporary working space allocated in 5. + * Free any temporary working space allocated in step 5. * Check for floating point exception flags. * Return the result. The ``ArrayMethod`` provides a concept to group steps 3 to 6 and partially 7. -However, implementers of a new ufunc or ``ArrayMethod`` do not need to -customize the behaviour in steps 4 or 6, aside from the inner-loop function. -For the ``ArrayMethod`` implementer, the central steps to have control over -are step 3 and step 5 to provide the custom inner-loop function. -Further customization is a potential future extension. +However, implementers of a new ufunc or ``ArrayMethod`` usually do not need to +customize the behaviour in steps 4 or 6 which NumPy can and does provide. +For the ``ArrayMethod`` implementer, the central steps to customize +are step 3 and step 5. These provide the custom inner-loop function and +potentially inner-loop specific setup. +Further customization is possible and anticipated as future extensions. -Step 2. is promotion and dispatching which will also be restructured -with new API which allows influencing the process where necessary. +Step 2. is promotion and dispatching and will be restructured +with new API to allow customization of the process where necessary. Step 1 is listed for completeness and is unaffected by this NEP. 
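As a small user-level illustration of the promotion in step 2 (a sketch, not
part of the NEP's normative text): when no exact loop matches, the common
DType's implementation is used and the other operand is cast.

    import numpy as np

    a = np.ones(3, dtype=np.float32)
    b = np.ones(3, dtype=np.float64)
    # no float32+float64 loop exists; the float64 implementation is chosen
    # and ``a`` is cast, so the result dtype is float64
    print((a + b).dtype)   # float64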
The following sketch provides an overview of step 2 to 6 with an emphasize -of how dtypes are handled: +of how dtypes are handled and which parts are customizable ("Registered") +and which are handled by NumPy: .. figure:: _static/nep43-sketch.svg :figclass: align-center @@ -425,7 +428,6 @@ We use the ``class`` syntax to describe the information required to create a new ``ArrayMethod`` object: .. code-block:: python - :dedent: 0 class ArrayMethod: name: str # Name, mainly useful for debugging @@ -470,7 +472,6 @@ a new ``ArrayMethod`` object: With ``Context`` providing mostly static information about the function call: .. code-block:: python - :dedent: 0 class Context: # The ArrayMethod object itself: @@ -660,7 +661,7 @@ definitions (see also :ref:`NEP 42 ` ``CastingImpl``): for example for implementing warnings (see Error and Warning Handling below). To simplify this NumPy will pass a single zero initialized ``npy_intp *`` when ``user_data`` is not set. - *NOTE that it would be possible to pass this as part of ``Context``.* + *Note that it would be possible to pass this as part of Context.* * The optional ``get_loop`` function will not be public initially, to avoid finalizing the API which requires design choices also with casting: @@ -757,7 +758,7 @@ While the returned casting-safety (``NPY_CASTING``) will almost always be cast and a custom error will be set. (This distinction is important for the ``np.can_cast()`` function, which should raise the first one and return ``False`` in the second case, it is not noteworthy for typical ufuncs). - *This point is under consideration, we may use ``-1`` to indicate + *This point is under consideration, we may use -1 to indicate a general error, and use a different return value for an impossible cast.* * Returning the casting safety is central to NEP 42 for casting and allows the unmodified use of ``ArrayMethod`` there. @@ -794,7 +795,7 @@ Extending the inner-loop signature Extending the inner-loop signature is another central and necessary part of the NEP. -**Passing in the ``Context``:** +**Passing in the Context:** Passing in the ``Context`` potentially allows for the future extension of the signature by adding new fields to the context struct. @@ -822,8 +823,8 @@ exists in NumPy for this purpose, it seems a natural choice. To simplify some use-cases (see "Error Handling" below), we will pass a ``npy_intp *innerloop_data = 0`` instead when ``innerloop_data`` is not provided. -*Note: Since ``get_loop`` is expected to be private initially we can gain -experience with ``innerloop_data`` before exposing it as public API.* +*Note:* Since ``get_loop`` is expected to be private initially we can gain +experience with ``innerloop_data`` before exposing it as public API. **Return value:** @@ -954,7 +955,7 @@ This wrapped ``ArrayMethod`` will have two additional methods: have changed the byte-order), and further resolve the physical unit making the final signature:: - ``Unit[Float64]("m") + Unit[Float64]("m") -> Unit[Float64]("m")`` + Unit[Float64]("m") + Unit[Float64]("m") -> Unit[Float64]("m") the UFunc machinery will take care of casting the "km" input to "m". @@ -1039,8 +1040,8 @@ In the future we expect that ``ArrayMethod``\ s can also be defined for **Promotion:** If a matching ``ArrayMethod`` exists, dispatching is straight forward. 
-However, when it does not, require additional definitions to implement -promotion: +However, when it does not, additional definitions are required to implement +this "promotion": * By default any UFunc has a promotion which uses the common DType of all inputs and dispatches a second time. This is well-defined for most @@ -1085,7 +1086,7 @@ In this case, just as a ``Timedelta64 * int64`` and ``int64 * timedelta64`` ``ArrayMethod`` is necessary, a second promoter will have to be registered to handle the case where the integer is passed first. -**Dispatching rules for ``ArrayMethod`` and Promoters:** +**Dispatching rules for ArrayMethod and Promoters:** Promoter and ``ArrayMethod`` are discovered by finding the best match as defined by the DType class hierarchy. From b9d9e03014aaa4d7b6f06a89ceae06317a0012a3 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 13 Apr 2021 17:10:59 +0200 Subject: [PATCH 0894/1270] ENH: Improve placeholder annotation for non-function objects e.g. types and instances --- numpy/__init__.pyi | 69 ++++++++++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a963493ce0e6..cbf3c0898257 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -365,9 +365,32 @@ __path__: List[str] __version__: str __git_version__: str -DataSource: Any -MachAr: Any -ScalarType: Any +# TODO: Move placeholders to their respective module once +# their annotations are properly implemented +# +# Placeholders for non-functions (types and other objects) +DataSource: Type[Any] +MachAr: Type[Any] +ScalarType: Tuple[Type[Any], ...] +broadcast: Type[Any] +busdaycalendar: Type[Any] +cast: Dict[generic, Callable[..., ndarray[Any, dtype[Any]]]] +chararray: Type[Any] +finfo: Type[Any] +format_parser: Type[Any] +iinfo: Type[Any] +matrix: Type[Any] +memmap: Type[Any] +nbytes: Dict[generic, int] +nditer: Type[Any] +numarray: str +poly1d: Type[Any] +recarray: Type[Any] +record: Type[Any] +typecodes: Dict[str, str] +vectorize: Type[Any] + +# Placeholders for python-based functions angle: Any append: Any apply_along_axis: Any @@ -384,16 +407,12 @@ bincount: Any bitwise_not: Any blackman: Any bmat: Any -broadcast: Any broadcast_arrays: Any broadcast_to: Any busday_count: Any busday_offset: Any -busdaycalendar: Any byte_bounds: Any can_cast: Any -cast: Any -chararray: Any column_stack: Any common_type: Any compare_chararrays: Any @@ -421,20 +440,9 @@ dstack: Any ediff1d: Any expand_dims: Any extract: Any -def eye( - N: int, - M: Optional[int] = ..., - k: int = ..., - dtype: DTypeLike = ..., - order: _OrderCF = ..., - *, - like: Optional[ArrayLike] = ... -) -> ndarray[Any, Any]: ... 
-finfo: Any flip: Any fliplr: Any flipud: Any -format_parser: Any frombuffer: Any fromfile: Any fromiter: Any @@ -453,7 +461,6 @@ histogram_bin_edges: Any histogramdd: Any hsplit: Any i0: Any -iinfo: Any imag: Any in1d: Any info: Any @@ -478,11 +485,9 @@ lookfor: Any mafromtxt: Any mask_indices: Any mat: Any -matrix: Any max: Any may_share_memory: Any median: Any -memmap: Any meshgrid: Any min: Any min_scalar_type: Any @@ -504,19 +509,14 @@ nanquantile: Any nanstd: Any nansum: Any nanvar: Any -nbytes: Any ndfromtxt: Any -nditer: Any nested_iters: Any -newaxis: Any -numarray: Any packbits: Any pad: Any percentile: Any piecewise: Any place: Any poly: Any -poly1d: Any polyadd: Any polyder: Any polydiv: Any @@ -532,10 +532,8 @@ putmask: Any quantile: Any real: Any real_if_close: Any -recarray: Any recfromcsv: Any recfromtxt: Any -record: Any result_type: Any roots: Any rot90: Any @@ -567,7 +565,6 @@ trim_zeros: Any triu: Any triu_indices: Any triu_indices_from: Any -typecodes: Any typename: Any union1d: Any unique: Any @@ -575,7 +572,6 @@ unpackbits: Any unwrap: Any vander: Any vdot: Any -vectorize: Any vsplit: Any where: Any who: Any @@ -584,6 +580,17 @@ _NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) _ByteOrder = Literal["S", "<", ">", "=", "|", "L", "B", "N", "I"] +# TODO: Move to `np.lib.twodim_base` +def eye( + N: int, + M: Optional[int] = ..., + k: int = ..., + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + like: Optional[ArrayLike] = ... +) -> ndarray[Any, Any]: ... + class dtype(Generic[_DTypeScalar_co]): names: Optional[Tuple[str, ...]] # Overload for subclass of generic @@ -2907,6 +2914,8 @@ False_: Final[bool_] UFUNC_PYVALS_NAME: Final[str] +newaxis: None + class ufunc: @property def __name__(self) -> str: ... From 3a7e7610ab54200b2640b395ab738ec67c6e3db4 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 13 Apr 2021 17:11:47 +0200 Subject: [PATCH 0895/1270] ENH: Improve placeholder annotations for Python-based functions --- numpy/__init__.pyi | 299 +++++++++++++++++++++++---------------------- 1 file changed, 150 insertions(+), 149 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index cbf3c0898257..23d40360d7be 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -390,191 +390,192 @@ record: Type[Any] typecodes: Dict[str, str] vectorize: Type[Any] -# Placeholders for python-based functions -angle: Any -append: Any -apply_along_axis: Any -apply_over_axes: Any +# Placeholders for Python-based functions +def angle(z, deg=...): ... +def append(arr, values, axis=...): ... +def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... +def apply_over_axes(func, a, axes): ... +def array_split(ary, indices_or_sections, axis=...): ... +def asarray_chkfinite(a, dtype=..., order=...): ... +def asfarray(a, dtype=...): ... +def asmatrix(data, dtype=...): ... +def asscalar(a): ... +def average(a, axis=..., weights=..., returned=...): ... +def bartlett(M): ... +def blackman(M): ... +def bmat(obj, ldict=..., gdict=...): ... +def broadcast_arrays(*args, subok=...): ... +def broadcast_to(array, shape, subok=...): ... +def byte_bounds(a): ... +def column_stack(tup): ... +def common_type(*arrays): ... +def copy(a, order=..., subok=...): ... +def corrcoef(x, y=..., rowvar=..., bias=..., ddof=..., *, dtype=...): ... +def cov(m, y=..., rowvar=..., bias=..., ddof=..., fweights=..., aweights=..., *, dtype=...): ... +def cumproduct(*args, **kwargs): ... 
+def delete(arr, obj, axis=...): ... +def deprecate(*args, **kwargs): ... +def deprecate_with_doc(msg): ... +def diag(v, k=...): ... +def diagflat(v, k=...): ... +def diff(a, n=..., axis=..., prepend=..., append=...): ... +def digitize(x, bins, right=...): ... +def disp(mesg, device=..., linefeed=...): ... +def dsplit(ary, indices_or_sections): ... +def dstack(tup): ... +def ediff1d(ary, to_end=..., to_begin=...): ... +def expand_dims(a, axis): ... +def extract(condition, arr): ... +def flip(m, axis=...): ... +def fliplr(m): ... +def flipud(m): ... +def fromregex(file, regexp, dtype, encoding=...): ... +def genfromtxt(fname, dtype=..., comments=..., delimiter=..., skip_header=..., skip_footer=..., converters=..., missing_values=..., filling_values=..., usecols=..., names=..., excludelist=..., deletechars=..., replace_space=..., autostrip=..., case_sensitive=..., defaultfmt=..., unpack=..., usemask=..., loose=..., invalid_raise=..., max_rows=..., encoding=..., *, like=...): ... +def get_include(): ... +def gradient(f, *varargs, axis=..., edge_order=...): ... +def hamming(M): ... +def hanning(M): ... +def histogram(a, bins=..., range=..., normed=..., weights=..., density=...): ... +def histogram2d(x, y, bins=..., range=..., normed=..., weights=..., density=...): ... +def histogram_bin_edges(a, bins=..., range=..., weights=...): ... +def histogramdd(sample, bins=..., range=..., normed=..., weights=..., density=...): ... +def hsplit(ary, indices_or_sections): ... +def i0(x): ... +def imag(val): ... +def in1d(ar1, ar2, assume_unique=..., invert=...): ... +def info(object=..., maxwidth=..., output=..., toplevel=...): ... +def insert(arr, obj, values, axis=...): ... +def interp(x, xp, fp, left=..., right=..., period=...): ... +def intersect1d(ar1, ar2, assume_unique=..., return_indices=...): ... +def iscomplex(x): ... +def iscomplexobj(x): ... +def isin(element, test_elements, assume_unique=..., invert=...): ... +def isreal(x): ... +def isrealobj(x): ... +def iterable(y): ... +def kaiser(M, beta): ... +def kron(a, b): ... +def load(file, mmap_mode=..., allow_pickle=..., fix_imports=..., encoding=...): ... +def loads(*args, **kwargs): ... +def loadtxt(fname, dtype=..., comments=..., delimiter=..., converters=..., skiprows=..., usecols=..., unpack=..., ndmin=..., encoding=..., max_rows=..., *, like=...): ... +def lookfor(what, module=..., import_modules=..., regenerate=..., output=...): ... +def mafromtxt(fname, **kwargs): ... +def mask_indices(n, mask_func, k=...): ... +def mat(data, dtype=...): ... +def max(a, axis=..., out=..., keepdims=..., initial=..., where=...): ... +def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... +def meshgrid(*xi, copy=..., sparse=..., indexing=...): ... +def min(a, axis=..., out=..., keepdims=..., initial=..., where=...): ... +def mintypecode(typechars, typeset=..., default=...): ... +def msort(a): ... +def nan_to_num(x, copy=..., nan=..., posinf=..., neginf=...): ... +def nanargmax(a, axis=...): ... +def nanargmin(a, axis=...): ... +def nancumprod(a, axis=..., dtype=..., out=...): ... +def nancumsum(a, axis=..., dtype=..., out=...): ... +def nanmax(a, axis=..., out=..., keepdims=...): ... +def nanmean(a, axis=..., dtype=..., out=..., keepdims=...): ... +def nanmedian(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... +def nanmin(a, axis=..., out=..., keepdims=...): ... +def nanpercentile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... +def nanprod(a, axis=..., dtype=..., out=..., keepdims=...): ... 
+def nanquantile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... +def nanstd(a, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... +def nansum(a, axis=..., dtype=..., out=..., keepdims=...): ... +def nanvar(a, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... +def ndfromtxt(fname, **kwargs): ... +def pad(array, pad_width, mode=..., **kwargs): ... +def percentile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... +def piecewise(x, condlist, funclist, *args, **kw): ... +def place(arr, mask, vals): ... +def poly(seq_of_zeros): ... +def polyadd(a1, a2): ... +def polyder(p, m=...): ... +def polydiv(u, v): ... +def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... +def polyint(p, m=..., k=...): ... +def polymul(a1, a2): ... +def polysub(a1, a2): ... +def polyval(p, x): ... +def product(*args, **kwargs): ... +def put_along_axis(arr, indices, values, axis): ... +def quantile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... +def real(val): ... +def real_if_close(a, tol=...): ... +def recfromcsv(fname, **kwargs): ... +def recfromtxt(fname, **kwargs): ... +def roots(p): ... +def rot90(m, k=..., axes=...): ... +def round(a, decimals=..., out=...): ... +def round_(a, decimals=..., out=...): ... +def row_stack(tup): ... +def save(file, arr, allow_pickle=..., fix_imports=...): ... +def savetxt(fname, X, fmt=..., delimiter=..., newline=..., header=..., footer=..., comments=..., encoding=...): ... +def savez(file, *args, **kwds): ... +def savez_compressed(file, *args, **kwds): ... +def select(condlist, choicelist, default=...): ... +def setdiff1d(ar1, ar2, assume_unique=...): ... +def setxor1d(ar1, ar2, assume_unique=...): ... +def show_config(): ... +def sinc(x): ... +def sort_complex(a): ... +def source(object, output=...): ... +def split(ary, indices_or_sections, axis=...): ... +def take_along_axis(arr, indices, axis): ... +def tile(A, reps): ... +def trapz(y, x=..., dx=..., axis=...): ... +def tri(N, M=..., k=..., dtype=..., *, like=...): ... +def tril(m, k=...): ... +def tril_indices(n, k=..., m=...): ... +def tril_indices_from(arr, k=...): ... +def trim_zeros(filt, trim=...): ... +def triu(m, k=...): ... +def triu_indices(n, k=..., m=...): ... +def triu_indices_from(arr, k=...): ... +def typename(char): ... +def union1d(ar1, ar2): ... +def unique(ar, return_index=..., return_inverse=..., return_counts=..., axis=...): ... +def unwrap(p, discont=..., axis=...): ... +def vander(x, N=..., increasing=...): ... +def vsplit(ary, indices_or_sections): ... +def who(vardict=...): ... 
+ arange: Any -array_split: Any -asarray_chkfinite: Any -asfarray: Any -asmatrix: Any -asscalar: Any -average: Any -bartlett: Any bincount: Any bitwise_not: Any -blackman: Any -bmat: Any -broadcast_arrays: Any -broadcast_to: Any busday_count: Any busday_offset: Any -byte_bounds: Any can_cast: Any -column_stack: Any -common_type: Any compare_chararrays: Any concatenate: Any conj: Any -copy: Any copyto: Any -corrcoef: Any -cov: Any -cumproduct: Any datetime_as_string: Any datetime_data: Any -delete: Any -deprecate: Any -deprecate_with_doc: Any -diag: Any -diagflat: Any -diff: Any -digitize: Any -disp: Any divide: Any dot: Any -dsplit: Any -dstack: Any -ediff1d: Any -expand_dims: Any -extract: Any -flip: Any -fliplr: Any -flipud: Any frombuffer: Any fromfile: Any fromiter: Any frompyfunc: Any -fromregex: Any fromstring: Any -genfromtxt: Any -get_include: Any geterrobj: Any -gradient: Any -hamming: Any -hanning: Any -histogram: Any -histogram2d: Any -histogram_bin_edges: Any -histogramdd: Any -hsplit: Any -i0: Any -imag: Any -in1d: Any -info: Any inner: Any -insert: Any -interp: Any -intersect1d: Any is_busday: Any -iscomplex: Any -iscomplexobj: Any -isin: Any -isreal: Any -isrealobj: Any -iterable: Any -kaiser: Any -kron: Any lexsort: Any -load: Any -loads: Any -loadtxt: Any -lookfor: Any -mafromtxt: Any -mask_indices: Any -mat: Any -max: Any may_share_memory: Any -median: Any -meshgrid: Any -min: Any min_scalar_type: Any -mintypecode: Any mod: Any -msort: Any -nan_to_num: Any -nanargmax: Any -nanargmin: Any -nancumprod: Any -nancumsum: Any -nanmax: Any -nanmean: Any -nanmedian: Any -nanmin: Any -nanpercentile: Any -nanprod: Any -nanquantile: Any -nanstd: Any -nansum: Any -nanvar: Any -ndfromtxt: Any nested_iters: Any packbits: Any -pad: Any -percentile: Any -piecewise: Any -place: Any -poly: Any -polyadd: Any -polyder: Any -polydiv: Any -polyfit: Any -polyint: Any -polymul: Any -polysub: Any -polyval: Any -product: Any promote_types: Any -put_along_axis: Any putmask: Any -quantile: Any -real: Any -real_if_close: Any -recfromcsv: Any -recfromtxt: Any result_type: Any -roots: Any -rot90: Any -round: Any -round_: Any -row_stack: Any -save: Any -savetxt: Any -savez: Any -savez_compressed: Any -select: Any -setdiff1d: Any seterrobj: Any -setxor1d: Any shares_memory: Any -show_config: Any -sinc: Any -sort_complex: Any -source: Any -split: Any -take_along_axis: Any -tile: Any -trapz: Any -tri: Any -tril: Any -tril_indices: Any -tril_indices_from: Any -trim_zeros: Any -triu: Any -triu_indices: Any -triu_indices_from: Any -typename: Any -union1d: Any -unique: Any unpackbits: Any -unwrap: Any -vander: Any vdot: Any -vsplit: Any where: Any -who: Any _NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) From 67d6abe5684fc49371ac6d21619ef8a88d7e3878 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 13 Apr 2021 17:12:41 +0200 Subject: [PATCH 0896/1270] ENH: Improve placeholder annotations for C-based functions --- numpy/__init__.pyi | 73 +++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 36 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 23d40360d7be..41e494fb4f4c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -540,42 +540,43 @@ def vander(x, N=..., increasing=...): ... def vsplit(ary, indices_or_sections): ... def who(vardict=...): ... 
-arange: Any -bincount: Any -bitwise_not: Any -busday_count: Any -busday_offset: Any -can_cast: Any -compare_chararrays: Any -concatenate: Any -conj: Any -copyto: Any -datetime_as_string: Any -datetime_data: Any -divide: Any -dot: Any -frombuffer: Any -fromfile: Any -fromiter: Any -frompyfunc: Any -fromstring: Any -geterrobj: Any -inner: Any -is_busday: Any -lexsort: Any -may_share_memory: Any -min_scalar_type: Any -mod: Any -nested_iters: Any -packbits: Any -promote_types: Any -putmask: Any -result_type: Any -seterrobj: Any -shares_memory: Any -unpackbits: Any -vdot: Any -where: Any +# Placeholders for C-based functions +arange: Callable[..., Any] +bincount: Callable[..., Any] +bitwise_not: Callable[..., Any] +busday_count: Callable[..., Any] +busday_offset: Callable[..., Any] +can_cast: Callable[..., Any] +compare_chararrays: Callable[..., Any] +concatenate: Callable[..., Any] +conj: Callable[..., Any] +copyto: Callable[..., Any] +datetime_as_string: Callable[..., Any] +datetime_data: Callable[..., Any] +divide: Callable[..., Any] +dot: Callable[..., Any] +frombuffer: Callable[..., Any] +fromfile: Callable[..., Any] +fromiter: Callable[..., Any] +frompyfunc: Callable[..., Any] +fromstring: Callable[..., Any] +geterrobj: Callable[..., Any] +inner: Callable[..., Any] +is_busday: Callable[..., Any] +lexsort: Callable[..., Any] +may_share_memory: Callable[..., Any] +min_scalar_type: Callable[..., Any] +mod: Callable[..., Any] +nested_iters: Callable[..., Any] +packbits: Callable[..., Any] +promote_types: Callable[..., Any] +putmask: Callable[..., Any] +result_type: Callable[..., Any] +seterrobj: Callable[..., Any] +shares_memory: Callable[..., Any] +unpackbits: Callable[..., Any] +vdot: Callable[..., Any] +where: Callable[..., Any] _NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) From 1935b2a6b95b236c63672339ea73e9aadfd0ca3e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 13 Apr 2021 17:17:05 +0200 Subject: [PATCH 0897/1270] ENH: Annotate a few previously missing ufuncs as such --- numpy/__init__.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 41e494fb4f4c..c6c5b71ff508 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -543,17 +543,14 @@ def who(vardict=...): ... 
# Placeholders for C-based functions arange: Callable[..., Any] bincount: Callable[..., Any] -bitwise_not: Callable[..., Any] busday_count: Callable[..., Any] busday_offset: Callable[..., Any] can_cast: Callable[..., Any] compare_chararrays: Callable[..., Any] concatenate: Callable[..., Any] -conj: Callable[..., Any] copyto: Callable[..., Any] datetime_as_string: Callable[..., Any] datetime_data: Callable[..., Any] -divide: Callable[..., Any] dot: Callable[..., Any] frombuffer: Callable[..., Any] fromfile: Callable[..., Any] @@ -566,7 +563,6 @@ is_busday: Callable[..., Any] lexsort: Callable[..., Any] may_share_memory: Callable[..., Any] min_scalar_type: Callable[..., Any] -mod: Callable[..., Any] nested_iters: Callable[..., Any] packbits: Callable[..., Any] promote_types: Callable[..., Any] @@ -2996,16 +2992,19 @@ arctan2: ufunc arctan: ufunc arctanh: ufunc bitwise_and: ufunc +bitwise_not: ufunc bitwise_or: ufunc bitwise_xor: ufunc cbrt: ufunc ceil: ufunc +conj: ufunc conjugate: ufunc copysign: ufunc cos: ufunc cosh: ufunc deg2rad: ufunc degrees: ufunc +divide: ufunc divmod: ufunc equal: ufunc exp2: ufunc @@ -3047,6 +3046,7 @@ logical_xor: ufunc matmul: ufunc maximum: ufunc minimum: ufunc +mod: ufunc modf: ufunc multiply: ufunc negative: ufunc From 53421202b93e82d8c790c81d35e38cfec5ce276c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 13 Apr 2021 17:25:21 +0200 Subject: [PATCH 0898/1270] DEP: Remove all references to `numarray` from the namespace It has been deprecated and removed for quite some time --- numpy/__init__.pyi | 1 - tools/functions_missing_types.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index c6c5b71ff508..8565d4b4052a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -383,7 +383,6 @@ matrix: Type[Any] memmap: Type[Any] nbytes: Dict[generic, int] nditer: Type[Any] -numarray: str poly1d: Type[Any] recarray: Type[Any] record: Type[Any] diff --git a/tools/functions_missing_types.py b/tools/functions_missing_types.py index b283f5a35640..0461aabd3634 100755 --- a/tools/functions_missing_types.py +++ b/tools/functions_missing_types.py @@ -41,6 +41,7 @@ "fastCopyAndTranspose", "get_array_wrap", "int_asbuffer", + "numarray", "oldnumeric", "safe_eval", "set_numeric_ops", From 375363d41dd77a0f77e95f44347b0bbf0fe242fb Mon Sep 17 00:00:00 2001 From: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Date: Tue, 13 Apr 2021 23:14:12 +0200 Subject: [PATCH 0899/1270] More concise "How to import NumPy" description --- doc/source/user/absolute_beginners.rst | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 084bb6d223f8..3ec95927b84d 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -63,17 +63,13 @@ at `SciPy `_. How to import NumPy ------------------- -Any time you want to use a package or library in your code, you first need to -make it accessible. - -In order to start using NumPy and all of the functions available in NumPy, -you'll need to import it. This can be easily done with this import statement:: +To access NumPy and its functions import it in your Python code like this:: import numpy as np -(We shorten ``numpy`` to ``np`` in order to save time and also to keep code -standardized so that anyone working with your code can easily understand and -run it.) 
+We shorten the imported name to ``np`` for better readability of code using +NumPy. This is a widely adopted convention that you should follow too, so that +anyone working with your code can easily understand it. Reading the example code ------------------------ From 80e4bf97bc54a70870b0f7e02101f0d6bef4147c Mon Sep 17 00:00:00 2001 From: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Date: Tue, 13 Apr 2021 23:54:54 +0200 Subject: [PATCH 0900/1270] DOC: Use: from numpy.testing import ... instead of `import numpy.testing as npt`. - all of NumPy's own tests themselves use the from-import variant - There's `import numpy.typing as npt` around in some places so we should not abbreviate another module import with `npt`. Optional further enhancement: If this is a convention we also want users to adopt, this should be mentioned in the module docstring of `numpy.testing`. --- numpy/testing/_private/utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 1bdb00fd5350..393fedc2705f 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -518,9 +518,9 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): Examples -------- - >>> import numpy.testing as npt - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + >>> from numpy.testing import assert_almost_equal + >>> assert_almost_equal(2.3333333333333, 2.33333334) + >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) Traceback (most recent call last): ... AssertionError: @@ -528,8 +528,8 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): ACTUAL: 2.3333333333333 DESIRED: 2.33333334 - >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), - ... np.array([1.0,2.33333334]), decimal=9) + >>> assert_almost_equal(np.array([1.0,2.3333333333333]), + ... np.array([1.0,2.33333334]), decimal=9) Traceback (most recent call last): ... AssertionError: From 5d3d2f98086e48f938cbf81930d0fadfe9bca795 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 13 Apr 2021 17:15:09 -0600 Subject: [PATCH 0901/1270] MAINT: Fix comma usage. --- doc/source/user/absolute_beginners.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 3ec95927b84d..0de08c3f0a9f 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -68,7 +68,7 @@ To access NumPy and its functions import it in your Python code like this:: import numpy as np We shorten the imported name to ``np`` for better readability of code using -NumPy. This is a widely adopted convention that you should follow too, so that +NumPy. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it. 
Reading the example code From 645b3045d21f2f209e5ff6dd3b9c6ec4eaa28c20 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 13 Apr 2021 17:40:25 -0500 Subject: [PATCH 0902/1270] CI: Use informational mode for codecov [skip CI] --- .codecov.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 8c19f9e8ee58..165b3099df18 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -6,8 +6,9 @@ coverage: status: project: default: - # Require 1% coverage, i.e., always succeed - target: 1 - patch: true + informational: true + patch: + default: + informational: true changes: false comment: off From 0345c8ce3b90ab8a0f5395715d4ac0af60d5ec08 Mon Sep 17 00:00:00 2001 From: Greg Lucas Date: Tue, 13 Apr 2021 20:38:41 -0600 Subject: [PATCH 0903/1270] CI: Fixing typo in Azure job run --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 936ef7473a76..3240b5fc9e76 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -207,7 +207,7 @@ stages: - script: python -m pip install matplotlib displayName: 'Install matplotlib before refguide run' - script: python runtests.py -g --refguide-check - displayName: 'Run Refuide Check' + displayName: 'Run Refguide Check' condition: eq(variables['USE_OPENBLAS'], '1') - script: python runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml displayName: 'Run Full NumPy Test Suite' From 04c97a63e1420402b54b89e05082a0ff4cb1c001 Mon Sep 17 00:00:00 2001 From: Pamphile ROY Date: Wed, 14 Apr 2021 19:35:07 +0200 Subject: [PATCH 0904/1270] DOC: remove legacy global seed, assert_almost_equal and assert_ --- doc/TESTS.rst.txt | 31 +++++++++++------------ doc/source/reference/routines.testing.rst | 11 +++++--- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index 21cc0867333f..c7e89b5b9e4d 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -59,7 +59,7 @@ that are run; but if it is greater than 1, then the tests will also provide warnings on missing tests. So if you want to run every test and get messages about which modules don't have tests:: - >>> numpy.test(label='full', verbose=2) # or numpy.test('full', 2) + >>> numpy.test(label='full', verbose=2) # or numpy.test('full', 2) Finally, if you are only interested in testing a subset of NumPy, for example, the ``core`` module, use the following:: @@ -101,27 +101,25 @@ module called ``test_yyy.py``. If you only need to test one aspect of ``zzz``, you can simply add a test function:: def test_zzz(): - assert_(zzz() == 'Hello from zzz') + assert zzz() == 'Hello from zzz' More often, we need to group a number of tests together, so we create a test class:: - from numpy.testing import assert_, assert_raises - # import xxx symbols from numpy.xxx.yyy import zzz + import pytest class TestZzz: def test_simple(self): - assert_(zzz() == 'Hello from zzz') + assert zzz() == 'Hello from zzz' def test_invalid_parameter(self): - assert_raises(...) + with pytest.raises(xxxError, match='expected error message'): + ... -Within these test methods, ``assert_()`` and related functions are used to test +Within these test methods, ``assert`` and related functions are used to test whether a certain assumption is valid. If the assertion fails, the test fails. -Note that the Python builtin ``assert`` should not be used, because it is -stripped during compilation with ``-O``. 
Note that ``test_`` functions or methods should not have a docstring, because that makes it hard to identify the test from the output of running the test @@ -282,12 +280,12 @@ from one in `numpy/linalg/tests/test_linalg.py class LinalgTestCase: def test_single(self): - a = array([[1.,2.], [3.,4.]], dtype=single) + a = array([[1., 2.], [3., 4.]], dtype=single) b = array([2., 1.], dtype=single) self.do(a, b) def test_double(self): - a = array([[1.,2.], [3.,4.]], dtype=double) + a = array([[1., 2.], [3., 4.]], dtype=double) b = array([2., 1.], dtype=double) self.do(a, b) @@ -296,14 +294,14 @@ from one in `numpy/linalg/tests/test_linalg.py class TestSolve(LinalgTestCase): def do(self, a, b): x = linalg.solve(a, b) - assert_almost_equal(b, dot(a, x)) - assert_(imply(isinstance(b, matrix), isinstance(x, matrix))) + assert_allclose(b, dot(a, x)) + assert imply(isinstance(b, matrix), isinstance(x, matrix)) class TestInv(LinalgTestCase): def do(self, a, b): a_inv = linalg.inv(a) - assert_almost_equal(dot(a, a_inv), identity(asarray(a).shape[0])) - assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix))) + assert_allclose(dot(a, a_inv), identity(asarray(a).shape[0])) + assert imply(isinstance(a, matrix), isinstance(a_inv, matrix)) In this case, we wanted to test solving a linear algebra problem using matrices of several data types, using ``linalg.solve`` and @@ -353,7 +351,8 @@ new bugs or regressions, a test that passes most of the time but fails occasionally with no code changes is not helpful. Make the random data deterministic by setting the random number seed before generating it. Use either Python's ``random.seed(some_number)`` or NumPy's -``numpy.random.seed(some_number)``, depending on the source of random numbers. +``rng = np.random.default_rng(some_number)``, depending on the source of +random numbers. Alternatively, you can use `Hypothesis`_ to generate arbitrary data. Hypothesis manages both Python's and Numpy's random seeds for you, and diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst index 98ce3f37718b..7bd499fdfc90 100644 --- a/doc/source/reference/routines.testing.rst +++ b/doc/source/reference/routines.testing.rst @@ -18,9 +18,6 @@ Asserts .. autosummary:: :toctree: generated/ - assert_almost_equal - assert_approx_equal - assert_array_almost_equal assert_allclose assert_array_almost_equal_nulp assert_array_max_ulp @@ -32,6 +29,14 @@ Asserts assert_warns assert_string_equal +.. autosummary:: + :toctree: generated/ + :hidden: + + assert_almost_equal + assert_approx_equal + assert_array_almost_equal + Decorators ---------- .. autosummary:: From 64bb06f002f2085c172c2f7dc621289d3cd54cbe Mon Sep 17 00:00:00 2001 From: Pamphile ROY Date: Wed, 14 Apr 2021 21:03:49 +0200 Subject: [PATCH 0905/1270] DOC: revert global seed --- doc/TESTS.rst.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index c7e89b5b9e4d..fd88452914fe 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -351,8 +351,7 @@ new bugs or regressions, a test that passes most of the time but fails occasionally with no code changes is not helpful. Make the random data deterministic by setting the random number seed before generating it. Use either Python's ``random.seed(some_number)`` or NumPy's -``rng = np.random.default_rng(some_number)``, depending on the source of -random numbers. +``numpy.random.seed(some_number)``, depending on the source of random numbers. 
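For illustration, a minimal sketch of seeding test data (either interface,
depending on which random-number API the test exercises):

    import numpy as np

    np.random.seed(12345)                # legacy global-state interface
    legacy_sample = np.random.random(10)

    rng = np.random.default_rng(12345)   # Generator-based interface
    sample = rng.random(10)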
Alternatively, you can use `Hypothesis`_ to generate arbitrary data. Hypothesis manages both Python's and Numpy's random seeds for you, and From 92549ce03ecff346f4b10b181dcfa93ca1c22b3f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 14 Apr 2021 23:09:28 +0200 Subject: [PATCH 0906/1270] MAINT: Ensure that the `np.ndarray` constructor can take any shape-like oject --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8565d4b4052a..c82f1642f2f5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1375,7 +1375,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def imag(self, value: ArrayLike) -> None: ... def __new__( cls: Type[_ArraySelf], - shape: Sequence[int], + shape: _ShapeLike, dtype: DTypeLike = ..., buffer: _BufferType = ..., offset: int = ..., From 7afccd97397a6f102b83ae4c0e0f77e0fbde435c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 14 Apr 2021 23:11:09 +0200 Subject: [PATCH 0907/1270] MAINT: Relax the integer-type-constraint of `npt._ShapeLike` Change `int` into the more forgiving `SupportsIndex` protocol --- numpy/typing/_shape.py | 11 ++++++++++- numpy/typing/tests/data/pass/array_constructors.py | 3 +++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/numpy/typing/_shape.py b/numpy/typing/_shape.py index 4629046ea3eb..b720c3ffc192 100644 --- a/numpy/typing/_shape.py +++ b/numpy/typing/_shape.py @@ -1,6 +1,15 @@ +import sys from typing import Sequence, Tuple, Union +if sys.version_info >= (3, 8): + from typing import SupportsIndex +else: + try: + from typing_extensions import SupportsIndex + except ImportError: + SupportsIndex = NotImplemented + _Shape = Tuple[int, ...] # Anything that can be coerced to a shape tuple -_ShapeLike = Union[int, Sequence[int]] +_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index 63208f139c39..722fa4b7e3f6 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -17,6 +17,9 @@ class SubClass(np.ndarray): ... def func(i: int, j: int, **kwargs: Any) -> SubClass: return B +np.ndarray(Index()) +np.ndarray([Index()]) + np.array(1, dtype=float) np.array(1, copy=False) np.array(1, order='F') From 51cb79e22d93abed0d46f4e2a26bdc9af589cd7c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 15 Apr 2021 00:29:00 +0200 Subject: [PATCH 0908/1270] STY: pep8 fixes --- numpy/typing/tests/data/pass/array_constructors.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index 722fa4b7e3f6..13159f6bbd14 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -1,11 +1,19 @@ from typing import List, Any import numpy as np + class Index: def __index__(self) -> int: return 0 -class SubClass(np.ndarray): ... + +class SubClass(np.ndarray): + pass + + +def func(i: int, j: int, **kwargs: Any) -> SubClass: + return B + i8 = np.int64(1) @@ -14,9 +22,6 @@ class SubClass(np.ndarray): ... 
B_stack = np.array([[1], [1]]).view(SubClass) C = [1] -def func(i: int, j: int, **kwargs: Any) -> SubClass: - return B - np.ndarray(Index()) np.ndarray([Index()]) From 10cb25f6dd600f3aa636b3b8db7b7fd102c2eccf Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 15 Apr 2021 02:07:47 +0200 Subject: [PATCH 0909/1270] TST: Fix a test failure for python 3.7 --- numpy/typing/tests/data/pass/array_constructors.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index 13159f6bbd14..206f70a15bb1 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -1,3 +1,4 @@ +import sys from typing import List, Any import numpy as np @@ -22,8 +23,9 @@ def func(i: int, j: int, **kwargs: Any) -> SubClass: B_stack = np.array([[1], [1]]).view(SubClass) C = [1] -np.ndarray(Index()) -np.ndarray([Index()]) +if sys.version_info >= (3, 8): + np.ndarray(Index()) + np.ndarray([Index()]) np.array(1, dtype=float) np.array(1, copy=False) From 181f273a59744d58f90f45d953a3285484c72cba Mon Sep 17 00:00:00 2001 From: Deepyaman Datta Date: Thu, 15 Apr 2021 05:21:52 -0400 Subject: [PATCH 0910/1270] DOC: Fix spelling of "reccomended" ("recommended") (#18779) --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 17a52a8d5261..a68303501464 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -4592,7 +4592,7 @@ def default_rng(seed=None): Examples -------- - ``default_rng`` is the reccomended constructor for the random number class + ``default_rng`` is the recommended constructor for the random number class ``Generator``. Here are several ways we can construct a random number generator using ``default_rng`` and the ``Generator`` class. From f47f64e319faca16519fb2a229b4ebb5c0271850 Mon Sep 17 00:00:00 2001 From: Pamphile ROY Date: Thu, 15 Apr 2021 12:00:22 +0200 Subject: [PATCH 0911/1270] DOC: not recommended section --- doc/source/reference/routines.testing.rst | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst index 7bd499fdfc90..40b385cb647a 100644 --- a/doc/source/reference/routines.testing.rst +++ b/doc/source/reference/routines.testing.rst @@ -29,9 +29,14 @@ Asserts assert_warns assert_string_equal +Asserts (not recommanded) +------------------------- +It is recommended to use one of `assert_allclose`, +`assert_array_almost_equal_nulp` or `assert_array_max_ulp` instead of these +functions for more consistent floating point comparisons. + .. autosummary:: :toctree: generated/ - :hidden: assert_almost_equal assert_approx_equal From d4683c99a78e8e7658ac53b435fca34de52ef52e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 15 Apr 2021 12:49:36 +0200 Subject: [PATCH 0912/1270] ENH: Improve the placeholder annotations for classes Use explicitly defined classes rather than annotating them as `Type[Any]`. The latter currently causes issues with mypy, which does not allow it to be used as a baseclass (because reasons?) 
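
A minimal sketch of the mypy limitation referred to here (illustrative only, written
in ``.pyi``-stub style; the placeholder names are invented and do not appear in the
patch). A placeholder declared as a variable of type ``Type[Any]`` cannot be
subclassed under mypy, whereas an explicitly defined class with a permissive
``__getattr__`` can::

    from typing import Any, Type

    OldPlaceholder: Type[Any]

    class FromOld(OldPlaceholder):     # rejected by mypy: a variable is not
        ...                            # accepted as a base class

    class NewPlaceholder:
        # attribute access stays permissive: everything resolves to ``Any``
        def __getattr__(self, key: str) -> Any: ...

    class FromNew(NewPlaceholder):     # accepted by mypy
        ...

The ``__getattr__`` fallback keeps the new placeholders roughly as forgiving as the
old ``Type[Any]`` annotations until the classes are properly stubbed out.
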
--- numpy/__init__.pyi | 64 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 48 insertions(+), 16 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index c82f1642f2f5..52e6ecbeb837 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -368,26 +368,58 @@ __git_version__: str # TODO: Move placeholders to their respective module once # their annotations are properly implemented # -# Placeholders for non-functions (types and other objects) -DataSource: Type[Any] -MachAr: Type[Any] +# Placeholders for classes +# TODO: Remove the `__getattr__` methods once the classes are stubbed out +class DataSource: + def __getattr__(self, key: str) -> Any: ... + +class MachAr: + def __getattr__(self, key: str) -> Any: ... + +class broadcast: + def __getattr__(self, key: str) -> Any: ... + +class busdaycalendar: + def __getattr__(self, key: str) -> Any: ... + +class chararray(ndarray[_ShapeType, _DType_co]): + def __getattr__(self, key: str) -> Any: ... + +class finfo: + def __getattr__(self, key: str) -> Any: ... + +class format_parser: + def __getattr__(self, key: str) -> Any: ... + +class iinfo: + def __getattr__(self, key: str) -> Any: ... + +class matrix(ndarray[_ShapeType, _DType_co]): + def __getattr__(self, key: str) -> Any: ... + +class memmap(ndarray[_ShapeType, _DType_co]): + def __getattr__(self, key: str) -> Any: ... + +class nditer: + def __getattr__(self, key: str) -> Any: ... + +class poly1d: + def __getattr__(self, key: str) -> Any: ... + +class recarray(ndarray[_ShapeType, _DType_co]): + def __getattr__(self, key: str) -> Any: ... + +class record(void): + def __getattr__(self, key: str) -> Any: ... + +class vectorize: + def __getattr__(self, key: str) -> Any: ... + +# Placeholders for miscellaneous objects ScalarType: Tuple[Type[Any], ...] -broadcast: Type[Any] -busdaycalendar: Type[Any] cast: Dict[generic, Callable[..., ndarray[Any, dtype[Any]]]] -chararray: Type[Any] -finfo: Type[Any] -format_parser: Type[Any] -iinfo: Type[Any] -matrix: Type[Any] -memmap: Type[Any] nbytes: Dict[generic, int] -nditer: Type[Any] -poly1d: Type[Any] -recarray: Type[Any] -record: Type[Any] typecodes: Dict[str, str] -vectorize: Type[Any] # Placeholders for Python-based functions def angle(z, deg=...): ... From 5258da3790a1904efe5043b822835e5ad583534b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 15 Apr 2021 13:12:20 +0200 Subject: [PATCH 0913/1270] ENH: Improve the placeholder annotations of the C-based functions All new function definitions based on the respective functions' docstring --- numpy/__init__.pyi | 90 +++++++++++++++++++++++++++++----------------- 1 file changed, 58 insertions(+), 32 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 52e6ecbeb837..f3b331d505b2 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -572,38 +572,64 @@ def vsplit(ary, indices_or_sections): ... def who(vardict=...): ... 
# Placeholders for C-based functions -arange: Callable[..., Any] -bincount: Callable[..., Any] -busday_count: Callable[..., Any] -busday_offset: Callable[..., Any] -can_cast: Callable[..., Any] -compare_chararrays: Callable[..., Any] -concatenate: Callable[..., Any] -copyto: Callable[..., Any] -datetime_as_string: Callable[..., Any] -datetime_data: Callable[..., Any] -dot: Callable[..., Any] -frombuffer: Callable[..., Any] -fromfile: Callable[..., Any] -fromiter: Callable[..., Any] -frompyfunc: Callable[..., Any] -fromstring: Callable[..., Any] -geterrobj: Callable[..., Any] -inner: Callable[..., Any] -is_busday: Callable[..., Any] -lexsort: Callable[..., Any] -may_share_memory: Callable[..., Any] -min_scalar_type: Callable[..., Any] -nested_iters: Callable[..., Any] -packbits: Callable[..., Any] -promote_types: Callable[..., Any] -putmask: Callable[..., Any] -result_type: Callable[..., Any] -seterrobj: Callable[..., Any] -shares_memory: Callable[..., Any] -unpackbits: Callable[..., Any] -vdot: Callable[..., Any] -where: Callable[..., Any] +# TODO: Sort out which parameters are positional-only +@overload +def arange(stop, dtype=..., *, like=...): ... +@overload +def arange(start, stop, step, dtype=..., *, like=...): ... +def bincount(x, weights=..., minlength=...): ... +def busday_count( + begindates, + enddates, + weekmask=..., + holidays=..., + busdaycal=..., + out=..., +): ... +def busday_offset( + dates, + offsets, + roll=..., + weekmask=..., + holidays=..., + busdaycal=..., + out=..., +): ... +def can_cast(from_, to, casting=...): ... +def compare_chararrays(a, b, cmp_op, rstrip): ... +def concatenate(__a, axis=..., out=..., dtype=..., casting=...): ... +def copyto(dst, src, casting=..., where=...): ... +def datetime_as_string(arr, unit=..., timezone=..., casting=...): ... +def datetime_data(__dtype): ... +def dot(a, b, out=...): ... +def frombuffer(buffer, dtype=..., count=..., offset=..., *, like=...): ... +def fromfile( + file, dtype=..., count=..., sep=..., offset=..., *, like=... +): ... +def fromiter(iter, dtype, count=..., *, like=...): ... +def frompyfunc(func, nin, nout, * identity): ... +def fromstring(string, dtype=..., count=..., sep=..., *, like=...): ... +def geterrobj(): ... +def inner(a, b): ... +def is_busday( + dates, weekmask=..., holidays=..., busdaycal=..., out=... +): ... +def lexsort(keys, axis=...): ... +def may_share_memory(a, b, max_work=...): ... +def min_scalar_type(a): ... +def nested_iters(*args, **kwargs): ... # TODO: Sort out parameters +def packbits(a, axis=..., bitorder=...): ... +def promote_types(type1, type2): ... +def putmask(a, mask, values): ... +def result_type(*arrays_and_dtypes): ... +def seterrobj(errobj): ... +def shares_memory(a, b, max_work=...): ... +def unpackbits(a, axis=..., count=..., bitorder=...): ... +def vdot(a, b): ... +@overload +def where(__condition): ... +@overload +def where(__condition, __x, __y): ... 
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) From cfbcd38558045197eded5cb84b8c8fe006a77480 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 15 Apr 2021 14:40:21 +0200 Subject: [PATCH 0914/1270] ENH: Add `__all__` to a number of public modules --- numpy/fft/__init__.py | 4 ++++ numpy/fft/__init__.pyi | 4 +++- numpy/linalg/__init__.py | 3 +++ numpy/linalg/__init__.pyi | 4 +++- numpy/polynomial/__init__.py | 16 +++++++++++++--- numpy/polynomial/__init__.pyi | 4 +++- 6 files changed, 29 insertions(+), 6 deletions(-) diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index a86bb3ac0bad..fd5e47580a54 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -200,9 +200,13 @@ """ +from . import _pocketfft, helper from ._pocketfft import * from .helper import * +__all__ = _pocketfft.__all__.copy() +__all__ += helper.__all__ + from numpy._pytesttester import PytestTester test = PytestTester(__name__) del PytestTester diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 45190517f2af..bb4fae9037d6 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] fft: Any ifft: Any diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py index 3a53ac6ecac6..93943de3896c 100644 --- a/numpy/linalg/__init__.py +++ b/numpy/linalg/__init__.py @@ -70,8 +70,11 @@ """ # To get sub-modules +from . import linalg from .linalg import * +__all__ = linalg.__all__.copy() + from numpy._pytesttester import PytestTester test = PytestTester(__name__) del PytestTester diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index ffb05bb8130a..39b061969d0d 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, List + +__all__: List[str] matrix_power: Any solve: Any diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py index d629df29f974..4b4361163b2e 100644 --- a/numpy/polynomial/__init__.py +++ b/numpy/polynomial/__init__.py @@ -37,13 +37,13 @@ The classes provide a more consistent and concise interface than the type-specific functions defined in the submodules for each type of polynomial. 
For example, to fit a Chebyshev polynomial with degree ``1`` to data given -by arrays ``xdata`` and ``ydata``, the +by arrays ``xdata`` and ``ydata``, the `~chebyshev.Chebyshev.fit` class method:: >>> from numpy.polynomial import Chebyshev >>> c = Chebyshev.fit(xdata, ydata, deg=1) -is preferred over the `chebyshev.chebfit` function from the +is preferred over the `chebyshev.chebfit` function from the ``np.polynomial.chebyshev`` module:: >>> from numpy.polynomial.chebyshev import chebfit @@ -76,7 +76,7 @@ - ``Poly.basis(degree)`` -- Basis polynomial of given degree - ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x`` -- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients +- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients determined by the least-squares fit to the data ``x``, ``y`` - ``Poly.fromroots(roots)`` -- ``p`` with specified roots - ``p.copy()`` -- Create a copy of ``p`` @@ -120,6 +120,16 @@ from .hermite_e import HermiteE from .laguerre import Laguerre +__all__ = [ + "set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] + def set_default_printstyle(style): """ diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 755f7521bfb2..6a740604184c 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, List from numpy.polynomial import ( chebyshev as chebyshev, @@ -9,6 +9,8 @@ from numpy.polynomial import ( polynomial as polynomial, ) +__all__: List[str] + Polynomial: Any Chebyshev: Any Legendre: Any From 89ce34e27fe14ce1d89fe9ca62427cc261ebd3e4 Mon Sep 17 00:00:00 2001 From: endolith Date: Thu, 15 Apr 2021 23:28:59 -0400 Subject: [PATCH 0915/1270] DOC: Fix dec.paramaterize typo --- doc/TESTS.rst.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index 21cc0867333f..170cc1663dcf 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -187,7 +187,7 @@ Parametric tests One very nice feature of testing is allowing easy testing across a range of parameters - a nasty problem for standard unit tests. Use the -``dec.paramaterize`` decorator. +``pytest.mark.parametrize`` decorator. 
Doctests -------- From c16a619ec2bdf2227e5247466b69cd9e25e36358 Mon Sep 17 00:00:00 2001 From: xamm <39380924+xamm@users.noreply.github.com> Date: Fri, 16 Apr 2021 11:24:05 +0200 Subject: [PATCH 0916/1270] DOC: add note for clip() special case a_min > a_max See #18782 --- doc/release/upcoming_changes/18782.change.rst | 5 +++++ numpy/core/fromnumeric.py | 14 ++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/18782.change.rst diff --git a/doc/release/upcoming_changes/18782.change.rst b/doc/release/upcoming_changes/18782.change.rst new file mode 100644 index 000000000000..c401b673327c --- /dev/null +++ b/doc/release/upcoming_changes/18782.change.rst @@ -0,0 +1,5 @@ +Added `numpy.clip` note and example for special case `a_min` greater than `a_max` to the documentation +------------------------------------------------------------------------------------------------------ +The documentation of `numpy.clip` has been changed to include an example and a note +for the special case where `a_min` is set to a value greater than `a_max`, +which causes `numpy.clip` to return an array in which all values are equal to `a_max`. \ No newline at end of file diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 3646b39b0c70..c8ab0ae25fdc 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -2086,15 +2086,25 @@ def clip(a, a_min, a_max, out=None, **kwargs): -------- :ref:`ufuncs-output-type` + Notes + -------- + When `a_min` is greater than `a_max`, `clip` returns an + array in which all values are equal to `a_max`, + as shown in the second example. + Examples -------- >>> a = np.arange(10) - >>> np.clip(a, 1, 8) - array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> np.clip(a, a_max = 1, a_min = 8) + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) >>> np.clip(a, 3, 6, out=a) array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) >>> a = np.arange(10) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) From 1d2efc4140b2b1f819db181259d9cca6553d126c Mon Sep 17 00:00:00 2001 From: xamm <39380924+xamm@users.noreply.github.com> Date: Fri, 16 Apr 2021 11:30:25 +0200 Subject: [PATCH 0917/1270] MAINT: changing news fragment filename to correct pull request --- .../upcoming_changes/{18782.change.rst => 18786.change.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename doc/release/upcoming_changes/{18782.change.rst => 18786.change.rst} (100%) diff --git a/doc/release/upcoming_changes/18782.change.rst b/doc/release/upcoming_changes/18786.change.rst similarity index 100% rename from doc/release/upcoming_changes/18782.change.rst rename to doc/release/upcoming_changes/18786.change.rst From 71115524c292a7f4ac7e5a8e6b89e8cf41d1ab72 Mon Sep 17 00:00:00 2001 From: xamm <39380924+xamm@users.noreply.github.com> Date: Fri, 16 Apr 2021 11:45:16 +0200 Subject: [PATCH 0918/1270] MAINT: removing news fragments documentation changes are usually not added to release notes --- doc/release/upcoming_changes/18786.change.rst | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 doc/release/upcoming_changes/18786.change.rst diff --git a/doc/release/upcoming_changes/18786.change.rst b/doc/release/upcoming_changes/18786.change.rst deleted file mode 100644 index c401b673327c..000000000000 --- a/doc/release/upcoming_changes/18786.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -Added `numpy.clip` note and example for special 
case `a_min` greater than `a_max` to the documentation ------------------------------------------------------------------------------------------------------- -The documentation of `numpy.clip` has been changed to include an example and a note -for the special case where `a_min` is set to a value greater than `a_max`, -which causes `numpy.clip` to return an array in which all values are equal to `a_max`. \ No newline at end of file From c5e9e140e57774262cf5ac4762d19a9da5c9477d Mon Sep 17 00:00:00 2001 From: xamm <39380924+xamm@users.noreply.github.com> Date: Fri, 16 Apr 2021 11:49:26 +0200 Subject: [PATCH 0919/1270] MAINT: change argument type in numpy.clip documentation example from keyword to positional as suggested by reviewer --- numpy/core/fromnumeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index c8ab0ae25fdc..7c43cd3139f7 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -2099,7 +2099,7 @@ def clip(a, a_min, a_max, out=None, **kwargs): array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> np.clip(a, 1, 8) array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) - >>> np.clip(a, a_max = 1, a_min = 8) + >>> np.clip(a, 8, 1) array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) >>> np.clip(a, 3, 6, out=a) array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) From 065de62261cb41dc02d4483b7b5565e929e8c653 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Fri, 16 Apr 2021 08:09:18 -0400 Subject: [PATCH 0920/1270] DOC: Document newer pytest conventions --- doc/TESTS.rst.txt | 22 ++++++++++++++++------ doc/source/conf.py | 1 + 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index 21cc0867333f..cb8578e5ab70 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -106,22 +106,32 @@ module called ``test_yyy.py``. If you only need to test one aspect of More often, we need to group a number of tests together, so we create a test class:: - from numpy.testing import assert_, assert_raises + import pytest # import xxx symbols from numpy.xxx.yyy import zzz class TestZzz: def test_simple(self): - assert_(zzz() == 'Hello from zzz') + assert zzz() == 'Hello from zzz' def test_invalid_parameter(self): - assert_raises(...) + with pytest.raises(ValueError, match='.*some matching regex.*'): + ... -Within these test methods, ``assert_()`` and related functions are used to test +Within these test methods, ``assert`` and related functions are used to test whether a certain assumption is valid. If the assertion fails, the test fails. -Note that the Python builtin ``assert`` should not be used, because it is -stripped during compilation with ``-O``. +``pytest`` internally rewrites the ``assert`` statement to give informative +output when it fails, so should be preferred over the legacy variant +``numpy.testing.assert_``. Whereas plain ``assert`` statements are ignored +when running Python in optimized mode with ``-O``, this is not an issue when +running tests with pytest. + +Similarly, the pytest functions :func:`pytest.raises` and :func:`pytest.warns` +should be preferred over their legacy counterparts +:func:`numpy.testing.assert_raises` and :func:`numpy.testing.assert_warns`, +since the pytest variants are more broadly used and allow more explicit +targeting of warnings and errors when used with the ``match`` regex. 
Note that ``test_`` functions or methods should not have a docstring, because that makes it hard to identify the test from the output of running the test diff --git a/doc/source/conf.py b/doc/source/conf.py index 95865c0249c7..fdb9f926ddc4 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -292,6 +292,7 @@ def setup(app): 'skimage': ('https://scikit-image.org/docs/stable', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None), 'scipy-lecture-notes': ('https://scipy-lectures.org', None), + 'pytest': ('https://docs.pytest.org/en/stable', None), } From 59aec5a2d0973bf9368ddb08d622cdb727b21de9 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Fri, 16 Apr 2021 10:36:48 -0700 Subject: [PATCH 0921/1270] Pin pydata-sphinx-theme to 0.5.2. --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index ea38882a2a32..3acc3dda5992 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - scipy - pandas - matplotlib - - pydata-sphinx-theme + - pydata-sphinx-theme=0.5.2 # For linting - pycodestyle=2.7.0 - gitpython From 853e2334449429860769bf7e19e7d151c7125729 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 16 Apr 2021 12:52:55 -0500 Subject: [PATCH 0922/1270] CI: Use `towncrier build` explicitly Without any argument towncrier defaults to `build`. But towncrier added a `--version` argument to just print the towncrier version. NumPy relies on passing `--version` which collides. Even if towncrier might revert the change, using `towncrier build` explicitly just doesn't hurt. Closes gh-18788 --- .circleci/config.yml | 2 +- doc/release/upcoming_changes/README.rst | 2 +- tools/ci/test_all_newsfragments_used.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6f4a32513e4a..ac85d49373ff 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -42,7 +42,7 @@ jobs: . venv/bin/activate pip install git+https://github.com/hawkowl/towncrier.git@master VERSION=$(python -c "import setup; print(setup.VERSION)") - towncrier --version $VERSION --yes + towncrier build --version $VERSION --yes ./tools/ci/test_all_newsfragments_used.py - run: diff --git a/doc/release/upcoming_changes/README.rst b/doc/release/upcoming_changes/README.rst index ff5ca514c3b2..436535ecddbc 100644 --- a/doc/release/upcoming_changes/README.rst +++ b/doc/release/upcoming_changes/README.rst @@ -50,7 +50,7 @@ and double-backticks for code. If you are unsure what pull request type to use, don't hesitate to ask in your PR. -You can install ``towncrier`` and run ``towncrier --draft --version 1.18`` +You can install ``towncrier`` and run ``towncrier build --draft --version 1.18`` if you want to get a preview of how your change will look in the final release notes. diff --git a/tools/ci/test_all_newsfragments_used.py b/tools/ci/test_all_newsfragments_used.py index c2e03154945d..62c9a05f95da 100755 --- a/tools/ci/test_all_newsfragments_used.py +++ b/tools/ci/test_all_newsfragments_used.py @@ -12,5 +12,5 @@ if fragments: print("The following files were not found by towncrier:") - print(" " + " \n".join(fragments)) + print(" " + "\n ".join(fragments)) sys.exit(1) From 54dc47920a2d674f2889374d1e7f8377b7ea17ff Mon Sep 17 00:00:00 2001 From: Matt Hall Date: Fri, 16 Apr 2021 15:52:47 -0300 Subject: [PATCH 0923/1270] Fixes small typos in the genfromtext docstring - `comments`: Added a full stop after 'discarded'. - `names`: changed 'proceeded' to 'preceeded'. 
- `excludelist`: inserted 'with' because it sounds odd without it; the example makes it clear. - `replace_space`: should strictly be "variable names" or "variables' names". The latter seems fussy so I chose the former. --- numpy/lib/npyio.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index efebb5fb7098..9552579cf4e6 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -1587,7 +1587,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, column, individually. comments : str, optional The character used to indicate the start of a comment. - All the characters occurring on a line after a comment are discarded + All the characters occurring on a line after a comment are discarded. delimiter : str, int, or sequence, optional The string used to separate values. By default, any consecutive whitespaces act as delimiter. An integer or sequence of integers @@ -1614,15 +1614,15 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional If `names` is True, the field names are read from the first line after - the first `skip_header` lines. This line can optionally be proceeded + the first `skip_header` lines. This line can optionally be preceeded by a comment delimiter. If `names` is a sequence or a single-string of comma-separated names, the names will be used to define the field names in a structured dtype. If `names` is None, the names of the dtype fields will be used, if any. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list - ['return','file','print']. Excluded names are appended an underscore: - for example, `file` would become `file_`. + ['return','file','print']. Excluded names are appended with an + underscore: for example, `file` would become `file_`. deletechars : str, optional A string combining invalid characters that must be deleted from the names. @@ -1631,7 +1631,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, autostrip : bool, optional Whether to automatically strip white spaces from the variables. replace_space : char, optional - Character(s) used in replacement of white spaces in the variables + Character(s) used in replacement of white spaces in the variable names. By default, use a '_'. case_sensitive : {True, False, 'upper', 'lower'}, optional If True, field names are case sensitive. From 75664b3368e73825409d523743fcaa7690ae0297 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 16 Apr 2021 13:59:17 -0600 Subject: [PATCH 0924/1270] MAINT: Update towncrier version in install and notes We were installing towncrier from the github master branch because there had been no releases with the needed fixes. That has changed with the recent release of versions 19.9.0 and 21.3.0. --- .circleci/config.yml | 2 +- doc/HOWTO_RELEASE.rst.txt | 6 ++---- release_requirements.txt | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index ac85d49373ff..8cf18d809d86 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -40,7 +40,7 @@ jobs: name: create release notes command: | . 
venv/bin/activate - pip install git+https://github.com/hawkowl/towncrier.git@master + pip install towncrier VERSION=$(python -c "import setup; print(setup.VERSION)") towncrier build --version $VERSION --yes ./tools/ci/test_all_newsfragments_used.py diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt index c64a36691705..9af58dd24e96 100644 --- a/doc/HOWTO_RELEASE.rst.txt +++ b/doc/HOWTO_RELEASE.rst.txt @@ -258,10 +258,8 @@ Check the release notes Use `towncrier`_ to build the release note and commit the changes. This will remove all the fragments from ``doc/release/upcoming_changes`` and add ``doc/release/-note.rst``. -Note that currently towncrier must be installed from its master branch as the -last release (19.2.0) is outdated. - towncrier --version "" + towncrier build --version "" git commit -m"Create release note" Check that the release notes are up-to-date. @@ -275,7 +273,7 @@ following: - for SciPy, supported NumPy version(s) - outlook for the near future -.. _towncrier: https://github.com/hawkowl/towncrier +.. _towncrier: https://pypi.org/project/towncrier/ Update the release status and create a release "tag" diff --git a/release_requirements.txt b/release_requirements.txt index 805ce9a8afe1..c24e39c7849d 100644 --- a/release_requirements.txt +++ b/release_requirements.txt @@ -14,4 +14,4 @@ twine # building and notes Paver -git+https://github.com/hawkowl/towncrier.git@master +towncrier From 610031c1ae4c496f0f1b747a5ae13667a484f04d Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 16 Apr 2021 23:36:24 +0200 Subject: [PATCH 0925/1270] SIMD, TEST: Workaround for misaligned stack GCC BUG ABI on WIN64 This patch fixes the segfault error for GCC SIMD module builds on WIN64, the problem occurs when GCC aligned load the AVX registers(256-bit) from stack pointer with 128-bit alignment. --- numpy/core/src/_simd/_simd_vector.inc | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/_simd/_simd_vector.inc b/numpy/core/src/_simd/_simd_vector.inc index 2a1378f22100..d4b6310fde04 100644 --- a/numpy/core/src/_simd/_simd_vector.inc +++ b/numpy/core/src/_simd/_simd_vector.inc @@ -86,7 +86,22 @@ static PyTypeObject PySIMDVectorType = { /************************************ ** Protected Definitions ************************************/ -static PySIMDVectorObject * +/* + * Force inlining the following functions on CYGWIN to avoid spilling vector + * registers into the stack to workaround GCC/WIN64 bug that performs + * miss-align load variable of 256/512-bit vector from non-aligned + * 256/512-bit stack pointer. 
+ * + * check the following links for more clearification: + * https://github.com/numpy/numpy/pull/18330#issuecomment-821539919 + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49001 + */ +#if defined(__CYGWIN__) || (defined(__GNUC__) && defined(_WIN64)) + #define CYG_FINLINE NPY_FINLINE +#else + #define CYG_FINLINE static +#endif +CYG_FINLINE PySIMDVectorObject * PySIMDVector_FromData(simd_data data, simd_data_type dtype) { const simd_data_info *info = simd_data_getinfo(dtype); @@ -118,7 +133,7 @@ PySIMDVector_FromData(simd_data data, simd_data_type dtype) return vec; } -static simd_data +CYG_FINLINE simd_data PySIMDVector_AsData(PySIMDVectorObject *vec, simd_data_type dtype) { const simd_data_info *info = simd_data_getinfo(dtype); From 6f7e6638162b8c74088e5c212971ca90f13d8145 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Fri, 16 Apr 2021 19:24:35 -0700 Subject: [PATCH 0926/1270] DOC: Format version number in directive --- numpy/core/_add_newdocs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index b0dee66d9008..f870dc8ad7fd 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -3936,7 +3936,7 @@ actual implementation will vary with datatype. The 'mergesort' option is retained for backwards compatibility. - .. versionchanged:: 1.15.0. + .. versionchanged:: 1.15.0 The 'stable' option was added. order : str or list of str, optional From 0ef6508397fcdd8f2bbace2a0b91a863b1617728 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Fri, 16 Apr 2021 19:34:17 -0700 Subject: [PATCH 0927/1270] DOC: Add blank line before section. Otherwise Numpydoc does not see the section. --- numpy/lib/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 24252c834aaf..91e7cb8a3531 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -1019,6 +1019,7 @@ def _median_nancheck(data, result, axis, out): Axis along which the median was computed. out : ndarray, optional Output array in which to place the result. 
+ Returns ------- median : scalar or ndarray From ff72d214082784bbd081dd053c56ab5984b52662 Mon Sep 17 00:00:00 2001 From: Pamphile ROY Date: Sat, 17 Apr 2021 08:40:42 +0200 Subject: [PATCH 0928/1270] Update doc/source/reference/routines.testing.rst Co-authored-by: Matthias Bussonnier --- doc/source/reference/routines.testing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst index 40b385cb647a..d9e98e94188d 100644 --- a/doc/source/reference/routines.testing.rst +++ b/doc/source/reference/routines.testing.rst @@ -29,7 +29,7 @@ Asserts assert_warns assert_string_equal -Asserts (not recommanded) +Asserts (not recommended) ------------------------- It is recommended to use one of `assert_allclose`, `assert_array_almost_equal_nulp` or `assert_array_max_ulp` instead of these From 379ffd443c12ef45f178a30605a237198689f29b Mon Sep 17 00:00:00 2001 From: Patrick <39380924+xamm@users.noreply.github.com> Date: Sat, 17 Apr 2021 10:37:33 +0200 Subject: [PATCH 0929/1270] MAINT: Remove unnecessary dash for the dividing line Co-authored-by: Matthias Bussonnier --- numpy/core/fromnumeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 7c43cd3139f7..5c7b3372b0f4 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -2087,7 +2087,7 @@ def clip(a, a_min, a_max, out=None, **kwargs): :ref:`ufuncs-output-type` Notes - -------- + ----- When `a_min` is greater than `a_max`, `clip` returns an array in which all values are equal to `a_max`, as shown in the second example. From 8f10be19469eefba16446172132f9f68389fd31c Mon Sep 17 00:00:00 2001 From: zoj <44142765+zoj613@users.noreply.github.com> Date: Sat, 17 Apr 2021 11:21:44 +0200 Subject: [PATCH 0930/1270] DOC: Update random c-api documentation --- doc/source/reference/random/c-api.rst | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/random/c-api.rst b/doc/source/reference/random/c-api.rst index a79da7a492f7..7c7996115331 100644 --- a/doc/source/reference/random/c-api.rst +++ b/doc/source/reference/random/c-api.rst @@ -40,9 +40,9 @@ The functions are named with the following conventions: - The functions without "standard" in their name require additional parameters to describe the distributions. -- ``zig`` in the name are based on a ziggurat lookup algorithm is used instead - of calculating the ``log``, which is significantly faster. The non-ziggurat - variants are used in corner cases and for legacy compatibility. +- Functions with ``inv`` in their name are based on the slower inverse method + instead of a ziggurat lookup algorithm, which is significantly faster. The + non-ziggurat variants are used in corner cases and for legacy compatibility. .. c:function:: double random_standard_uniform(bitgen_t *bitgen_state) @@ -53,6 +53,8 @@ The functions are named with the following conventions: .. c:function:: void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) +.. c:function:: void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) + .. c:function:: double random_standard_normal(bitgen_t* bitgen_state) .. c:function:: void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) @@ -69,6 +71,8 @@ The functions are named with the following conventions: .. 
c:function:: void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) +.. c:function:: void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) + .. c:function:: float random_standard_normal_f(bitgen_t* bitgen_state) .. c:function:: float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) From aaf02cf910840a22f1ed0623af57ed29a79d4bb2 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 17 Apr 2021 17:58:53 +0200 Subject: [PATCH 0931/1270] MAINT: Add `__new__` to the placeholder stubs for classes The default `__new__` is inherited from `object`, which takes no arguments and is therefore too restrictive --- numpy/__init__.pyi | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index f3b331d505b2..fe973c9a45a9 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -369,50 +369,66 @@ __git_version__: str # their annotations are properly implemented # # Placeholders for classes -# TODO: Remove the `__getattr__` methods once the classes are stubbed out +# TODO: Remove `__getattr__` and update/replace `__new__` once +# the classes are stubbed out class DataSource: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class MachAr: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class broadcast: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class busdaycalendar: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class chararray(ndarray[_ShapeType, _DType_co]): + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class finfo: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class format_parser: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class iinfo: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class matrix(ndarray[_ShapeType, _DType_co]): + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class memmap(ndarray[_ShapeType, _DType_co]): + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class nditer: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class poly1d: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class recarray(ndarray[_ShapeType, _DType_co]): + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class record(void): + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class vectorize: + def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... 
# Placeholders for miscellaneous objects From 3c7f23e15b8c68a0f02638db0de83bb8a2c71f2e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 17 Apr 2021 17:59:42 +0200 Subject: [PATCH 0932/1270] MAINT: Update the key-type of `cast` and `nbytes` `cast` and `nbytes` are in fact instances of a `dict` subclass that converts passed `DTypeLike` objects into the actual keys (`np.generic`) --- numpy/__init__.pyi | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fe973c9a45a9..14aeb5513f94 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -432,9 +432,11 @@ class vectorize: def __getattr__(self, key: str) -> Any: ... # Placeholders for miscellaneous objects +# NOTE: `cast` and `nbytes` are in fact instances of a `dict` subclass that +# converts passed `DTypeLike` objects into the actual keys (`np.generic`) ScalarType: Tuple[Type[Any], ...] -cast: Dict[generic, Callable[..., ndarray[Any, dtype[Any]]]] -nbytes: Dict[generic, int] +cast: Dict[DTypeLike, Callable[..., ndarray[Any, dtype[Any]]]] +nbytes: Dict[DTypeLike, int] typecodes: Dict[str, str] # Placeholders for Python-based functions From 658399fa2b1d77c86196552c1a03afa37cc35389 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 17 Apr 2021 18:00:46 +0200 Subject: [PATCH 0933/1270] BUG: Add a default value to the `step` parameter of `np.arange` --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 14aeb5513f94..99afbc14b869 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -594,7 +594,7 @@ def who(vardict=...): ... @overload def arange(stop, dtype=..., *, like=...): ... @overload -def arange(start, stop, step, dtype=..., *, like=...): ... +def arange(start, stop, step=..., dtype=..., *, like=...): ... def bincount(x, weights=..., minlength=...): ... def busday_count( begindates, From 8d5fb1c14e1b9b88fe968211bd91e1d65c150939 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 17 Apr 2021 18:08:23 +0200 Subject: [PATCH 0934/1270] MAINT: Add an explicit `__call__` method to `np.vectorize` and `np.poly1d` While the presence of `__getattr__` should be enough to consider an object callable, this is apparently not the case. --- numpy/__init__.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 99afbc14b869..826448b13333 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -417,6 +417,7 @@ class nditer: class poly1d: def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __call__(self, val: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class recarray(ndarray[_ShapeType, _DType_co]): @@ -429,6 +430,7 @@ class record(void): class vectorize: def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __call__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... # Placeholders for miscellaneous objects From 1579e3bfac9e287b2021a006f8f6720f18273ad3 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 17 Apr 2021 18:43:02 +0200 Subject: [PATCH 0935/1270] MAINT: Relax the miscellaneous integer-type-constraints --- numpy/__init__.pyi | 85 +++++++++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 34 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index f3b331d505b2..dba330b2e42d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -992,11 +992,11 @@ class _ArrayOrScalarCommon: def ravel(self, order: _OrderKACF = ...) 
-> ndarray: ... @overload def reshape( - self, __shape: Sequence[int], *, order: _OrderACF = ... + self, __shape: _ShapeLike, *, order: _OrderACF = ... ) -> ndarray: ... @overload def reshape( - self, *shape: int, order: _OrderACF = ... + self, *shape: SupportsIndex, order: _OrderACF = ... ) -> ndarray: ... def tobytes(self, order: _OrderKACF = ...) -> bytes: ... # NOTE: `tostring()` is deprecated and therefore excluded @@ -1078,7 +1078,7 @@ class _ArrayOrScalarCommon: ) -> _NdArraySubClass: ... def argsort( self, - axis: Optional[int] = ..., + axis: Optional[SupportsIndex] = ..., kind: Optional[_SortKind] = ..., order: Union[None, str, Sequence[str]] = ..., ) -> ndarray: ... @@ -1124,33 +1124,45 @@ class _ArrayOrScalarCommon: ) -> _NdArraySubClass: ... @overload def compress( - self, a: ArrayLike, axis: Optional[int] = ..., out: None = ..., + self, + a: ArrayLike, + axis: Optional[SupportsIndex] = ..., + out: None = ..., ) -> ndarray: ... @overload def compress( - self, a: ArrayLike, axis: Optional[int] = ..., out: _NdArraySubClass = ..., + self, + a: ArrayLike, + axis: Optional[SupportsIndex] = ..., + out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... def conj(self: _ArraySelf) -> _ArraySelf: ... def conjugate(self: _ArraySelf) -> _ArraySelf: ... @overload def cumprod( - self, axis: Optional[int] = ..., dtype: DTypeLike = ..., out: None = ..., + self, + axis: Optional[SupportsIndex] = ..., + dtype: DTypeLike = ..., + out: None = ..., ) -> ndarray: ... @overload def cumprod( self, - axis: Optional[int] = ..., + axis: Optional[SupportsIndex] = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... @overload def cumsum( - self, axis: Optional[int] = ..., dtype: DTypeLike = ..., out: None = ..., + self, + axis: Optional[SupportsIndex] = ..., + dtype: DTypeLike = ..., + out: None = ..., ) -> ndarray: ... @overload def cumsum( self, - axis: Optional[int] = ..., + axis: Optional[SupportsIndex] = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... @@ -1279,13 +1291,15 @@ class _ArrayOrScalarCommon: keepdims: bool = ..., ) -> _NdArraySubClass: ... def repeat( - self, repeats: _ArrayLikeIntOrBool, axis: Optional[int] = ... + self, repeats: _ArrayLikeIntOrBool, axis: Optional[SupportsIndex] = ... ) -> ndarray: ... @overload - def round(self: _ArraySelf, decimals: int = ..., out: None = ...) -> _ArraySelf: ... + def round( + self: _ArraySelf, decimals: SupportsIndex = ..., out: None = ... + ) -> _ArraySelf: ... @overload def round( - self, decimals: int = ..., out: _NdArraySubClass = ... + self, decimals: SupportsIndex = ..., out: _NdArraySubClass = ... ) -> _NdArraySubClass: ... @overload def std( @@ -1348,7 +1362,7 @@ class _ArrayOrScalarCommon: def take( self, indices: _IntLike_co, - axis: Optional[int] = ..., + axis: Optional[SupportsIndex] = ..., out: None = ..., mode: _ModeKind = ..., ) -> generic: ... @@ -1356,7 +1370,7 @@ class _ArrayOrScalarCommon: def take( self, indices: _ArrayLikeIntOrBool, - axis: Optional[int] = ..., + axis: Optional[SupportsIndex] = ..., out: None = ..., mode: _ModeKind = ..., ) -> ndarray: ... @@ -1364,7 +1378,7 @@ class _ArrayOrScalarCommon: def take( self, indices: _ArrayLikeIntOrBool, - axis: Optional[int] = ..., + axis: Optional[SupportsIndex] = ..., out: _NdArraySubClass = ..., mode: _ModeKind = ..., ) -> _NdArraySubClass: ... @@ -1459,37 +1473,40 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ... 
@overload - def item(self, *args: int) -> Any: ... + def item(self, *args: SupportsIndex) -> Any: ... @overload - def item(self, __args: Tuple[int, ...]) -> Any: ... + def item(self, __args: Tuple[SupportsIndex, ...]) -> Any: ... @overload def itemset(self, __value: Any) -> None: ... @overload def itemset(self, __item: _ShapeLike, __value: Any) -> None: ... @overload - def resize(self, __new_shape: Sequence[int], *, refcheck: bool = ...) -> None: ... + def resize(self, __new_shape: _ShapeLike, *, refcheck: bool = ...) -> None: ... @overload - def resize(self, *new_shape: int, refcheck: bool = ...) -> None: ... + def resize(self, *new_shape: SupportsIndex, refcheck: bool = ...) -> None: ... def setflags( self, write: bool = ..., align: bool = ..., uic: bool = ... ) -> None: ... def squeeze( - self: _ArraySelf, axis: Union[int, Tuple[int, ...]] = ... + self: _ArraySelf, axis: Union[SupportsIndex, Tuple[SupportsIndex, ...]] = ... ) -> _ArraySelf: ... - def swapaxes(self: _ArraySelf, axis1: int, axis2: int) -> _ArraySelf: ... + def swapaxes(self: _ArraySelf, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArraySelf: ... @overload - def transpose(self: _ArraySelf, __axes: Sequence[int]) -> _ArraySelf: ... + def transpose(self: _ArraySelf, __axes: _ShapeLike) -> _ArraySelf: ... @overload - def transpose(self: _ArraySelf, *axes: int) -> _ArraySelf: ... + def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ... def argpartition( self, kth: _ArrayLikeIntOrBool, - axis: Optional[int] = ..., + axis: Optional[SupportsIndex] = ..., kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., ) -> ndarray: ... def diagonal( - self: _ArraySelf, offset: int = ..., axis1: int = ..., axis2: int = ... + self: _ArraySelf, + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., ) -> _ArraySelf: ... @overload def dot(self, b: ArrayLike, out: None = ...) -> Union[number, ndarray]: ... @@ -1500,7 +1517,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def partition( self, kth: _ArrayLikeIntOrBool, - axis: int = ..., + axis: SupportsIndex = ..., kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., ) -> None: ... @@ -1516,29 +1533,29 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array ) -> ndarray: ... def setfield( - self, val: ArrayLike, dtype: DTypeLike, offset: int = ... + self, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = ... ) -> None: ... def sort( self, - axis: int = ..., + axis: SupportsIndex = ..., kind: Optional[_SortKind] = ..., order: Union[None, str, Sequence[str]] = ..., ) -> None: ... @overload def trace( self, # >= 2D array - offset: int = ..., - axis1: int = ..., - axis2: int = ..., + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., dtype: DTypeLike = ..., out: None = ..., ) -> Union[number, ndarray]: ... @overload def trace( self, # >= 2D array - offset: int = ..., - axis1: int = ..., - axis2: int = ..., + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... 
From 3510e04d618bb5aad39cd3422316d0031351d8c8 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Sun, 18 Apr 2021 00:41:44 +0200 Subject: [PATCH 0936/1270] typo fix within docstring ("is lies") --- numpy/core/code_generators/ufunc_docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 5026bfae8b5b..f19946be408a 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -4081,7 +4081,7 @@ def add_newdoc(place, name, doc): Decompose the elements of x into mantissa and twos exponent. Returns (`mantissa`, `exponent`), where `x = mantissa * 2**exponent``. - The mantissa is lies in the open interval(-1, 1), while the twos + The mantissa lies in the open interval(-1, 1), while the twos exponent is a signed integer. Parameters From 2232a473f8713f532c8164c8cf616f7bd05f54a7 Mon Sep 17 00:00:00 2001 From: Bharat123rox Date: Sun, 18 Apr 2021 12:10:07 +0530 Subject: [PATCH 0937/1270] DOC: Improve random.choice() documentation --- numpy/random/_generator.pyx | 7 ++++--- numpy/random/mtrand.pyx | 9 +++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index a68303501464..cd0b248723d2 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -614,11 +614,12 @@ cdef class Generator: len(size)``. Default is None, in which case a single value is returned. replace : bool, optional - Whether the sample is with or without replacement + Whether the sample is with or without replacement. Default is True, + meaning that a value of ``a`` can be selected multiple times. p : 1-D array_like, optional The probabilities associated with each entry in a. - If not given the sample assumes a uniform distribution over all - entries in a. + If not given, the sample assumes a uniform distribution over all + entries in ``a``. axis : int, optional The axis along which the selection is performed. The default, 0, selects by row. diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index e166634be1df..863879a0465f 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -820,17 +820,18 @@ cdef class RandomState: ---------- a : 1-D array-like or int If an ndarray, a random sample is generated from its elements. - If an int, the random sample is generated as if a were np.arange(a) + If an int, the random sample is generated as if it were ``np.arange(a)`` size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. Default is None, in which case a single value is returned. replace : boolean, optional - Whether the sample is with or without replacement + Whether the sample is with or without replacement. Default is True, + meaning that a value of ``a`` can be selected multiple times. p : 1-D array-like, optional The probabilities associated with each entry in a. - If not given the sample assumes a uniform distribution over all - entries in a. + If not given, the sample assumes a uniform distribution over all + entries in ``a``. 
Returns ------- From 8840b8d0088eb1b1c5b78587926b03a43db01e6e Mon Sep 17 00:00:00 2001 From: Christopher Dahlin Date: Mon, 19 Apr 2021 09:19:38 +0200 Subject: [PATCH 0938/1270] MAINT: clarify runtests.py verbosity help text --- runtests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtests.py b/runtests.py index 5885d2df647c..c71d8557a3dd 100755 --- a/runtests.py +++ b/runtests.py @@ -74,7 +74,7 @@ def main(argv): parser = ArgumentParser(usage=__doc__.lstrip()) parser.add_argument("--verbose", "-v", action="count", default=1, - help="more verbosity") + help="Add one verbosity level to pytest. Default is 0") parser.add_argument("--debug-info", action="store_true", help=("add --verbose-cfg to build_src to show compiler " "configuration output while creating " From fa0ad80819ef6827ee8d036ed272fbc5d5a58033 Mon Sep 17 00:00:00 2001 From: Christopher Dahlin Date: Mon, 19 Apr 2021 09:23:16 +0200 Subject: [PATCH 0939/1270] MAINT: make runtests.py help texts sentence case --- runtests.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/runtests.py b/runtests.py index c71d8557a3dd..cec2d494ddde 100755 --- a/runtests.py +++ b/runtests.py @@ -76,19 +76,19 @@ def main(argv): parser.add_argument("--verbose", "-v", action="count", default=1, help="Add one verbosity level to pytest. Default is 0") parser.add_argument("--debug-info", action="store_true", - help=("add --verbose-cfg to build_src to show compiler " + help=("Add --verbose-cfg to build_src to show compiler " "configuration output while creating " "_numpyconfig.h and config.h")) parser.add_argument("--no-build", "-n", action="store_true", default=False, - help="do not build the project (use system installed version)") + help="Do not build the project (use system installed version)") parser.add_argument("--build-only", "-b", action="store_true", default=False, - help="just build, do not run any tests") + help="Just build, do not run any tests") parser.add_argument("--doctests", action="store_true", default=False, help="Run doctests in module") parser.add_argument("--refguide-check", action="store_true", default=False, help="Run refguide (doctest) check (do not run regular tests.)") parser.add_argument("--coverage", action="store_true", default=False, - help=("report coverage of project code. HTML output goes " + help=("Report coverage of project code. HTML output goes " "under build/coverage")) parser.add_argument("--lint", default=None, help="'' or 'uncommitted', passed to " @@ -97,10 +97,10 @@ def main(argv): parser.add_argument("--durations", action="store", default=-1, type=int, help=("Time N slowest tests, time all if 0, time none if < 0")) parser.add_argument("--gcov", action="store_true", default=False, - help=("enable C code coverage via gcov (requires GCC). " + help=("Enable C code coverage via gcov (requires GCC). " "gcov output goes to build/**/*.gc*")) parser.add_argument("--lcov-html", action="store_true", default=False, - help=("produce HTML for C code coverage information " + help=("Produce HTML for C code coverage information " "from a previous run with --gcov. 
" "HTML output goes to build/lcov/")) parser.add_argument("--mode", "-m", default="fast", From 6d82526cd3f6d8319ef718332dd1091e8fdca877 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Apr 2021 07:46:02 +0000 Subject: [PATCH 0940/1270] MAINT: Bump hypothesis from 6.9.1 to 6.10.0 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.9.1 to 6.10.0. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.9.1...hypothesis-python-6.10.0) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 902ff3306b65..0ddf21810ace 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.22 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.9.1 +hypothesis==6.10.0 pytest==6.2.3 pytz==2021.1 pytest-cov==2.11.1 From 7c7d68d6bdf144da31dfa872932f0c751fcb8e66 Mon Sep 17 00:00:00 2001 From: Christopher Dahlin Date: Mon, 19 Apr 2021 10:24:46 +0200 Subject: [PATCH 0941/1270] MAINT: make runtests.py argument list fulfill E501 --- runtests.py | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/runtests.py b/runtests.py index cec2d494ddde..7216712e6970 100755 --- a/runtests.py +++ b/runtests.py @@ -76,29 +76,32 @@ def main(argv): parser.add_argument("--verbose", "-v", action="count", default=1, help="Add one verbosity level to pytest. Default is 0") parser.add_argument("--debug-info", action="store_true", - help=("Add --verbose-cfg to build_src to show compiler " - "configuration output while creating " + help=("Add --verbose-cfg to build_src to show compiler" + " configuration output while creating " "_numpyconfig.h and config.h")) parser.add_argument("--no-build", "-n", action="store_true", default=False, - help="Do not build the project (use system installed version)") - parser.add_argument("--build-only", "-b", action="store_true", default=False, - help="Just build, do not run any tests") + help="Do not build the project (use system installed " + "version)") + parser.add_argument("--build-only", "-b", action="store_true", + default=False, help="Just build, do not run any tests") parser.add_argument("--doctests", action="store_true", default=False, help="Run doctests in module") parser.add_argument("--refguide-check", action="store_true", default=False, - help="Run refguide (doctest) check (do not run regular tests.)") + help="Run refguide (doctest) check (do not run regular" + " tests.)") parser.add_argument("--coverage", action="store_true", default=False, - help=("Report coverage of project code. HTML output goes " - "under build/coverage")) + help=("Report coverage of project code. HTML output " + "goes under build/coverage")) parser.add_argument("--lint", default=None, help="'' or 'uncommitted', passed to " "tools/linter.py [--branch BRANCH] " "[--uncommitted]") parser.add_argument("--durations", action="store", default=-1, type=int, - help=("Time N slowest tests, time all if 0, time none if < 0")) + help=("Time N slowest tests, time all if 0, time none " + "if < 0")) parser.add_argument("--gcov", action="store_true", default=False, - help=("Enable C code coverage via gcov (requires GCC). " - "gcov output goes to build/**/*.gc*")) + help=("Enable C code coverage via gcov (requires GCC)." 
+ " gcov output goes to build/**/*.gc*")) parser.add_argument("--lcov-html", action="store_true", default=False, help=("Produce HTML for C code coverage information " "from a previous run with --gcov. " @@ -107,7 +110,8 @@ def main(argv): help="'fast', 'full', or something that could be " "passed to nosetests -A [default: fast]") parser.add_argument("--submodule", "-s", default=None, - help="Submodule whose tests to run (cluster, constants, ...)") + help="Submodule whose tests to run (cluster, constants" + ", ...)") parser.add_argument("--pythonpath", "-p", default=None, help="Paths to prepend to PYTHONPATH") parser.add_argument("--tests", "-t", action='append', @@ -125,13 +129,16 @@ def main(argv): parser.add_argument("--parallel", "-j", type=int, default=0, help="Number of parallel jobs during build") parser.add_argument("--warn-error", action="store_true", - help="Set -Werror to convert all compiler warnings to errors") + help="Set -Werror to convert all compiler warnings to " + "errors") parser.add_argument("--cpu-baseline", default=None, - help="Specify a list of enabled baseline CPU optimizations"), + help="Specify a list of enabled baseline CPU " + "optimizations"), parser.add_argument("--cpu-dispatch", default=None, help="Specify a list of dispatched CPU optimizations"), parser.add_argument("--disable-optimization", action="store_true", - help="Disable CPU optimized code(dispatch,simd,fast...)"), + help="Disable CPU optimized code (dispatch, simd, fast" + ", ...)"), parser.add_argument("--simd-test", default=None, help="Specify a list of CPU optimizations to be " "tested against NumPy SIMD interface"), @@ -146,7 +153,8 @@ def main(argv): "COMMIT. Note that you need to commit your " "changes first!")) parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER, - help="Arguments to pass to pytest, asv, mypy, Python or shell") + help="Arguments to pass to pytest, asv, mypy, Python " + "or shell") args = parser.parse_args(argv) if args.durations < 0: From 6cbe53eb3653162534e73112064c80f494788c4c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 19 Apr 2021 13:26:02 +0200 Subject: [PATCH 0942/1270] ENH: Define proper (unannotated) parameters for the class constructors --- numpy/__init__.pyi | 105 +++++++++++++++++++++++++++++++++++++-------- 1 file changed, 88 insertions(+), 17 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 826448b13333..a550d673c969 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -369,67 +369,138 @@ __git_version__: str # their annotations are properly implemented # # Placeholders for classes -# TODO: Remove `__getattr__` and update/replace `__new__` once -# the classes are stubbed out +# TODO: Remove `__getattr__` once the classes are stubbed out class DataSource: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __init__(self, destpath: Any = ...) -> None: ... def __getattr__(self, key: str) -> Any: ... class MachAr: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __init__( + self, + float_conv: Any = ..., + int_conv: Any = ..., + float_to_float: Any = ..., + float_to_str: Any = ..., + title: Any = ..., + ) -> None: ... def __getattr__(self, key: str) -> Any: ... class broadcast: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __new__(cls: Any, *args: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class busdaycalendar: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __new__(cls, weekmask: Any = ..., holidays: Any = ...) -> Any: ... 
def __getattr__(self, key: str) -> Any: ... class chararray(ndarray[_ShapeType, _DType_co]): - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __new__( + subtype, + shape: Any, + itemsize: Any = ..., + unicode: Any = ..., + buffer: Any = ..., + offset: Any = ..., + strides: Any = ..., + order: Any = ..., + ) -> Any: ... def __getattr__(self, key: str) -> Any: ... class finfo: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __new__(cls, dtype: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class format_parser: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __init__( + self, + formats: Any, + names: Any, + titles: Any, + aligned: Any = ..., + byteorder: Any = ..., + ) -> None: ... def __getattr__(self, key: str) -> Any: ... class iinfo: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __init__(self, int_type: Any) -> None: ... def __getattr__(self, key: str) -> Any: ... class matrix(ndarray[_ShapeType, _DType_co]): - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __new__( + subtype, + data: Any, + dtype: Any = ..., + copy: Any = ..., + ) -> Any: ... def __getattr__(self, key: str) -> Any: ... class memmap(ndarray[_ShapeType, _DType_co]): - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __new__( + subtype, + filename: Any, + dtype: Any = ..., + mode: Any = ..., + offset: Any = ..., + shape: Any = ..., + order: Any = ..., + ) -> Any: ... def __getattr__(self, key: str) -> Any: ... class nditer: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __new__( + cls, + op: Any, + flags: Any = ..., + op_flags: Any = ..., + op_dtypes: Any = ..., + order: Any = ..., + casting: Any = ..., + op_axes: Any = ..., + itershape: Any = ..., + buffersize: Any = ..., + ) -> Any: ... def __getattr__(self, key: str) -> Any: ... class poly1d: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __init__( + self, + c_or_r: Any, + r: Any = ..., + variable: Any = ..., + ) -> None: ... def __call__(self, val: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class recarray(ndarray[_ShapeType, _DType_co]): - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __new__( + subtype, + shape: Any, + dtype: Any = ..., + buf: Any = ..., + offset: Any = ..., + strides: Any = ..., + formats: Any = ..., + names: Any = ..., + titles: Any = ..., + byteorder: Any = ..., + aligned: Any = ..., + order: Any = ..., + ) -> Any: ... def __getattr__(self, key: str) -> Any: ... class record(void): - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... class vectorize: - def __new__(self, *args: Any, **kwargs: Any) -> Any: ... + def __init__( + self, + pyfunc, + otypes: Any = ..., + doc: Any = ..., + excluded: Any = ..., + cache: Any = ..., + signature: Any = ..., + ) -> None: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... From 6bc0c88d5dd5aac26a9e134dfed97bf0ad7150e8 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Apr 2021 11:28:07 +0000 Subject: [PATCH 0943/1270] MAINT: Bump cython from 0.29.22 to 0.29.23 Bumps [cython](https://github.com/cython/cython) from 0.29.22 to 0.29.23. 
- [Release notes](https://github.com/cython/cython/releases) - [Changelog](https://github.com/cython/cython/blob/master/CHANGES.rst) - [Commits](https://github.com/cython/cython/compare/0.29.22...0.29.23) Signed-off-by: dependabot-preview[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 0ddf21810ace..3a4a89969818 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,4 +1,4 @@ -cython==0.29.22 +cython==0.29.23 wheel<0.36.3 setuptools<49.2.0 hypothesis==6.10.0 From 0bc562e24856145027d66c5722c2305e02c0a487 Mon Sep 17 00:00:00 2001 From: Ayush Verma Date: Mon, 19 Apr 2021 20:39:04 +0530 Subject: [PATCH 0944/1270] added note about import numpy error when building numpy in a venv --- doc/source/docs/howto_build_docs.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst index 5db67b9b8231..2f19d3c98d97 100644 --- a/doc/source/docs/howto_build_docs.rst +++ b/doc/source/docs/howto_build_docs.rst @@ -48,6 +48,7 @@ with:: Now you are ready to generate the docs, so write:: + cd doc make html in the ``doc/`` directory. If all goes well, this will generate a @@ -55,6 +56,14 @@ in the ``doc/`` directory. If all goes well, this will generate a a message about ``installed numpy != current repo git version``, you must either override the check by setting ``GITVER`` or re-install NumPy. +Note that if you have built numpy into a virtual enviornment and getting +an error that says ``numpy not found, cannot build documentation without successful "import numpy"`` +you need to override the makefile variable at the commandline so instead of writing ``make html`` +write:: + + make PYTHON=python html + + Note that building the documentation on Windows is currently not actively supported, though it should be possible. (See Sphinx_ documentation for more information.) From b16429d188ae8427585ea5b6759579e94057d804 Mon Sep 17 00:00:00 2001 From: Christopher Dahlin Date: Mon, 19 Apr 2021 19:15:12 +0200 Subject: [PATCH 0945/1270] MAINT: make runtests.py argument list help strings end with space --- runtests.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/runtests.py b/runtests.py index 7216712e6970..ea16845e327f 100755 --- a/runtests.py +++ b/runtests.py @@ -76,8 +76,8 @@ def main(argv): parser.add_argument("--verbose", "-v", action="count", default=1, help="Add one verbosity level to pytest. Default is 0") parser.add_argument("--debug-info", action="store_true", - help=("Add --verbose-cfg to build_src to show compiler" - " configuration output while creating " + help=("Add --verbose-cfg to build_src to show " + "compiler configuration output while creating " "_numpyconfig.h and config.h")) parser.add_argument("--no-build", "-n", action="store_true", default=False, help="Do not build the project (use system installed " @@ -87,8 +87,8 @@ def main(argv): parser.add_argument("--doctests", action="store_true", default=False, help="Run doctests in module") parser.add_argument("--refguide-check", action="store_true", default=False, - help="Run refguide (doctest) check (do not run regular" - " tests.)") + help="Run refguide (doctest) check (do not run " + "regular tests.)") parser.add_argument("--coverage", action="store_true", default=False, help=("Report coverage of project code. 
HTML output " "goes under build/coverage")) @@ -100,8 +100,8 @@ def main(argv): help=("Time N slowest tests, time all if 0, time none " "if < 0")) parser.add_argument("--gcov", action="store_true", default=False, - help=("Enable C code coverage via gcov (requires GCC)." - " gcov output goes to build/**/*.gc*")) + help=("Enable C code coverage via gcov (requires " + "GCC). gcov output goes to build/**/*.gc*")) parser.add_argument("--lcov-html", action="store_true", default=False, help=("Produce HTML for C code coverage information " "from a previous run with --gcov. " @@ -110,8 +110,8 @@ def main(argv): help="'fast', 'full', or something that could be " "passed to nosetests -A [default: fast]") parser.add_argument("--submodule", "-s", default=None, - help="Submodule whose tests to run (cluster, constants" - ", ...)") + help="Submodule whose tests to run (cluster, " + "constants, ...)") parser.add_argument("--pythonpath", "-p", default=None, help="Paths to prepend to PYTHONPATH") parser.add_argument("--tests", "-t", action='append', @@ -137,8 +137,8 @@ def main(argv): parser.add_argument("--cpu-dispatch", default=None, help="Specify a list of dispatched CPU optimizations"), parser.add_argument("--disable-optimization", action="store_true", - help="Disable CPU optimized code (dispatch, simd, fast" - ", ...)"), + help="Disable CPU optimized code (dispatch, simd, " + "fast, ...)"), parser.add_argument("--simd-test", default=None, help="Specify a list of CPU optimizations to be " "tested against NumPy SIMD interface"), From a8fc5bddaff31bfda7da0420e7c7337ae20e9ee3 Mon Sep 17 00:00:00 2001 From: Ayush Verma Date: Tue, 20 Apr 2021 01:16:30 +0530 Subject: [PATCH 0946/1270] word wrapped around 80 chars and proper ellipses as per suggested --- doc/source/docs/howto_build_docs.rst | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst index 2f19d3c98d97..fd1a26d1d88e 100644 --- a/doc/source/docs/howto_build_docs.rst +++ b/doc/source/docs/howto_build_docs.rst @@ -56,13 +56,10 @@ in the ``doc/`` directory. If all goes well, this will generate a a message about ``installed numpy != current repo git version``, you must either override the check by setting ``GITVER`` or re-install NumPy. -Note that if you have built numpy into a virtual enviornment and getting -an error that says ``numpy not found, cannot build documentation without successful "import numpy"`` -you need to override the makefile variable at the commandline so instead of writing ``make html`` -write:: +Note that if you have built numpy into a virtual enviornment and getting an error that says ``numpy not found, cannot build documentation without...`` you need to override the makefile variable at the commandline, so instead of writing ``make html`` write:: make PYTHON=python html - + Note that building the documentation on Windows is currently not actively supported, though it should be possible. (See Sphinx_ documentation From fb0526919a427ffc5d12637b962552b4c877f443 Mon Sep 17 00:00:00 2001 From: Ayush Verma Date: Tue, 20 Apr 2021 01:23:58 +0530 Subject: [PATCH 0947/1270] added 'PYTHON variable' --- doc/source/docs/howto_build_docs.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst index fd1a26d1d88e..e017ef03ff4d 100644 --- a/doc/source/docs/howto_build_docs.rst +++ b/doc/source/docs/howto_build_docs.rst @@ -56,7 +56,7 @@ in the ``doc/`` directory. 
If all goes well, this will generate a a message about ``installed numpy != current repo git version``, you must either override the check by setting ``GITVER`` or re-install NumPy. -Note that if you have built numpy into a virtual enviornment and getting an error that says ``numpy not found, cannot build documentation without...`` you need to override the makefile variable at the commandline, so instead of writing ``make html`` write:: +Note that if you have built numpy into a virtual enviornment and getting an error that says ``numpy not found, cannot build documentation without...`` you need to override the makefile ``PYTHON`` variable at the commandline, so instead of writing ``make html`` write:: make PYTHON=python html From 10308afc38c04be94bfb99cc821d22c01118b761 Mon Sep 17 00:00:00 2001 From: Ayush Verma Date: Tue, 20 Apr 2021 02:15:07 +0530 Subject: [PATCH 0948/1270] line breaks maintaining <80 chars --- doc/source/docs/howto_build_docs.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst index e017ef03ff4d..d54c52e8fd6c 100644 --- a/doc/source/docs/howto_build_docs.rst +++ b/doc/source/docs/howto_build_docs.rst @@ -56,7 +56,11 @@ in the ``doc/`` directory. If all goes well, this will generate a a message about ``installed numpy != current repo git version``, you must either override the check by setting ``GITVER`` or re-install NumPy. -Note that if you have built numpy into a virtual enviornment and getting an error that says ``numpy not found, cannot build documentation without...`` you need to override the makefile ``PYTHON`` variable at the commandline, so instead of writing ``make html`` write:: +Note that if you have built numpy into a virtual enviornment and +getting an error that says ``numpy not found, cannot build +documentation without...`` you need to override the makefile +``PYTHON`` variable at the commandline, so instead of writing ``make +html`` write:: make PYTHON=python html From 1928509787a031e151c4f69a5cec9533466e99b6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 19 Apr 2021 15:09:02 -0600 Subject: [PATCH 0949/1270] MAINT: Fix spelling and layout. --- doc/source/docs/howto_build_docs.rst | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst index d54c52e8fd6c..38ea1338c2a1 100644 --- a/doc/source/docs/howto_build_docs.rst +++ b/doc/source/docs/howto_build_docs.rst @@ -56,11 +56,10 @@ in the ``doc/`` directory. If all goes well, this will generate a a message about ``installed numpy != current repo git version``, you must either override the check by setting ``GITVER`` or re-install NumPy. 
-Note that if you have built numpy into a virtual enviornment and -getting an error that says ``numpy not found, cannot build -documentation without...`` you need to override the makefile -``PYTHON`` variable at the commandline, so instead of writing ``make -html`` write:: +If you have built numpy into a virtual environment and get an error +that says ``numpy not found, cannot build documentation without...``, +you need to override the makefile ``PYTHON`` variable at the command +line, so instead of writing ``make html`` write:: make PYTHON=python html From 3e8b865690457abf8ec710e2fd867476432bea73 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 24 Feb 2021 00:47:52 +0100 Subject: [PATCH 0950/1270] STY: Renamed a variable `_DefaultType` -> `_T` --- numpy/core/numerictypes.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi index 238495fd39e3..c8b4e1c658c2 100644 --- a/numpy/core/numerictypes.pyi +++ b/numpy/core/numerictypes.pyi @@ -3,7 +3,7 @@ from typing import TypeVar, Optional, Type, Union, Tuple, Sequence, overload, An from numpy import generic, ndarray, dtype from numpy.typing import DTypeLike -_DefaultType = TypeVar("_DefaultType") +_T = TypeVar("_T") def maximum_sctype(t: DTypeLike) -> dtype: ... def issctype(rep: object) -> bool: ... @@ -13,8 +13,8 @@ def obj2sctype(rep: object) -> Optional[generic]: ... def obj2sctype(rep: object, default: None) -> Optional[generic]: ... @overload def obj2sctype( - rep: object, default: Type[_DefaultType] -) -> Union[generic, Type[_DefaultType]]: ... + rep: object, default: Type[_T] +) -> Union[generic, Type[_T]]: ... def issubclass_(arg1: object, arg2: Union[object, Tuple[object, ...]]) -> bool: ... def issubsctype( arg1: Union[ndarray, DTypeLike], arg2: Union[ndarray, DTypeLike] From 83c420f9d0a81e546b61af36392c4cbddcd727a6 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 24 Feb 2021 00:49:52 +0100 Subject: [PATCH 0951/1270] ENH: Add annotations for 4 objects in `np.core.numerictypes` * `cast` * `nbytes` * `ScalarType` * `typecodes` --- numpy/__init__.pyi | 12 +++----- numpy/core/numerictypes.pyi | 56 ++++++++++++++++++++++++++++++++++--- 2 files changed, 56 insertions(+), 12 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2fa5b3d41eae..08050a524679 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -327,6 +327,10 @@ from numpy.core.numerictypes import ( issubdtype as issubdtype, sctype2char as sctype2char, find_common_type as find_common_type, + nbytes as nbytes, + cast as cast, + ScalarType as ScalarType, + typecodes as typecodes, ) from numpy.core.shape_base import ( @@ -504,14 +508,6 @@ class vectorize: def __call__(self, *args: Any, **kwargs: Any) -> Any: ... def __getattr__(self, key: str) -> Any: ... -# Placeholders for miscellaneous objects -# NOTE: `cast` and `nbytes` are in fact instances of a `dict` subclass that -# converts passed `DTypeLike` objects into the actual keys (`np.generic`) -ScalarType: Tuple[Type[Any], ...] -cast: Dict[DTypeLike, Callable[..., ndarray[Any, dtype[Any]]]] -nbytes: Dict[DTypeLike, int] -typecodes: Dict[str, str] - # Placeholders for Python-based functions def angle(z, deg=...): ... def append(arr, values, axis=...): ... 
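The four objects annotated by this patch (``cast``, ``nbytes``, ``ScalarType`` and ``typecodes``) are plain runtime containers, so their behaviour is easy to sketch (the exact values assume a typical 64-bit platform)::

    import numpy as np

    np.nbytes[np.int64]                    # 8 -- item size for a dtype-like key
    np.cast[np.float32]([1, 2, 3]).dtype   # dtype('float32')
    np.typecodes["AllInteger"]             # 'bBhHiIlLqQpP'
    np.ScalarType[:4]                      # (int, float, complex, int)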
diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi index c8b4e1c658c2..1c9c84e953bf 100644 --- a/numpy/core/numerictypes.pyi +++ b/numpy/core/numerictypes.pyi @@ -1,9 +1,55 @@ -from typing import TypeVar, Optional, Type, Union, Tuple, Sequence, overload, Any +import sys +from typing import ( + TypeVar, + Optional, + Type, + Union, + Tuple, + Sequence, + overload, + Any, + TypeVar, + Dict, + List, +) from numpy import generic, ndarray, dtype -from numpy.typing import DTypeLike + +from numpy.core._type_aliases import ( + sctypeDict as sctypeDict, + sctypes as sctypes, +) + +from numpy.typing import DTypeLike, ArrayLike + +if sys.version_info >= (3, 8): + from typing import Literal, Protocol, TypedDict +else: + from typing_extensions import Literal, Protocol, TypedDict _T = TypeVar("_T") +_ScalarType = TypeVar("_ScalarType", bound=generic) + +class _CastFunc(Protocol): + def __call__( + self, x: ArrayLike, k: DTypeLike = ... + ) -> ndarray[Any, dtype[Any]]: ... + +class _TypeCodes(TypedDict): + Character: Literal['c'] + Integer: Literal['bhilqp'] + UnsignedInteger: Literal['BHILQP'] + Float: Literal['efdg'] + Complex: Literal['FDG'] + AllInteger: Literal['bBhHiIlLqQpP'] + AllFloat: Literal['efdgFDG'] + Datetime: Literal['Mm'] + All: Literal['?bhilqpBHILQPefdgFDGSUVOMm'] + +class _typedict(Dict[Type[generic], _T]): + def __getitem__(self, key: DTypeLike) -> _T: ... + +__all__: List[str] def maximum_sctype(t: DTypeLike) -> dtype: ... def issctype(rep: object) -> bool: ... @@ -25,5 +71,7 @@ def find_common_type( array_types: Sequence[DTypeLike], scalar_types: Sequence[DTypeLike] ) -> dtype: ... -# TODO: Add annotations for the following objects: -# nbytes, cast, ScalarType & typecodes +cast: _typedict[_CastFunc] +nbytes: _typedict[int] +typecodes: _TypeCodes +ScalarType: Tuple[type, ...] From 72c5ab87ade16cabcb21d7a9a9177fbff587a4f8 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 24 Feb 2021 00:51:19 +0100 Subject: [PATCH 0952/1270] DOC: Add a note regarding future work --- numpy/core/numerictypes.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi index 1c9c84e953bf..cf2cc1e8db75 100644 --- a/numpy/core/numerictypes.pyi +++ b/numpy/core/numerictypes.pyi @@ -51,6 +51,8 @@ class _typedict(Dict[Type[generic], _T]): __all__: List[str] +# TODO: Clean up the annotations for the 7 functions below + def maximum_sctype(t: DTypeLike) -> dtype: ... def issctype(rep: object) -> bool: ... 
@overload From a25f8b9f964095aebcd2b218586cdf39598c8cf6 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 24 Feb 2021 15:37:19 +0100 Subject: [PATCH 0953/1270] TST: Update the `np.core.numerictypes` typing tests --- numpy/typing/tests/data/pass/numerictypes.py | 18 ++++++++++++++++++ numpy/typing/tests/data/reveal/numerictypes.py | 18 ++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/numpy/typing/tests/data/pass/numerictypes.py b/numpy/typing/tests/data/pass/numerictypes.py index 4f205cabcb0d..5af0d171ca04 100644 --- a/numpy/typing/tests/data/pass/numerictypes.py +++ b/numpy/typing/tests/data/pass/numerictypes.py @@ -27,3 +27,21 @@ np.find_common_type((), (np.int64, np.float32, complex)) np.find_common_type([np.int64, np.float32], []) np.find_common_type([np.float32], [np.int64, np.float64]) + +np.cast[int] +np.cast["i8"] +np.cast[np.int64] + +np.nbytes[int] +np.nbytes["i8"] +np.nbytes[np.int64] + +np.ScalarType +np.ScalarType[0] +np.ScalarType[4] +np.ScalarType[9] +np.ScalarType[11] + +np.typecodes["Character"] +np.typecodes["Complex"] +np.typecodes["All"] diff --git a/numpy/typing/tests/data/reveal/numerictypes.py b/numpy/typing/tests/data/reveal/numerictypes.py index e026158cd14f..0f886b3fbff4 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.py +++ b/numpy/typing/tests/data/reveal/numerictypes.py @@ -16,3 +16,21 @@ reveal_type(np.sctype2char(list)) # E: str reveal_type(np.find_common_type([np.int64], [np.int64])) # E: numpy.dtype + +reveal_type(np.cast[int]) # E: _CastFunc +reveal_type(np.cast["i8"]) # E: _CastFunc +reveal_type(np.cast[np.int64]) # E: _CastFunc + +reveal_type(np.nbytes[int]) # E: int +reveal_type(np.nbytes["i8"]) # E: int +reveal_type(np.nbytes[np.int64]) # E: int + +reveal_type(np.ScalarType) # E: Tuple +reveal_type(np.ScalarType[0]) # E: Type[builtins.int] +reveal_type(np.ScalarType[4]) # E: Type[builtins.bool] +reveal_type(np.ScalarType[9]) # E: Type[{csingle}] +reveal_type(np.ScalarType[11]) # E: Type[{clongdouble}] + +reveal_type(np.typecodes["Character"]) # E: Literal['c'] +reveal_type(np.typecodes["Complex"]) # E: Literal['FDG'] +reveal_type(np.typecodes["All"]) # E: Literal['?bhilqpBHILQPefdgFDGSUVOMm'] From e4b10510507d0b4d22e8fa9589a6232889d40fac Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 20 Apr 2021 14:22:24 +0200 Subject: [PATCH 0954/1270] DEP: Remove python 2-specific code --- numpy/core/numerictypes.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 93af5c95d986..467e2ed23ec8 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -79,7 +79,6 @@ \\-> object_ (not used much) (kind=O) """ -import types as _types import numbers import warnings @@ -512,14 +511,7 @@ def sctype2char(sctype): for key in _concrete_types: cast[key] = lambda x, k=key: array(x, copy=False).astype(k) -try: - ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType, - _types.LongType, _types.BooleanType, - _types.StringType, _types.UnicodeType, _types.BufferType] -except AttributeError: - # Py3K - ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] - +ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] ScalarType.extend(_concrete_types) ScalarType = tuple(ScalarType) From 37015a2786115aa554bb9abfd7396a5a707bc931 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 20 Apr 2021 14:36:10 +0200 Subject: [PATCH 0955/1270] ENH: Sort the set of `generic` subclasses before appending it to 
`ScalarType` --- numpy/core/numerictypes.py | 9 ++++- numpy/core/numerictypes.pyi | 65 +++++++++++++++++++++++++++++++++++-- 2 files changed, 71 insertions(+), 3 deletions(-) diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 467e2ed23ec8..12f424fd4167 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -511,8 +511,15 @@ def sctype2char(sctype): for key in _concrete_types: cast[key] = lambda x, k=key: array(x, copy=False).astype(k) + +def _scalar_type_key(typ): + """A ``key`` function for `sorted`.""" + dt = dtype(typ) + return (dt.kind.lower(), dt.itemsize) + + ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] -ScalarType.extend(_concrete_types) +ScalarType += sorted(_concrete_types, key=_scalar_type_key) ScalarType = tuple(ScalarType) diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi index cf2cc1e8db75..fd4aa3fdada3 100644 --- a/numpy/core/numerictypes.pyi +++ b/numpy/core/numerictypes.pyi @@ -13,7 +13,35 @@ from typing import ( List, ) -from numpy import generic, ndarray, dtype +from numpy import ( + ndarray, + dtype, + generic, + bool_, + ubyte, + ushort, + uintc, + uint, + ulonglong, + byte, + short, + intc, + int_, + longlong, + half, + single, + double, + longdouble, + csingle, + cdouble, + clongdouble, + datetime64, + timedelta64, + object_, + str_, + bytes_, + void, +) from numpy.core._type_aliases import ( sctypeDict as sctypeDict, @@ -76,4 +104,37 @@ def find_common_type( cast: _typedict[_CastFunc] nbytes: _typedict[int] typecodes: _TypeCodes -ScalarType: Tuple[type, ...] +ScalarType: Tuple[ + Type[int], + Type[float], + Type[complex], + Type[int], + Type[bool], + Type[bytes], + Type[str], + Type[memoryview], + Type[bool_], + Type[csingle], + Type[cdouble], + Type[clongdouble], + Type[half], + Type[single], + Type[double], + Type[longdouble], + Type[byte], + Type[short], + Type[intc], + Type[int_], + Type[longlong], + Type[timedelta64], + Type[datetime64], + Type[object_], + Type[bytes_], + Type[str_], + Type[ubyte], + Type[ushort], + Type[uintc], + Type[uint], + Type[ulonglong], + Type[void], +] From 809734a9b7685b0127345f62b3ca6940a851acab Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Tue, 20 Apr 2021 15:17:03 +0100 Subject: [PATCH 0956/1270] MAINT: Remove incorrect inline Remove request for inlining that is incorrect where located xref #13816 --- numpy/core/src/multiarray/array_coercion.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index 5be4ea2b270f..ef99ae479bcf 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -573,7 +573,7 @@ npy_new_coercion_cache( * @param current * @return next coercion cache object (or NULL) */ -NPY_NO_EXPORT NPY_INLINE coercion_cache_obj * +NPY_NO_EXPORT coercion_cache_obj * npy_unlink_coercion_cache(coercion_cache_obj *current) { coercion_cache_obj *next = current->next; @@ -588,7 +588,7 @@ npy_unlink_coercion_cache(coercion_cache_obj *current) return next; } -NPY_NO_EXPORT NPY_INLINE void +NPY_NO_EXPORT void npy_free_coercion_cache(coercion_cache_obj *next) { /* We only need to check from the last used cache pos */ while (next != NULL) { From bb2a1ad9161838212ac90af4f6d6af430bbab00d Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 19 Apr 2021 19:16:58 -0500 Subject: [PATCH 0957/1270] BUG: Initialize the full nditer buffer in case of error This is necessary because in some rare 
cases (reductions), we may not actually use the full buffer. In that case, the cleanup-on-error code would have to grow smart enough to handle these cases. It seems much simpler to just always initialize the full buffers, even if we may not end up using them. Admittedly, the old logic might have skipped the buffer clearing (especially the full buffer) in a fair bit of cases, but since this is only relevant for `object` dtype, I assume this is fine. --- numpy/core/src/multiarray/nditer_api.c | 3 +++ numpy/core/src/multiarray/nditer_constr.c | 3 +++ numpy/core/tests/test_nditer.py | 19 +++++++++++++++++++ 3 files changed, 25 insertions(+) diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index 81209651bbf1..063e30919e44 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -1760,6 +1760,9 @@ npyiter_allocate_buffers(NpyIter *iter, char **errmsg) } goto fail; } + if (PyDataType_FLAGCHK(op_dtype[iop], NPY_NEEDS_INIT)) { + memset(buffer, '\0', itemsize*buffersize); + } buffers[iop] = buffer; } } diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c index 982dca8495ad..2197fe798be5 100644 --- a/numpy/core/src/multiarray/nditer_constr.c +++ b/numpy/core/src/multiarray/nditer_constr.c @@ -594,6 +594,9 @@ NpyIter_Copy(NpyIter *iter) if (buffers[iop] == NULL) { out_of_memory = 1; } + if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) { + memset(buffers[iop], '\0', itemsize*buffersize); + } } } diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index ddcc8f2834b1..c32822944599 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2920,6 +2920,25 @@ def __bool__(self): assert_raises(TypeError, np.logical_or.reduce, np.array([T(), T()], dtype='O')) +def test_object_iter_cleanup_reduce(): + # Similar as above, but a complex reduction case that was previously + # missed (see gh-18810)/ + # the following array is special in that it cananot be flattened: + arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]])[::2] + with pytest.raises(TypeError): + np.sum(arr) + +@pytest.mark.parametrize("arr", [ + np.ones((8000, 4, 2), dtype=object)[:, ::2, :], + np.ones((8000, 4, 2), dtype=object, order="F")[:, ::2, :], + np.ones((8000, 4, 2), dtype=object)[:, ::2, :].copy("F")]) +def test_object_iter_cleanup_large_reduce(arr): + # More complicated calls are possible for large arrays: + out = np.ones(8000, dtype=np.intp) + # force casting with `dtype=object` + res = np.sum(arr, axis=(1, 2), dtype=object, out=out) + assert_array_equal(res, np.full(8000, 4, dtype=object)) + def test_iter_too_large(): # The total size of the iterator must not exceed the maximum intp due # to broadcasting. Dividing by 1024 will keep it small enough to From b2926034b6056e42525ad702b380339940755124 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 19 Apr 2021 19:20:50 -0500 Subject: [PATCH 0958/1270] MAINT: Remove buffer-clearing from copy code Buffer must always either contain NULL or valid references (assuming the dtype supports references) in order to allow cleanup in case of errors. It is thus unnecessary to clear buffers before every copy, if they contain NULL, all is fine. If they contain non-NULL, we should also DECREF those references (so it would be incorrect as well). Buffers thus need the memset exactly once: directly upon allcoation (which we do now). 
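The Python-level failure that motivates the buffer handling in this patch and the previous one can be reproduced with an object array that cannot be flattened; this mirrors the regression test added above (the array values are arbitrary, the ``None`` entries only serve to force an error mid-reduction)::

    import numpy as np

    arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]], dtype=object)[::2]
    try:
        np.sum(arr)          # adding None and an int raises TypeError
    except TypeError:
        pass                 # the error path must then clean up the iterator buffers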
After this any transfer from/to the buffer needs to ensure that the buffer is always in a good state. --- numpy/core/src/multiarray/nditer_api.c | 32 ++++++++++---------------- numpy/core/tests/test_nditer.py | 4 ++-- 2 files changed, 14 insertions(+), 22 deletions(-) diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index 063e30919e44..a1ca5bff51d4 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -2532,16 +2532,18 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) skip_transfer = 1; } - /* If the data type requires zero-inititialization */ - if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) { - NPY_IT_DBG_PRINT("Iterator: Buffer requires init, " - "memsetting to 0\n"); - memset(ptrs[iop], 0, dtypes[iop]->elsize*op_transfersize); - /* Can't skip the transfer in this case */ - skip_transfer = 0; - } - - if (!skip_transfer) { + /* + * Copy data to the buffers if necessary. + * + * We always copy if the operand has references. In that case + * a "write" function must be in use that either copies or clears + * the buffer. + * This write from buffer call does not check for skip-transfer + * so we have to assume the buffer is cleared. For dtypes that + * do not have references, we can assume that the write function + * will leave the source (buffer) unmodified. + */ + if (!skip_transfer || PyDataType_REFCHK(dtypes[iop])) { NPY_IT_DBG_PRINT2("Iterator: Copying operand %d to " "buffer (%d items)\n", (int)iop, (int)op_transfersize); @@ -2557,16 +2559,6 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) } } } - else if (ptrs[iop] == buffers[iop]) { - /* If the data type requires zero-inititialization */ - if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) { - NPY_IT_DBG_PRINT1("Iterator: Write-only buffer for " - "operand %d requires init, " - "memsetting to 0\n", (int)iop); - memset(ptrs[iop], 0, dtypes[iop]->elsize*transfersize); - } - } - } /* diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index c32822944599..b44343c5755c 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2922,8 +2922,8 @@ def __bool__(self): def test_object_iter_cleanup_reduce(): # Similar as above, but a complex reduction case that was previously - # missed (see gh-18810)/ - # the following array is special in that it cananot be flattened: + # missed (see gh-18810). + # The following array is special in that it cannot be flattened: arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]])[::2] with pytest.raises(TypeError): np.sum(arr) From c559bc8038ab684c68601757d2735c7be39befc4 Mon Sep 17 00:00:00 2001 From: Nirjas Jakilim Date: Wed, 21 Apr 2021 13:41:13 +0600 Subject: [PATCH 0959/1270] Minor fix to add reference link to numpy.fill_diagonal function --- doc/source/user/tutorial-svd.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/tutorial-svd.rst b/doc/source/user/tutorial-svd.rst index fd9e366e0c77..7b905e51eede 100644 --- a/doc/source/user/tutorial-svd.rst +++ b/doc/source/user/tutorial-svd.rst @@ -431,7 +431,7 @@ Now, to build our approximation, we first need to make sure that our singular values are ready for multiplication, so we build our ``Sigma`` matrix similarly to what we did before. The ``Sigma`` array must have dimensions ``(3, 768, 1024)``. 
In order to add the singular values to the diagonal of -``Sigma``, we will use the ``fill_diagonal`` function from NumPy, using each of +``Sigma``, we will use the `numpy.fill_diagonal` function from NumPy, using each of the 3 rows in ``s`` as the diagonal for each of the 3 matrices in ``Sigma``: :: From fe8da1b429f1436aeb4cba62aff8149875f0f722 Mon Sep 17 00:00:00 2001 From: Scian <65375075+hoony6134@users.noreply.github.com> Date: Wed, 21 Apr 2021 22:40:37 +0900 Subject: [PATCH 0960/1270] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9ed3e7742243..88c1151a0f89 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ comment on a relevant issue that is already open. Our preferred channels of communication are all public, but if you’d like to speak to us in private first, contact our community coordinators at numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for -an invite). +an invitation). We also have a biweekly community call, details of which are announced on the mailing list. You are very welcome to join. From 341316d5158477b06f877db60049b0995ab78128 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Thu, 22 Apr 2021 14:35:43 +0100 Subject: [PATCH 0961/1270] BUG: Prevent nan being used in percentile (gh-18831) Reject NaN as a percentile/quantile value. Previously NaNs could pass the range check `0 <= q <= 1`. closes #18830 --- numpy/lib/function_base.py | 5 ++--- numpy/lib/tests/test_function_base.py | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index af5a6e45cb14..0bb41c270c26 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -3957,11 +3957,10 @@ def _quantile_is_valid(q): # avoid expensive reductions, relevant for arrays with < O(1000) elements if q.ndim == 1 and q.size < 10: for i in range(q.size): - if q[i] < 0.0 or q[i] > 1.0: + if not (0.0 <= q[i] <= 1.0): return False else: - # faster than any() - if np.count_nonzero(q < 0.0) or np.count_nonzero(q > 1.0): + if not (np.all(0 <= q) and np.all(q <= 1)): return False return True diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 761ea83a3929..0b66ccf8c795 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2750,6 +2750,10 @@ def test_fraction(self): assert_equal(p, Fraction(7, 4)) assert_equal(type(p), Fraction) + p = np.percentile(x, [Fraction(50)]) + assert_equal(p, np.array([Fraction(7, 4)])) + assert_equal(type(p), np.ndarray) + def test_api(self): d = np.ones(5) np.percentile(d, 5, None, None, False) @@ -3144,6 +3148,16 @@ def test_nan_behavior(self): assert_equal(np.percentile( a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) + def test_nan_q(self): + # GH18830 + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], np.nan) + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], [np.nan]) + q = np.linspace(1.0, 99.0, 16) + q[0] = np.nan + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], q) class TestQuantile: # most of this is already tested by TestPercentile @@ -3180,6 +3194,14 @@ def test_fraction(self): assert_equal(q, Fraction(7, 4)) assert_equal(type(q), Fraction) + q = np.quantile(x, [Fraction(1, 2)]) + assert_equal(q, np.array([Fraction(7, 4)])) + assert_equal(type(q), 
np.ndarray) + + q = np.quantile(x, [[Fraction(1, 2)]]) + assert_equal(q, np.array([[Fraction(7, 4)]])) + assert_equal(type(q), np.ndarray) + # repeat with integral input but fractional quantile x = np.arange(8) assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) From 67cdf3d1de1947e3a51e2c5a56e5a898d713e588 Mon Sep 17 00:00:00 2001 From: Christina Hedges Date: Thu, 22 Apr 2021 09:18:19 -0700 Subject: [PATCH 0962/1270] Fix typo in random docs DOC Fix typo in random docs `Randomstate` -> `RandomState` --- doc/source/reference/random/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 69d597874a07..fc7743c64d0c 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -25,7 +25,7 @@ nep-0019-rng-policy.html>`_ for context on the updated random Numpy number routines. The legacy `RandomState` random number routines are still available, but limited to a single BitGenerator. See :ref:`new-or-different` for a complete list of improvements and differences from the legacy -``Randomstate``. +``RandomState``. For convenience and backward compatibility, a single `RandomState` instance's methods are imported into the numpy.random namespace, see From 57b483d3728fbf0f1a5c162a4ab0e657aab23669 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 Apr 2021 11:48:14 -0500 Subject: [PATCH 0963/1270] MAINT: Generalize and shorten the ufunc "trivially iterable" path This avoids the large macros designed for specific number of arguments and generalizes it to all ufuncs with a single output (Instead of only 1 and 2 operand input ufuncs.) That is not a big generalization, but the code actually shortens and while it is slightly more complex in umath itself, it avoids the fairly complex macros as well. The `==` fixes a case where the fast-path was previously not used even though it would be valid to use it. These are still used outside the ufunc machinery. --- .../core/src/common/lowlevel_strided_loops.h | 70 ---- numpy/core/src/umath/ufunc_object.c | 313 ++++++++---------- numpy/core/tests/test_mem_overlap.py | 2 +- 3 files changed, 146 insertions(+), 239 deletions(-) diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h index 1255e51ddf71..3df054b40727 100644 --- a/numpy/core/src/common/lowlevel_strided_loops.h +++ b/numpy/core/src/common/lowlevel_strided_loops.h @@ -647,25 +647,6 @@ npy_bswap8_unaligned(char * x) * // Create iterator, etc... * } * - * Here is example code for a pair of arrays: - * - * if (PyArray_TRIVIALLY_ITERABLE_PAIR(a1, a2)) { - * char *data1, *data2; - * npy_intp count, stride1, stride2; - * - * PyArray_PREPARE_TRIVIAL_PAIR_ITERATION(a1, a2, count, - * data1, data2, stride1, stride2); - * - * while (count--) { - * // Use the data1 and data2 pointers - * - * data1 += stride1; - * data2 += stride2; - * } - * } - * else { - * // Create iterator, etc... 
- * } */ /* @@ -776,16 +757,6 @@ PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(PyArrayObject *arr1, PyArrayObject *arr PyArray_STRIDE(arr, 0) : \ PyArray_ITEMSIZE(arr))); -#define PyArray_TRIVIALLY_ITERABLE_PAIR(arr1, arr2, arr1_read, arr2_read) ( \ - PyArray_TRIVIALLY_ITERABLE(arr1) && \ - (PyArray_NDIM(arr2) == 0 || \ - PyArray_EQUIVALENTLY_ITERABLE_BASE(arr1, arr2) || \ - (PyArray_NDIM(arr1) == 0 && \ - PyArray_TRIVIALLY_ITERABLE(arr2) \ - ) \ - ) && \ - PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(arr1, arr2, arr1_read, arr2_read) \ - ) #define PyArray_PREPARE_TRIVIAL_PAIR_ITERATION(arr1, arr2, \ count, \ data1, data2, \ @@ -799,45 +770,4 @@ PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(PyArrayObject *arr1, PyArrayObject *arr stride2 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size2, arr2); \ } -#define PyArray_TRIVIALLY_ITERABLE_TRIPLE(arr1, arr2, arr3, arr1_read, arr2_read, arr3_read) ( \ - PyArray_TRIVIALLY_ITERABLE(arr1) && \ - ((PyArray_NDIM(arr2) == 0 && \ - (PyArray_NDIM(arr3) == 0 || \ - PyArray_EQUIVALENTLY_ITERABLE_BASE(arr1, arr3) \ - ) \ - ) || \ - (PyArray_EQUIVALENTLY_ITERABLE_BASE(arr1, arr2) && \ - (PyArray_NDIM(arr3) == 0 || \ - PyArray_EQUIVALENTLY_ITERABLE_BASE(arr1, arr3) \ - ) \ - ) || \ - (PyArray_NDIM(arr1) == 0 && \ - PyArray_TRIVIALLY_ITERABLE(arr2) && \ - (PyArray_NDIM(arr3) == 0 || \ - PyArray_EQUIVALENTLY_ITERABLE_BASE(arr2, arr3) \ - ) \ - ) \ - ) && \ - PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(arr1, arr2, arr1_read, arr2_read) && \ - PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(arr1, arr3, arr1_read, arr3_read) && \ - PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(arr2, arr3, arr2_read, arr3_read) \ - ) - -#define PyArray_PREPARE_TRIVIAL_TRIPLE_ITERATION(arr1, arr2, arr3, \ - count, \ - data1, data2, data3, \ - stride1, stride2, stride3) { \ - npy_intp size1 = PyArray_SIZE(arr1); \ - npy_intp size2 = PyArray_SIZE(arr2); \ - npy_intp size3 = PyArray_SIZE(arr3); \ - count = ((size1 > size2) || size1 == 0) ? size1 : size2; \ - count = ((size3 > count) || size3 == 0) ? 
size3 : count; \ - data1 = PyArray_BYTES(arr1); \ - data2 = PyArray_BYTES(arr2); \ - data3 = PyArray_BYTES(arr3); \ - stride1 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size1, arr1); \ - stride2 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size2, arr2); \ - stride3 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size3, arr3); \ - } - #endif diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index f17dd1e61c1e..d29a7e01f4da 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -1033,65 +1033,6 @@ check_for_trivial_loop(PyUFuncObject *ufunc, return 1; } -static void -trivial_two_operand_loop(PyArrayObject **op, - PyUFuncGenericFunction innerloop, - void *innerloopdata) -{ - char *data[2]; - npy_intp count[2], stride[2]; - int needs_api; - NPY_BEGIN_THREADS_DEF; - - needs_api = PyDataType_REFCHK(PyArray_DESCR(op[0])) || - PyDataType_REFCHK(PyArray_DESCR(op[1])); - - PyArray_PREPARE_TRIVIAL_PAIR_ITERATION(op[0], op[1], - count[0], - data[0], data[1], - stride[0], stride[1]); - count[1] = count[0]; - NPY_UF_DBG_PRINT1("two operand loop count %d\n", (int)count[0]); - - if (!needs_api) { - NPY_BEGIN_THREADS_THRESHOLDED(count[0]); - } - - innerloop(data, count, stride, innerloopdata); - - NPY_END_THREADS; -} - -static void -trivial_three_operand_loop(PyArrayObject **op, - PyUFuncGenericFunction innerloop, - void *innerloopdata) -{ - char *data[3]; - npy_intp count[3], stride[3]; - int needs_api; - NPY_BEGIN_THREADS_DEF; - - needs_api = PyDataType_REFCHK(PyArray_DESCR(op[0])) || - PyDataType_REFCHK(PyArray_DESCR(op[1])) || - PyDataType_REFCHK(PyArray_DESCR(op[2])); - - PyArray_PREPARE_TRIVIAL_TRIPLE_ITERATION(op[0], op[1], op[2], - count[0], - data[0], data[1], data[2], - stride[0], stride[1], stride[2]); - count[1] = count[0]; - count[2] = count[0]; - NPY_UF_DBG_PRINT1("three operand loop count %d\n", (int)count[0]); - - if (!needs_api) { - NPY_BEGIN_THREADS_THRESHOLDED(count[0]); - } - - innerloop(data, count, stride, innerloopdata); - - NPY_END_THREADS; -} /* * Calls the given __array_prepare__ function on the operand *op, @@ -1165,6 +1106,145 @@ prepare_ufunc_output(PyUFuncObject *ufunc, return 0; } + +/* + * Check whether a trivial loop is possible and call the innerloop if it is. + * A trivial loop is defined as one where a single strided inner-loop call + * is possible. + * + * This function only supports a single output (due to the overlap check). + * It always accepts 0-D arrays and will broadcast them. The function will + * cannot broadcast any other array (as it requires a single stride). + * The function accepts all 1-D arrays, and N-D arrays that are either all + * C- or all F-contiguous. + * + * Returns -2 if a trivial loop is not possible, 0 on success and -1 on error. 
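In Python terms, the conditions listed in this comment cover operands like the following (a rough sketch of which inputs qualify, not an exact reproduction of the C checks)::

    import numpy as np

    a = np.arange(12.0).reshape(3, 4)   # C-contiguous: qualifies
    f = np.asfortranarray(a)            # F-contiguous: qualifies
    s = a[:, ::2]                       # non-contiguous 2-D: handled by the nditer path
    z = np.float64(2.0)                 # 0-d operand: broadcast with a zero stride

    a.flags.c_contiguous, f.flags.f_contiguous, s.flags.contiguous
    # (True, True, False)

    np.add(a, z)                        # 0-d second operand, single output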
+ */ +static NPY_INLINE int +try_trivial_single_output_loop(PyUFuncObject *ufunc, + PyArrayObject *op[], PyArray_Descr *dtypes[], + NPY_ORDER order, PyObject *arr_prep[], ufunc_full_args full_args, + PyUFuncGenericFunction innerloop, void *innerloopdata) +{ + int nin = ufunc->nin; + int nop = nin + 1; + assert(ufunc->nout == 1); + + /* The order of all N-D contiguous operands, can be fixed by `order` */ + int operation_order = 0; + if (order == NPY_CORDER) { + operation_order = NPY_ARRAY_C_CONTIGUOUS; + } + else if (order == NPY_FORTRANORDER) { + operation_order = NPY_ARRAY_F_CONTIGUOUS; + } + + int operation_ndim = 0; + npy_intp *operation_shape = NULL; + npy_intp fixed_strides[NPY_MAXARGS]; + + for (int iop = 0; iop < nop; iop++) { + if (op[iop] == NULL) { + /* The out argument may be NULL (and only that one); fill later */ + assert(iop == nin); + continue; + } + + int op_ndim = PyArray_NDIM(op[iop]); + + /* Special case 0-D since we can handle broadcasting using a 0-stride */ + if (op_ndim == 0) { + fixed_strides[iop] = 0; + continue; + } + + /* First non 0-D op: fix dimensions, shape (order is fixed later) */ + if (operation_ndim == 0) { + operation_ndim = op_ndim; + operation_shape = PyArray_SHAPE(op[iop]); + } + else if (op_ndim != operation_ndim) { + return -2; /* dimension mismatch (except 0-d ops) */ + } + else if (!PyArray_CompareLists( + operation_shape, PyArray_DIMS(op[iop]), op_ndim)) { + return -2; /* shape mismatch */ + } + + if (op_ndim == 1) { + fixed_strides[iop] = PyArray_STRIDES(op[iop])[0]; + } + else { + fixed_strides[iop] = PyArray_ITEMSIZE(op[iop]); /* contiguous */ + + /* This op must match the operation order (and be contiguous) */ + int op_order = (PyArray_FLAGS(op[iop]) & + (NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS)); + if (op_order == 0) { + return -2; /* N-dimensional op must be contiguous */ + } + else if (operation_order == 0) { + operation_order = op_order; /* op fixes order */ + } + else if (operation_order != op_order) { + return -2; + } + } + } + + if (op[nin] == NULL) { + Py_INCREF(dtypes[nin]); + op[nin] = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, + dtypes[nin], operation_ndim, operation_shape, + NULL, NULL, operation_order==NPY_ARRAY_F_CONTIGUOUS, NULL); + if (op[nin] == NULL) { + return -1; + } + fixed_strides[nin] = dtypes[nin]->elsize; + } + else { + /* If any input overlaps with the output, we use the full path. */ + for (int iop = 0; iop < nin; iop++) { + if (!PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK( + op[iop], op[nin], + PyArray_TRIVIALLY_ITERABLE_OP_READ, + PyArray_TRIVIALLY_ITERABLE_OP_NOREAD)) { + return -2; + } + } + } + + /* Call the __prepare_array__ if necessary */ + if (prepare_ufunc_output(ufunc, &op[nin], + arr_prep[0], full_args, 0) < 0) { + return -1; + } + + /* + * We can use the trivial (single inner-loop call) optimization + * and `fixed_strides` holds the strides for that call. 
+ */ + char *data[NPY_MAXARGS]; + npy_intp count = PyArray_MultiplyList(operation_shape, operation_ndim); + int needs_api = 0; + NPY_BEGIN_THREADS_DEF; + + for (int iop = 0; iop < nop; iop++) { + data[iop] = PyArray_BYTES(op[iop]); + needs_api |= PyDataType_REFCHK(dtypes[iop]); + } + + if (!needs_api) { + NPY_BEGIN_THREADS_THRESHOLDED(count); + } + + innerloop(data, &count, fixed_strides, innerloopdata); + + NPY_END_THREADS; + return 0; +} + + static int iterator_loop(PyUFuncObject *ufunc, PyArrayObject **op, @@ -1325,7 +1405,6 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc, ufunc_full_args full_args, npy_uint32 *op_flags) { - npy_intp nin = ufunc->nin, nout = ufunc->nout; PyUFuncGenericFunction innerloop; void *innerloopdata; int needs_api = 0; @@ -1336,113 +1415,12 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc, } /* First check for the trivial cases that don't need an iterator */ - if (trivial_loop_ok) { - if (nin == 1 && nout == 1) { - if (op[1] == NULL && - (order == NPY_ANYORDER || order == NPY_KEEPORDER) && - PyArray_TRIVIALLY_ITERABLE(op[0])) { - Py_INCREF(dtypes[1]); - op[1] = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - dtypes[1], - PyArray_NDIM(op[0]), - PyArray_DIMS(op[0]), - NULL, NULL, - PyArray_ISFORTRAN(op[0]) ? - NPY_ARRAY_F_CONTIGUOUS : 0, - NULL); - if (op[1] == NULL) { - return -1; - } - - /* Call the __prepare_array__ if necessary */ - if (prepare_ufunc_output(ufunc, &op[1], - arr_prep[0], full_args, 0) < 0) { - return -1; - } - - NPY_UF_DBG_PRINT("trivial 1 input with allocated output\n"); - trivial_two_operand_loop(op, innerloop, innerloopdata); - - return 0; - } - else if (op[1] != NULL && - PyArray_NDIM(op[1]) >= PyArray_NDIM(op[0]) && - PyArray_TRIVIALLY_ITERABLE_PAIR(op[0], op[1], - PyArray_TRIVIALLY_ITERABLE_OP_READ, - PyArray_TRIVIALLY_ITERABLE_OP_NOREAD)) { - - /* Call the __prepare_array__ if necessary */ - if (prepare_ufunc_output(ufunc, &op[1], - arr_prep[0], full_args, 0) < 0) { - return -1; - } - - NPY_UF_DBG_PRINT("trivial 1 input\n"); - trivial_two_operand_loop(op, innerloop, innerloopdata); - - return 0; - } - } - else if (nin == 2 && nout == 1) { - if (op[2] == NULL && - (order == NPY_ANYORDER || order == NPY_KEEPORDER) && - PyArray_TRIVIALLY_ITERABLE_PAIR(op[0], op[1], - PyArray_TRIVIALLY_ITERABLE_OP_READ, - PyArray_TRIVIALLY_ITERABLE_OP_READ)) { - PyArrayObject *tmp; - /* - * Have to choose the input with more dimensions to clone, as - * one of them could be a scalar. - */ - if (PyArray_NDIM(op[0]) >= PyArray_NDIM(op[1])) { - tmp = op[0]; - } - else { - tmp = op[1]; - } - Py_INCREF(dtypes[2]); - op[2] = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - dtypes[2], - PyArray_NDIM(tmp), - PyArray_DIMS(tmp), - NULL, NULL, - PyArray_ISFORTRAN(tmp) ? 
- NPY_ARRAY_F_CONTIGUOUS : 0, - NULL); - if (op[2] == NULL) { - return -1; - } - - /* Call the __prepare_array__ if necessary */ - if (prepare_ufunc_output(ufunc, &op[2], - arr_prep[0], full_args, 0) < 0) { - return -1; - } - - NPY_UF_DBG_PRINT("trivial 2 input with allocated output\n"); - trivial_three_operand_loop(op, innerloop, innerloopdata); - - return 0; - } - else if (op[2] != NULL && - PyArray_NDIM(op[2]) >= PyArray_NDIM(op[0]) && - PyArray_NDIM(op[2]) >= PyArray_NDIM(op[1]) && - PyArray_TRIVIALLY_ITERABLE_TRIPLE(op[0], op[1], op[2], - PyArray_TRIVIALLY_ITERABLE_OP_READ, - PyArray_TRIVIALLY_ITERABLE_OP_READ, - PyArray_TRIVIALLY_ITERABLE_OP_NOREAD)) { - - /* Call the __prepare_array__ if necessary */ - if (prepare_ufunc_output(ufunc, &op[2], - arr_prep[0], full_args, 0) < 0) { - return -1; - } - - NPY_UF_DBG_PRINT("trivial 2 input\n"); - trivial_three_operand_loop(op, innerloop, innerloopdata); - - return 0; - } + if (trivial_loop_ok && ufunc->nout == 1) { + int fast_path_result = try_trivial_single_output_loop(ufunc, + op, dtypes, order, arr_prep, full_args, + innerloop, innerloopdata); + if (fast_path_result != -2) { + return fast_path_result; } } @@ -1450,7 +1428,6 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc, * If no trivial loop matched, an iterator is required to * resolve broadcasting, etc */ - NPY_UF_DBG_PRINT("iterator loop\n"); if (iterator_loop(ufunc, op, dtypes, order, buffersize, arr_prep, full_args, diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py index 675613de4b03..ba73dad62c4c 100644 --- a/numpy/core/tests/test_mem_overlap.py +++ b/numpy/core/tests/test_mem_overlap.py @@ -792,7 +792,7 @@ def check(ufunc, a, ind, b=None): check(np.add, a, ind, a[25:75]) def test_unary_ufunc_1d_manual(self): - # Exercise branches in PyArray_EQUIVALENTLY_ITERABLE + # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`) def check(a, b): a_orig = a.copy() From 2d9e75f3a1dbe21de6b53cd2996e45054d3b86c5 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 22 Apr 2021 21:35:53 +0200 Subject: [PATCH 0964/1270] ENH, SIMD: Add support for dispatching C++ sources Same usage as the C dispatch-able sources except files extensions should be `.dispatcher.cpp` or `.dispatch.cxx` rather than `.dispatch.c` --- numpy/distutils/ccompiler_opt.py | 22 ++++++++++++++------- numpy/distutils/command/build_clib.py | 28 +++++++++++++++++++++++---- numpy/distutils/command/build_ext.py | 28 +++++++++++++++++++++++---- 3 files changed, 63 insertions(+), 15 deletions(-) diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index e6c720399143..aea9835c78e2 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -543,14 +543,14 @@ class _Distutils: def __init__(self, ccompiler): self._ccompiler = ccompiler - def dist_compile(self, sources, flags, **kwargs): + def dist_compile(self, sources, flags, ccompiler=None, **kwargs): """Wrap CCompiler.compile()""" assert(isinstance(sources, list)) assert(isinstance(flags, list)) flags = kwargs.pop("extra_postargs", []) + flags - return self._ccompiler.compile( - sources, extra_postargs=flags, **kwargs - ) + if not ccompiler: + ccompiler = self._ccompiler + return ccompiler.compile(sources, extra_postargs=flags, **kwargs) def dist_test(self, source, flags): """Return True if 'CCompiler.compile()' able to compile @@ -2143,7 +2143,7 @@ def cpu_dispatch_names(self): """ return self.parse_dispatch_names - def try_dispatch(self, sources, src_dir=None, **kwargs): + def 
try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): """ Compile one or more dispatch-able sources and generates object files, also generates abstract C config headers and macros that @@ -2166,6 +2166,11 @@ def try_dispatch(self, sources, src_dir=None, **kwargs): Path of parent directory for the generated headers and wrapped sources. If None(default) the files will generated in-place. + ccompiler: CCompiler + Distutils `CCompiler` instance to be used for compilation. + If None (default), the provided instance during the initialization + will be used instead. + **kwargs : any Arguments to pass on to the `CCompiler.compile()` @@ -2220,7 +2225,9 @@ def try_dispatch(self, sources, src_dir=None, **kwargs): # among them. objects = [] for flags, srcs in to_compile.items(): - objects += self.dist_compile(srcs, list(flags), **kwargs) + objects += self.dist_compile( + srcs, list(flags), ccompiler=ccompiler, **kwargs + ) return objects def generate_dispatch_header(self, header_path): @@ -2454,7 +2461,8 @@ def _wrap_target(self, output_dir, dispatch_src, target, nochange=False): return wrap_path def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False): - config_path = os.path.basename(dispatch_src).replace(".c", ".h") + config_path = os.path.basename(dispatch_src) + config_path = os.path.splitext(config_path)[0] + '.h' config_path = os.path.join(output_dir, config_path) # check if targets didn't change to avoid recompiling cache_hash = self.cache_hash(targets, has_baseline) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index 1b3004c2f099..a4f49b00eaa2 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -271,6 +271,7 @@ def build_a_library(self, build_info, lib_name, libraries): # filtering C dispatch-table sources when optimization is not disabled, # otherwise treated as normal sources. 
copt_c_sources = [] + copt_cxx_sources = [] copt_baseline_flags = [] copt_macros = [] if not self.disable_optimization: @@ -280,15 +281,34 @@ def build_a_library(self, build_info, lib_name, libraries): include_dirs.append(dispatch_hpath) copt_build_src = None if self.inplace else bsrc_dir - copt_c_sources = [ - c_sources.pop(c_sources.index(src)) - for src in c_sources[:] if src.endswith(".dispatch.c") - ] + for _srcs, _dst, _ext in ( + ((c_sources,), copt_c_sources, ('.dispatch.c',)), + ((c_sources, cxx_sources), copt_cxx_sources, + ('.dispatch.cpp', '.dispatch.cxx')) + ): + for _src in _srcs: + _dst += [ + _src.pop(_src.index(s)) + for s in _src[:] if s.endswith(_ext) + ] copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() else: copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) objects = [] + if copt_cxx_sources: + log.info("compiling C++ dispatch-able sources") + objects += self.compiler_opt.try_dispatch( + copt_c_sources, + output_dir=self.build_temp, + src_dir=copt_build_src, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs, + ccompiler=cxx_compiler + ) + if copt_c_sources: log.info("compiling C dispatch-able sources") objects += self.compiler_opt.try_dispatch(copt_c_sources, diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index 99c6be873683..91cb0d897021 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -418,6 +418,7 @@ def build_extension(self, ext): # filtering C dispatch-table sources when optimization is not disabled, # otherwise treated as normal sources. copt_c_sources = [] + copt_cxx_sources = [] copt_baseline_flags = [] copt_macros = [] if not self.disable_optimization: @@ -427,15 +428,34 @@ def build_extension(self, ext): include_dirs.append(dispatch_hpath) copt_build_src = None if self.inplace else bsrc_dir - copt_c_sources = [ - c_sources.pop(c_sources.index(src)) - for src in c_sources[:] if src.endswith(".dispatch.c") - ] + for _srcs, _dst, _ext in ( + ((c_sources,), copt_c_sources, ('.dispatch.c',)), + ((c_sources, cxx_sources), copt_cxx_sources, + ('.dispatch.cpp', '.dispatch.cxx')) + ): + for _src in _srcs: + _dst += [ + _src.pop(_src.index(s)) + for s in _src[:] if s.endswith(_ext) + ] copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() else: copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) c_objects = [] + if copt_cxx_sources: + log.info("compiling C++ dispatch-able sources") + c_objects += self.compiler_opt.try_dispatch( + copt_cxx_sources, + output_dir=output_dir, + src_dir=copt_build_src, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args, + ccompiler=cxx_compiler, + **kws + ) if copt_c_sources: log.info("compiling C dispatch-able sources") c_objects += self.compiler_opt.try_dispatch(copt_c_sources, From ec912112407ece5f6269040ffde0a5cffc840e12 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 9 Nov 2020 03:47:09 +0000 Subject: [PATCH 0965/1270] MAINT, Benchmark: print the supported CPU features during the run of performance tests --- benchmarks/benchmarks/__init__.py | 52 +++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py index e4193cf0582f..50dd0898c86d 100644 --- a/benchmarks/benchmarks/__init__.py +++ b/benchmarks/benchmarks/__init__.py @@ -1 +1,53 @@ from . 
import common +import sys +import os + +def show_cpu_features(): + from numpy.lib.utils import _opt_info + info = _opt_info() + info = "NumPy CPU features: " + (info if info else 'nothing enabled') + # ASV wrapping stdout & stderr, so we assume having a tty here + if 'SHELL' in os.environ and sys.platform != 'win32': + # to avoid the red color that imposed by ASV + print(f"\033[33m{info}\033[0m") + else: + print(info) + +def dirty_lock(lock_name, lock_on_count=1): + # this lock occurred before each round to avoid duplicate printing + if not hasattr(os, "getppid"): + return False + ppid = os.getppid() + if not ppid or ppid == os.getpid(): + # not sure if this gonna happen, but ASV run each round in + # a separate process so the lock should be based on the parent + # process id only + return False + lock_path = os.path.abspath(os.path.join( + os.path.dirname(__file__), "..", "env", lock_name) + ) + # ASV load the 'benchmark_dir' to discovering the available benchmarks + # the issue here is ASV doesn't capture any strings from stdout or stderr + # during this stage so we escape it and lock on the second increment + try: + with open(lock_path, 'a+') as f: + f.seek(0) + count, _ppid = (f.read().split() + [0, 0])[:2] + count, _ppid = int(count), int(_ppid) + if _ppid == ppid: + if count >= lock_on_count: + return True + count += 1 + else: + count = 0 + f.seek(0) + f.truncate() + f.write(f"{str(count)} {str(ppid)}") + except IOError: + pass + return False + +# FIXME: there's no official way to provide extra information to the test log +if not dirty_lock("print_cpu_features.lock"): + show_cpu_features() + From 472b152fdaad523b451410165f126b473f6de0df Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 10 Nov 2020 16:16:50 +0000 Subject: [PATCH 0966/1270] ENH: add new function `_opt_info()` to utils provides the optimization info of NumPy build --- benchmarks/benchmarks/__init__.py | 2 +- numpy/_pytesttester.py | 16 ++--------- numpy/lib/utils.py | 44 ++++++++++++++++++++++++------- 3 files changed, 38 insertions(+), 24 deletions(-) diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py index 50dd0898c86d..7b9f1d3e688d 100644 --- a/benchmarks/benchmarks/__init__.py +++ b/benchmarks/benchmarks/__init__.py @@ -47,7 +47,7 @@ def dirty_lock(lock_name, lock_on_count=1): pass return False + # FIXME: there's no official way to provide extra information to the test log if not dirty_lock("print_cpu_features.lock"): show_cpu_features() - diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 813e069a4702..acfaa1ca54a1 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -35,25 +35,13 @@ def _show_numpy_info(): - from numpy.core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ - ) import numpy as np print("NumPy version %s" % np.__version__) relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous print("NumPy relaxed strides checking option:", relaxed_strides) - - if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: - enabled_features = "nothing enabled" - else: - enabled_features = ' '.join(__cpu_baseline__) - for feature in __cpu_dispatch__: - if __cpu_features__[feature]: - enabled_features += " %s*" % feature - else: - enabled_features += " %s?" 
% feature - print("NumPy CPU features:", enabled_features) + info = np.lib.utils._opt_info() + print("NumPy CPU features: ", (info if info else 'nothing enabled')) diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 91e7cb8a3531..12a7cacdce87 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -197,20 +197,20 @@ def deprecate(*args, **kwargs): def deprecate_with_doc(msg): """ Deprecates a function and includes the deprecation in its docstring. - - This function is used as a decorator. It returns an object that can be - used to issue a DeprecationWarning, by passing the to-be decorated - function as argument, this adds warning to the to-be decorated function's + + This function is used as a decorator. It returns an object that can be + used to issue a DeprecationWarning, by passing the to-be decorated + function as argument, this adds warning to the to-be decorated function's docstring and returns the new function object. - + See Also -------- - deprecate : Decorate a function such that it issues a `DeprecationWarning` - + deprecate : Decorate a function such that it issues a `DeprecationWarning` + Parameters ---------- msg : str - Additional explanation of the deprecation. Displayed in the + Additional explanation of the deprecation. Displayed in the docstring after the warning. Returns @@ -218,7 +218,7 @@ def deprecate_with_doc(msg): obj : object """ - return _Deprecate(message=msg) + return _Deprecate(message=msg) #-------------------------------------------- @@ -1042,4 +1042,30 @@ def _median_nancheck(data, result, axis, out): result[n] = np.nan return result +def _opt_info(): + """ + Returns a string contains the supported CPU features by the current build. + + The string format can be explained as follows: + - dispatched features that are supported by the running machine + end with `*`. + - dispatched features that are "not" supported by the running machine + end with `?`. + - remained features are representing the baseline. + """ + from numpy.core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + + if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: + return '' + + enabled_features = ' '.join(__cpu_baseline__) + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + enabled_features += f" {feature}*" + else: + enabled_features += f" {feature}?" 
+
+    return enabled_features
 #-----------------------------------------------------------------------------

From d028598dfeaba4919cf21eeabe07747f58da4e0d Mon Sep 17 00:00:00 2001
From: Tania Allard
Date: Fri, 23 Apr 2021 11:31:07 +0100
Subject: [PATCH 0967/1270] DOC: Add Gitpod development documentation

---
 doc/source/dev/development_gitpod.rst         | 168 ++++++++++++++++++
 doc/source/dev/gitpod-imgs/NumPy-github.png   | Bin 0 -> 16266 bytes
 .../dev/gitpod-imgs/NumPy-gitpod-branches.png | Bin 0 -> 138394 bytes
 doc/source/dev/gitpod-imgs/github-gitpod.png  | Bin 0 -> 22137 bytes
 .../gitpod-edit-permissions-gh.png            | Bin 0 -> 23308 bytes
 .../dev/gitpod-imgs/gitpod-workspace.png      | Bin 0 -> 86700 bytes
 .../dev/gitpod-imgs/installing-gitpod-io.png  | Bin 0 -> 33186 bytes
 doc/source/dev/gitpod-imgs/rst-rendering.png  | Bin 0 -> 228437 bytes
 doc/source/dev/gitpod-imgs/vscode-rst.png     | Bin 0 -> 16443 bytes
 .../dev/gitpod-imgs/vscode-statusbar.png      | Bin 0 -> 4492 bytes
 doc/source/dev/index.rst                      |   2 +
 11 files changed, 170 insertions(+)
 create mode 100644 doc/source/dev/development_gitpod.rst
 create mode 100644 doc/source/dev/gitpod-imgs/NumPy-github.png
 create mode 100644 doc/source/dev/gitpod-imgs/NumPy-gitpod-branches.png
 create mode 100644 doc/source/dev/gitpod-imgs/github-gitpod.png
 create mode 100644 doc/source/dev/gitpod-imgs/gitpod-edit-permissions-gh.png
 create mode 100644 doc/source/dev/gitpod-imgs/gitpod-workspace.png
 create mode 100644 doc/source/dev/gitpod-imgs/installing-gitpod-io.png
 create mode 100644 doc/source/dev/gitpod-imgs/rst-rendering.png
 create mode 100644 doc/source/dev/gitpod-imgs/vscode-rst.png
 create mode 100644 doc/source/dev/gitpod-imgs/vscode-statusbar.png

diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst
new file mode 100644
index 000000000000..21b3b5de24cd
--- /dev/null
+++ b/doc/source/dev/development_gitpod.rst
@@ -0,0 +1,168 @@
+.. _development-gitpod:
+
+
+Using Gitpod for NumPy development
+=======================================================
+
+This section of the documentation will guide you through:
+
+* using Gitpod for your NumPy development environment
+* creating a personal fork of the NumPy repository on GitHub
+* a quick tour of Gitpod and VSCode
+* working on the NumPy documentation in Gitpod
+
+Gitpod
+-------
+
+`Gitpod`_ is an open-source platform for automated and ready-to-code development environments. It enables developers to describe their dev environment as code and start instant, fresh development environments for each new task directly from the browser. This reduces the need to install local development environments and deal with incompatible dependencies.
+
+Gitpod GitHub integration
+--------------------------
+
+To be able to use Gitpod, you will need to have the Gitpod app installed on your GitHub account, so if
+you do not have an account yet, you will need to create one first.
+
+Head over to the `Gitpod`_ website and click on the **Continue with GitHub** button. You will be redirected to the GitHub authentication page.
+You will then be asked to install the `Gitpod GitHub app `_.
+
+Make sure to select the **All repositories** access option to avoid issues with permissions later on. Then click on the green **Install** button.
+
+.. image:: ./gitpod-imgs/installing-gitpod-io.png
+   :alt: Gitpod repository access and installation screenshot
+
+This will install the necessary hooks for the integration.
+
+Forking the NumPy repository
+-----------------------------
+
+The best way to work on NumPy as a contributor is by making a fork of the repository first.
+
+#. Browse to the `NumPy repository on GitHub`_ and `create your own fork`_.
+
+#. Browse to your fork. Your fork will have a URL like https://github.com/melissawm/NumPy, except with your GitHub username in place of "melissawm".
+
+Starting Gitpod
+----------------
+Once you have authenticated to Gitpod through GitHub, you can install the `Gitpod browser extension `_ which will add a **Gitpod** button next to the **Code** button in the repository:
+
+.. image:: ./gitpod-imgs/NumPy-github.png
+   :alt: NumPy repository with Gitpod button screenshot
+
+#. If you installed the extension, you can click the **Gitpod** button to start a new workspace.
+#. Alternatively, if you do not want to install the browser extension, you can visit https://gitpod.io/#https://github.com/USERNAME/NumPy replacing ``USERNAME`` with your GitHub username.
+
+#. In both cases, this will open a new tab in your web browser and start building your development environment. Please note this can take a few minutes.
+
+#. Once the build is complete, you will be directed to your workspace, including VSCode and all the dependencies you need to work on NumPy. The first time you start your workspace, you will notice that there might be some actions running. This will ensure that you have a development version of NumPy installed and that the docs are being pre-built for you.
+
+#. Once the build is complete, you can test the build by entering::
+
+    python runtests.py -v
+
+``runtests.py`` is another script in the NumPy root directory. It runs a suite of tests that make sure NumPy is working as it should, and ``-v`` activates the ``--verbose`` option to show all the test output.
+
+Quick workspace tour
+---------------------
+Gitpod uses VSCode as the editor. If you have not used this editor before, you can check the Getting started `VSCode docs`_ to familiarise yourself with it.
+
+Your workspace will look similar to the image below:
+
+.. image:: ./gitpod-imgs/gitpod-workspace.png
+   :alt: Gitpod workspace screenshot
+
+.. note:: By default, VSCode initialises with a light theme. You can change to a dark theme with the keyboard shortcut :kbd:`Cmd-K Cmd-T` in Mac or :kbd:`Ctrl-K Ctrl-T` in Linux and Windows.
+
+We have marked some important sections in the editor:
+
+#. Your current Python interpreter - by default, this is ``numpy-dev`` and should be displayed in the status bar and on your terminal. You do not need to activate the conda environment as this will always be activated for you (a quick check is sketched just after this list).
+#. Your current branch is always displayed in the status bar. You can also use this button to change or create branches.
+#. GitHub Pull Requests extension - you can use this to work with Pull Requests from your workspace.
+#. Marketplace extensions - we have added some essential extensions to the NumPy Gitpod. Still, you can also install other extensions or syntax highlighting themes for your user, and these will be preserved for you.
+#. Your workspace directory - by default is ``/workspace/numpy`` **do not change this** as this is the only directory preserved in Gitpod.
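As a quick sanity check of the ``numpy-dev`` interpreter mentioned in the first item of the list above, the development build can also be queried directly from a Python prompt in the Gitpod terminal. This is only a sketch, not part of the patch itself; it assumes the ``numpy-dev`` conda environment is active and uses ``np.test()``, NumPy's bundled pytest-based runner::

    import numpy as np

    # A development build reports a version string containing ".dev0";
    # the exact value depends on the checkout.
    print(np.__version__)

    # Run a quick pass of the test suite; ``verbose=2`` roughly mirrors
    # the ``python runtests.py -v`` check shown earlier.
    np.test(verbose=2)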
+
+We have also pre-installed a few tools and VSCode extensions to help with the development experience:
+
+* `GitHub CLI `_
+* `VSCode rst extension `_
+* `VSCode Live server extension `_
+* `VSCode Gitlens extension `_
+* `VSCode autodocstrings extension `_
+* `VSCode Git Graph extension `_
+
+Development workflow
+-----------------------
+The :ref:`development-workflow` section of this documentation contains information regarding the NumPy development workflow. Make sure to check this before working on your contributions.
+
+When using Gitpod, git is pre-configured for you:
+
+#. You do not need to configure your git username and email, as this is done for you when you authenticate through GitHub. You can check the git configuration with the command ``git config --list`` in your terminal.
+#. As you started your workspace from your own NumPy fork, you will by default have both "upstream" and "origin" added as remotes. You can verify this by typing ``git remote`` on your terminal or by clicking on the **branch name** on the status bar (see image below).
+
+.. image:: ./gitpod-imgs/NumPy-gitpod-branches.png
+   :alt: Gitpod workspace branches plugin screenshot
+
+Rendering the NumPy documentation
+----------------------------------
+You can find the detailed documentation on how the documentation is rendered with Sphinx in the :ref:`howto-build-docs` section.
+
+The documentation is pre-built during your workspace initialization. Once this task is completed, you have two main options to render the documentation in Gitpod.
+
+Option 1: Using Liveserve
+***************************
+
+#. View the documentation in ``NumPy/doc/build/html``. You can start with "index.html" and browse, or you can jump straight to the file you're interested in.
+#. To see the rendered version of a page, you can right-click on the ``.html`` file and click on **Open with Live Serve**. Alternatively, you can open the file in the editor and click on the **Go live** button on the status bar.
+
+    .. image:: ./gitpod-imgs/vscode-statusbar.png
+       :alt: Gitpod workspace VSCode start live serve screenshot
+
+#. A simple browser will open to the right-hand side of the editor. We recommend closing it and clicking on the **Open in browser** button in the pop-up.
+#. To stop the server, click on the **Port: 5500** button on the status bar.
+
+Option 2: Using the rst extension
+***********************************
+
+A quick and easy way to see live changes in a ``.rst`` file as you work on it is to use the rst extension with docutils.
+
+.. note:: This will generate a simple live preview of the document without the ``html`` theme, and some backlinks might not be added correctly. But it is an easy and lightweight way to get instant feedback on your work.
+
+#. Open any of the source documentation files located in ``doc/source`` in the editor.
+#. Open the VSCode Command Palette with :kbd:`Cmd-Shift-P` in Mac or :kbd:`Ctrl-Shift-P` in Linux and Windows. Start typing "restructured" and choose either "Open preview" or "Open preview to the Side".
+
+    .. image:: ./gitpod-imgs/vscode-rst.png
+       :alt: Gitpod workspace VSCode open rst screenshot
+
+#. As you work on the document, you will see a live rendering of it in the editor.
+
+    .. image:: ./gitpod-imgs/rst-rendering.png
+       :alt: Gitpod workspace VSCode rst rendering screenshot
+
+If you want to see the final output with the ``html`` theme you will need to rebuild the docs with ``make html`` and use Live Serve as described in option 1.
+
+FAQs
+----
+
+#. How long is my Gitpod workspace kept for?
+    Your stopped workspace will be kept for 14 days and deleted afterwards if you do not use it.
+
+#. Can I come back to a previous workspace?
+    Yes, let's say you stepped away for a while and you want to carry on working on your NumPy contributions. You need to visit https://gitpod.io/workspaces and click on the workspace you want to spin up again. All your changes will be there as you last left them.
+
+#. Can I install additional VSCode extensions?
+    Absolutely! Any extensions you installed will be installed in your own workspace and preserved.
+
+#. I registered on Gitpod but I still cannot see a **Gitpod** button in my repositories.
+    Head to https://gitpod.io/integrations and make sure you are logged in. Hover over GitHub and click on the three buttons that appear on the right. Click on edit permissions and make sure you have ``user:email``, ``read:user``, and ``public_repo`` checked.
+    Click on **Update Permissions** and confirm the changes in the GitHub application page.
+
+    .. image:: ./gitpod-imgs/gitpod-edit-permissions-gh.png
+       :alt: Gitpod integrations - edit GH permissions screenshot
+
+#. How long does my workspace stay active if I'm not using it?
+    If you keep your workspace open in a browser tab but don't interact with it, it will shut down after 30 minutes. If you close the browser tab, it will shut down after 3 minutes.
+
+.. _Gitpod: https://www.gitpod.io/
+.. _NumPy repository on GitHub: https://github.com/NumPy/NumPy
+.. _create your own fork: https://help.github.com/en/articles/fork-a-repo
+.. _VSCode docs: https://code.visualstudio.com/docs/getstarted/tips-and-tricks
+
diff --git a/doc/source/dev/gitpod-imgs/NumPy-github.png b/doc/source/dev/gitpod-imgs/NumPy-github.png
new file mode 100644
index 0000000000000000000000000000000000000000..63326d12e0294f69ea34d6337c418af38182d207
GIT binary patch
literal 16266
[base85-encoded PNG data omitted: this and the following GIT binary patches only add the gitpod-imgs screenshots listed in the diffstat above]
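Stepping back from the Gitpod patch for a moment: the ``_opt_info()`` helper added in PATCH 0966 above is what both ``numpy._pytesttester`` and the benchmark suite use to print the CPU-feature banner. A minimal sketch of how that helper is meant to be consumed, mirroring the patched callers (the actual feature string depends on the build and on the machine it runs on)::

    from numpy.lib.utils import _opt_info

    info = _opt_info()
    # An empty string means the build was compiled with optimization disabled.
    # Otherwise, baseline features are listed plainly, and dispatched features
    # end in '*' (supported on this machine) or '?' (built but not supported).
    print("NumPy CPU features:", info if info else "nothing enabled")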
zzkQ>BE=H}t^#?H#4)y}!K+<6b843!YoAig{UqPe42!I!MRGmbQC2Kc^?WtUr7=f*S zrj}mxjn%WS;mQ|BO!3681(4I~9-uDx|MvCDNB+H{h)U^-=N%mE={coMCTC^nH}Z^x zOT34nb;4q+Bw&Z@rEviVoKS%qWum%;<2Mg6BP z(Gnk)+|FGI)-}#)Z6TgC#zau?R*V7-ISy9Y%o7CWsT%zXhyti0CA}z{ZiPfoR^3iQJp=o2{S;+;Ju-|T2=vNroEP*=?#gPE#4 z+0z8~liVQpeY8LP%xt>;nSl{)@MABsoj~BN!9AhqhwMPl6AH_sF8c4Vz~B8K)Y=&)jeOKZQONqRV(=1zsvjQ zsWegUn=$Nxe!-I#z`9vP zC}HF>>57ra?3F=HnRSXDtuYf!T2T1K7DXpT)F!?>#9q_Qqms+x)MJ9|YVdy(7|8y^ z)NHVg(raoGi>iO1yTMT-d&%$>?1j|whpJ%<_I%w*e zLWvCL9TpK1y)IQ1< zqHF3GTHxdJv3D+NH*{*QHaQ~+q$2C6qdYVpvdl*2tuHxJS@Oudj5o5{-w7_kqpmsq;_fsF*6QGaB}m>MmK zS-M6Rd{TDgQSvHKpC#X^!9i3@W@vWC`I}X83jo{k&=EaQ$pp0X4r~LB^S}_ zxZC2ie2KRzZB7WhV>4SPvv?u9P&cJKi-ciG-SI{mIDL8p8N2A zNk#i-oGA;`AD5`+iQ*pn&Gy;x6H1WaTa@pm+9W2938oYg>5Jw_2n!i$m+Q9j zm=BbqHS*ds8t3? z{)lq6YJLv;jo%kP7IX3AuxC&d5fHNjgCl7% zvJFPjeN;NS#c-^+6ASeurBf<3aBiD@z>jO7yhVYi63N`AsQ!_LM&!Y9B3QkY zdk?I$>x*S)<(&^z9!+EyEbLaI>XP5gg=I%@8mVs*$8l#zHReH$57i$crb*OB&_@lJ z+G76Z?~bl4X>PP4TmRPGY6Bs=v!vesXK?n&-I*?Q@=*LoSBn1PnB^|qv)A0Kl_vR(j7eHv8QcHSkHiAEj1ty{HBR@_YY7J4FE7;r338Ci zWKumiUbqnPJU;9YttV|`-mJf#u?68$bGV+amTu}*JN^uBt&ni+YndD3;wN+AwfXW3}&?MoZ+afqSIYF87Z%Yy!@ z{hdocxKKc4<@+Jh@`f4-{c&mn`x=DNn);ztQ3CnDF00;fB(4dj?Klyr@uoGYihfNH zJ)RYPUPmlv8vWr`@m#k29{W-kH6@dau|huFJ-?O~+ha3I-Z0e2tv!n7k-4?%0mY5J zwfI&A`-j^p)vI2q6Dd#9w{ypFa?&1F`OKhp8mPe|l|Quj(N9@#r`cqaBLZ@UNE!z*Sgu>+?;bb3 zPPF+&78#*t-e*&kh8a?4Ay{hP8&R@9OwQ7_p*G1)DYAf3Y{75gOfyO|bXkf-RkrQP!U>Vp}FY+{D%Uj_c#V**&XocV8VGnNV#!bP^4)VVi zHINvWct&wdbe+LL-qfYPjLXydnh_PzejoZA?8_|!Sg#o$J<9RQ?NwEg)e;bIt5e zUn&E)TZvG6~DX2pIxZZb*9V+zD_{8@<)X!@5(BmtptYnc-hHw597jsM22-4=O8Df-vckd zmr*FR=z}q8fBQ@@H-|>|wzdt;umV;C5OeJrM0u|9WJU+CKQAQU=B1h% zk?iQ(L;hCpy)_8zszk~TAC3!&w=#*b@>kYDsFDLvV?0w6k@*KRKIyakx0*4PzAcJx zPk61Sd!wbkWmGgf3-kG{Jgr%vf29b35!Y$XgMF?odIFcZKbRu# zKats4+n;F~f4j`3O!fnBn0w?oH9w`%pB8~N0tw|CdTtKePyccB?vwfh>x<6!RNwu8 z+e`Nz(Rnf9K}kE#QdqSs~(FqvRu-lRhw;t!4_+{9Ns+|r~E0PS+pS@T;>-%Nh) zlf8)N{7S0#45Ook*KDNpzKy0V`FTnz&DgP)=tCL4H1lDkCo=Y5Y8G%xv4+W=X2NSu z_-F@-A|1om;%h9M9@YRRai|c(D*ch_ukYUb-OPH35Th`EPD~r2_WDcUezot)AnP9? z6Nqa6CY&Z9sek3Ef2(bOzS#ez(}89C|6NM^m+&@^e;v6FOb$Kh`?546@W+onG% z;8VgsVq};R7;m`_c2Bx^>vI?1Vk=HX&@;Ivpj?k0|xYK zFvcVE1SIdiR0;#eXA%rR#dQ2q;S1sbpf+b9m?RjK$=*wyCTPG_g#grxIBJ~l6OhAb zQGcaGdq7zY=f+px2BOh!Fl7EIW(a#p&>I`+PsBfd0GTEd*nZT&8~pH^tG58*7g^Q) zb!ZR-{TldE+;8oOV^RFryPLLjk?l;9dnaoFryM850pk(FIleu($FA1O~moym2_V_~=Ib8W_IN1H`|P&19gNB%7

e0(0!8AyJ?up|jJvNmfve~~p=)ZRPL$R}5n zlj0$Hi@_GP`Z@C)P;l^mCpg0eHzv{ETJvZx>}k`JcVl$8WT#cRhKGmjV6+ubmEDTk zGdWAad^Tnh`zMau0H#6ol*0qD6tMfVJB`Lq@ajLy`ML4uA#X{puFW_aSKeR z^9gY=5Rnt|+*Daxi@f%!hyBw{$O2=!-4~+VB{yzy9fFg%rI@`=E1cL#7xyvSlBq_x zpl3rrdZQX;Gl=2;R2iysEP2V(xYL;naGq=?uy|`P%RZ{^FRMo5X4_iH;0MP1WERL} z#psxLur>DVGgQ1l?HFwy$&9U@ksfpEeAVK=X$>at$CUq>$HxfZ@>VyzTfqv ztKW9;@|1jU=HqaT8?jgc*45D6^gajf ze*-4$2!e8?_vkDrkqr zoo7mU8ta}LFn%(vOS?n=WTW%BvbXE>3{e4!v7*=90VV97=DIW1S`AQ8uRQ=9u+ z3*Xl6sjAeifvP01Lem_pV-9*bJ?k<19WY^?vC7}b)Vgx}R zH_2Pkm;8%H2ib63ou$C+hdhpa{*~WPwjD@6-8wZMbLC9-sisxuti8J(j_;?VJ+LrZ z$`qE}g^y2pkH`O(4(`yXoYylHJNxN7QvS$DJ(QC@ou#@VBmcsLRXsCF$P?IY z{k%^Pu71gEq~(FgebAg3S13v}mc{>=81lTf_lfXhur2!MkySLEC*6vL?DuqVe*oKa zkLcA!5mmPe@X9ztHod-yipZ#ig22|>UXn-p4-qCy#A#fHS=6hmQNLER+NFczvRo~z zVNU9rlzkqXa$_MHE*rZxhA_!hqK12-`0DeEZK64ja%JxyZDoFX6J5x6iJ>V@?JJp$ zXZ5;X&HJ{j#zU$X(48_G6|N^fda_;GkWu`G+5uoLW?v~BR> z(^}Wb^Z8v2kx+kA^&HLGF7Bl8&jWqilUlKBP)GVw}9Pf?S7# zK*(cez^Y3-M?MsK3r}9x)%3gJKT=Iol>dR}^RY1GML%zyE^2>rL{so^KLE0CU`~L1 zf{+gEprxH5>AbAY;AE6F{vzKUt1o{y!l=K-R75^-Z;2eZb3@!=Zf*)4`fVJ z4d7N?r|*-b#h^xLRs-&^@O#f2y$cY474*qNh#`;S#*7iw2A&O#<650F^AbSCn<^a@ zm47IJ6J5!{^Bg(tMC;9G#7H&1R?M5k?_|7kn-KcC+T}2x>Jh!ogOB}u_N-9d1Wu#X zv1*i4EHQWS@B{NX(}4Vp34<|+g}oa?S3bg|Tg(DEXwkHuY5b60fsWDVwHyD(g0;*{ zDz^J6?Y(8z+r@`?_$n|R>+Tvs-NpmIkjF8Entlr^qEI7W$!TkB)z~-rm2p8~R2h4m zSu|qE*j}tG2?V^$madHZ`Z73<$$*q2tu5_RE$%W3^d-FUAjeTfc3W?pZ1||>CU!ZO znG~^SfL6cSa`fv8F3CEHDOt6dhq2`g+KP(Zd4-D+G)UFOkklyM;WqEmd+V(t-Dxu8 zZWd$oJDkai+;o)L2-b)?!9x~1Nz&%iD&FUQYnYA6-_1!Yi*vY4*-IY|WIxW`&n&P( zv%*IEJMW0W;>33QNPYXp!FdkccU8PniKoXJB7#NynUpabE;=vEO!gApZkdT1iikks zbO)1sDL#yQLG2k2zzfnW__m1Ju#z6?_sLq9`Z!w8LaJwH$7 zBEPp>Tw2kic$VHDIW2GK;ATsRB6*nZXiINM$;6UPbQaaCD5A2;BjcyeN&XTsD5kG#@^K-Z4gv%9TjB zqh(+vyJg15{hCFbY}-5(FM@kt8w|~cTg`D9CH17w#=UAY-v$eCr~pSI#RKx%X3y2x zP-V8^&1C<{v9BL*sFnw9R~FeI>6f+x*+j75_ohkjzb1U4W3jAa+Mk42ThV#Y%`Ple z-U;9&WAfm3UDR|guYB6u+>HcbI`1cM4jv^Vp2zBNQi!0-S(gRqsNdr@R(`{Mosnj) z)0-^-b?a=BHwX)Q+8F3=7;M@t@50?&&x0th8XvoxD)O(#>hSjcjrT% zrR%%IKu%a5cgL%e>1Ac2@PH5|1wS%GDyO@UF=n@={8Cp}hB4`%F*|)}h4YCfDH7b% z_Ear;7N>@Uy2)NMs=daOP;tf=ItCH2KUAMWX(ucs#wf#@mYGhFSJ<`+zEV_G^t&Ca z+$EUZdmX0waRcmCVoDBYo4xDW5x_qG`1Kuqs>{^xu=yy|kgDH3eGK~#sZr|E{7y~Y zH(bo4R(MRe7Mv=&`EKyFKGo6F{bHbEr;Don%DLIdHON^wJEjDrj>-FIVoLa%5}tEX z^D@hmp|TePh??t{e<=jr%FkQv+v5MuRZ@V);XcG8=48eW*)^N)M5y%h{Q%1BRm$WK z;m=TXrK||KY`7_J$yViFj-3E&yK0=A0FVeD36_xUr6yWdd2W9|49h9O7D6LG70sEC zIm(=Y@(8l<_W|~lzHHk09kM1h+?;^Hf%I6dp1qPb`UTvR7f z9fpW}DwJkr@gb2!Pil6hKZeDkcS_V>1WJN_%j90Z{Zn>nga|SaCt%+MsXcp1`SM+} z!4P`_n|LJZb=*ON|GlpcFC9<6y)*oB_eAbfU1v#;5H;tPid!~+?Oe;{I`;GSX6tQ%W zBT@`p$(MhA%iWj703X2o;ft;D3@`;CZ2CRBz<<63m?io5!EGjk7Ws16%jJPcJwRZ5 z=J?e~^w%-uWM3YIVIyYsL~de^{@-W7wKN9~jC-odVDeSaKqRP3Y2#h|(8);Jlur+y zW(;D1=J+(Ig$1^N0SJTU0>{NK@GNn~7>R!DG&Em&wy&C<$m%}|h#7+%&>sM$Wxyb{ z)=3C3X!HOllFQL8%mx>6ILiGvS$Mw8sd%B&uToj%=1cicjiAsV5h2*(I{?WO;!31K zjGx{%1Hv ziiexerC5Ax(E}Ebne3)bGE{|d-exeK28A_3W2p z_>UBtEm@%7eR@xiarRVjO^r%NCKOLi`$XdyJp?wXtIoF{Md_PH$JZHx`q`)-9h++? zud{kQ1OooTK&c3Ldtx?UfL;Jw4{&F}dRFgkR-|6?*YZ;R<~OD1GI-;p(f=|SwN`UM zB1xSx56-)Y|G}EnYu{${rSbQ!#ffeVYv3HrBswGlY{MO8d9rh@`KzGjhs$6}FXiR) zmbts3*bB6h&NU^L9bO0L&0fPx*6^ueC2?!wM+E~2NY+%i%FIWnw z7|@}=M}1cu76Ha5*b65d#+^RkuRHmJm6#}FhKj34-_}RTf_EWLz`^GU2NBpfq?%XT zqDUqwW6wdf=rg^|WT2L)_M+eU8m?@G}r=X~2ByV&ME&M#&cH+_0lf-fB z`HCU=?mC8)vLT>>($IG^aTeSPGYDns6etT#V_Id&DYP!z!o;;`hw^Q)n!=M~)(7Gn znF|HfJTig6asX}%=?)RG;bNfgpi~72hn@Smd0ljgEY^L%v*uw5qMVsP{{Y;iOQ6h! 
zw$;~s7T20fF}s&ZO^7lMp#6HsUr?oDQG0gbk2v zlVs4H)M(#%$HZltrfJ}>eQJOE``n}QmzE59IU#5}Wy_RyTcz0@o1cg{sjsigB^h33 zDlanx9|1h+s6#U8K+nZo^X5?I1cYpdI5_fV0d)dMoUeR=nO7jXzqLyyYi!p<3Ox74*DMXflZQ;P53TPfNr9w zL;9O_cOZB@nbi~Od%57+9u>o!EufYMAB;p zY3iyknmBSZN?$Ui!amZ6?_;*M5dnugZ#Z;A|*q+twj>IiCg0o_f;3(v*mi=o<}_@$hCO)@yq* zd0~Q${PJ5!QuW#3Z^ZWwc}<{7%@hx4CffKl$-+1ikrbQ6Br#t3P#qrl(W=hZ(JpS9 zv#B5pw=svRXpHN!te6eKK2B#N#bqRFj2xq@5;lu-gss-|ppYfaK|)`ehJ0DC{iCT$P zN=1&_rs#YHPXihuLXLP3DD;IWqaVkNaWFn7mQ%(_;W}OuxG`f%DNfXBO##Dgu#e>du5^NswUuk{`oacy#OlG<& zJjB0Y4Vig6a?M?_&!Z3zJ@FYX0f~GaqRWO#OLkm0mFo39rRF+5f}Z1;{_eZNUv68X zcgD=|AI9kE%^U{SwcM)+@{=pa_L%Yw(8{0kQy$tswP{-MxJ>GL&lKPKs>Qt>t^l3U zH+yemHj|gOLr#7@IvfOQ`vX*wv_$pOiJr%Jrr6Lzp9U(Q=<8yPZOkN2{V4hOp*eX~ zs$~qA>9!lyIT>siUmXfyV+i-X7;_VjtgoR4weG|ctDRyVWx1P9*RIa-{ES))FUg!jfJI6b3dy?LVEY5M>=BT~U15^)%l`>=u#l*|J9iY7WN zH+STYO;mN9ss+$QnQa@Mw@zM^LADSmYl&cRmV zMG-&rwPzeokSCi(# zw>e2nugzfz)}PMhA}+TH-5EAU`NpPU9oC4WxyqgMgATWUODTyr`D{Wvl__!=22N{I5j6ew8Ck=(Gi)xcQE;VazDjt6VtWiUG-B z`4YGYeAU{o-ha&-9N>J2m(%+mt6r?9i~EDnFt3WiHt83Gz6s!LTm6+ifJ3{S3(V{o z{DJtCNXsil8hsJ^eO&SR1u5H*lrrGr8S@b3#Kj7l8N+Uus}(qp*xA4Hi24NqGt{JK zJUsWBmZIL`lz!@co}yhW|2eL^oSczAIe4KW48B}5StqOLA^ya?Vjt7HWwYEy)9rZU z9TPW-TC4x;#)YVajjWxmL8H~Z#B>cSb(s%S$P~c&l8K@mcT{ZJf8iuDWyAf~ardGb z$`8P~@@lL5X;M0x)2TSq?ESX+G7>rJkwh#s+et5UkTGd7 zSF3$mUtv&(@v5oOhmN?n=`JI}TKpCyGQ#)FQAKk* zr<=rHrPZ6k8mkp4mCRMw3lo?JM|tJKC9w@|*_mtuUZFD=-I-E+RkaCOw%yUS})P^^S zDTHPPY;B|SRQtFLP3zb#9I+ zR8L6;}>Nt%B*33g8vSzIlunU(@$I zL5(%+4Jxg2!R@Q`71<9$CMxkd-=i9F<>^IJMl)${c?vnz-{H;aU-@ZOqjQGD|jrAPT_=$2!uI|q_ zGd2ObP%=pesT%bzCY@L*lxHVcvg(E0UCg#D?%t-z58vN ztAd?=O!xaj%t`vBruFT^QApNj7P*2pQ7`;xPIzxsXi(}$n=#AOgwSYH&oF8e?hlrt z1w21NMz>pzec^c1)D|uAa8?c+QL%u+#mfk}|92}E4<68d81;PD0V9Zqq3SMM`doD= zJ0guUNXK=)??R?mwL+1u@x+{Cf-N?4pe3QeVna$aBb#PZPLPmyJaXH|bWX6$bbaHa zalQ}(?$Hbz2{(b+oqdWjLW^WZuhuTFk`%SaBL|FU1kniR7`*a$_z9LH>fsPo1gyD8B+0nDtQc6(A9_TM= zWss>8-k9*?I+)<*MrxuMhrf0-m)b16#fTMur5BX#qZ~4^a~c1S?=D}og6)f|h}r}s z_C53wasL;Q6R{)=E?}5Cx^_2=t&vgT`f3Mb-a-zq-pM3{!QXtXGfdBZyQWW~lo{GW zi3u_J-8&aH5?XyfTb*glhxy|zmj7w<|4umZ*&D6?|3MGo`ETvx`J$2Im5b+aj&qgH ze_AAeTDiZ)`av~@-jJbRHx>dYp9w!}D1Qt(q*EBh;ThpN5T(M_;u-KTYau-y=CA*! z#j@nL_zIm2>_3;DH5>p2;0vg$K`9-*&z=P2i47nWFklXPp5b2t&}Q=0iHqGj3qcR^ zfk?@uY&{UMY3qhuKjXH_f-5bbTm1G0fnTcmuV2DMd#b8@p6CJavpL zH#j+dRT^<&K@7%QAi;3wSImZgTsb`c8^h@}CAaR5{`KwXVfy`na~4*gL35W71et&e z6If2~z6Ueap_H<#7{}MIx&Mc^@z1|LkCV$AZ z3L&ulOgiQ}e+70Sfh#9qyMAo(8VFmP$@m+(r=%tM~7DFFr#`&_z856zUKooJ$#F0q|*#{E@_+ z!G~N2|BXOrsTosqGA&`ONCMX+La-(L>oB7Z$Lam~;r1sz)Da-B1q}(>07yfPB@sj= zt~6wwcGItXIPC_p1vVff*MVWfVdL-Xho8# zr@$TIiU)9oTY$~j!CwEEm;U?(Unt-)UO6&uPu{g)VYwm?WQ-veR|d<(nhO!e=-^7D z^s`ru!B;K@n?ZQbamNQWjZjaVxz7c>;g}~2vF1wK7~#E#=&%u)w;@ySI(&kUlZ^pj zarf$mvvyZ}-rV1BT9~3O>f~?(BuMBrhiimH_*!&s-Mv90ozZO5J{$;2*se98XX%^+ ztYmaptNp`a6(>YpOB?YEpV%1)FRV-v7E?GQ>|u?(HqihiKW`bJfbeCpgH&1->Sn+2mzm;fK}hYDYGBID7V4LlJd#~^_^s9 zip^v_k?y=7^FSZs3~Y_8%k(4ff)zp;JZ$6&m^%zc(El8pfuwDh69C+no|Ri@&4 zz*Ll~!>cm0CS=$7RxeG`+tduunz%7(YdbvKu1IAJQPj!Gk>?(p_g$uW#viGjrFS)N}JJA4YbqU zw;HOwzr8VM{Dym~Tm=OS1w;%x)T1|HoQKdr+lfX~W7bnHxmNS8kWA`_3-Ke}o&Jm7gy zj|N}+jmi=I0J7cPIV1)n%(_gF(eU;Gy|Y?Jj4m4{U^*}Q?A7z?5-)}X5kX3TE7q^c zAp!qV`9+^g<0->|*6PBCr&}Q5f4Rk3ZJ`Q+kW(8WT;6UF-lOU7nm8wH?p#lq51Cxb zu$kMC>#q}a{*uJPCA#7YAd*BYA?Nq)Pk6l<{<+A(6*9r$Uu%SYZeJ{0MRFl7?_+=c z5+?^MDqxy^p;AeNS4+gW{-WrGCB(zlW(9(CzlC(lrq5pZV)Ei{U%tu}@$#35p3UUp z{^>)!ja`1_9b-m ztU0wU=ZiS3;`g1=zM6yT_P1&421BT7^BGuk^I}!Fmoi(iE-_qDqZW#%*4V&GRtJp3 zPwoVq1%Er^3VPziQbF6I8~IDM#GP0D3#b%_t&&OCujNBgczyGii0jgXEnfquAu5lP z5Z$}PN|vxmf(>lkM9>UflKI7gk{dxa;`0SpzbA)1m(chR)o@B=eWW!rr0zZaS&6y! 
z3e?uaiGDxdPR9^DB`C@C`mujP2?%c5z64MMQdOYG$KSmmo&6!tPC|%nX6D|b7Vm4y z`5DokS?s1%*if7T`KnMM?#r#^e96>b6nHzzR-Ik>uN6ufr=8OYm)5%D3q)yI-BdK; zW%(F6cDo6^Ks9yKf?g_4-hD#T4P3xiERPchqu``IsW!poviV0j)rYoqo_l(p@BUfK zIKW!wK5}gL_7~u*?g~P3*&!;}xx16G$v9ib{b(%Hn5fg@(_G}e7UIyO8%5Gxp6R0= zg`i$>sUEwdeYAAX;4Jdy)WWuN1njdp`6n80&ZR?jOvn*V*h`&DLst$;r^`1o6dp|K z-rj%P?{jnJxPp9>*vG!{)$V2aG6*bB^2GM*aaP(C*y zn&yDCI2=$$8I~kHP4PXvW6nx@Q^Bquzvk$=2Rc9d1>4D@Q>Ii^goi%O?jxKq{+=G zlNUkVfuEWFnXxN4k0>*g%#<(8QLxLAeM6qOV##!!K-I$Jp#q%`^Vr0Fc6NeA8U|FL zc0G?nTJN_qL`DDIm(ek;Elche&8wt_u)LB3_Ki2?x%eUaqjEAB(WWf@1uGrRjn(xgi7e%Iz`lv5JkVRY}7 z=?tm<0>tOd$&41q&&A^B$@icJo{3Uvq>`%Qpo#w2bZ!CS$%irbns`l5h(BWXNiQ6a z>H-&McQMC*D6A{tP@Pwu|HjW!T-7lu$rz!u*bPQqkrsc$y|e;n2IbEbYVYo&lgH$9 zP0-T3MozDF9F*;AQts!%IG{ufXN#GZhl4G&lW*n#7_nVYx87KrY87BR4Dws-;`kGkX4_jthx$MC z)}SdP`J*oGweme>&l#%s zoIOM-pDABNjl{*&tCJ^dmZ=r1l_#7g^F6UcCnIb(x5XMlqxW(w6-UC-T&ztqyaL>{ zFdY=It==61jg;n;OI_&dPnou@pFfLiwRXJ2fn8=EPjrVp9T-hJ893Rmj$_FBs!gue zkb|sj6$Q^GHbsT16t0mG=K=&*PUb+>nP?3ndgYy|SPjBCAkN}yy^6Sf6yiC#^XNP% z=@=CV@z&kNIkMq{sX~|W9h_{kMoJs#@H><(ZN)MX<%iiW@ucJR)jzxfSiIjG3kJ#< zUQ^2O+4H?OeF6RIxxR;2{)w?o%LTj{u?knVq+`=1qzli>m^b;nM38JhpUG!a38P#1 z|HDLZI&bf1SBep{nmxhgV6_wQ0#F3-dA?I53qljRe#x`7kn$h-9&T&$GWLP%E*Fz! zv;Qk)2gd#l#jQK$@8Z78Nksp1U)}(sD|X5I4^B*P2A3PXpUYtioV;Lplc-3uBS+2( z?n8C!hp%`|4AlKVRLEyq<7xmlxL_k=uJ>OqT?eNnd*HgLUHbTaI<>fErgQ4HUy;B} zF{V3so^85&8w8EqC+PAkbDH$if2Iw^an;~6ESDtuhjq}wi(}UctAdeiI>q!5Jh=qt z_EdV@=q+$Rv4+NWw$Qk1R6;Kvok*zIL`pjlP&eBW#J?doNH<0CH+56P#hqOKsV+%)# zMh~6Z5ttF{p_ECv#qd%DDRG9i!M-#6i=ON#y{N@qz$6A0+1>imz7|%xOz0gX_=7M!%Uc)CgPV)2{IIY%B{G6e{o2#GnQew` zrw4<0icn_Dws(HCH=GSKn=Z(K?;y10ZV-u$RtL6anjPY`^CSBMG^BNDelVDq*%J?D zCH~IlvS++b{$RY=tS|2@%TUihni`3d#4ShAZ(#HZfFnHbcmH2JhK7*u) zzcv3K@_jS;JyS+d%eCh(`4$~TGo>*<{_j`ARP)T~ia;{_&rg1LH~v1Y!HK|>_28d> z_xXQsxBuW<|MPa3SNOjYIR5+7 zbJ8z|f1mp*(k**_JT**iAj<-NK#%)h_4#SGV2=Tr^=&a72~c0! zP5v#96#Lr4KA`G4}$FIs}xF51#DQ)wd&HB6`mXw$!QI|pn#dH-(k?z~ve*NXp$H{hItP>RIQ9Jj51 zrHux_B)MQzMb41`BrUvhi>cp7`4^A`@GyftKoeHP?r|FS?KKdCNsEadtfU~)Tj$m@ z+X3BuOkiz4?2UgCjq{O^pxG&Cz3+nyaV-GUgPlCX<&1#lAbRmTJojLTftOa~T8VNK zrzVx|C=}|5+@w3el%*}nodQD%MW!AzEYiJfm>X2DK&T54Es?3%oRbdope8?M(X|I? 
zlKHa6;UJh$>I+)WM2uh(tF&j8_%y#?1hU`Pz%pxT-a_o5EeCk-B`~~mKFUTH&!%7# zh!B`#CCyT$!T17T7r@AoHK?sn@|En_(c%pPQ_;TrdCNGI!hR#T&r%QJ$Y$F|x=u5z z6OwGCD2P}OR1h}3vTXn$j@IU;12RRc93omULgvcxF@_a+^P{sF!|~p@=uv`gT&jOM=l-Xumo(T&Ieqx33U+f$_uy)7t|# z3w3)Ou5H?p=72n4C-#7!;K1QfyCqS_Bdw#X*u%M}69DDEzs1AssY=XWDPMM<^zXkv zBma^Hm#kdp5ThFty!T7>Hk#F0p0{tT|Fs#`L7Ca0xL^?ea5lV_fLFf=X1fl#(@QVC z@!eR%dO8)s6(OLepanI;z(QgV9`=%8ZVpCjg3a$LQDUtvH}Jj#4oRc*Stv+>QL3=( zA#=Evmeei7HUza#5yB-VE@Ze8NR9iH1tvo~D*DWd+~?VWrqIMwpXZkYU$^aJm>e+e zQ(OCPBUyCi8!&IR9#)Ls7Kn(mb$(mTFl_|sSp(hyFe!$&KA^Vehr}_NM4o^5Y(}k;(WVt45YdV;|q24;5K$0y4?(^YC{l-9It1`>him578>sS z4Xagw0OL+_^`LtK{4C?bx7$L$?^FpORjbenL0&UZ4Xd!Z>49B`RMya^Uh^%^^>jU} zw1p!=>)BgrvQkUn+^le7pVp+Jv*ON-dG(sKdC1qnn&u|QABj#kcBXXLS@@@=*F#9Qj3yc#PPh2;r+2$wdSiAxy^recdQ{Y7QlaCAO1 zVah1v0ZCElFpOdh=b&Cx4sr=(0k{|Eq8PTmjn!aOD7-X3A9)=56bk1S8$lQ(qBvs)>7m);=i6>pqkuS_9ydaX1rCxwemFIu?tFJpAiE@<~F-HyU4o%5-hCnQ8`B zAjt#M{Bma%Re=OD30UoH3s(ZIyC$(P4WA3g7vXHC`*^^JGzz4ecd8FVvKe6#PbF$u zfCb9r`m;Wvi8|C;L#h9aYYxftA#0wrTnW7aVva?Z+ShZ8ZKF2YALm>Htv!<2?uztW zm)?fjzrK(?sK%zVc8AyxO^O(~B`dWfDG8X)fEw+0ufLd$$R(r$|K;0gJp$*&f`&b- z?QR~jZ};?T38;zuVh^=I3dQLcN_6akXtabH-KhQ2WZF`nha91r+i{#tA@TJkCLysoP0Cyzh^})| zHUA_2r}%x7F9C$=(SCH@^Qt8SQq}dv4Ph$&MGETdzWF<}hceM-4>h!NRUwZ+8fJRT zq=i<4RNq)cnIpzY-gr`Za0Zgg@kqtB~r`NM?dt~1-_HX7lo;9EiHSmJAPN+(= zJ1g{Up*v58EcERz!`t#mWyBtpIHFcP*XTP%LV6xhd63l*Reg~2wg*YS+3<%Q^FiP6 z>&TXI2;4RTLS)GsP9og_c7xxGqJn{LQY27qp$|bGM%4A z9a;S(cTu3D-X8K#ZMBk6M0`Zb1yEsW zB>F#1{*F}}o=44=8UfeY4s81Tw0)?1pkt#P%a-=GGXSjL^OYCHoi-#ub;H-VSM8z* zwc64VCRXi0lFLh4TY#V%{53~$ZT8=sjMeWhndm4=gM7g!G{VY)uMIe_Ihe9)qt67o z%e^1#XbvS6Rn+H}R33xGKsOHhrOn-?J9+R<4ok))8<|2M!@s%!F#iGq_hjYi7o~2` zImkre`>^BX3*r;L*=ogK*Y$r0IUQD(hhL}naAGgwuu4! z60Y%biK0Gq8esY_&+kes+SAUgq*I|VNu{z+3NeC#3e4Bv?HPPMj2Dp%C=u~V78n1p zUyZj8+LeX)Vf6E3C5-ofGIIs-8f9^5MkuOZR;S~=G}%_{Rc4qGJlu(RuHQUdGjJmJ zVT&S-c86w5NUeLMNli}>GtZnZ^XL5mKi4yyx3S_ykpWu%W)^6;Y|FmrC&}7}>HW z^47m|<7{n=N0JCciLu^#cqJ1#Ir<4ryy;Gw{D7)-WjejO9t+n&qWanO_&#IQbhMd3 z>`92TvD-P_)TK;>Ro8c<3?0tAgGj*A$fG-vI7%sj z$=L17B1XFl%G#41R8c3EgK{pv6PuvDO&dWQWqCgtz?4ozbo2;sN@5=G8BiV22Anqz z4;ni9OBI==Z|cYb_Yfl}7Rgh!)G}-tRTB<^r*W?%syTB8s}R{owp><{p6Qb}QukD& zgah^B?{2+LD6ut^%37uQk_N;QSh0|=kJVLiaGheQV2^6^KXZ|+Qy9OHl&&vFSimm0 zm4^8TZu9U)g*}OChxwSA|%1<<5^+#D=e4;3L*A&njIFG zOwu(goBkl(O?1C_YZ*dlH+Gd&xk!2Or*Ldt9IsJG-9dmDU8ea{ys4_e!5yy&=j7ZE zLZU6jN{16AAyt*Tf;yao%c$BpfsSXtHs#Y6{F+HfkP-B)&JC`C+p? 
z0RfD4GBAfg;nL(>^>@SS2>G&uJWY9O8MUavxNNv4+|On0znapJ-)Zf3r;-KrGT`g; zq!uD8`}k7tp-i_!CS%aL`>S5yEOnT>Ebn}e)Zi)hyOhjBbqf+!@9J?1ck^fHL7$?R z9kzsUjCXn1LjVJXYD+nP+Dr5dhMw& zJ;ak3f-j5YzWfCLy%{jF&j@M5Cq44KH(9|Q9cF%>NtHunYtkEdo!1jE%eJ^^;HW)clUq3%|;tERwDn$aeyNuVI?MxCjQg|7N~V zZAV`QKDezA4-{?f+e%a zFH_~HB#@V^`uaZ?-afyEfdGfdl5i!GQ(*RDG-LgwVX@pzz|?>FRh;*~I1fE4m4xw>v z0S|+kebeSA)64h%r;kToQ;Z7vPt9LC_1d={;^~n^1HT2N#Q}IF#h-8Jx<{OggCAFw zwvmL*i{Y|Lh{o0WD9`WH(4O)$L;qiQ{V(hglpx6#NuWSdIDj-}4``-)0475yj{KlY zzDPy_64eOC6P6zaxR2SGS{D#wFzx{|K?^VpMfmI~Bmh-PYvq6pp3n+FaIPLL^mMW= z13T_C3G*c&ZgB9lAiYMqTmUn|VYbnQ`n?>8rzv~=_N;Zt7fAMje?c~Q^r&w#mbB;{W-BT!g@5%8G?_cMFcy79iogfbg z2-|}Pa4-)5S0wqT+2|R_9<3sz)klhcJW$7x&ZdNR!t8YTHzCM+{bG?)S`uv)z`k(HDE zZ&*h3e7h!WcJ+OlKR|ri>w9SYPC5rDsf2j?ejq`a5+Dm%j}isWu=(mfK!8L{z;iDF z4Nov2CQwWPyT?`V_rXVYKjHP;9!L)djDY=M0An#*H*_RbXv_r}C$}3ei>Ly0hJx=8 zI%CexEbslf!W4NmZf!qqMnljbIq;hpYN0w z0Ej@##K8gWN4x#(+Y2H>q#-Oo%D7Rn1_RnU7#JQ$aPLZ|<0V!-JVAnN?T+8xu}Ceg zasz}){oVi#u>(qIA|L!RHrM6#jwovC1hDwLhG~$jk_w%Bwg)%4#*ddz*K^c%vF0nA ze7jIMiye0%uxw|*eu+I!B*8dR=V|NY62o%8E-r)c8u+e4>LX z)GM03;b3rU>h+YTCW3(VRF%rGR(@PZfRpLC`7?b#CmLJg((G?nhcQa0gL21qQ09&x zMy*yP8>E9A<|(E=OpwQq!$3=&5?(xDGf)&vvHP^!oel=KOxbQ$gb~AUdd0{APh^xU{pUH-Or+D}y*vU|b-HzX-}NMz6+5 zCk{nZ077sfi}ahBztgew7#;xi#<2DQQaJ4;)LDhU?|A z88nvca0>g7E^EppH(FJ!Om z68&y6EFFnm+G4VFF8PvLC#n@>%97s}!fa%6~e{BFNRNI9d73OG{3>x$UMvZl6jSWH3!PBoM4dY zO@}Mm0HPE75CN$$0`d@{-2>DWm>)Vez9RnqcLTpB-iNqzEox_ zN6=1vLSr-)M2~Ky4Nu6{Y0d@~@{mJV9pePc0jN7Dh3->?29?_K)JNnx zNoJtEU+^ovvpovStw%^E6qw~<`A_A!)=TgaIl?}HjKq6)O%llkWAA;a{23*w73yl! z2#k)zC{`do;zciZOO%@)CX?pcKIhDr{8s0pWFci@h(`>MA@cI+e#{!f@LQK4!~5Pg z{nEp~kj;G?O@uQ)o5~wk=Vez8*}tn9J6P0wSQ?cCRV_R;tvi`8hnEDZ1oz;VZ_Y%n zs6K-Y-?zQ}=l2VLc5^M#{9wD+3el|E^eSJfQ)H&2iX?G4aDZEB?P9-p1Q|A3 z|H1&YB$T|*tcpdPY)vQ186H}Bsujieo8AiSXQzJvZq3*1m8xk~bktwZOneG&Do_kT zd(B)VQfZ4N0~Yi?gywOjh|-A;ZW(+sNOyVD08kvNL4&#&YCx*NlKL^p!a`_xlicb& z*vDBrhN<2nZ-ai%x__onpVqa^7HvUQ#3uF8me4|ob~6m-1;{EzMmM&~22-S(v>;Xx=4DDscwtSq)NTDwu!pS{GnrK8}dO%h+!p= z!{hLT+c(W=`l*dBh-tNeNflTuR#a7?fPV$csoJW$j>B>RUlNGM=PnaE2s^4aopX;U%IaD{E zJmO%!6C>{3 z#&v-4$fuM)9F)kw&%+`(v4;(glSEM7ch}??GiGR~=A99GB&4bKJnELP7?&C&yC|vH zk;~`^)w8unxk4H*pT0CEO4`%<<|r0S@sX@L57Y0*8s2)?#=RAxa9x(D; zD~$u;h`bh$DDP_8w@BiVn zw~_4&5>2SwZP-#yjj;4U|J5mp>8sbk*RnYUZt zo^&lkJWh$B2F?mal5upkqh$ohD)I>MbTvHCbd0H8`c{5VOJ96x+`HlKJ;TuDWT{(( z_|(xD*2kqz2IA+ zDvIHqggoAW4D)0MR_kjfz_+-548;^g$FpgE_`7Z(57^&=TVD(a$$cd5-y#(s`vSj? 
z?^w*1zZ`x9p%j9;2i&gnNS+TiCY(G(UyaZST*sMlcJ(wa6!6GNzcltIs6^ zoLUq|x3crVq+Zo8j~U1y@xv#Bjpvg2!Q=*+m*M_;0!$RG9#(je{VcNvHpwYQQxc6; zoNd4?rD*_g>0sqI-QPV)juPSm3~A!R6g=q@nA8GH`<00vLSl0LLgBbP7RZU*`TOrj z-PHpZh1jbM9$#cO-{-oH$!-$V?Gc8{ZTGi=oR`$?;igE&>AQr_rImPd`v@b2{wNj%X{H!pOe-1#KvrByo7xZ{d+@i}|p7AJM-?J?v3 z;7ufw@<+95&Z4$0R7sp|YB-(NA!OQIyc+!<8062`0yEflkjgLAX3YNqNdEl`IDh{| zrT>vakQUa+-~HKYURM>av73L;%JotCe*q(Zp0XM#EBzNC_yhTv`tSFHsr26)Z2!l% zQMBpow0-~hYQ|H~X#f4)_LCC|7m$4V)~Ly~XF5Yh_BA}jQ|GnNn2nn=2aCdO> zFdun+^-bb5&Huaq6>Qzq^M{ij3kXEb2N;3o!uhXuiPM#&TgME26atPdz#(#LnMBX1 z%KgWeU3+kTUDQ$)k~LFBR-y61$@&kFetqN3x2~}!aKj;y?dSNHr!F?x4a@kZr%J$R zvg~`l`h`3B>M`(&M2a?g!q|IIuqf4O@BxnmXvbrw?@A&XL5bPJB>L+=CQU(=H#n_c zx1LniiTBlxX*}b_@0FSl^PazcUtQ*0QqbIO**X&ZS`n8 zR}w(GUVsWEVH0q}w4(3~(&{sQ{*7rJ03lYJ6Gil$v2jS#%S9kz(M7b`A!#dvTJR8} z3?y!#C6Bu?!miwN`Hd~~^muyc)rWU0Q8B-qgm<_1eOnIRWlcvfgTlV_xk>`k z00ZzU;DU;skuU_y4j?uZpwI(V%Lp&V#274IJCpSkLV>fg@KsPwEG7uj%#-Y7u(*uGe~!2r6Jaz)4Sc1DJl+od76DdYr?KGD@9E z>uTC*rdpQqWe~r)NTmGJh!MaJ@B|={z(>bg(W)D?9gMwyLTPry{Xi&xgv1pHdMUZB!<0WD5O;5{Cs)ISbso9UtuaFOXDlViRQ)CcQw%qC2-nml;f0Umn2 zHe(`y-=rqI$%rEI&041;o?5<|!uPQ)OT(<|`?v3NP7Xn+G|q+l$&kl(RX=zvZc3lD z`{Yyk?#ODa4-kNmC{#Cm93wMQaGGnY85Mq?)u=@nbF?z*_@&lPcau7Jwj!w9rwWJo zVf^F2r%SAd2*F=_PZYBvM0X{A@B1Hr$@*?$a9$jDwKM7hO%IgNp0b+1sJJVi@@)uq~4I(UQ*;o z?MI?GOeOibmwR2bhf57zjQEyjOQ4^s_9+Nr2oc;rN!bY&0RbSPGEM$>7ry?Hri3kl zG+*0q6%JoO@(waK1b3D3<+f74UdoM4DXX8@4Q;?c^%(fRknZ>uo!ZYfW;wo7Faz;k z8cPQ?S@(V5z4IXP+UgFxj2krDIw|z^u{syvFAi2dm9`TJPg~gY-2Z0(d9$$y=8>PP7*nx>>JjZ@(MjDRcL` z4F$9_3Y4FEm)1`(N14qpP(5xFTSgb4s8Zj3li)E@CC7pJZV4F0ym?Gq*3}|ZOFy!1 zR&+W}yp-sSgn90h{?NI_S#~oxR$31F2~9B#&Ab5@Cljh8eF?dd>i5;5fyurwN-MH5 zu;Iw7=k(?{2^Y)z#>eEik3 zitt>!{o=GMrAOzP+wu}~u11*YQJL^CSd<4$N2<=g4AXCmvz7d0O_X?)dkLsv*|kYO zmZWI^C{@hY-vjH>I|ep5tY4@UA}ME4D>9JFJ`#Qu z0epJ(yc3$Mnf8=!5`j6ki}uUcSCw|()A>PVP7#kgIqNWX09stuW%PJi66rMfSq7Bf z6jrogrW3?O+`rTcIT?Ysy#2<&DY4AJSS2OOc)u&wtbkDD&27IsS^_~X=g%^Sx<*j z;BA+{qD<`Nq6jWDFO;p=o@y-5(I^1~Z8+f_`;$b4#;4}hU5t>Wpdnkk(IG;+uNbCi zLCkl(1~G7-^WIHFavv#ty5xKLx}S~v85GUZ0mFpuV(Q#i5rpj@wYXO*?x148e%b1| z`)K191uQYNdHe!?aBwkQanuK@2ZqY;2D%ptc4wH_;*4DlTJ;tcU4bE~x0yZ>-Ub;@ z?x8(c64KX`Md?Ud4M&Ho$AjUaM=#b<)(bR@)4fsA)`NQ_@5|$}vb>9g8BuA9r6yEO ztKaI0fH|mr!`{MF5^5%EfmuVdLb7wREk>4vTRG>HYR|d66{UykQu?lWu9+u4US=@hD$XD-P*$~W3~21 zi1?oDsWZbP>n!&d4Sr0~co_{DU8$mhs2N5$P}*w7_kBj$tsajc@r*0Qc)CwjOp1`R zEsKW?!+p}{y~+8!&XnYdp8lkgMfIhKPMZR#s+y5~1*3F324xri?fKEH<1S~D6 z;4{)im|fZr@dUn>wTdvC{7HybrBy2u- zAsDS*?kW)Tt~(GXIuHS*TcGWoRPJap=?KDY&1WvQZrV%-ca|X8G^T z3-%1S#8I9gk%@;lrQ#zGp*I(PF`S7eLgB_R>PX~_77P9DfWeGX{!xNzVtj{>7BnN# z#;E2!l0+yaV8vILQEsnRDNwCG_>oxNaZtMgsZp-ENAt#^SW)v)9Hc%e=QLtjK%a;2 zrg9i^8{3Q7djF88I+rXQDRmhX75*Vc!g6gDVqS<&00ZX<<8&qT_EFEh=z2)Z+M8r&}$G%OfM@6XLEd7{uSvk5!u&&P!B zEQ!2ti1u4R;o(x7UXbz8s|E42xyPcf%T$_nI5RwAQs~rwKFz$FIRLFAku7VCa_-5U z{Lb@M*oS@MH3Cam^+;2J>+1wUaReo>mzPz{#R){8CA+DQAL8-dF2 ziR|uGn2x1)Ssg~7aM$kMj~A86*_wXu5-%E-+Ge@JU+zZ#y#PmT<^q(^Ch(MyXm{oJ zHatOWp4d1LsVvPXaH;Uf9E1%^1esd_UeH5HAyZ<4wLwMtQ}Ngy8U8dOssNUNn79FG z?~YyO7sTqZgy#GGu^sxu zWazz4*H*?UcSZxY#Lkm7yf+WdUktyFL*S1cZrr2>G@1~`y7~|ICBFg}(!rWsjYp8K z(MP#>xyR4=;qRmoOG*e9wDUM5d>-&IxW=X`0>L#*c^Oyl2o-E!wF}}E*YAfM^o_sD+Lzg$^^vA+nYPqF}EPCN3 zFohB$QP<=+;pLpb%R$d1EpN$5mG5JEY1;aU+ithhVA|E6)|>dp$nF{8IljvWw(PS; zFRnamZBY`mIevwAd>C?j?1rU=0(3N1;j)wA6BwF0_+iB{1JHq%t*X!NCPGbQ%X{$q zvz+WdB)eQKS$D=i;t1K0MTW4I2R_3L$1KC-GgqY?nyPXv^1idH+Pud$*i$3sDcrX* zp56o4JRDEZwAo6%R)saTDC`v0U*&wn|Hp>>1yGS}NxLmOJgco)0En0ZLPK7f5k>sh zF{fYNjZmz>>Z*M~3WQ{elbXQ&PeQ z2sG`SbVQn^gC?l5Lo?E)-Pr|o(F|6XcviJFsftj$X$CmxfU%`8{)BN~)u1L<;Q7zx 
zWYJE0V5MxdIHAzRkgNKAATNbD)f@p-He_M;q47d;(X59kwzm|xfNgM(KOIcUBX!~( zV8}4wF53zN;unP#kkfdM*lMsr4)=4YZ}eFhX);-B_%N!C*2lobommlrmx`L_>e+N} zG%Ny0i<6f95=C28&R}~Xz7I5nafJC^=G5A`f}`!V+D+v82jDVjc8RtvvAFns%~#FE z&AJobO;tkP4TTQr_+e)>3sVbG$A#usn}1Qv=~0J|8E^3+I8Ru37Y%mziOHs=I*cV` zImlI6PB=5HhPb#Trn|>(*<6b9{6N})UNyb^$op=3%;a+<&7F@_^ybQeTpa>dCrL%r z0Yc|Pz72Y|`bMbBJGpDLU;k~^1pe?SJS|kMOv6?!))yXLHN;cs^5a<>u$WLz`U#0yzn})>xgBI6oc$<7Yj?DLjp|_WAv^jD*CH$=7 z;=4T{Gt_AtYOn*Oow(^dj^JL9(+BdX9n;qqjpU+$H4U5`sIMZzseR1$i&d6W!4p%h zcvd5U*QVt^zA*Bxr2gL30*;iU{@Fk`V59eYZfEeM!n=R~7Gd=JV*d2}YP*H~EnpFt z`dq{`g7gJoe<4J23T!$Nu0yQA3W3P+|Jx>(8c0geba@qs3Iz3acA9l`O9jSc9}t!1 z)YiHfdcWfG`PGW)<(sSDIobZJb?wG^jX;f$Bz6RXJHG@6dj45C1k(NyKsjh&n*8J2 zio~$4+n!Vh;XGNi%vf(f$ASCgbK6ha;f8sWV~}@g{_BAZ!3n4`1G>Tb1cg(1Oc5iI zVw=(`q^oECH`difBj4!P-SCQwB6TC@sb?B2ej?F(NRL$@Kw)d~zRtD1{{H{2c&V@p z^%9z8P(~sl*`KX>9f^0k8jD^m&dXv6VM-h1N`%=${eRJ1-b@T>&+$1r75UYAHGlQd ze|b4n&sgG>bd}X$A|SHZg5Z&M;EZv%dAnLr#mHkZ6cwrA55%IDM*u$%G~m{XdEctZ z;VVC8-)|7VD$R!ZTKA1Gu+FG=npe#=ul$p5qMQgyrzr-C315MLcjPjfZ?xnG9l&4a zkl%&}&0qNx>(3sjnveJIuNHS=PC@r+3kTM&qm?@pe4gM`jC;TP1bK=ExB`MN=`SsY zNS|`2$s2shz@M`_iV$SC^xygnYW}a~kuk;59U#X9X&0iEdROJfrwY>?r3X&SHOjb^ z;y)k#M>Ym}o#U*$%QO&j^|rjTj!94O#~sp_=HZDt-_Bwr+Iv;-%BDlxJx3r@9JGt>0LiJhPWs-_b- zsrmp#7t>r?U_hKEG9v{Z4%kewRh1q%80KBXrY*BQ$P|>NDP-Lh$#6IUE*^2ifqd9V z@`?SO7+DZ0vR`um7%nz0Ltm;a!*>_oQ<>he0VX1|80-6hJTz!S?hjNa*`8||Zq`8) z6bI;}DDV)5I8ojL7Yh^r-_K~`gx1JONOQNC=Sq~Yk#Q(5aTxy#-}xvKf@B$7`@eQ# z(NK~acd5`0I%>DEu(oBnm;QrykOH+hv7_`~6?IQOtM(y{Le7_BPl8^{zqCN z8Uf^mkr;V;Z-#!a2LO*L9v|n=4UL`wf;7^dY{>%AV#sw#$Ze%6kjw zX(vb~j|-8i6keF=C3E&B?s!+;Y&%)R=RX(_P-D5x8oK|-p9Ewgk%hS?sus6+=cCKx zM?RN3AwlNonA{F*Da`!P7t_31oQ1AiVM4{ED$#6^vM!G&Q<69p+EGBye)C4DPVafx zqo&V)XBe;_v*_LHr#9w4Eyx?E?Yo+iBB5yx~AhK?_0XeFsP-7 zDx<#tcwzVvNz2jUL}WiY0`O++_oQ?HiF}cZ{{VudHAK=Jn^sc?GTAJH$sdR4Y4Joo zX|kG;_&VHDVR=U!#gqH8N*ei{$vv%t1%tGcjuQ=y}f*p zM-5&5V`H8^!8?s1%PBy%E;%m#u52&?HY#!DlM)$tISENE4~6XlM6=&JNC2z)RU@E_ zk^A$X-8z3<*kGg6*rCH%7IFc-;d32?JiQhxn1bDGAdXY0*URv(3*%7})r0N$X)jfI ziRf?D27B-y?0Ke^Ef9uQz%xMxS?UPrrDtXR%tz_Hy8uwwU1M4!1tT2+0WL?>JF66d ztz~$~Fkwd~A;ZzS42*8F(%g5S<=X}n&<@iSzZE}5MP*Ve343`L(J>&b6>s#aiD8pv z6b~=F-p-{v>{gQX!xfabcs5Cu);iwzd4OVJ=e+VWz9Vm^SE?2J0h*zAZhdf09oPRWT^ zxfI?kc#v^}?}K!!F=T^>*)F-GZyW@?*sd%(b2U|FvWV@;d2oEGP#dvGT{QhK++ zwtyHt<=!N%#|$mAKS@%1$W~u~Nx(byOq2mFJk+gHj!ap2)RAOcs`f$0UE63#e@f{7qrhbjU`Q{0|AQed4{|`Tw8O~9yjYBY@BIoS+X#Wn zxL5(>m7^Mr(L%&g^O=ug>{acvU?!fpDoA1bPlddS3__-imq^>Me28uxD5a4477rig zlEqGH&#w!8OqxYEYx|0qsr<`IN(p~8eJ0$l+OFsyhBQZ*q7h=_p^MiYrQ*tWI?chl zee=t0P2rL6&|9znZZD1qJ(qxM*!N{nw-JaF?JQ4}Z&O-{43u895fN0mNa2^FB@doS z5y}o=^>gRQ>K3YX1^FSf(9P-hgEcS?QrW!9Fv~uEE;W7C;bQ#BE-R(!r4PWTcmT4S zicpRnDN=4fdpg{=l)DN^F4m3P#8}j$MBx^obp5mzTY_t{r3sB@iy#fzQR6j95ZVk| zwUVyzL{~R2HxXQk3W}#J=6?=Hu>1vX^{MHqutOAMHf|8?xaFN|Hn); zsV)zJ>7+fKOP2INjX;ktzK*q|qJER>qkpiGcGII~di?+-enggJ|kIp(FwXWl_bw zpM!n4zYF5x2e?LVS4C4Y8`=f3=+50@B$Cczex0Z^6eB9dZ;{*j@J+Hg9s|ki;#D5@ zag}Z)PV|>CcaG}i)?Zx|Hz@}V@?3#fr@;4&L6&!k`f!0tz)YNh$IDo|hEO+}VQa+{ zf}|Vi9T}t5AW~%h8v*~DsZx!L|a#AXNOOEBqD$Uw3v&w_J?9^_tOYy&{HcWY8VxUMs?(wh?TVc}k?^s%N z@$BNX4)gr<6JJQVn4U?wx3J*pLrNT+RT-(gRJb81w$xJiGD+HwWxFOAz{i>h4CX) z5d3FYFAM0}POy)o*4nwb)I1yqe@E$NXx^Tjui1hW8l9A>_+(2hfI7pt-Mxrwn2613 zwVX1HT+Z?BTNyZ6d`#J`W2Zc^yV5E?7=4|k_n$qv=Vy8_1D(^d zs%~LaM0A9_k3Ng{h_LD)GnW2(@4*TL&G-inYlyAETbMx~l0G95RAmb=BYT2KmB!qd zq*vUurwK6UJ$k!M5Z^UvM0j-6-wJHLS;9U9qfr#j{j9C9K0H!o2SZ?Tru!M>)l}+M zHl?ZKB&ibaTS@p`Es~V4$yCFGerg1!_9lQjG+>&v&oitafQSn;N;K2?)(e> zi_BJVz}zzU5~74ldfA=-;)MrU)n_j&cZn5}U&~A;Rlu7(;^jl8 zZeQU}B=szniqUQrKihb%#Kxg;)D9q1woRqZeBW$f*n5Wyt3hWRKtw`oE2o7RaLwJ9 
z%N%X{eB-QTJ;QD)%=DJnvg zcCyK|mjOguGPlb41n;TePlxOaYrlj&K?|4#9Mk6!+8)_;Dlp9ad0reC+GP=Kf;6{X0yrPLI&w=+HM?KA-@^3`-@v8PJTkXloNgE1iv1$sN0%E zcb?>y3tMp0qb~KBxe5fCy;vVdV6Lz!{(Ajb(f5W(K%$Wu$X6n%U+I~Mk(UBr zxLnb;62{KPR`(ZUO%p!51nL`pw+`R&kS-^(@w}Je@^q~iVb}{)+U?oZBO>Y>rnJ_M z_JI}^3J2yJxFji3rp>Qx zxS2#Ag?(n^ySB-muuQV{m`BH`J_NUOGk7+Mo)i~Ge37cm`-?1{W;WJy-hDCR{0TQ?J_RS{-xRu#Ai@&%Pn^3g2Jv#Mdw#Zl8?SCkm`PP2r{G7*v zpH>JsnR*`bsqLpAJ)n#0pOqV*3ooN0L6Vk#xDlNqVmisJRX~oD18=*%^2}I-S@7ws z0Lnl`_)mJlm*!=SR}$Vfoy!l1m=>;{4G#}L*WjAq4JjDhb4AJcY)5u&n0`JPoIOS1)OBx7q_Y#Rxyd zl{|#Q;psRsYTQD)FRtI)>kEq-iE+Ch)hdi7bMP*z$wWF>|L_h%B1&K-PYb|v2oKSr z7E>G7mq94Q6(z+w8A3|N-{VBvFje1q5l*bj2`;W2svM#GLOE7BK{gwBq&I>^2 zQ$6H-cCab#duDamopc2-n)Io-2(P8hkFU!6Slrysj~v{;66yWVg?p3}MvAYm>WxSF zrP1O1*Tsm$A(Da{?|Ad3AK3u>vA4la}di@GLPx2yq^a<&5N9gg$U7AtFMa1X>EMstIQ0 zJwQ)dYyOD*_>kEir^{7|T*ed6#fjBmxhMX|PFJC6zTq)k~2XvLu#DWHeid@1O@f@H*I#BJcqamSMd;>loAsGu%1Kq}8j4 zG1lR0JqRnc!QBHnzLTFmS<~y}!nVH7OGRmsbV)&xkW@o7>&4aVnJy> znl?h1d#u0C(1C6{UGMGG7+q>n2g)3y_-O~TO)lG)-qtOS`o@^Av0oWp?0#-?KQ22E)2?iii zZ&8fV&|0xOXahP+D7SRF_g?oY%^Loq^`CCQ(vLLnJ_r5=)6j97W!Fh2S)GDHM1qlGfSf3=gRXoVtb{;Jvk zQ3#zSjOYqrX#KldHq}JAx!oWJ7CX_GMNSQVgdR$lpP(E*@Z4-2TdGW^%62?|44TKe zBtYFyx&h&TimZJah*dTPd)>zET}f7$%0;m76Bmky7GkTR#C^R7rW~GmvC!;vGd5+% zd{52muje!@sNX&QUVkK0lrkVD?A*Cw7%j`@! zfQQ;=ewHU8x9i9ZycF}t)K`>nJT5bB9PWU&Sg-Iz@wAW@prap&^rD|vPhMi~Vy3;R z`D{2d^G-G@9=!qNA$@>C5_8^-4byTMOJD{joQYs8Jc#fq+Jk1lAHp%t7bm+OpD^WA z5TY5hU;5){)NnOUT}LwS=fMbAHn;Qm2q+WMI%m7PrEDYnfZMZ;rb-EH@0M&^VL%#a z5}gNBE0K_IArbfZwj#NSC2*-tqOt)3)`~ys8@*;|32cF6MFD33^t7_Xdn9eB$Tx#+Me;_hhiL`|`JlTitCvi8 z`)R1vK2RXMhy(Ll$}XF_a)kU7IWa;E+GzzI$63KBxeaEx*tnu66Ft4AO05_>a!w#% zr2-4z^`GlC3EVGog;Q4p9T;2={JCCL!HNU^9EKq0P_SNHKL@OW;FZ{MQ!V~imoXFq zZ&OehxJSQcoBYpf787#$2bG+wnOiU!29-88Iv{K z6>?vkj!{NlkbYzc1gstX{MHPN$M|eUTf7d-Twz{23!S^4ohOLm9yYuG$dd`jX6pxf z&9#xN&ujBqax%P98M0+4f`b~ti8 ztc`f0?j)dJYKxl2kCu)VM`%gk1NLWY-@6+#%A7o1Z+#}R<^f&nyxa3z}^YWTrEBt z;aLlxKlXp9>Fc080(FR5(51Ggt2pk^+vPjWx1lp?wgIW}MkF4UFt?6`=Uxb6xF5;t z{q23Y^=yAlMHmeweCz~3k5&6Yc7k6rQta|x&a`?T|2*|t+88g0c0c}UY@y%iOb6vh z=?2Si4XE5?r-?B?{XSU=-35TN?0RYi?7HJ)qdGPjU^;*OP)1v~ACK*rqF}gij&Fhx zc^*5HbMEgLXCT4&s?v2;5fAyDngn;}hgQ*F=q%MB zQcxSS0gn5i4hd4I-5hF@93=&U0xT04&^exM1RK!awgO-;9cx$bSonCOwjV>DhHW^o z;e*q>Wkg4xKjs$q#iJIT-ln;27OeO~OC^=2lki#23 zU@+b8FTqOm(yfv)SJ>J&OT^>mW{0ncSQ3J^V3CvmxuhoKg3)@UBSmyX%1e4HlDLDa z@TR0f<(#dgn)(@=L|vd|;Em?arp9hqtmENkq_#`&Z!#a~N8|^M1ly=z1D!dzoQ&#~ zuY%`KK=q^>HSSR75ppsrfxXRXe}PCBc>3fMIg;y5*y$>pE3GtYA!klbl26k)_!|6?J52TXd3Ci?A2vkTYZPizcM<$*HJj;V z3wbb$;zsc-FS4{?u&P{Wqy5QeE?rzJ^5Ra7lVB|T2R;ORogZo3Iu{&d{tTxQPlX3E z>z`L7x{?fc5kgA{3B)Q&q23PPw+=1mvRrYDCy_x|LFo1*d17XFG4bfHtg{YDxM=5A zn<&Z^_Ht$kr+D-y7FZ|f%xMCsXi8!ZNTWhtFA)0cgYfi3s-2Ap zOy(LZCfmpxVEGI&$o1cV5YeAN>_0`6gE+RI;P9he4Hu*3uKbj#ZIxfL`-#0{RI-0LHOAdyv!S>6W-dg#1|IRRS1pc6_qkm9-J$ zcWz=m7>u+KJiq#y)2rMw$PoV^0h8`?u#SqARA~rt=lkq|?rB62)bXVJBPxymH+)L(Kv!XgnxLabKiQ5{U+(BSAER0lFW8A9l3nGh{#B_z0SPeo2G$D}|iHLzrY<`!@ zn&7jEjO%IhA;108)za)0568KdjPm#(Mr>lVW})dR%_ZAa8VawVD2i2boY+U`EaV2r z4c5qwIE*4$$3S!y>)WAxVIKUU66v21Ox2M=>vs?nT{0zd=^}GQ;>bHKWiR&5Z%* zJc=jkjT@4P8!Hgxk9Zh(c;iMq><04J@vs#Lcz6T+Zw)YF? 
z_1?q$!o%a^9Fgaac6O`}+qbv3-@SWh>j0LzE5M?;4UXES<>ee_&R}!2zt9r zrvTn#Epl7yco51tC8jYUEG&EkPJ>mVE?~gpOK0|xGq@rV6dWbYNnIP*z* zmVv$#g7((-e7ir`U$unems&VF{;cr-&5os|sW~w*!OYAo7;$lNfrf?WyaO~+qVC)J z*|2mxMqk-#>*@8smPe-5Fv!I^XTRTj$klstu=&wwQEc_^Z4Qd^oH~Z_ z?>0b5qCaN=vxZj^&z?OaWF>bCS@!hw)JE%baBw(?0F8*JtE=nU1%iF&XJ!VyZuQZr zO-@d7GfNUM^YZdy-(p~3VBd#}eh3Qc(3`2sUsX_g{22I5=n2Ue=E8TVz|N@m70T6p zu)U}FrQbRDG-h{qcTC;O%gfc(wNYHI>yZ$0SAtI?j$5=0mV2KeDOGPZPw6!X^6TPN zYxzOJphoPo>x|~P^dqYw+T~zl9OT*jAHLo?Dypsx8>hPmQKWNFx@!RGc2HD68YEO2 z5r&Q-1*DrnLJ&k6>FzEA=?$(N;sHmut zyqbl=wDTSYsuBQHW$Jr+in#wEghZnlFOzAI;OWz+V4z{DsC_4dM{Hk*%GJU0q;X@m zW{*92mS0LrN=&R<_P1)ajpwA*5Gukx=$R8h=f`o-7pSP4b=+65KiA(=IGoN47p*^T z2aQ&}1;jZ00VWm-Nuj>6P`FdnOUl5&Kq3q(EiyWa_Zec2{<HJ8LT^oJ;6V(MKeaesMHDj_i&Vr^`|_;^=+SR;!n^<8m^@o|(%0?o(lR8^ z#K2n22t*uBek-{7T6aS&9sg&E&n)*kVt*1ks~RFt@%o>g#KUCC8Q~@6nB2Yy0wy>B{O@BC z7##ZQB?~Mw9{v`0=T88MuYCijQ(j*FPcK9x(&U!XxJp)W@is`-_#AK1nNMJq28>x+ zTAoyV{hIK)Y#)1?RZwuBr57RqSGhT)2(!1Zq!*7BxwbxWZ-m4&FX5qbci$oWs}39` z=hVNUgBgI3G^#AL@nHV|CJA3Uj<#2XwK$|d@o*O#m~^+u2Vd9|-c>e3nW(FGhUBTd zadj1B*X6tFGrnrQQ(j*F4L8zPa^AD3>33OL^PBf#@OBTl)(hsB=b$GVtXeYEtzb=4kD-QY5;(DK!{T?f-(>s}~@T6lO3Wa7zm{sEGM0S}rm%|oQ0h#0AYFJV^Y>>U|LvM7<=ww|6CfInVI>lQ@l5Q z8kf4UzJ4=3oqE6ZM0)WT1okc5IjOiMV6ukGYR^U>akxC3ITpW-Lkp7w8Y1bUhjRGl1~u6PM)?u@RFl_!!sHxnq>U+R4(DN-n5?R)fsxT*5<_nTl=UTMKcp`0#`^4IZGT_X z!-S#vS1n(7q4@1L?%gILQzXv|ggw&N?^D3UkrkoX!`eQoAxSkJk^5NFOhxz_JSP1tBGhAA6#H0xoOf76MHBVS$QE5?wi2AXarNV& z9FqM>CeM{Uk>6V3U7|M|6ekJ#mixwGuHeu7+LMNPJNFghN?Qqve%`|xlc=632399>|T1Fhs_5lvc2pQguf9c z`gYs((3My@rXRwe`H%NYNWq2_MFfEC z;*{7zhrR$)-a=Y-AHHMo9>{PWGoaR5e0lv~E!e+2TEWUqJVh{{fAiW?Ple|iVZ@sgAcv=yzg=%-?Wltj07em`VV6POALZB`WujTa{S z>g;MIvkGo&E3K^-nNeq-?)6!oRq7*>tpvo^QjnO{(FKRGe=FUWFFbtAh+ zA!eBHN1*ZP&Ahz`o?Wg;zZAri4l_P2K z%|$9Bjp=t5wP;@fz|PT1crhfEqFAxpK zipq+LA_qQ+g*XUIDjC$fMc)eTIlzAA{Nlw62ffaQh6Yo8O%$2sUoQ{%^|uIAQhLhJ zi5K}h$ybDuS*$xYBwvdR;xWfbLswl&8QOu^LhDw%Bg!OkBhDh+e!8~9S=76f7dgP9 zq@?6#s;Y`_9t9iM6Eu+d_kF3xRA~QS(8A}hVV^#IGO4rdoI@<`Hpc537%Yv@KXMWi zCu$vg&i$2AJPw5!T(Rp)A8|Rjd58?37Ks`X=y3IuF@Vy z;k!4vaF`p}W#@mcn~#qE&G3ps=J_PbP8%f#q7faP+lwG2zqGQV^v^dKOzoN`{TC$j ziHrZbTu{7dDU0L~2T!K_p5DSwNlmR6RCiNTQ=0&fqVxP{larg9cvVPPc!UvzT+Sf1 z|NHl=NQcj97kkTn@d#!anH%T)^5x649^tyWx}kK*u9M+Rc~cZNs0z3)b(50_oMz?a zL8s6b0TgNIvzw|eWf6?{_k97F^Lzc0|0*dX<9*E7__zvJwfo_HY^J!tA(`tmO*TB@ za4RdTuC6ZCTUdCnr8u~Zle znwrGRH)R2E_IHW3I3^}1|NQ>V!D0YC+q^l{U=0kW$bUD3mW9Q^!H1n75(=NKN3>NJ z7r*CENJz-d%frRR6<6`n<%DZd-$5o*;*!wfE%x@R6<`IJI669-nfdG8G4SgHG98D% zpTZja5!CCY;T4Cc0|D*tp>X|cE0jsA+#R69^6(ZV*3IdlM1mM-?DmP!MW0hR zu>a1?&v+K_7SGPkcDTp2_Q9-y#lX4lUKsf->1Xv5A((^0-Rgbm9jr zHkARN$+BY7gVJPJbbNeA7HO^ta=T6dPDD!TOa|~-AzSF*do72q3nnYJw)w~Kv?W_9 zyAs5^@rjAhSSYX;)FoR1HKfVO$*Db%`ZSnq=m)J5q8An@6UYGYHAnRyG)p!W|JkXV zVk@DcJ)uRbLpt6>jox%$B5d!Cjg3*myf4J~;YejaWCL`ZPS7S9{|p1#1&}>N4#oJE zuiE7gmDQCQ3J?5-%`Hs1no{uI0<1+?{shf0VEfJuOa-e^htfCa;eHn*QqDC&(BC%) z)if|{J#>AZ^v_}ec=aF+`j+Dc1jqb^Z%aNZ_QfVxTm8Qd5B@)&zJkAx{P(CL|2cvC zNQB(qFZSjW{@xM@;C}!8hLP`lTE5lfVxMd=rtxL=E#xYzmaAg z49<0B6)MuwHPtT{m)6(_q$7`ZSA4c-8o(mbii6$;FqsDgi8{VX-tcG}HmSn;`Sy z=?VMK>8@V_u4j6BnrqplUlAm1-ah`F}w}vug^61ETot&LL{>mMQ4Mu~Ym?TJ73)I{?K>Ph`#AvIIxBx$Y%ZHqt7uMDs z;Ar=g4=Sk8fNleV#lphEu{^looRCqgh2BefEK>t5t!6_yAc?z&{MU}S{o{z47KwSc z7~vO}+dl7~eIP8!Z=aiE%zH$}K}pnFftL$2X!1^PDS*lL-Jzj*M*+O~&i7xx^on{s zBMJ%%qM)FyqZ6$^C&bG12}qp`KKyTN{2y1Wxe}U{l_jPz)##O~)QmrG{I!`VmNDtP z*^D?6d3f6qh4%V^3Xx~;H+*-63Je*>pZQ`g0;-EnyTm)vnw0qGX+ihlgq9 z+5bMk(n91&@>XukZuB|`05_Oh5`oET!wM6FlhNtUwoKvvFjENGZztf|0`*TE+`Qb} ztXt#b3$1~K8_tf)z43Ve=CEFhE{6hfPF_^KQIh&^W*d7w5T%%SdrR9KH>bRNitv9) 
zVoHb0DU1hl2<@?q9F+C1o}x?yu(hFh1mBS7$DCr_0qQkQN=i7m;+a)di2-kL3xfZH z3ZDKSDT1h!l-|1$*H+s+CQB+Y!jZ*ybPButgIl{8-TV7B`uaI_EyehF1;J19dUroj zm#66KX*h|vkmPdFZuR6M3U$&~=2RwFURF@{nX@sxJG`QUef)YiuR+inD;oYRgjo0i zijfMhS4C4c_q&jCH$(x;kzus^I~(P@LxGP>Yi%gHd!kdIpC_`+7?Je@#i54c2+QIK zGZQfB9Tu8dl|HUf#MGRpsv8mE_{1;M9S1Yj8R^Pv^x3cBEix)|`PQ1r!gy^XepYoP z@OAKF4)@tGZFH4l3XdoiA5nGyog>XWb;?+R zt92R?n>9GXUpH!tXj9zKU1ROkdyEYZ`X?zBdm!Li#Yf?yR4io9-2A^jagbJdJM}_- ze-&h+3X(si;7 zQ$;8k66_2L>Z74D7H43#$cz@HVj|Uq`1y_I<&78@dGgy%1Rze8U4XLDOBR~btJ_UK zZQ7r655e{|mW;hmm*1YotJshhY6#fR)PFh$)nE&1&!h5;20&(HOPdDK0NsE@R!A2L z**75~21m5`V@F>DnVDZ%V(-B8DG$*phc?&7{dDKMNxXZLX+B0^<_btrJhPZ@4Rq%r z$0HcC!pBysFmcYXduR&ei+nCr)1nU@Sz~;zR(&Ellf}JbMD1s|_JKgw-YSrL3L?c6 zFM42fp>nb6b7l1~vH>vCz|`c&&ypowi|#uvv@~6xH}%D>;fp^pXam!kXZ$3HGw;0A zI{}l*nK|FoW=79|-iN0;hPR&b)NKePW}R+vrU(%jcY-ZK-CC>n+}_23zrVylc^GE<~0wM_3FLh9z~i5zaZ_KcHc}12nzWiVbQW zn@(<(m2Ge>_cXrUe7-^z%S-dol{Uvf=AW@0V~^tU$riu(@I#A zoe!duTHbQ4Z#9V6h}*!;NEmtO^x8{XPe6|=?wCM$TWJ_u)@NxiAR)y}*EEoE_AM`I z>+EPP`+Y6fU|@8OF+z^NGO6&VfpY{_?E%&ZBjFSh=I48#Y181KJ))#3sdNY@s1Ir{pFjENVosYT!Ss6HNx4qB_=)7I6XI!lp>Xnm3!=mLC zhYS(vu2RR6NVA*{`q3FZ;+UJU&C5ZxVu=P$_)N`7I*R^i{ALE!-Urw}pPdUuj!V^A z?5xFC#N2~xq)SvT zGg&GcdL(2rDk`XsQn79INIXuKFVK2%V}8Q2nwCOskeB^_W(w?*VX`zzb7|tvWjy7* z;kzM9x`SM*&eNKRte0LEhARwutk5VuM<%!!vS-seIrGymR2pwNpRIqOS8>^}BtSqZBbt1^F6!9BQJ)O{d2d+Z9N#G(-nl$M!* z^lIQ#|7I_~*fa2Zto#zByFk==sw^$ z2zhZBII`TwGk^QF8=Ag?gaYD+B+ZOq+?VB%SyhzDYpFWYTjh4+osn6j{px!PC*!HV za=~bblSTfPQMR+}7L~m2EcNMb$OMs9talWYgOCe>%*u)Fw|s?h*zQ4J!vw~=>s=rF zTpt6)|8}CLpOT_Q_uw}(?{4;x7&&(i4E>y%4k+4D+#Eucm^-)#W_xXCybhH*FoAxG z5vpz$`^!oGRvj=v+E)C7(X%Wk1b0}?@4`m(7ZY~oBawf zZgG)5dzvn}Rn4-%;Ik;|-NyS953vCDbw*)pn@{YI=EqsWX`o9Ji=@ceX>E}ft?f2T zD>oYqf0<1BQ%`>@V90(*h1|0~0EybS9F+N*Cms27k-Lddxl}ixi6sC{ORUrR%PX2O z_dqo6{fKS{hApMMuCxlt-*7z;>?lgfl2G+jIYAGLIiysNce zyOCN%R_YLaOK6F05Li@>jae7Y8bhQk7h0 zZ{>gFSGM|9A5m_s`_=SUWM&oz$?*haK;xYz-%Py`(ogxzc%!YyhF3ElQCAoj!znF+ zZjgCA)Qns<$x;>J?>)r8RA_uREM_FuZZ6&QsY;g%V~n53I^O)~wnp-T>4#CXHGym# zSJrittbOv2at@}X$6)H&iw^4&KW$W$105GVxm_vu9#(y!HEaogZw!n*Wsc^Ji@TDC z!mDowQKx1sHhF~`f7wi9i6eM8upATeXf+#I&G?{`IHK4$TfK@1wQ;Ln zKUn#3ADZ3Eyc?8<^8hu+qE!DdT@(c*Ff|bi)r*n)LPtcbQl4AR8Rf$9Zf#K~iVjgw z>jnB1`lLd&f?BlE7$^z3_rNLBTgN{@&nF}Tj305c>GcqVBUUz*6Nof9>fg8rFueXk zOe2!ycR(5GYLRWa<`8Qb!;=EonM@DuRPIR~Y2j&DY`B^~l1{b^a<1!A&_18meWGnD zbN;yGmN^D^&Wz5m03wwRv+EH%JaVRqZ;X{}tG07%0&SDVIxcP+dkQJ-1n0dWOX*H;(^(j2!Q0n1AETGhlp>QsQ)xa>) zKhtdMbzx-^Pvb2nc$&}EDgCT`aO%eB@Dflq+R9|vF*$BJIB8CBZa)!UzlAdV`f%;d zTSKF##yd%TGWC?3qH#3XrysnEg{#V3cHZ#&mZ#&ZJfg&gVys@Bq= z$8IT2!W&34|Kk`8NJq^jjhM6?UXhJ#G;8g(53JU!pvJ4sR)iBmKi5h| zu$`0TForNvr|dB4g~*&-k-mgnd9CWt(o~kvg}Tj`eGKf{d7NNg-li=RYQ?rqiT`>< znY*Y&E}9L9kMvLrTH?JJ_BVWYFh$UO#8Wm*f@`_kEKi)3&_GlzK^Wm|P}CaD3rjjy zP)84$D~m1hq@Mo|Pp6cc? 
zjIZ~qtRb$32PtnPxojW%3%hU-0_j+^ch4iCi$Pu`^X7*F9wevV?yf(IcC^EUg#v90 z<;Gefrz|v0KdEg=oP_#Y8$owpaGUP@2%W29;a1UY?3mwpF!{Kd9&7YPoE8=yF{|(V z9umvXw~%pOn<2E9MQMEUtG3H$f}R{-G085M#(SI(+AB*MRJ4t_)Z(UDe8e;fRNNTE ze_(suL;M?+g#{2cL+|8%o2>^aZFYV{R0u2ey42~0zz5ED(E7@=zTjB> z5~QlMS6LBUr#|{hbYljqNnc3z>N7-kqW_TGWcd+lP)%C<;M-pGc8*XrlWq<-(0{?8 zFu0%PWkgY%Gnu{=swKo{(Mw4opZA*yuBD5VClwwJf#2huSPhv^Q8?yuibgR~CaXxg zT95!qzJGU!Cx;u4ez?ga{^9BlyEiW(9dmtYlzp|eaJPj9ip z#R5Os5qqX@JTa8^2|Ee-qY!r2PsEVg?lv-2ES=oSb|3rmu(CVI+xBX>|M#_lcosOh zE&DG9)u*2&o9<0V3g6ZpG3ajfV_wGh#>&A$C)69AfJonZ6}?5IiI8JiFCUJ4zV3)t zL`+qpg2?*l8$u*0sBG!vyQh%%8-G{PAuQ~oPykZUZshB)d7-!0zV=r{`d9P{!r1mL zTVvx&x4z>>L3fr?s(@Pz>*W30QBc}x0>XZKe?>A&k+xJ;rXS{@r&mp6ITSCuh+yqYFJ$MDB8-V_;d2KT)Z_p`%_#!alpb}4YjO19JmylP4 zN#7c18IiXvymeRl_8XwB_3uL51n~&7H;tDO+eWSv<^Gb=Zb2`l2*P*9y+I#TZY3E` zpS;4u)|bqQSvyd3qc3{pmVwK>Ql6g)-+FvAs}S6f?19|gWb5}XrHSSx0%-0r+VQk& zMp}r{{N>02YIrgimKmE~@Y#AmT|9bSNHB@bm5BVHC0(J1oESXObC3V*S#a!tyxzwB zvy*Sf_R_~4-_^fHJ{J?lrk!I>nIdMLINfq>nw&9lXV}qpC^P3Ko3tg0h(7wAFyF7; zm!_gy8pOO0#CC{?c|gG5!EhZyC)wOj7LLj1JyC**HH?iTDd$UlN6D98C@4G%?+AmJ z5&dYDhS3-KEMu0m+1Y=Uz%CID%~z`QETa7Rc3b3}o8ixR3q8sX*pQzTIvSb@d@3St zR7g|Oe=%ZlTu5_JyyT{BYy-dfrkiX7-veXgrftjvL#&q=4Xqi4Hy7gp`pdxoz4?7J zss#Gih2W2Feq93H!2kR2L+3fY8i{4(N#=}?ux@T(ckg8PVCU@Wf^Ks7*z4)jzOr@^ z7sD!Bn!MevnPi!#h!@w_K4CJeReG&({HKvmu(@A-z{zDSq7J|c71W!Gm{Y>4@;yHa z6brL_gtYI$Q>7fRf**y4hIcO)p zY3Cp2$8LkP9r5OBsNx0ROK$F^Y_;IVdj6?l*nc?kK>OvZKjcBJfs7@;pGz7zyh@@@;kW5-6tXf2>@a$Q__4dl4R#l<0{@DH9RJM%5~0&coAJdQS~ z)8uw`kd92ULII3BPuM8034^%ZisgthkY&Wg#Jks^(W=kIKwPT#rr; z0S!Vvn1tluSU+`lB7My*lESFoA+w8>DEv)I81!>k7_`(i6urnYl)^X&uMH-hMm`9g4QrrcU;}d78F) z2RFAYhpR`e5aOx{9AgF$-L8=OoU~CocD6wxuT`v>c`AQiAGB7iG{oS&!o%qzhIt%$ z#Kp$MfWEv#jLM@&#i`179&1Qm=;-KNcUHUvMH|`qvxq;Sl|Q1liOl=?q3_4O1WPL) zvLQA)<>5AVp*(oTVDQ7@{m6#ity*kefOE=;dPEqzyir@;3wXRIXAVl zw=dqRdu#MCm>%&f_jg_W%Vo+xsj}1C6Iuthf~SA2N89|)j>0uBW;^@IpY`AGo)i@o z1^v3ZI&R7UwKBkvjE1@GFzz$#0xCplmf}2k+s_E4%YjT5K`;s34V?valc^DM{hB`o zxuGl2BST>gG@*FU&MGiBH#cusw$GnWfP`cfc)@;te)f&0DOE!L@t|-QG)<*)vUx7Q zN%|~01BHbRdfLsL-e?R;zeDV=FkReqNj)wR`fDhgwJN$ZAr%(~C#N6+RJ}pDx7e^Q zNU(l$YpWACfr^^Uy%h8r6&3L;{m^@}Q(j%|Hj<@Cg(~6E#$E=zgOWGYFFC)Sx2R6M zkZ7F*n)0Qu+YGYEjq(FYwX7$CE0k4#_+S_&Kq+LgT-hNllh&UIMq_Kfh4}Nq5}l9nm_a6;nj*g;)!UphD;22TZcX7gD9dTg zuj_z$Sx|oZ@uTy5LITz2YQR6bxbT$pM;CX=JT6%SWf1wAKgwu{L7nP#4o)hP zo7#lJd+bK_Zs?am^DM{)QDNbh2GbQW9M>$H+}k_`AvHkn{WqW}iTToYeR>x~>+)s? zH#lPFJ-T`#@(~9}_S1MR5XR-pB`cjEK$^4#kDxl(K0ZD^Y3{a+6EjBldpb#_6LXM? 
zkK=~P`ug(Md!7myvY$ODkN#>_$B+iNSJD#T0LNuWd#O>=Ifr)g4jzK?SZajjvqcJ*7Q>3M^@i#?pTH7!kXs^OVE zNZfKJ6!Sei?CD}~-ruKIpiYvrc>1)rka9xdvB8sYcFCA$Uks9Wo?#4lC#Ci&i^UwT ztavBICx>FOvJL1qnkg!|<9tu#(N}U=hq}Ie_3HjKI;f%=F!SVp>Ei!3Je-8u9aU3O zk_Zy98TU=wroyrM(Vw{XdrN8aD1DOkO?`aS0}Qmr0Ts1{*I1{3KD&!luPEVw2HS_a zE%s=oW*!_66fRVV#GQNf=+VfUq(W8xIgj0Q@s3gV!}#Az zOG|^1B!7Mk=vRg0s>;`DN~!&2rNn4s@krsYi*@Sx&^Bx<=qJ+TRZ&`U^lH0qWwc?Z zZp7=?oXd`_kPA2B%@3pzKGGNzWOJL{^Yh<}(F{e89#Ud14D>clO*w)BcFb+-_6L$T zBL0EUZb?2mrJjCrR0&xRsNKoE&hfy?R+QU~1XAFq2`m`8~H zDd|y3{;84#qPG5&j%`)a?g~zzR3z{8f`i`I*H`!Elpz<$WOsjmUJ*#bRcO8AK9op3*Qt4uAeMd9N%H=RVCWbv@bD#|-i z#rFt6A#-%_43|{-^=~Z2%56(QG_@h-gmmJ$tPa7MZct(y=Y|m!9p$Wr@*7sv?7-^afrkZVLxjwV_z-bBb8lCiO=I#b4EP ze#({D>U5mUk%p7YVo7J8u0g7;oN~tnc*r>Ys4Z6KY8H6kw;w>@+?#+lVHW2 zj6y8$Ep6YTwk7T_&CG;aAtrI^Dkqm}l7MSvrh_N3K{lT!^E&fl;XN^Y_%h1blrAT_ zL>bd0B%}L>1&4J=k}j>Or+(Qv`Y@VEffh(8@jHg;CVsN;!n!z7nr(wyv_5M*)wQs+&`S7cMVwaBwsA5(hsce#(o2O84Z) z_XdiaJ!5%g+Y?~@ev?Go_BQsVK7*H*4bB4Tf;(gcSkCKU2b%OVJtS1AS)py9RZZB3iw+bux+pPpt8=}iZzrKG%>+1_ zL5>+<4>DX%hC62qG0RcOY?p;%&ty}iw)MV4-K}5f>)#HVOFOK;TnFNmx1^`Dj}4-d zd=;g?s$dK$$Q(>t*QDn(r^d4Ae9X8br_z~#e_i3g#AR5^>VdSBm6gUODaqkwCCSPC z*g1xrx4+L-(HSZ;T3u)-9lV2Vi2Zk2^jQoIrxm(Rwqm?~4t`#rd%vU5^ZC5Ic?&+B zy9Qv?AtmP@-))U=-NBy<3z-id6s3veO#IhhC-3aE$No5ytQ_!?8i^ny_dRwBL*K=F zSXQxv^+RuS)zx;D^FeFGtu`aZe&A{!2#UZ=^_dv^H!j)JWWVzDJsagYryJ)$x9{R< z`uY-!=}-R(Q9drMxT%#!NhjVT=rg$!Mlb?v@`W>*Scxtge*agU*2r-M74DabY(hmT zI$|$R2@hPE=ke1@=LmCuh0+b8Ovc2{wH)ri@l2ym$X=?y&A5a6a_HG@C%JL|SlN3` zxi{G7jx1o%N>?-V$C~3M+jT$ILnIrrYNsnY^`^bns?HxS=ua0s-kuMA5-B~6!@jQxad8tClh2tiSp$5L*jQR`vMYDwZw{4u zh6p?zW|7i<*CyCMv#4m0{s&n>EGf1!+oVi|>Mf_}jaRBK@z1$(GCXz3P~sk)1<{lb z+~jI0^OT9*TH^w}s5bDTQVas;A@@q?Hz#TcNiAHo(Y{H@x|v~(B80cy*O*u7(2W-r zI9|+t+~3V*S+{q^E0JVX|9bEIuW+Qd_N8DbGU#QO9h$95<$uGH(Yg81t;iV{y=9v% zG3~e9kEw(DnPtA+CPVoimp!q_6URa3;7f-2SG8k&>Uf52z8!ZDiN%1E^k{9C z{Xti=e?`mG!|S51Y9fhAu{e2g#(LjWf~l!_5XC2644PIeg$P2VC?>P~Gw|5NZZTJo z7P3%wlERZno^knOuCWwRp!hENf-c;>$Itji6E>vZe&CPb@wUcG%J(#(%s481wDj$hr4UcL{7IZVlB63E5x4;+Qg~J{`1*xQ^pqtGNirfUtz< z;5c{+62hh=DJe;Im%EGEReF_fPHDC4(DeZN71|4XVs^Oq}PtoL4|Rr*oSR?AZ}Pe7h}XQlEre;fX^@TWz-?Rkog}ow)&2=2I6`A!OmE_ow^^$>!)PW zb6urz%C6!C=kqxlBqPWy-GJhIFb}iu1@U}rn3IReuS0$`c(~50Y9Os!@&!i65MqR_ zfX&rLN?xj%v61>~dq4!^om_XpT!!h;*7URV+X-FK4i6aLKYU;dTT#kpvSl|XsNYR~fI!IRNvUT&BmmpiX;f|#l@Lqf_( zk6|{7n5!`6=|L%uJX2@X)A^k7#zy1k&-H_CKP-ep8U24vO|^3_pYW{HGkstllVvK~ z_Pvz+(!ypa^(l1}iGvU`TWjyuKeQUkAb*XQ`(`$N-$~-zW=@y(f#EcWw6ebh$B5)i z#$_Kc`2B^7NP>f*w6ubzb)WL&4#r#qS8JJ0;!P`!^NCu47h-$g$sd!7(FFxiFjYD14S%Km$j_ta#;p6z`}dY=;+yv` zsuhlU8gyg0F+%F1#80l%v?k*9x?=E3BHD>#;di&;M_*mFP8mWuVR|@uyHa&wuuX zMVL5+2(#nfqW0(~O^6e1);YZl6=eFDLninm`DXpf<$XMgM}}_M1#nM^>VjaN+U@(l zO+ref=iQR+v?E7ZoBw%Kfbxh*g6+4+!gceCv+);R7l=UZ4C`{t=*)>a81aHD0f03v zbP4T7$KNDUpEXNKNGan|mTpC?HvALgkdVSPEW~IVYs{xy)c9!9vAVz`~n+ct5T64!O@Lgnnl*>sEV(Mr}Q!``sX0wpMO6N?1x+` zS`r_pE{B6objKEgXHq8XWgmb2Vo1^|F+ zc!@$qTttNrd===PQ|lQLcD(3`2A~h!@t;!zUMhd!yu-;#5`wIBlFHrP1Ei)Tg@ibccSv8$*A7@`u;41#{|fOuP|_Ovj@{l(xO(XO z?`P7|qX4wmz7Ipc|GD6?Brh zlelP?&ZzK@M%6TjA+ zr+6Q%p{C}}$z(b~GZqtL*{`B@pT~P0dImB^X~O$#wY7Emx?Ue8#KmjMvjaZQt$fi} z6^zNrbhCZ?_HEJg8iDyQSQ3Y(c3XXRuh!6edi34^3WWAJ(BKYR=5&iv2BgbL77h-M z2Z+syN`rJ!duj7`q$*U)OBGdBKY#w*KXTMheQIxOyZmMRHRi$p`3jOAJBWEYD}#pT z9IuYB<)8LbCV6cjHx&iC+a-6?+rdq?Rxs59X?1xB#(VY{AU47b0}4>B*o0JM%AfM{ zr)w<;b!?q+69+S7W+x_`<`)zf2v1M3C#ru?@_;Ty;lIN>Uq#H1L7l*@XNrpn9RT8N zSeW_Rgl3w|Ha4)EEuwj%FgQ zsju&-si^^t*JO);Z<@@$iR8us9OKhHKwSk1B+!~zY5N6iY?HN!q=-8o8jDDRB_$+m zIom-r0{AK%2gk>4{iO7;PsI5ms=6QY;nJr|SYqC011aAtK1k_c%K1%UwL*Rv0G^cP 
zGphN}_I8CCK`|^Y4wI2q2q9IyJ$dlLLY8Z6?7i*r(tr_qv`b7%H8u6^sYb(IpKCnv zJ>Ys6|K|W=oZ;fQQ|gY=UJPZ{)F39m(p_7!0D{;=)poN_JHs0KRYgSw(L-Xfiyped zkAFuLTQb!{d(wT|cq3wO*2r=a1#DOM_AadOODu~W zqvzQHzO+sR8h)K%_gCGrrR?_!cdm|_Xc3$Bv}Au{74YnB5cKyG6AyKsc3xfT8jGH6 z&#w&VOTLZMO~%}pzJ_JQcl+eE0QJQhtD(l0Ypk;~@0JXZ$5)ozcF<5e!uq^rgL}uz zhSh`Cq}*cJ;s~{g#vG-A59Y#+eIu$s1tU1U0WMQm(rzylfz+l9w0DK}&r^miv@nEp}b z>;$5>o`0`#USVeS@bAZ)X(EDCQaxsbwYaoGR_J@#KC;+g_Z{m(M9qw5qsDK^w+Y=vl}Nx(AGTyK01I{b2La+|UC1$w=;&&bS`e=H@L)A$Ru zUam-&lguOBhw>*&i|@%JZS*-ZpLG*{cp!}-Nl9M`czz3Fn-5~d)1zCjE^*yHbe5WT zF(^uy68OeRyK7-*s{Z&yeQ4cQ0`AHf13% zuM7qh3>YP`ponjy3g_1Unl9x*LZ>b1v*jNM6`Oxwb$a?IlKu-Gam;GHqdAjKKDN{Y zf6`Cs(w9J+kS3IzR{G+)w$4Mz3O+1GFi%=J{4Iz$)%w->5ieq?<<8JAwudf73uq}! zuok7=(NRP8Jmq$+PB9s=dk!bDl1Cde#EyYv`>kP8Hb*lJb=$1$DSRwam!KN(HPQ6v zV7gNL{3vaPcrFrgy|Ds0Rygyu@S1H5w5+KIgBtrk7_f zap);W{uI`)UoX6hylhr5obN1s0r|3dp@35F-YXxSv1I>r+d zJF9rlzBCYe)mXzK%so&THNA8F%VT@(ex%Lj`L*b3n)Aaer$KYCkwW2OJ=c<83th}c z6Px#5{FZ2fBJ>(UBdqvX6d^X0YkCFE3)S{%l2v;g|LKVWIG%xG_CmrEqYREQpq#)`dM zTPqRwk^Hxu2A=Ei_wU5ivVL#ZKIY`a2@vjOr=d`e8@l!58s#5KckziZc+J^nOrMwM zl$J)juzjL8g>9OXHns=ge&@T3E``pGF)w#ieW#7!g1i$hak1lYfIKNE(^9vG7vm|Y zyg+}=5aKISdEZwLu#r}@kqdJBhri0-63BYorPQ2e{U8i6GXI($$Hks+i=#=dv0ITJ zO81nz^D{;CD7jbAVbLdfI3PhJgcSVvD#Fq`mhy&Rv6(6QlT?9V@!ev0NWg!7vZ0L( zA21!}0-CW5+)b$)8{(XnRA{j;5R^N)-L-duNX?<#=rO zu^Zo33?XnY4Tbl>Fr~HlgXaQr_S^o;ko2JmR3Ily=58}8WQjyh1dmAEVQzT2PshNi z%N~=*oq2D$J~>|VLo`vXaj`be{WqF*s(xCoUn52{M5e(f;RWYBXt698pt#ZKpGn{)MZQsC>E5Ex0}}?|5&y ze|$Xqs1k~FtajVgCjcfE`jE*wxH`O|;!P%WP8#UQ#HfvT*tq*A5l#eaf8f%5#CcH+ zn2azeHoo7jy?E?R40r!wBK+Ja845V9)cj6H0xHB|LXo0}M8SFz4=qCSgNa{OehTIM z4CmY5M1G|%ITby|-!{T>SuG|jl0Rev<;>;PRpp9fpludo{#alC&+Av3cf3|y7HuKS z|Jjia4hLAsmhqF54Ve#$a3KO>@{D9IsBQ+v*4)G)O~BZlyLaU{J{y)5VSxdSR0{-12ZO?&XGiBwr-@%Tq3Ce)IG zh9O$ql*3l^CQ7XC%&jM?xWNozC@oK;AFsg6;D06Hzi>^H?MR`tzxGt~W8qr`RmTz* zWHc6L>N5u`?9nwA75~!P6^BMIf8R|@D&xQN_*u40Ve-9au@$Atq=Ln&_ic0X{1on$ z7!Y-OH`=y+Gj}*ReWE|IiKUjBkP=2MvNj^LKdY=n9nDi=tUiK*T@sW|mAG#oMQ-He z2~a1@l6`+@cp}wVbjOyuPJ~&?zgKtT+fbdf%Ne4|YJUBDwj`a>dzJD?!&`SRDgQJm z%k;RX_Hro_1L7Snt}?vD!o)>W^TgO${heY{Wo5)NJu4|j0{UCVada&FzP(^vE?RMl zcVsJOJnk4dA1#`N9pbv*0XNhqK;{{!jJJlL6zJgY?->;Wi%d*;xfhS4H}90vy+(oA zE=K`Z@MEQvw#X)0bg@<*4p z6m;R4qaz1zPSVmZ_iM8;_A=Hdir6VMyFcMeS3UB2Po)CfN09>*7Qg-XPU4RLjGl4d zBHov5>t)r4C_Q;458oQr#DagWuVvOh2p(P0;8kd_d2?)(F?5JJ}z8?FT@bIDD zf?AHa0V!-}dt_4c6GP`F<#fcmX|zb<%>*ER_|%{Yn}eQHK`e&&SQ`g@-}DZoAM( zgV&~+OZl+$Pv@UJc{o{l#sY%(`A5Sq$7@5;qq+Z_wS#t25)3NlR)P=Q4l9{Vme>a2 zt!fI8_FZMOJ6?(#mE_SM<*aeo6~CEbd`y^j3ud@$w;I{rjugGk^No(%%THC)y4dcP z600lL_{H}X;n&j1$4t^$?zV>9b>S4G6jHq8rG1^VA;G9>bSZM$km8r8KD;_PLT}dE zh>$-VNyFh+J#C^V<+?dOGRL?>V`t*OkD4qozv_EmlBJ$9O zX(maW&x*!toV)6E(g(W+y39(lOE#GMyK0TUu;@IYtFx>3miU&eGx4OW$hhL{-ZPJ% zKBI?`8;KErIgxCrH)r?YAXB-#91SqbQoEPc_#92wn?C2Wqk^OHFR=Rl7^rVFTv(Cv zhYYs-f6TpQP@LP=E*iXXx8M>ySa5dvJ-D1V zYpuP%{hf2G&Z)X}Zr!)|k*YM-%sIy#&v@kL-bDhp%}$u)ce9$5*T$Vrzm(f;Y_Q+C zVkWx?g{Y#*T;8{8VG(pO`c|l6jX+9H?(V+QBahr9gwzX>-Gv?wx(WYg^|sI>XPQfT z#b3z*3bM?hhgI?()9@CKx{b`f-Y)i*%6fz{OXU10C9P4z$Rx|G(0prxO}?Hd9Z_Z6 zRa7psQY38Gvn!FQIi2f*=p6&+`Rj^o1a<9l4;dH#>^=AJqFPR=7Dpcqjg_*J5@Oz$ zFSZ|u#&&%rD?|yy<*%?25WY<;HiTpsWMQ~`c-mC)1@9cDdDvyG(MrEbZT?cMl5u(z z0hm&BFVM(d)-wNDk9)K&l;R4FZA|19rvaWaAn|bd_Kqh@r_Ox% znKue>2+e^4mTu8NJUE-%;X;q69&L7&p@Pp003O~f{+Obg3$s}-Cvan9mgN#>RdxGO zx6KUXM_mlRgun~ftu?*g`AxLdrPqv+F!F8Wj0oxOzHp~6{iQ*5&e+ZSGobM^y|$eu z5qy5@#cJL&;jsLCAC}vdnHqAsvRu}c6f1k{Ko8l^#-zEu`YdQMn?wI(Zp$P49D&-b zhba5(DzWP8^F-hh7M$~C>3f8u;o9l5b1B-kD1Ju$=|_wCAwI+}CD!-lXRW^ba#u*V z++3W5!g0<5dl76v=GW^?kxIG|Vad9m&{1z6DsRFc#BcKOk1ah-YQDCAh7Va&Sp69k 
z4?UYAoO6ot#82s1h7bHAs(Ql-Hv8BM!RfFG5n>mxk|>5I9yP4HO)c$ccY!CjUU^sx zdQrt;(7<)poVTTfo7bm6=e9D>2W5rCC0kR@8mrUl2AJi~xW5Kl;c`O|??dh{vCrHd zYqY_`-Rvp?9T_`TbC%p{miyd7BNHq; zdoLH<0lWc}7@um?5c6UQQC|Caa@bLa3FDNnf-del4!%Ez--D2KT29Aa>o*6lEtX~8 zRaRbAxouTHQ@a7)2;X+C0Q<HgZ=%5`>HYd$qWK(#?w*IcY!sa!1ei5*J@l8I(| zRBWTQId@S7EW?Y9OPC)7qg3AEMw_oXWi&t7h20pl0n3hk?*7uv!YV(>8vjj4#?ECAi6=2dyS)AUoJmQ&z z1u#}v7+=5Mj!VoJ`mQ1T>QxYucNQKnOqZ&|QAq|={z+dA$X~rTGn!vLg1Z0&c4yA0 zx9(i5(XO<5BpU&|1Q3Z_;1>eQN@~b#T%4(`g1mCvzM$LT76$E%Br6HOu;Br&Xdz5Nv)1RhMmSM%D6d(r=DS~Z3|usu1m~(zn5I_ z%kUN$;)%U(lnGgG~b+#S(q=|RaK`}D-Vifdc3X|XGl{DkCwlIJISQV;h-wxj56btjsC+*b4%w$pFF zlq+s%FdGYWJ%NxcYE>*nTYvYra6XUJ3zv?vj8juGHDJq5)H z#>QUDX#-}#(hByVW}IRd0QKDWrrj=7NOo*=Sg6S@XrN_d!!(taMu-Rem1{n}_BEFqyV`~;q+vUTKg>9_6 z4<|7|I07Fp^DjE&&0aPX7t21Ps~yeyvT`Nt5XG4yf`ox4^>wEUsvG$FSlG!rJS0+t z&U6(Jme1~A&aGZW`kPQy5CmCHENX53zNk{k^jR&P7|{iAz}xjeOsfQZ_1W6;!SX)` z_WTl1?hJr(_Z|IeL_f3 zgiSlpYTz?#pObc(^uTsryu*@>fW@r-spoY$^))!&KaB6ZUOIw-Le%FA{WoNG5pgOi zj@qgwp19{R_#RcEN}L9koo=&9z9|BsB8SC>J*F1NBm?^_Gqr^_mjl4lg|TK9H6E3a z72XT!?4_nD4Y>nGqT7`uK{t)+xW^*g?{4iuop)I^?oeelf#VR3{NeQDKL{4$l~hk#F1pR`kvvTBaLp=^=snY)ZbR*HL!P5Fr>vT_!}00OKqy1%67i7p`j*^> zuK=nk7Y|rPy}a_Rr%(r6Xu`kg(?U;wq4c{X3Y>!tI${b&lOh9<71&I;VXA!j`KdJ> z;5~nK-=UKvvl+9lMY`IQyO#Y~cHiTc2Y&qc;iz{jzMcuS3h+!yYLJA6AuFioJW>Y8 z{8?t2zO0%Fv)EyDQCM}(05#6yTk_{TEv4ZY$|6a$=or!9Xg#ttzhQBPj`Bg9UxwKN z-$siI3pr$>^S;lmR7M-}8i|qtlC-AM7{hj}-x?LatDXwV$x7|^>$lebJiEw9K+mIp zDTS!wa*F7$-G|Erg+ZvXyteX5^9iHTh#d7^-GaN-CdT*P*KJXlt{VRy5AwUT0xo3Y)$n~N#nh6jPKYe@& zA%(-}6Q}&OO?(eXQH_l$BzgchZtIZ#9sb8*PbGE)2lLh3{MFje8!s79&8b&Xk=9c|A-fCO~5( zEFzLMX>qXGxc8CjWdH<2Wl}Rp#s1Edwj&_l>vvm*m#IP{eT!RQwPunPYQW166=ul7 zJyWxGK1o^cRHD&iNpOBOhhe$Gb;4mKsXEGk1pj{Zq(-TWPiwaI>Sdi!sM(9X_R+D; zJjPSt!WLc;wJf!ZW*>r-(OHZaokeRxt?=6+@6b(KrM;pqGz z;a1Kqz>w~9Vg)JY$Qq^6AU`Nx2G*AUDZl#@K_90LMBf2q>7s7f=a`22!xOY{#N~3MN*xG@%Yu)L4&y6b!xv@ z=g9C;eqKO*Y1VQWD7AlzEl_wx$-%v+6jVJzly#nFl$R;jD(-uIhuD}Su`%AK-QuMe zJ@*upTii4b+_&RSP@#L}_8G)^z$3cw4AKv;3kTaR;>CxKf*sL^^y&(22m4JIrE%wZ zGNx~!xh=VthYFu3w;#Dsp3)KMot<$U)?)lXnOASfg73UAAAF(}Idf5b*8LJ6 z&0{0p5Y*DN2DTu-K|pXeJ6SHdz<=# zWMX04-zJ2gNf<@Zp*%z$Q{oM&wTl$x9A;wdK13QMY&z}sz7?cvC=oA+mOKNGP0-9| z{=lj!h~T`XEhJYnDAXLnqA%u%v?sAl=Q>tftL7ytq>@ZUyZGfI^-DYpQkdqiSFK*f|K0N>P+jzpO>l1;}nkLBS*j=xn#sj4ez3eEvtg5g0ZgZJX-@Hn=T z4;HPYxT>x$vF&%tIlxL_cp2x2&F-)J+g}Tal;&eEj(lmXh8Q%yY*+}Wp$>T_x|B8L->c1Ue%KK+EMc=Ay=E#|2~kes~-M3keL44fwVGC zcB3i8oUrq#&T#e%IWdqB3c5a8`C6_$T^11$;lY3h4ru&7Nws?ojL|fczMy7#Gh&g7 zS8-ytv#`-0ozpU9tgF==x^?Sw{%qUZ9hqY;*>tEmB~d0*DU^a$QC2!9O8@eO1v3YV zQpC2%!o-wF0KZ$GV@RobM!4xjiJ?OqpmJ+NL4Hs9Oogg5B9YqVR(byT5RwONgu)DG z3#RuirggDa`lc<%A;g_6wwjGph)uoW=~+wz>5^R-)8Z?$*{ZQ~aWFf^P19H?TTrUhXfLVRHQ>jN8}fUc>1!Cai{yn-t3bq?1GRhf+qEo6Q{1F479?92r~~DJDX0>gcbBC$>^`gv{k;37#5%B{{9;`VDso;@tI5lY zpy7>Tw>jnC-?Pfy8uKxIZ>dI>_kam-Tsu*&ZL8rsqiA?edowSgP~o21f*F5*AIQE# z=*Qwar`+^{=+Gp*Qpe+EkKPI(r$#ZUIhUSG>Fks-F)?wHZv)>Jt>7Kz&I=30_;Out zBf*oq+0r)?v1|s=NnirGG(+-d3xRb|A6A?!rh<-EXZov5aSq_?e2FUzJlMa4H06smPnN$u=0 zm6x5{VA}CaN|Ci(Yrdz>1ykLtm#!bM>LY-@_ipJ_T_@5r{&a#qx3|JGCe>w1hr@{H zHYI6Gh(@J^%qz%h8kd4@f$Hotr0 z>sE^QL0k?cUJzn+(`*?omF@CM^_#MrqKG;+X>bltVO89+-V`G2P@6kis#0!3R10tw2?T z8v9rHjNe#Ea2Ko|3XpRpZWNY|g?)GY1+}B(_}(5PqD*T_(@EU+*<#jseb^C+Q$DTn$-o#_rf+NAe(MpNE!RG z|I(~b00>H_o2R2R%+~ZMo1Yg2zwd!%l`dm5ZmMW+*YAkg%9ExFEd$YQIIG8N{(^(M zBoPlflS}={gO;+}-1MQ?$ELi4nGBi#JT-$*X<3>8b$vy!7f_A7l@^GP$w7>8n+z%# z-0h-D|98LXv{S*?S2qIpR9VJIE|U57BvlGFKu?%R&D}A?9_qOIyHvxH94?0+Ylwh~ z1x_9d%h&_FV?=QY8OPSQh0|z{w{pngorFFA|&GAb+9_ zB`lpOPTFCd5%(m29dSB@Cs&QaM&dV4`)w9+K{dF=*=DbPnc4QAe0HR^-EUGtdK+0g 
[... GIT binary patch data (base85-encoded PNG contents) omitted ...]

literal 0
HcmV?d00001

diff --git a/doc/source/dev/gitpod-imgs/gitpod-workspace.png b/doc/source/dev/gitpod-imgs/gitpod-workspace.png
new file mode 100644
index 0000000000000000000000000000000000000000..0a74dfa68e77b0375fc6b9af742eac8264decbac
GIT binary patch
literal 86700

[... GIT binary patch data for doc/source/dev/gitpod-imgs/gitpod-workspace.png omitted ...]
zTulHs4{WN$g$>!vb#cTH{qPQz=~Wj1F!o$X*ND=xYBV^CNeudgE%hm%^Fv+D zf0L#9U+`RMqI!Q9)C`SfeE0y006HSRBHzAy*SEtT!Ac#2x_g)Ef!E0bBw1m(wNR^X zx#wAG2gTE7veK{zyuNW%%r|P^L6v{Hwi=7o+7|-9$w@;J#EC(DFo~$C)n@L>uc}Ua zWQywl=-iM88$XLYGh&zr#A$*dZ4&g^sE{-8^!if^iQfr zKkZ1zc&r`_2&1gKmHC8+FG!=#s!+wCw6(QCu_c6TP?`%Tgjb-bvgJ(F)s3XwwL7|N z4%n3|GXCmszr}{-!T4<;YY`*Z*`3c{8y-sx4~In$|CRCZQ8_Bm9{!O=x0Q%fPDAnt zeehs8`~|O6WZSN8*8gZp|L~EH$<5?%VhI`T0jY;X$hRZC+uJ z9z8|=1WBnv1be8tNG0b=ll4cJ*g@cZ02{TymmYtRZp1Ox*0$HM5|-&-CmMbtT^2gw zaI3b{s5LbMqok}nJuz`dH~DOwn4>PJWL0SWq`3j&Q^vf)VQ%g$KMC6+t*i4#R@KD^?hy7$xj-nG=O&je-2w#oZFtQsv^NFVNw-eR{Txj z`T4{zaUkQjidbFN-Sp)yMk6*5HLbirw@Y}is+s;5=#oi~@`d1po$8eKytK%j8d--G z>|on_tkiJSt=a`^aRGtWW~2558%{DpAfEf1MTeZ zm)=`w;-Uy_R1H9S5(UV&2T@_BtKa*Y+KaiVb`G6e2*3%9X|IV2Zve?y;2 z4c%%WA<4pfrBtO7fNFc{^(!sP3V>sB-hAZ%e1oE_22a=%jpOl`AtDXgqW)wyM*pbN z>Awld?@W3l6pY~2c8G~V(W*H-Vf!$$Tx@w^MSETNV0ge|^x#Gi3i(LgkM$IB-V^(R zw#%_sdv(bdIM#l=6`kR7JZb;kKFzG1Ku=~@FVEgN%tdl-;vl%z#L-mlU77Z_OoF?c zhccIQ&V=*yt}Dy$^7pFvBv7bBmjnw8lRK5g21$($sSenqf=Yp zWMpKG__}4N91$u{evaU+jlqG$1jq?k^=9jS|M&m|gM;~I>f0ILkRTylVAvWHn59+v zt20e;asp&-xwOlQGBYhfq8*gE(>KCIx5q#&;WUWvKn5FlW^t$GH*sYk^xnJQ7OHB| zpEnB%$eSOx9aa;v0G*jVJT^9#MZ$GU2?mtg2gSAq?q9+#DT3^*b{*0Pl$b)>d9>mD zSWs7Z{l#9gHwo=Eor7%PM>$5NXx$6*xUr_ zs)SC<<1;A3sUim-UVXrVjP`KYi*9aiR^=ON!Jq<>FCqCls6b3$?-U;LINX01ivtyo zwmu6#3Uw-6i$TaVEb4Kv$96Oq=RT2<>7HQdl$}*$P^L8{_CNiJ^*_n&)PRnJ?eVIr zo1LRuXX8r^SgRX@NNUxlMnG>{sKXf9EcBD)$Q1%; zxD>wN>_qdkdL~Ho5jh6UuUCKO_Y6;5h{v?4fznl|Z1(6rmEEMoh@TFZwgF{@{X=je z%Ymr(7=4yd^YgZV)jST} zs$|UX83zUj2RC}67DQf|&tbL7s7%Y9%!W3 z#a+3_Ym{+6Q2bGNt6Zjbc?@NEgWc*KrB9fygvrJP#P*Mmx{Wq%d@FswZ#UGK(t%6} zbH}1@ASmDhec$=5oYGz3)nkB78^E@=#zPWK;PMw9)ZJ}siNRuGPz6svSr~*3YKGtK zmUY~!4~T|aXb#(MD;5XY=Mza1p&-JwgABozW)aNftCLkA`$&Y9DXVY zab6z55$G&{;ArGuSOvX?1$7inqssmL2YXKv6(*N)xMfl^I5?^zzAmKoA7o`^|Ki~9DIMV)QRbcp z2XrPMx=mG8zX4(Gy3EOwCjs_+X~9=X|G6A0s_FMY*<3??y?^@5;$kKUI1Oq3bIDkv z92AWkTR_<&zCIk_m=x5Pgw~&XFaat!fqEo=<^kv`sBqggMW?=dX9NoEwrv{2E`h4V ztk%Q5wRT#rhp$#9nuT_OqcSkX1`#72o#D6WSl($+rU>EjpKZZ?qnapP+NnHf z*4f#qXg$R8g}GXTgfsXM(8dH*I8H>UH?s*N2ooklK&X8=7MFC(cd9jZb8BVX))fT2 zpjxs`YE{9R5;XJx{U=i`<3GnorSH~whioo3PV~RC-3kXrK9L1D#+R`bv4#R zpM$pt4wpHP*I~xD4{a+F1TJ!yVp3K41qE+2BTcVqmAtxiA)SF34h->HEGkaa{)%$b zL<2Is4%@#6Qt5_l7sw*L%_9A7R@QU#rsvu_mG2 zoRh&H+KU{Qv{&LxuFwvphZ9J%CtYC4TqEb>Ib9fJr}cd5NG%VvCvBD8pRD zHzz?C)36uXwXRpudJ~7h1hvYXE_g=NCQjs+Zm+D*s^+t*G&LKmC{f?}Ys@Vp5Q>;s zNv^Kv^2uM=Tsn*+2e57TXpu*y=?s?F4@5MAlBR!PBdS_m0?-Iyp3r^O#Df8#aE}|% z$RBS^jAvIaVA&QAY3X{2Z;^T~b_?xA?+T$zcyoqnZU#!bG`6rszZ><7l7l7sFN@*{ zhcHcl=N#}%%iJY=d)xNw<0RYBJ&|~rggUkG?OIUd6~5i$2%Yi&h<9dXZRpg8p9BR2 zf`Wr-z`x+zv5etXw_zBCG!-2kP*&L2F8j1^ip6 z%&KeaK(QSKiv%bd+w03UTI&5g4lJL37+fS@7I%#{Q0~zf2Qm$|wt!w7#0i+GnVJ3k ztk*Er5*AET8EQb12IBn3ckkFeL198~u8}f1{4i=S6VNWm*+Y$G z-07IE9Kd|ehSmiD?gPr`p&l?xMoD+^@ul)jZ3i}v>46G2F*{JJb@#S*j(%g|uzPGh zc3dWaW^(JBvSh0b=o~_Tx?$l4Uvj1rP!;*Zsxlrn1kR@Q((D9aCDGQP0EM*w=kLZO z=VEkfdAS=XECi02K{BpCYh=g&5)pdGQsn{-MiY8()nS9UqJYzC?3aF9W0k`iIx zzGx;7Kpx?-)+#ia)m!Fl2b@3Dva`AxgX>7CJcZ2*Y=orko7>}U@nUPs%@gQUL6Pzl z8r7UmHWHO4L}zDbm}e*8mXbR&DXS~dfN(|_;bLB$_}wPP663?lD-ScVwA9WuM2iT6 zpEN{Rf(rSRZuI=!Fz+iOZ7qzTXwVk0C3+uf6@Na_fFEm`A7dfa#LC-^P_e|ts7HPR zWO@-(_S`TTVGAnf9>ScTNn*{p!=ks>K)omZ)$TS`aLb?$JEc^o)2V?#S)*0OB5apO zs>PYYuc_{pI}SM;fp#1SZJ?GXHvi~!b9g)H)bTjLJ^QuJ9d%YYTg0M(Yf%>RcxGtr zibep!~7GPxVHjx0qHk&B`x4xwY1_v zsrk`PpfKxa(&Lk$fGd3CZ-lW)PikM{LS57a#c^Q1Bjr-!JXgMJn*VtBJ{+Q-L zN$RlGqJCS#VEO!7L!@0>$zT)9rAsm|)PdfbgYt{ToyvhisEcx1j)V4~=PpKGSs4wR(n23pq z{aJk}SiYy`_<2CVQO8Zm_n@>H%u(K8hc*N-L9n%8Z-GP$nk3C@PlFn085tRV>}QY$ z1x#g+4YUTF>e`n2^A!UWPQ>CsNC&b#jQIO{>{ya9 
zoH=BAf61eA+k=XVic{^QFmTPJ{8)v^jQ80*%bgq-bbFC`_vNIJ=QwbPED|Y^%{|Ez}nwHb%EQhu{3wnbz~Oq zQqImk7-(u`rOppN;cna4FiNueXIETfP`7X2=8;0K%alDFONvWMd>@Xy$DjB0)}g+s zw_knC=Cg8tR=}7nOiXNwAD!n+*%HEnJjI|Cg@Br0hrX3n)}N(4jPdLFx!>C$VHoJV z&()!D;RQPN^XCp5#uz_iwC`*8Q!r#|Qj*lP=Vg6ROEH@S4*xp*8BqVVG&E)cJBdF6 zgpKJ!#JDULH~ie#5u)lVvPr3b62fYOH|ArCVY1X7k>3e=Y^yDhnStaO0NrhSF1 z1wIf4d~NUCW<8+bF8+E8E0g$@-t(YQJ;3JeQ8O{qSAV+}i_20&C_1 z*pDrw`5{$iaqmh*#Z2J%&go;vUh2+gMuR+5Wcj=SSesR!J_#;1l8e_i9bvnW-}7sI z-KzwnG^T)dgGT{v@;Dx6jy<05_@MsC#{2qIMCCzRD~qW5#%U|(H4QZK09^izRIeXH z^t)%#r|?*NjOYwhGqk0svlDiG3QZE$B{9Dd>|4;QsCU>Xgs5lG(_V||aVpm~0QHez z|IAP0_b-oXDmQWeqYjLI>Dlx+1-)KghwM@(2OBV3erzA~g%58qPoF+@-T9W7a0li* ztEkie{ri+KIa!~t_O&2$9$xW$wiVXy(MGq{-&0YL zmL?i(TO0p`nlo51IHM@jhUQ1mn?L%TTvbH z4YzraT<_`W5!0OoYK-e#`n$NkBfkfx2#OBE!os|~yp~+2#fFPUGIaLmG|&?%*vn)VhP zt`|5s;QAfsdsjeCCHC&p?P1+mT&csm=(|9EzZRKMSSa+==;Pb88_^NbNmW!%9Ikzjp1~l`Ep|dqsj$Wn<^Ec#dmt zd{8&Ji)k3Yt0v5zWC8~j8%=OiMp(&u$Hq{!Is9UKoEl|^0RMCy0TiR&4OiE8o&=F+gd*bU zH0ZSljU!|qL6u9!OOwZmDuda24+_kq1;3DzlWPMf0(@8cpxYPpu0$}ezXXkYJXJg~tZyvjDE>%Eiwq{JnW5 z;fQ=xT|%Y;%w%6DG6ps0Mi`Iznj9FxsouW=qD&C+-b=~Tkq@ff)O~&RL2}PblC6(b zt6#rY5UqnYr@*>wM=(2AU)0xlfueZ~6_!{X!BtW6qN0UB%T?{Xl{oxd6?+aQAhw@96bszrPUT*fn1S{K{!jnf&y0 zd8)b_3MX%LrCMQ$(5Z!bRa(8h3$u<0X>V!v=e?iZOAaviZN3sS3EtA!4D1wPxeVPl+o3GToXO!J$dH*c79du>nP6TKs zUKrzN|2^bT9Ic;iwRP~b&}^yYMIP)nWf%F`^Ya=DH`(8P;C(N~c;i{u?YW0O?#=|@EJ z=FZThm@+c)>u)4C^nlS*ppiOtyS=OXW|o6mTAlN!q$E*$ECBk2kLQVTSgXiO8@(lH zGgDFSTVY!iZ)0z1Z2WBWO8Cfw+`Rl1>Hf%htkV8Ip9dP;m6^dvCb#3+Fa5~jHg9SF zz|4auu_(Gnw~x&$@Wp^Kkcd8%V&Xw;DF zH0fafYbKQZH!tDCAgB>>%^;ixfe9e7;F2D_bqwwG$CZ9X0v~ncri|f`vpAx8c;5mj zJ*ct;T+jr3aYM9l`CTiPLrXrlTf=C;-+_07#(m5PxG)nF(|&le(3P8k9)-~rNlOUW z2{2F$4mbPlR^-^mv-oB_FStp`>a`WzYZ&kWI+EH`}-aPG)Lq z%Fhkp;}`ALR}fscgUscXD_8hmS0!c`{hXUSF$~3g9tW;BGAI(GqTYvSX=-L{^F+qS zM}&n@&jG4<8EE^e%5)_dhw80iHL#I4VZmA!?bA=-VXRc9h~p zyM?#iK(&qT;lX~N<#oOKKtUVuofS%K5ejKtKCC3KEABd9cll-hYBW+8ZpnW8xgbc< zY-lS@04?ICwb!Y;%qsW0LC3!?)oJcVB#7fj?~^hLFN5-^1s*Jj(PrhM9G6F=SZp$^ zbbf-24A)a((EY_dWDL?jrXHeINR{+^SE5n8RC@=jK=UfRr`&x5)bNo+Xp}$8r?kh~(0Id;I-{pEX6GXOES*K+(0!wk=LXMc)5m9~iph)<_6Q zNzEuch1S}9_I;2>gSt4m=r&*|?8ijicJiL6)2{yPQ+$r1q&}p`wR+cVVG2v^jE3g&c%B;id+$byS-jly#>nhft2+P1o2v#ygfju ziPMQ)E;ar7M#@;mnx+a)^H5Q68W^l$>@&Vb-OCl!=lb|sD4XgEr}`hm zi{wKiXYTOmRs?Eh5bC}Xq$V*3S`f9cN`4Y_!dD^`PU}>izB=A#zj3}I=aKreDnwfq z<6aZt$0`}3ndimY{hq49$dh(LV!uW-h+`M&gK}ExnRrfHAOA8V0mB?W8?4%;@3cp= zy$3q1)Q?~2d>r6$C&VV}Tu<_aHv}3f=cP~fD4gELrPrm9wx1b3aRAhke&YvQ!3{R5 z`d&=zxj6REr*H2WD~4iiv#L&!)7xB>jaBmEtEc~=J%Ez&vDDOq-ytu9pAB@o|BNPf z1r?QFkKF5FHNSzfxdf93sZV`4lZf8N!FQ$~3?3{uY|2cp%-_0%idS{Nl_tCWo^%3y zE7z0D?04T(-;t!ZMk+duWeIc&M-kn-YwV}r3=!nEtsk#gHkdk|1EHT*$*VH+R=6b( z6$cYRgqhg;-FES#JNdk>r_B1~yQtn_wC$$1ae?t8tyZ{AgrL^C35 zA5C5j=`@@%yMEU47!5huN?|rePSMDf$Mk*!^3-t&g(T!xN~O9&Qu$Qlls(=kR96V@ z{XKyXdhnXJ{pVrVk?Y|KmtZD302$pl9kzJO{l^g4!h}zuL$WOYV$#kBi3^~=F zXJ8yX=c8(SnjU0(WBiQD(%!>Y9ChJo;Ui2PAv|vf_;)yzkduC&XO9cMWbW6869pf? 
zNnx``=xkc(;7*t_$){Fs`UR29y{~AnlGlPvEluu32BXuh2&14Y4lWAf$wASx1=EG4 z-Ka06-cm$FbOj|NB!*^zw}Rp1yMC=4ycY~BE)QheSoIGf2l$*E2~ zh?+sekj3dDRyH(Xk%wxOu(C{X!5OZgMf#?3S+oY}rTux90E_w68twP|%uU@_5+pE= zi@v^N*MI23#ZPcyhhCm0sTmii(i=aE$*)4z>1}xz66*HaSri5cf93P9ey9gO(AyZk z7AbT%Dge$Ac$8?I%S_*zkx_eM*1p@%Ukiu@ix*UOUKXYyTwEhB%kxKHz0LlFa479c zczk8L%0!!+Sx~xKUa*b-J4V>Qg|z9`81oP9`K3v9UykS2w0s$fVlX8yuM;bF);QJ_ z!&5h;wCOIyx;rNao$N*@{)O+e)VfJ#@>W=GsVAf7&9)Fq$a|DOw3F#xEY#!})inrz zKb!h^X`nbQ z|NnvGcO`ot$;vo(*&{Q;p(7)!qGXeqEhLI#oQxciO=J}b$taP%vPmI(mYuTsJ&*1_ zpU?OD|Nhtif3Eww?kY9QB%@|270;y8y{_XT zj|C(q2UT`&7x+d7WAplO4fa1HuF};PCO%-wL`7>HIxVMeV?X@-Cu;KY>0d*Gqw}RQ zb01{p#vPsA%x?WCl36qS?FRlq_UA{t+G#5>hNre-0_rHsV}u+S^kp=|SDpku6m??R zkNesB9vyMrBE2HYfh+lC-!~H7J4;>Sa^`*<9#TnM=S3nj5ih_8ROHz%Og56ub_H11 z5l}Lv^tNhf)iizoYEOGb_m{3<;A8a3J7RT#o~{K+Td}53spE>mDO?-^S+KOkszbJ1 zrs5Im2%6NeFyizn`IA0-ERJsi575h~Ky{LxAmLH0>FoBXMt}}8icf^S;ZFYp`{#TS z4Xn$2fd_d@C_b$3=zYH+dh?5XL9`8*V%n!D2|9F#vC*t`<6o>-^%ha9$KSf&t|;5m z!Un!*$2R<+#v+6WNn$Ei$VJQ6e!WqsfeaPH7eU>9Oh9|fDe2x?3DYExtFQ?Z#c+7J zj~rAFQ;FLwfBksPsbtfY6c?X-)4;I8VNUzcjgs3YFk%b2OuQ#fBK7f(EfcNG5EiD5 zLQVH3(PRoLw`qPCIfxm&sPuU?D+?z~6`(uT*rV?Gizt|2L5^m;mEML*Zy z`U%7fub&uk#d-2t|8yKwSAU13s1urpOH1`+G zj`GCS?-^XCuhvdS;Qvl}9(C=Nu{t)IDYY+0lSG-zCyE@AIQl8&&sXCAkXJdD=yOm) z%}^$n7xLWo0<@l6+%bXPl>hUVHgdVe-n@>a6c)cr=1RvDf4n?m5j z(!`jqySYDc6hgGU;ORmBfBZUAUWc_c6Yet4evSAv}WJ>DeCt5>m=;zmDbty{1K2VY;)?>E1~HpdJv3o)OUm%VrX za?r68gwr!Ks*E<14cJ>Smx4-qfnRcX6*t+F^NTZ3dyP7kGUeA|9nZ+@z?ba0f2Q+T zs@{4kx8P&B@r93KsNA{6d(9#hor>R(_*cURZ_5*IB)=ykQ(N*B8!r4v_fAS{lp6D& z-NN#ngy4STi{<0Dj$cdkbF~4^lbs8|sFX7Mfk*z@S5l-!-KOz%_TIkdmmUMxP#*~} zXHlhcszuBXz8(WYss(wsX<{eY;kOP_HlGsSysf)Rtmp&9I72pOF{_qnp6{@#e-}PM&`%_YcZIMLHXgfNj%H$s z`^KTAP@z&vVUXhSyljL~Q6EW4 zR-c3Ix#Yql!*;anU}UJsi3e0@<@l3t+?)HxBod6(bk^i8ahRAKM`>TXrgr+c%EMf= z$rVgO)Lhy{HyEpwfba_V41N zc#k_jjUOB}m%8^=EcVhZb9QnYQiPpU5<;eIH#LsxFd^-&(~v6vgA4`*KW|$^wXTVY z_Ohl6vY#>I*mT{-(r`RG&5&ht6oobh3yDqEF-@%d?aAmTKn-x-X-G3kg8#A^^nSA@ zxYS-q*zx|)`B;>mw8c$}2zc>Dj-#%XJKox|cwF`Su~6kwa*ELI@_zH^*HkNdZe`*V z*!I9^u{QvtgJx|XCGHGC$j0(Fxz6Xc*lwtK+JAc|sm&#XQcfHg88Ox~v*4{m$G8HB zE=q;Nn;=w~_ebbiu6`Hm=?F$~N*3uNi28qE(c&(D&pM7;P}z(azK2#_&mis?IT;Ny zatc*fy!H7#G3{4bxBeF@q0h6ZcX}Wp?}kwev78eZlnk4p^7*`I@%zB5ml;|OM3ExE zV|h@EB}lKkCdq(sL<5Ia5CX_^o`=?UL)n-5mYDB`?Py>LXQ`{`#1b(>ZJNZ1|9u*H zg0x;K=AqxJMn)zxkzB+?6xs{Z+=Lh=Bs`s{m2Y<)2Zf@hc7NmcX|tQ~ZgT33mnC1! 
zqsA~IGb!3!9V@O;{g6uOD8TphQ9_+5b|aA~pS${2*hLz<^Z!|F+GJd2o${p8FhV==eejQ*}fPNQYl}hiLuBlZ!3FmPx*JV8y{$Msh~PYNlWzNG2eXNl$0XuH+q@x{YnopPY{0@CCTNg%P zv2Y;O1}@J(e+*+`f4|eK8^{~RrXDLE^lAuuu8gLtL38WFH8Vdz41CBLSMYv5hcMf` zumk5aB|iR=C7cxUi?LQM*rI(lBayX*3Hw6!qW}f}0Pv5DxOsl)?Kw4edX|yyIHAOJ06I%6StM-2nLpgu>OEIS?f6AS&CE5Ct+}zw0E=Y6=Z`aynxNymh zqkSfexBYn6R-pLE*Bbx=2svm?TnA2_q4}VAQ~yL!l@Ri;02x?11v3sE`4=Nd8c_>M z$Ew!b&Y?pgLitdL+HWcF9vT{VUhAe-+4W{;JwPu zbX>Irz)94K>rKqg{S5V+0jPg!c?|xU5D(05X;69&Mm4C2EJyg(yXh)z0`2rEuv!@b zFbeg$=D;@-ffc3=erPlv8O>=Yi^C&5b{F3Ogno9t!a(fq&gROME-1(|&!T)cm(L6V zXA>&C3Yd+xhTvh(|6M~IRK{t?XqqD;6fWgamDqGJSzMqEk|utQPLq3cx65SjWG3eP zIGJHhp)XPtD)X9?sJh*>Ji^M0ir+cT@Y0?SH2UBxxppS@?ipDRJ{+gHVUo5L>=@jh64`eLJ!|D&S?4okQwiV=wf#QvQ;8z_+gTg7vzG0IH8YzTrvPKW}8lwq^% zTY+j1!m!@^D26LdZN=_7bCOs@ZF~1}%0(!+??_(b-&1NK*;at+RBJ~^c5baUrk4wq zq@zi|5ol7J2H_RL6Df-eZhty9G&ynS1mVxGEZJHxf(C#*x?7Hu*6L0(YFxDVU5BKoAwuF4w#dL$l7W6R72>B%ZD_F9yf9PkOnT3{H_@?q z;mdwD$ASaRq~JCtN9mQYf)+`grdUN$Q8d-Jt=KtQ12T?Q8UJlYtJnh@t-8F@sC4zG*+w%wFv>gW`Bs0kV#=(um-PjjTG zdLP!My@6Yo_$m5nbZ32bj;ZCTV5Kh_dBhs56uKe|=@Ra@pYi{@Km^Qg!<(!=>)Utk z{a%{my^+=YF=N32_s^-88)c5_g2h~x#5=YC&+!|ozJE{Q@$;q9<%+8(o{u5n-wrl=gJ)hqllP1 zj+01oRfn8ctulOa?=-82$Qfx0uMky;S#(FUA?PB17HaxbYqsKsY%%XhZ8!f>nFh@74$Nqbn-35_wvCbQ2n{(Ihr=cQ?Od@hZ~ z6G@i_nNNIe|M>8P-9P2R8&BT1-xQ}UuDj`?E^kRP^&(Y`*9{|gD$TN6ou0}PpD>+I z4@U_(;4N3+H9S52xoojwNX;If2x^*HZ_ z;K)^)KSB`8P~T|gWJ|sE$OnrmB*$NbEX{En=4Yx2bttsu+Z*9UvYn+Ly+~|tH%1xgXwtd;kM^wN~5H@tZV!v!s;o+=P@Jn8$=f-cvahl z=22*8nb!z)4{nbbk0+UkiKGSRxPOAjpc9}XLF!~;@m{*D`R@~``^gbYKo#os8F6Q+ zq7gYHl{#RLcf+!{(y(V4H>Ph=9mbv_j9}2(O=H{7+=yJ~J4sDn9ArjzAEQjmTUY2= z`l5|hSv5wQrmf1f8!Hm*IZ696ETWs;BbYEc!`*~exmq(%Mvs}ED_ zYCwi_($&k`q*0bpcA{)6fL?>1{M#}_#^m*RE?#c(9Hi{P6&zhLBI6o*o~lx$>%t}r zcbiXt^axhOx<601pEN^A*6HP=?yfYiQYohq)CP}?7`z)JH^EU_%o@SPiV;OQilzCF zh7+=41}Z`TWu>^*fY7BqG70chG7*fX#fM#w&Y!QH+8w^kOf4Ami2B&~n39v4#x|#t zFS-zwA-(7~FoZ>N+A%tIX^p0dm`mao_UZ#aW^?e}0lBt@>^(T75 z``-02=XfbC-a1{QbVsJrmhgR|Y$R&jQBnf34(!3RV_88>yEXmnqHpI6C3OYE_(&WG zHE1o=gox9!f1^JU=MPlRHfU>CnlWKhq+J6maeo7P59H~rUoyOIU7usvoX)*3ZGPO| zE`6|`f_NbvDbOOou@t;P-)o3g=Ah@CuKLgA6w>jQOq;19UKH;LbDxZyMJe;^w}nTr z$i#d)9sHknc_8EQmhj&^_CHP=pI#otcK8!<^rXR+9VPN`qaOd0`GW5?yn0pF+}!K8 zgOSnC+$+g#NiO%h4OL$H@Es z`_V!O-V*obD8{~A4h4EZV_R#q0?>3mVj@=C#NwZNhLJ_M(*K0MdU|@ATrH(9^xH0L zJb7#dzyB253h_2{Q4OZ|}={QizqCJ?va=+cWtxztGq8xJ!rR;NJWtMaU(t=cE!hDzyYBDmV?ntj3jG@}R^zRL<=o z?6mV)L)@F$LZSfgpZBS}0_@N`H?pq3Hi1E)ELVvCAj5<>To}w(y1y?_yXxlrCtcO% zQdB(5NxoC}!1(@DfrW#@Uy5hDs-Zm76cxoc8!59UAyFfi)ChCkF^6zdzKbOH>xcA%d_nUN8zme4syEWCmCzIIM zwuaWT2gmMGz4l+UWaGTSrJ{}b;Q95kO>JO71@G=#>x8JH6;0cIY3qyf30SDZkjv0u zT9&Bx{-LVtT9Ui#BqUV1o00A^p{q*6skZTBP#6=svH0}~A&o2y zU)5quffO0yRbvj$a;M>@e@NRXA&xd8H>~n@1yT?gzAsiLPf!f-hdG5rnJciEYnPvGtJE@nIpy~mS+RH28czjkmDnX)!WQ~+sg{5kpyzSiGZ+qk zF*#`=i~UUEqf)A}tO?E1VvY^y#BTRoVx!n+pk_)DbAWM>gs8XL4^EqIj)NGUXlQXee944O zIYd0_;JOj1XmsI2;8Vbz(citrH*}c`?Sh1vjErMYO0&L`PY}k4{eJN%UsHBey~;#g zi}QQiWFlgMUk10?PmFppB4^lk;sc_7HS_uprloxxbidZLAF$qfrMJHPj?FC(Ua|W_ z{lR4P4Y|ypUtej&iU_3DdC;qmVYvp8W4>Ft1tMk*wnLkH^QW_&V=wVPc*CB^T^iJb z7S$%WUF`T(OjhMg^t5&sbj?oM?%k=2cE>M9h|GLai(*l) z-R&Rb__+x=X#O`v&+q*JwaTuhB&o?J0lJ9P&W0cSXU|Ti<+jSwRFavQBklb!IdrX0-9apN`^e;<3WD&5ShkrvbL@mPo~&T3-LnfUmARl(gH^hxZ=E ze|a4*JmB}R3$^@d(MWtsUtvPe6-geRD{?`lQvB^(^jT#BzM!^FG1tVaF-Cba54Xau z5?{zT7Q6THnuvlluE>GhX_4@U{I7m}E5U&oR2~=Y5Twdk5yY?8)$ru-sh?B>G3Q;z z9*~-oCrESRuj$1S3Je==`D`&lomsvGYLidMD=^v^c2hr!#&2iOo$GT&E-JUkjv#53 z>iIYCrM~3n=ePS9Xqd~3;N#=Vb3X|oKnt^Tm6Avu$jBUY9qx6FZ8b?zvkm%hYe@c1 z-Ts~*Ubxdu1{lI;dpH_^BRsaJF*PA)#dKIJCl^ujl*D~HITp(hJ+HjVj%>VzQN^czvNUb2 
zQ2Dvw7w-=swXmLX8^W7P^ykaZt%-bprn{CZf^a0J^6&hC0_b8~v|RlwVh0$jda7z@ zUmJLmAS-JgHTmPk1ZuvktqqxxU^J1aGe1Z9NYt1i{`57CF>G|D%16nyD*0!b;}@i^ zQ+Od!^gp|TtDc<=h-Mm*U7wD5Hbp4VQX?|aYJBCv?)1;s>=0Vi-dPI=SkwmE<*1v_P7udFBWU$WC6>xx`{lwdS;z~C z&ob&;^(~5C0!7-L$~fhXRoh49m^>>G3MHQ=5USea4k1rCpYYIzVo|qRyA5t*wha%C40qTd!AJOx?@)&vR zf)3Tz)Ct130DYI&*DN5cTM74>pe#G4%~endR6Qmid}&NI87RX;aTM;zhUDpM^{G~= z=kSHBJY-a~zv&s{pNUrjxE;I-R26>zI8>S21aflC>CTJVo!`If;7W>4JAdBu*=xKgT{g4BW> z3iQ-4W>26m@cGj1?mjc#z!F9yibJxPSM111lPUw+T91*{eIkX!^&*$5!z(pPT?5xm zJFfhmfzD5^so%r2fZg(dDPexGnM^sFOkSHJ_fN5{n1`zi?-vKMQNf?m_M4fE#C?1m z5Vu7W_%*G3)u8cqRQBYJ2}IOt;RCW|-j7^byfIa2XeZt}3VVxjbtht0j;vtOSd@A* zwp)dglH((k;m9uZBQo}gQt56wJwRlbu_cOaF|`%w$))>wWf2~>G(=dP{j<>=cc8+A zU8q<%tr-4|D7mxS9gy$qKiyRboA%TFCUt!e_QwuWYkv7rf)_jv*lS`|g1%aMhnr=w z)cdt}#17lB1sz&+NPOEvX5de9F|8*N(BYZT&5D1N zHKHZ6dfBzmy5`6lWhnn$FEFH$qtWY8kJWzDboSJIOA?7*kQx2U@)CbSe6FNT@P4f? z{eZ4-?P1Mcc*8B5!u{U((Yp})-`~9%@J>c?xn=^U=SCP6Y__gFA>ln9OIGVjy+bp2 z^H%cv*($L8W`=npb#`k17Lh%WR?kufu?;u7bPy7;haZ*t->#6$2splUgOuG?{X+`z zpng5pB{$S?g8SJ!qOLCG!<@+EirMZfdQ^RVcaa3HmU;9^todLv(T_V2q4T*aQcDs$jrX)?Jm_*D4A&dv`8^g=;|EWTkPM80D=IwF&O zvn=|lktLbQ%g9w)r;uP`9M2>^2nJJ}m1jIe#x6oI;WkPZj`#@Q`!PfgyZi0@7hpSS z%ni*~zpVr})DoOZ9N@!G;n}Ap#UWQ%GxuIn9zB^OK6wA9P?O)Rcsd7dj1gm7Dt1D! zC5eMHtMG%j5B69_2s-57y@e7XCJui|So?osQ{|;QnQUxO9rGjd+fqE(&u^z6q(+)1 zZZ=aL)Q?>cjWHg0byKQI{2W5^TGx9eVdgcP+cj$HG%sJfb&tn(dJlhtf_-^DXFWcdlQ0L0+CN_n2+_3Y9-{Gbog9$j+lgIW# z=q?>Bh8Q+Tq@s7M@U`xv3d%<-uH(Oq?;f?sU2?TZHJS3W%gxqL;q&FJk7nWg)Qz*| zHTz4>&Q)too2iex0uFw5$^Rq=+7((Q5%C|Fy^a#s>x6$o^vUN`EYj2%<@*`rY?&SO(A@;Y5H%nxfcYJ@7W^$&}GJa7kTg-(wVJ*T9kjd?sGNK2mD;) zIyb|$+2cyV)d)12C_@vSLL0N14!LtLu{^X;prHJ60#!ktvr6wMNEez9tcK88ey9r7 z&|sKRLH})!$7dt3mTKK>x{?y?Q52QX!T9j*wgFCdozKX}BVwDGKd{u9JjzP8XzGCq;2 zE3S6b5X6_~)Pv6TVLg5*GHl}JQPuk`U2Z?63Od{8HXf#z%W5sE>4km!XwgkP8|V^& z!k1-vq-lENmF3Yzr%I2#eaKj=Rir!u>U_>(A>))0YPOgIMsXVm==d6C0Y{)R5G~&* zQ07jiZ3WN>44LY6_M<)}#J>2J&?@eE&B3=P#R#L650lmD2j6GuslI)loVXXT-d^G~ z(-VAAC7KM8PV4Tf#2c9WjAEY3egx#ISP1UuP{$u*p!T(J4piYdkEzW-0CjzGCL$vg z_lnl+t%ztXMEWHA|E<;X z^@_TTUt}s?M>k_J(ln3mW3(+~m92=dG7^(RNXRrOw7Du`*n{dun6xo|i2#m9d0vDm zcOAp;pwsj}1;JmL9lys>Nt3R`M*r|T(40@@kk7gi<`^C$X5B_-AJ%YEj_=LoN_|a~ z6R+F)bZfWz!vQi`8YVjK3=iU)$1{}q9$e-&kWJKRz52au83c)9Ku}T}V=ke?MR#t3 z$#&2MIXxxf4P}tU3k@O`D%9r~=$Q(F)Xe4IcuO92weVO5?JN_``~uq~<>_4KJCVnK z^&npmOG=o>d49ZO2L`{QU$bP-Z(+#HGX5^|D83+r?p!+AA zQCcdoNLOEgFHsQh<5N*5Vr~P|`@aUpQAOw1-V=oeUL~R7%cCwU%i#3@BJE3m>?7Wk zOFUR=jylD1p_eXd((>vApPc`q$AwKjrSPgR0Xk=uxol4ozeG9)ko5~iwi7rGmR zjCD|r>R+A{tlQ;L=DWN;eL6>^SdI7QVTcvW-?G*`im7FRN2Pxr^UCYRS>@@IB>V>Q zS0(;hEJ)9{<#0OvL8v$YQ0h^mIR0J`q)XiFwEz8>zfJ5v8xrpaj$WCFzs+cHt4Foe zqZ$VOf#MH-d<{gqG(MZ(?m~^G3M!8m>^_nf)Z2HQ__ri`^f6G#e3_S*cbV$i#qgpV zKQDrV#&3RnY(lKddiC@Pq?X|;A5J)fJy^%q;bNPEr%N|~s~(j_E3=?dVFu&jaJ~hE zg#%#be!dm2z{P)o$T?}U0QhrG(b09cuz=D8=r!2WYhyhB^I@7lKDd26{M4CN!hbm# zh)IONkG#u+r1@!b5T6sI=#u7jn7joJ@U`2Ai{FCEt-ew=uLXEI4y)nIc<|rv+usUC z^O?$LKj-@g-PryhCH|tn$u2ErMcYj;qaVT=e3u!RFrn}lDhlVkx7Y$iA7O6yg=Z~Z zT8H_3#99FyNr&+Cq=qdh&0TwK(qVcF6c|j4%xVvl@ns@~BltJK=}QfMDTlJnS|F3@ zmu?TffY#lo6qzRzi!;C&5T|Q&m;`eTZ~J0)25B@%?lK%||3Tmze8)2m$a>m;;nhxg zaQ64|xAq*6^@0v~;pdZ3XC(BIXhu0+cT)oxjHTR^gBg;-Ku#QBQNm}rPKUgKno1*Q zlfz%Qdkl7C&s&zq&1#eJcX%%h6qx#N&wwBz7s3wv2Ss~!=C!^-rkK$bobsMV{aJLD zo5%8qO!bXQw`^XoPM};$k@Y#7a`q0u6vVw(&HOvzDJ0ZE0HJ98%)lJU;VuW1KBJ$+ z?(cYTv-!1XvyEAT_t-UBsdGS8JX zfy-uwUXbenZC>U5xK)PVpyOJC7itWGER5^V_=sK}X3o$2pU>=P`TSo?ulWfn)n|l1 zhRU_aCea!BC*f?QEeRLyl`VjwJ*KZ&$6cfAV55|^jBwQWoGE+0cNMdK$s48(9&$oMW(fL{8^<>nZzYfKR0 zLg#CH2!`*MPI~~k2wBW{?9^Gh3a_Obt2CEnp_;s3rB3+XEfzoiBJ$-rF?wT{dW}KY 
z7kaOusE!ipMgfhuLhf|GbrRY7BA%KvAmVW6(Eyyo8P44LlmJD9`kzsP*u@Aa^KrK?sP3VKXvU??=gA?qlB=L$Qur~9|f9jN!|Q3e)*1sIx++ojQxjx=C}*>n%XIz z%v2+5+R(~|2L_8L&lOo3x@%^6=<1G6ZsqQb_$J^Zr0bHcUF44qXt>4w#}yCv-u!dL zZ{WevL{AKs3nAkPoJ?OO$t)%GG7VTIMxgq`0y|h%izdw+_Vgzfe+x+)JPGT%39CY@J!RH=K`7C@oWTkch->Qw>h(4MlnfV zwj;kHLSugE3j~*ST6N2VuhXrbSOEz+{$qa$$A`LLIpVm*AGs@G>QFinMORCs;GBs8+Onf6sNaYj{GP}Qg=Z^gDT}htf@h#VXCCT5HMNv*-)}x(; zBjs5>danD((cKLa!g%d2?%kt{HD;g#vYf=AN>1>~uBavOK`RlLB(++DK^d5wGBPi6l)>f1$Bxvrv zoC`C|T=zV$de*f~?eeV$RL9h<6NoN0l#eoT^1Sd{(e#j~>3zUyeVLs~PbVlh_O%P+ zb!)P<2+@)-n1&RwP4(+PR>O}# zoiuwiD=c^YwK0)M_Ooeq2Vyxv($UQp_ii znPwUB!QwN_+HNY|K?>d?)cgL*pM2tXh2l-W1`E%n$dzHUf=)^7eWAY{2Hd%JLM2%C zCq!!|BR+FzujTf(vgi<;E_uS|S_2xw@fv|vgeHQ5e{!jjr|$F1i0xMPtZ_{LN+28m z*2<8Gn(<`>k!Ijbl6(;lULvBG#e}-SgZAJNUO@}8n2PAoDKqVJPb?DV>J}eYEc2w2 z*&K`lB{Qx-O5C{iNbgF9!^)ORlU@|5J!wWycr$aoD#7Yzw1GN;qz@x0CXix&cSEEc zPkB!JBg`jS@Qo3SeAMs7yY^j}XA*d7-lLOry?yUPF}5iDGLI2v)9vIQ4^WhTdMCo{ z9V0XxUZfTuS4@EG>Jpl$DdXofPg-rJrmrYzzmb5~rB< zSI^^8Jf-ZAagC82PQd+&pV7X7fPvBhfDzU;RJdKedMXHd<8w1J!Pup5?>C{>X5Iv| zwi8hDm;i>sF#N^B?M}Chx!EBFyJTmtA=K-^nS&hnAJX4Hx5eYMb;wCTqd>S5!ZVDT zr-9bHF(kwj?7#5@G*FvKXMwDC=$)qnC{z-;Zm-XP(|29nU8ykzQqNvg0%3{Af?kw9 z8))Ci7GfQT%OVkz_DLZ6vaZ%?rKF{yK}HlWU{sgLGC^wT9P#>Kp^vu-+&a^B8|ch7 zJ^0zm2StunWGYltgm0EbOc_7jbmMP=X3iX`srwP}FWd<%R_}iCWA^pe1Hs3hI3f?X zniZLtnD{s&Wa`!+jKQ5{80_z-Rsv{rXDtK^u60)w_YF)im|b=u_8IXIx4?c z#;$zB%YP4}Vi>ut$Q0UzxZ+OX_1z(jm}Ixc?A2Zex+qCu&VyW0AOW*FO{>k5hn)rZbg0M`B-T%4J^}=n0)^uC-dwpBv%4oG^1T^IE z%ydzrpeiX(#uEWW1|jWcX@I4k00xN|R*MBQqYT^6k0uJ z{VMg$hrV8vVSZf`izY3$Ya~(&!_TnxhJlq;dltJYF*Yq7Z{ec>$O5Zm^!Iw-scj$? z^gFLDZ{9;OSOCXXU)2$*1O^CB74XosgqD93K08WIk>yEaVO-g_DWX5$iVo-+Kd3RWT+&<^co$8W^Kexl0d!P6 zFkFVK{4oT zo+za$26QG|uG<3mUqsK?*p>Bh;tE9P+|HCxJBg;FdJ-D0n~@K1bm7~96ObTLK5TQc zQ|?lUyXm#EV2Ox`okSs>!EYN^cMQULX`5lc9ZMJC$=${Cb)Lgf}NA` zrf46?>VX0U$S0k7v3J)-$%W#KmGHJqfkTCJ!lBNM!RCb@B+?wgc*}-me7mSNCg)QX zo`sz7|_+&33+ zxNGX^`D>(-%}dRaS(5NcqscEopzrl15wCR2>$RTJJ2H|-iWhirN{DU%+o@(SFk4PL zdq5MTDUd3+se-8V+ZGFIwP2RKvoL5HkXp!7=axW@*ASXEh0|wUsd< z!(s8;`}jw-E8juqBSqiTG<|lz+Z4(TpXDD#E3sjlErTZtx84<7uf{AXS9}0yuXv6e zo?l*_F#^k;@)M?|rAC=gVuD8Wz;sUpp=)osru@^e*?FPn-fqboBI1uDz(RGwO9_OeZbVF1IJRJQ+e6eyC#r z4BE`orhVeJZ*G(w7VnaXWw4bTnr7-z-FLT^x0K(EfP=;hlHfOaGI5Re&-Q?IX33v& z-mSD$O`4z0mPmraRL>P-jo*7klCZbB@s6rAFJ!wFE> zc)cGRY-O&Ia;~<<^)SlndGr@L3j~K|Q5N^Y$%ZJ{SxoeuyjL_Q{F}7r(Vuh>6U=j4 zK(w&+uIj#TQdrky9NukbP3X4oGgG`~8mc$`BQSOxlp*Lt3NDh$xK0w>%xs#1sKDcx z7rFVRs}`4fQPP5Um@ZeEiMFsU^U^Atb+iM71GJ<@YXZ7hxs7={cFoPnm;-=s;-Tp; zteCOiY!ceZNwArpy`axL11}T=<|2*9&HPg9XpOO;>~+T8rZK*)g@=N*b&DDw*zkdg z>1{;D>D31ewEuv{2$K{-z07C^sLLcVR~mVa`=iZn>eCMvHS!yAy~RE_TkDqWZB%&W zCHklTmS}_-%@~>p>v>}FuucEELLX{Ntx1w5?XtKoV&Y)xr!B)Oe>bXPTw;+Iq$cpU z4`^+zB($}wC7L-PG7+?}=9mcr&f|sNT8w8mIq$2&1=A^qX>ru)xkVI>y1c z%@Q8%`H(|JW$FpbXU?|v6oKi==kJWPw8M}ivlK`jNmp34(njHF`+%u7zMyDESR733 z%s9|I81}X6oo-5LdN(#*QZRL^)V2rg5M$7X2ADDb^7ZTXcKI+0zmbc1g5Qb&;Xh}e zfh9E-s9=GIqTK>P=5NkSSg7MH)4-!RSlu{%@$HpB!JCZkoricQS+% zrgu=(QSQocs%54C0=g#|5?}CmM!GR8CnvZZ$sE)r@htt*~)H*kNgLJ{8D4x7WE(w z2|Q&SMy>z+$vcd4fd&c$)j#Pz033zz%h3Yh!|0Y$DIAsadsBldMke$RB-dlVLSe(yDM03UFTr_?pxJ@E8uht?F>agxcP2e6}1Fo^Y1mPZhK7#j$ePgJd zdxV>ZznC0uUw-{LSYp+R@%P0ugX~`n!!L~#H#grMyWf01PaR?Yc(Xt^0wj=H^(_xX z>>DVQfKu7p5@27J-9}$BB~27l|J@{ik`>KmrK;0+M`dDCcS}kHv&^mfE0z>V$;iGs zrielxBF^!_qlpP~R!u~2eXA>Vy0tq%5ZV7^HK6*#2(bbO0=6wa{O{_pK1=jOGNGA9 zOvl8eQ-aziV3lC^f8K0m?0?#1}n##eSZ{)9hA+0~yA_A5$^_Ww1}I?|AZ55wE{|0Xw9-d0xQkI5-0f*Dr< zHVQ=E^7nUQ*MNu;)b`FC_#MSDf3klx~zhL*0#1A6)xsJKHF4^dmp_4 zUj<;zwG?1Zs%r*UN-u0;Y;63A=1$xbkQ$IF!x~rnZG-#9AAQZjZ3sfNlFy3*pRam~ 
zZXhH&__QZTQ``;&sH8qhOG({5$b+fV>|MqGq=OcCo=`i-9hq$g78ddapS>;7;K}X` z6|3+BalFR68b?1x6G*^rPcT zRy_o2WL|`N!b94>3yAg|v3&a>Z`$koB~{OX)5*m8;*$x9?CNYBH%0E)+aqs6+Vm)| zwtY)D{}yB^pAHz_tMNpE5Gq>@f!v$r(GST|ft;4fXg5WH(FyAtkK6I8Nk?bwJdWn~ z76YsH{>2b2%r60Q!!NYCkF0<%1oFcqCZ6Pm*HA|R(Fk|y?d@%W0h0f2lo(2TRekrq zFH~ur%sgIzl@AOI^hmeZjT-KMT3FS@K+*IZWQ zMDU7)=Njc;c?e#D{?94fzQ*;x+qjy;QD4tUm+|kDhcd{2Z}~_j_20w!|9|P@|LsJ! zT<=FQvA!Lt=~;c5aRIO6a&)zRa$3`MQ86C^DsOlcitSYb7Z_Bv2Zx85Z9YQe1I;K2 zF)>=3Gy`$wKUN~f4$*3K_X&@l(FjBD=WpLW05A>cv7;Y?91?JAcjzEq~mnN`L(LaiS>&dc2lRfW5ZOR%C$iLl1Oi%X&{&)WT zd2p{uQ!JIPQ-nm88GQWw9I52wo`A`7jXF{} z`eVCWPD>0_KgZIU=5-d~<8Z+j9Kb_>)O1nai~nFSLV@lJ3`?^ zE@c7|%T)@uRLIDSBoL{^B&&Mob2enc;mErdOxUC5(3e!D;d?((g+8n=6qPYl_9Z8BZRAo%S!YU zx2B- zhpsLpDbYOP_MQ0Oxe70zCnd+}M*HLo`K4dxHqBjfK}mB??cI-}&@@nZsykdu%@e2- z=3^Jl(N|LNCV|+Eqd+amw~@Nhe&VVft-fV*q7BPjihI=af>^9T@Gn%R2#)VjCf_6E z&)8W?#m&mD3zd1NDjV;VrV5G8J{Q5oImASI3(M_QKk@o7k)1ncl)6rR;#c~KONXC% zZ@lT;8z*{dks8-0_sxZ?a&NsooBvTA3O+8AkCb{sMR0mV?&qXJUyEQF-@`;3srxsa zgn%x;Pv6nqP+w7J4ub{Kl$`A)jfW9#Cnokep7E63rQ@Mh)umI_wQ8fL#u@58GQ-*V zggd@l$mv^3K1F-pC@wZ}@8Ts^A1f)oTZ!gKb7Z(t7gSlbq`qw&?5=2B(oI0aU(>1f zIK2MQmG%eP zj_hOuz15k|fgh28!0UM_UXQ}xlctp+KPMU9qGhP|sG+t}JQv8z-SHqQFUFn4+lu$$AKriY0A1<5wdMfe3;t@2vT3TK-bg26 z^&Zy{{*cuMQ#KiM-vSfl?myKryGB-)lQc~iB|RCYxTh<_>AHed zikzr@etp?3fRXLqqUZn-;p#=XU%%`I&<+5q^%0w^gso`qH=B6-PQ^sXA^w%V^CI2; zgrkAN{k6u^gGk3q?X*4bt|zp7JnXymx|t_;^lDX@cKHmF{eW#>T$4-FO8p!uVr}}@ zr?-iv5^pNS@7JdL%rY*(LsK%ByDGlq*fcvg9e3Tte5zRdJx%Zn%?dHGONlM2r*B!2 z<(^fy>C9iw2xT3zh6i#+#=5$MLn$#S|E|D7>%RVCuM@L#p^(rS`YA)}H4~TGlTJ~e zClV(2i+(M@+xn#W2kY1Rp_+n>4^Xn+jj}e;GuYNx}983yDE=$elNFa2x6T&c>VMyjLf;hs?PUm=gufg_Sz473bzrqe!Dw%!A0d(%KhJ8 ziSCQcUd3I!U=w++o~qx)=`B@3f5?v$L6=@<#X0i0ZB2wyk&i$0QcDbTzLf54F_X!c zbQn_XsdoO0Sx`xd*BA59NU2e2O{=sOHi;COQ#>V(DCs&|n*{U;nw49lw`b|XJ&-@Z zAfS3Q6VOt1wsU92&OKRcdL-2T6J)cO-#N;-PrYF>FBCWJ`2JW7r~Bmfj}n>UgtW7_ zW$6*F#C=->Vm}Pt=*yUv+0G?2-EBCAct_wDYpPj8T_LdFm#tMuG?jo}SjbHi+7K7? zS!fV=bYDjlSFOcO*4|z1iw~+|&-8 zs3VWqoqwpx_3yKuu>(s(23&wBeD5_g7RF0JDjy1W_)_eJ8;VP*KmX!^~b z4Cvq=rCpM~fEaTa-m;=}6Ns6IheL{@+BWun8Qa)D&7Nx)nz-%O_^g4H>&1HXF_-^` zwX=?j@@xA&B`q+FARPk?AxL*9Foa5@gi5EEyVl(e*TgK#$f z;(6Y))>-dAXO?TZ77Tmtd+xojeeEkg-?^r~#(j7G2qe!=q3&I>ueOv+5t+o+lg2O^ zir#ry=6F44U1b(!w%~^QB_I~? zGCTFndcJMcyS779qG`3Ze$kbbx8m)hhsVJlHwdEzsU$|9Et1{Fo&>gl`E@Ev3HCo_H)mw1fJ2fTBv z3p&DgwfP~gZSw+cBDcyKvpL~1XC{5YBP!y`yy2=+^u{y#nK=$x2Tyq*w9a=W%+{yP z`f=b3qa#mXPy*Vk=G>O66lp9=0jN>q_cMmO!FzX`A12~%W1lP*w-8yuWu|Pu{sOI$ zzdnECB1e=FB`C_#g%f3%!))nW7s)8kN)EajZyxTgWOY9tw||XuGt5_-Pr|jPvM@H7 z!;|f=842e1tI~nWEkm$XURc1=ysl1>*v)dhC}8v74`1zZUuhJ=yq+7{HY7-WPX9i8 znmU_LUqvi+@*cmDh4%aDIe3h~Y8Q8skR-YTF!F+2Tlt7(ee7JVHnTHai8*9h%?Ji| z=Gq6FN9ZqBAW{sQCFfgTi}sXlE7ufNB@bhCcZbkuuvrQ0!;LN0E#|=PZK3h~LsD$F$C+4^BBAK;h0W&z2f1B!YXvn7EcA)<>8vIEVgtD? z*3bAMFXs6rOd!xoFB&L{M>m(y0#w4G#bN$Fc03KPYToxNXbUyJ!+xt-4Ts#{m zA=tuE!2zR#1aD@Z{a4coLBkF~8{)!H_rc3h;N}dV0Lu#N0tUbSZg2@sC#{O~46F*? 
zA`{-woLKS}l~BR=HnZ#`Ui~gBx_5rrd!}{EOcFJuL=ap|v61@r2_CBUc+0)Q38$y* zxnhmSPp4GFh(dN~{wP~lfCfK=pG< zn9sPzQrf3^A;--;m{i?z#tFzOT>X4hg)jB`=Vl)M1jc*uk22*`BcBGpJz)|qug8eE zE6L+pqrDVZ?@@$}y<;zK1S8N>zXflmCgQcLdOlzgS)~pyoiI;!qjJYypiMuYUa>BK zr<5&|VRah9?$-uY@Mcru;peUBR{|e-APqDW3aV7!h!|Mj3ElY-8sGCtbZ3UK zA}2p~tL;-h-=rryMV{cOfl_O3#VeLiPf8|BT}a8Q^MnL(cMk~3L~DBSKMX#h)n|ea zJG%zbmbf2!r}Cm4+;`t-9P;VSsF5xDR)ZBAiy0g(1`lTn+X88I#h8@GZIku^^;#O4$^f2tSM*caZSHFBu?^b#j9Qtnm-MI_p4ru7jBk0n)|=>eTl^Ik zf4xu=+Hg4jriAL3R3K%$K>I*Y&{q4dYtwGEFB_+h6`e-%hEl$1^N`NL#=$ts8 zd5e_rJnrEqcv!$x=3(_Tv~KB>j*=-z5FRJP*1tsPqsq}SNhU8d5}a7RhR=W8l(?{Bb5(N?%ktXoYd z`wu4Aeq_J0OiZ%XZ3vmht@QHZgS>+#7d+&wTBFZQ((6u20lQI3dC$#+PU-<~9{yu| zpM>H^|2XOpi}2>|q3G!3{tk~MF8OPh{@%$YF*@X`e*g3K=h9;3S`ktHxj+dAjCd38 zK^yPKbb~40%$p}_6|PhS-0@WlA$KCg*4Z3jPm71*)1Eat$&z0=7JXIgt%s8s zid3R=KA2Tb$0gqubSxP$U+kgGt8cH58u(i`{SrzpLd>yD$8=Y za6q`&b$@^MbI!_V2b;#yhQTE{gX6M(6uf>BAZ3`i>nIUnU~qTteOyzS za=hy29Ro55y1V?tYe#EuR1Q4~mjM&8(*{3j)ClHhT7McRRD6U}$@ao`cJDKvW$vmk zhVx)lyDh%#d+Mr|5G4dgY$G4D$2t=#4mVgbuj5IjM6A+y+k|N0zD=BT=4zm^7S&cs z%@Y@5Cx@isN1`sGV|3|k-~ySi&wiPfzhKXdAP&J5`4bCTGUn5kV~EeU=#(|DpT&0F z5t-6bw+Wq2-=3075B}48k(Aq36Q?2=Nro*1Yv}N1^~@>t85#dZd}(Z-M_eLjt&YBs zCp`*uSi`OJ%`d(C5ZL&N_H_A2r~a$awvi^VK=oCyi?s;016ncu2ChRoB}(1{|JX9U z2Wh48qs;!nd2QqUSm&+oznmCwJS!gysl=tXAyAU|>u~zYr!(dH>X$~jYlD!}o@*;w zL2rZojN193NdD`x2>vVq3#J z4l##e-WUwh{pJ2X%mk*A@o}n2nziFe{D4!o)B>{xb54%3_FI3vr;l^_HwtWMjfn#v z-a8nS1=m%;8VKC0Kj%zf_M*oVBkK>Fp?02!{&o+Ed&H=eTB5_Ih?+m*>R6`SUm6ozx8ZH+!?Yy_eKiOuFeJ_5+hTFW%4PRK*>NDKWV$Lc3 zrF$+Ctd?}GXV;|;*HGsmUn?dMdkdSqF-M`JiiJM^)VPlL1$Bln_;mZlaN)S)kYBBh zxYLMRz3Y4^-qG0j!C-Rh@xn~i8$I?fP`aW0aV~hS0;-9M|D4Xjt%LJ(XJy0g50lET z@|(TCwaZqt8RNWd(qs~F-Sq>4S?+PEB_3>t_JxL%}D4k4sy&#b11<)_-%BI$GezF!-_%z|Lfhb!LBW zxdc&L-y@)-RB=`&_-4+_Z&m2KKIZgGb0g)U&KWLt&Bfl8$!sAiC%9Q8V(V=%ZBz+* z+9K+nMomRot*K&=^6e5Z#o&tc(xz z_ywesu4RI2t7n~8E|Tr+-e9Ut!8Mek$BoC1U%iBd19gk*5l~qhg?4FfXWjF;^T#`& zOl^BJ_4TV;UEhzdyk|SERp5TF+}*G};kav6)pFMsuW%PDRC(Gb>OJp5t7qnufn2AI z{tx*t9Q(^K+}GXTJ~;gKY;5G+kF%wjbx^N%cyy#P>U?5vM$RLV88TYY&Am!33m7 zxibIp=7;{6$uIRN-l_MoMb>ZUtyID|EO)eYSENMYnSMd0wVY|~3`ymTtLAD=>6N3# zC3hAL8`6%nHWv|>LYN7`dpBDO?^Qk!2iNoG1z$+$^sI0h(oN`WJLnDyeaLB0e^Ivn z8%zUG-Igi=Z8`-chAiSRAf85rOQCE-Gr+22JSnp&=nl6zm4lA>>(|4H8d; zvhrULYN?ubp8|$RZP8R60hD=SKSsa^PTzy{Y5kC_tJ+`Gs{vJ-; z+ey&Z(5bG?PRg2lkidbi)LQfHm{|6P8tc_WjiV?3cM5Czlm_K{s--hRCx1PN=5Ihg##BZm8ZHU<5 z!bM!*ckU#k3Fj{-1i$R*h-eaL5M($& zTMY>00V>f8(BT6lOhny}2!jM)DIypJ@^z={?27&Bg7$YO2FF`XW_^8qE z--Gnvp~Q>R04oJJy#dJ1n0&weBta`Naq;b`?{y=>{@Kb>L{3#NM>)m_`IxEZQ=gvL zBda=NDgiSE$N(mE`nMxY1BFFI4z6b)9un{JdF1uABxAhR6Js5k|f1l zZ`cyZs&UsJkRFQ$Dk>pv@F9nf>NDePy|@!7>!Q_(Slk+_I7pAxgjyG!)_sxIl2=!i zso$L^EN-QUk7(tpa%tZD&N=F-%|0wB&@D;`Ro9yNNgvJP+|GAmNMj;yl#U|0oaT}@ zrO(|)fG)wKvA8kNx&@KBvFN&4(sA;8`~P}|Yqr~%!JBE`%k^oTbGlE<;;=0M@sa+6 z&y6UDjY1ufy34Z-pl!y*#g$@XZ5muj{>z`CqVd5!`99&H!PA6*}j%!C6g$s zVa^i+M6Y%X`5^J)PKVuOarQTF9<~P&JAjrV?z$7wPfcKz?Y#YvC>>yJ|C?@u~h2b!4;$93CAiTo~iXK!tZ5j}hMG+PWpL-W#XAY-oH z&r z4o2MhybRsfdkZ!MNRdbyiB~%yqQlO{xnxp-0kPWZJxvvJd#A0aqp;T#P!(4CIQ&fP zp!$~@Hf`DSAfoko=>bZ!%~6&IVoVQ>RstQj+L2-5d1~cSWlyr2q{zhHn`jUDeV4O% zr~UhYJl0LEqo{qg?Nxxgq{*5tZID@hgl+~O@#=gr`IFlltrck&n~r)U8*W60BXC;^ zvPJsUy|7MUCVh*_wPwJJ%hFC|oJX$izEli-7%k}eB!0E^ki(f}@xa`{II&;KnBHRK zCT)ZKVfiDQ(UIQyLFrY*JOWDxaNaQnU0ct;Ejv10pr0U zu!g$%8lsb+Yw5;dCgAo+j1XNG`}aFQUZQh0_AfM?6dGdr5#DDlI6^uUFZZ{0bU;5a z{$iLEby^PNfxtLhgFZ3;XhH<^0FlFlFzduQlsBK^v3 z?We|Si_?GO7HN1!#oXXa}Rs?lSo76}YaFou*$wSr{4<_WG$LXGc-Ol{}0 zrg5-W7X#&jNz^oD*zC!fuuKMt!MkfvCbbj|ouPFJZ;6Iy-?|`xE?4P3OTXuxRuYKQ 
zj9Nr|Bi>RBRJ-VJ;$*n-+#i=nS^TzDJ<0Zea-CdcfxJ>DA$l*MeBZXq3U2Bu%oU`Cnhz)0zs7FzStU`v3-fJqtqHtN`xKl@jMVxh}ke#yne%_%7=9rdclw+CO^+? z>#^^pwNJx^Uuf|bP4cDuT6}C0QbN`~y;j2THbcHURej`xD(f`&t?s4DIOlVu$lpGW zXRT5zFKWFM#P!dHhPwty$W5Yf<0kg{jrCQYJbqmKF+MvMRE%jU8`Z-V@soV~%=8g+>w zoH)1{2&-U4oV-PF`CID@E?fbl*@3{t^EI~23CCRk?7XxXP`bS$9&fG1n!%W0_j5lxJV z7qu`(hkIiO!~LlqPOZOeeuUQysFv(X-F^H6NUnXlrN67DZ zt%dda<&pRaX7lghKwxhl0ECc|uOW#KZU)WMuxbU##-ZmK)dotL`12G83lDtB9#21g z(M}+fe`n-->FZ&6J06OG?aVW*lhLX5o9v!NHECHzE9(WaG*m%&{4#UI&GLvjYW7He z2uocHLd9tC_X)!?a&tQ-nOe*pf-zr6 zp%J0otSf6zCWJDMPi142s9jj51iP^nwNCEPjmMjv)d#@Q@6?u=dL|?Whe97<(c8aE z$Prb-(ve2D(NR_G{N}LelC@3sJ(Bv}yXY?c`KTKb9#I$siZq5W@h^7qUo=LBb?A+) z?d<#$0@?!u&v6*4Tz50&Lgie%qSNjd<8jJrb=lRP!Xf&%AKq09EE+9!K=u2J^o57i z6&Bv*=SM6C7#rP6m2`@8#%_izB7_VIZ>V}mp2BEf6Wt`3d(Rmysnuf$>*Q}n6k*CUCfMVHH@Ju)wcA>X>*j44*h**=sA4@S>m zk2dDZG!o#D3~>?7^57@EKrf48e5lw-o7IX8-o;!%%!#CFuB)WFDZ zhxL&SAM=OwKq@5_&2))p5VN1dcTa1jR^S2udGUh9JDF5tr*BC?Q{~NMh)X`Tsf3PY z^a!+xAN1qnlMK3RbFQ1R_xpuEd84mSmnH=_=2OW&;D0rBBlSvjJ)A6s*nsdiLJz3w=exrl;tXIQJ zBr(_ohEL!EKk=b%;q1{xCvB2mvrX!K9A-^h`m=?r)FVyQ1TMdT2u7bDH-SC-qGtG` z(=7oPr7-OJ_*Km2hVMypKh^CUv;3Ay$gjbNrdrN%G^9ab6aM@e(=F=b+g!U`6&fF( z*60K}IY2F0iXpe7hE{(YyTeP%WD=6wrT9L`L1-Yg36`pYrx)nL$&nY8ro5%3yp7he zN!C1(MOMjsIq6G6M)B(q^pW9LL6_3ILVX0yDo@@_ggO(9I$Dmh9@!V z;prY39QNV1xiKAcQgAIvS8LS>C$k&;4weQYm(|U+Q4tvsA)~fVp-&AiJQf~h(VLej^%oZni;wQJqC3DYybL2ejJ|%Y zA!HA3MPz=OM1EmVZ(GX7UEYB)++B@E6rs^KB@~lOWK2@OY7tyeP<)D_YDOr%-k+un z56(tQMI$0%!wT`0{cbaP@KnC^Cr2GLs5veN5gcWO$;gf9)ZaDP7+l^$qWHf`Th(+tUb&6?hy@Rr& zuoi5e_1!K0MC>m6ru~_k17o5!5I2Jw1-_ugR+I}%&od;)>eubQbroLZkT9hyGOl+fJIAOat$I zJ%fXlnxGKkHbx~2SJT#VT`m0x)?5)>L)eYVubpo5NS;z{+@hXeo+LNfANdv&t>xL+ zpU|t2;otEMixOvgwHQgf5^~Qya4v528)H=9dV@PpvMFWQ{{EfT6#_gKM~-1`=c}y9 zCkS|{K;g|&Lz>n^Nrdi-EX#O)Omq12fpM+e1+52rp|=Ckw@K9FY!sxT#j`p+P+t@) zGw_WuA9HBLS+s`n$jhmO?A+caYk24)Z1rlloRq-gL3opN632t1u)T*?`!+K+&0a`2dQQ7 zcN~!2GlUH~bs+wZWu?$SPel@fAr$lbY(}uGbgPp6U~4VD^@ANg2x?fOpUaWy@4$JN(+2WP>AoRTE#`y^74sQS8tp3U-qsB z3;79>kYdoU*`&^i3T3)aeyihGHwj4WL{m}-h;wxlXpz0e?rPsE%yiX}(K`pB?V{YM z#ca=K9#H&}MwSK4=CYzO40}-0rTyQH7S+WEVHL8BT8bGcaeZ5o-7c}|Sj}JBNrOv# zpSQru*^D1%5FE673%>A7n-$KyccPzwJGDAKDey~29m$f%YzR?}x@E+4YzAxfOfV#w zneDCSHPWNKZ%YWBNd3r4^uUxlx2AG;ZiBKHaWrEsoc4ip@z4I`Ks;)Bkv$61>2z zyv>?vt=|-?@+m$VqX@*(O1W~1B)??Om(W^cF1bx672i$v^reF0?=j)(vypWoj6Xh0 z7t=nPbzo6VyAxS6W10SSuW0Khp%KH?tB-T;77u^>SMrmc3xs3bb(|}6mFs>R?$@H~ z;nTHX6!wKfUq<;gI9hwik1bzVT#Wu&eDbdD@xxR6FAkYuo*_1t&urBRyHiW12_ybJ zI*Ee#aPYg4m0;va{25y#Me#S`8+Y!OxHE%KOkrg0b9()>3WVOm5{OiO5xmNbT52p2!URnT?8X|hp5-Hx_*$fyJNe)+7b*3!}V zOcG={|Kp@M4T$ukUF%&(lii2zLRF!nKDGM@DNffvKfji|_JK#USi&&=c^?Ky8niK0 zE~p#O&JL)Y)**f17-&G%8KgV}Al`HnLkOvvCR7EGlm1Ht+Y&abLb&f;>(+y`z!{13 z4OHDZo|t=jaYq)AX2EK1)z zGn3X1CB&xn#<;$yKQ~K?l;5DD=8L>+LSOkE79MWU;5E{p`{4uVkgIQK*scv)Ur%yB zrW2uW?(Z*b=a-b+Piku5`b%Zp2i`oHSXj7a(xKu}1z$KjQEsL6`0}5zOKWPxctzLf zo(eA3YQRvSvn0;1841nZ+b5{NH8VEWSh;iO&K7=rPkfP(aOEo9*UuF>eMyR%nq^Wz zU!}CF>YJyLtn6YU|H^@;q#!?k)*UK;+fSc90d93zGeSjWYk67gOafB(T(~O8*2&=Y z*@14|`eX%liB2#7-mhOal7;Cn(P~C)U>RzFLL=_1*MMpcx7blRLjU*nOTT;(k`!O0 z!^bI_c`G{vBzqpMGo+^KeZ>CxGqxtracF$p^mz4cX{lq~mzo-_@9}YQuWXeO`7)F; zW{4FKSg)DT&$M_V%c;}4h(|_}x+;054Vs)@&&m+6THI0xpguJRTevM8ru1JctrU4K87s_K`JPoq@A9+Q7fsc7Fp~N{UhUru<+^aX$uY#<=g!icPr-U|M$*PpVFh;(`d z&~z{QItP(MgfY#{Fe@1TU1#2V4fml+e96J)@vFb_Da9(a*YXccTwO&xyCMrM@sXa> z?ks3(kgq|M(%f6>!@MHukVw6Ujw2 zY$px<=aIMGRyGAf?9R4Azk?B3plOx-*KCGN#;r`+`=G;wRZbc~KYDvw?`Z{fuo;2m zVJSSbn(m8|yp0S$;#Hw=!xyCG0OUq!NcYF^n7%$`y>c7P3hiKLG?3*ODTi=n4jGn* z2V;bM9vPAwjWXoP!($&qd~_)h8@5&e+s3U@m>AUwWL3kv58Ik$m&4t6Rx)LW)u7K?vVE{E8BA4U_A-%%S`M7 
zdN=HC@Y#P|LHc&d`tUbG<@Y<~(cfA_^2733#2+%+@#(dlk{sB%)w8#k;VQmU6JQ?D zJj0ySETWM3XHsS$T}QutdpRGbyZ~(1^=QCoK7tO(#E!)YO?^1v$FPKfxETO@b=a|k zys#=W>A>~71`*)hrB$GS=j;Hqlo|Y<&5#W;E*a-H75}B82hsqSq(FacvLdO06ucW) zV~jabcMEiyw$kGvW@ceO)~c^xr(Pe2ddxaC6=GM;QQ-ryfD0>@3k>|CMZ+z3xYnrC z7Qkuxw!hsPUHg@|mOAtG_4(%2>@QyxEXHP_Xe}k>8WlC`MTGY|u!Y-7>vD4>cL204 zEd;Fax$T*HdwX+No}8Tla9zy6gA@`OT*iPXCzR>v@-zxWz>Q%4*;)@@9^eF zH7SElTPNo81^$=K&CMqv!B9H<9>rYcRD z0M9~&83t>nW0*6jQj56ZL<-2)c<$zR?l&?qVpsx_fqoS8gPA;9!kQkY?JXMtbiyRR zx8RR)8J7@Chyd3O6mS%cg4YO&;8EK=z<+idVB=?&#gp3=Oe5I_JLpp@At!=Xwf`J zIVV#}Nf{O9T@e&WLTZ108mkVT)hOYcFE8HJK1T;?C{nN2V^~{}f4)v!QHUr6d@I<5 z6mtL|VaB9)E`$WXYmY$zW&&qcO2)G)$^*`gD}l?SPln$YK!&i7HxwfIjyJ_fpe z`+(y&kqcM(Ggv3G6G=fQ>E$s%)~YS8-wMExAn&<+GRp*qe*mnT3%_-eW+x{L(33%yD4-XHDpULtgOYhD2C?OSl0VgV_^pFDLYOpigTzNDli z0R_q!dXUUt!dhi4!aD*9|p8U)PNM;zjG=;bTp$S z=(B$Tba(j~mL%o&DZ&(aD-5mzK;9h(clR#}`;JF9T540L9I0ce1?8IPw zPrFa<1SkN}S(Y~#KaEI3SBY84$LuZRZ(itXZtT1H%~SAO@4KR)~LDB0a9`_M@7^!zG!kJ$U9rA$yO@Q>9= z2~Oz*3HAuOd*-yD3(IAM;&$UjNWSxrBTS&ue;K<7?#s6v1OE>HA@rKS5?4 zbeGQN`GQtIkrWDHTbjOWBDC2WaX*HU@BOCFYIZR=@;-d>{*SIjIQjPc-aKysiBWbm zJ(*`}<7MuVYZR4P_tU#nGv0l;2k@oOX>PQHN8S2xlOsrvBUdR=hEi7Att*-(1{pXn z1~h|LYzG!#nm5v6= z^COQqsS&ZTB+yzOpDmL!@b7A^CCbBs|ANr- zYJQ+HnSqVh?5@5QmeTpC`3JW_ead)B@Z6)=B3ka9!@~z@y)^o}J)tpo^^&}0sREwT zV{1Ll`C!@=ePs6ULm$(rs}gcd-7gUr>#pX7zTQN;aYwr?4~JCVo?J_Eh#VetgZ6#` z=8a>9+XW-aCl73>vvWiYLgqr_U0|!k-5(tMLbGH@%QK?ilWyF_s^*!>miB{+_RQF9 z!0>>8R}LwINk5SAo(Ohl_1ogj7kH`)HY629RZvkXz&h}?zMv8;#+Iy7;dPWn; z`y>euGr5A%d8_$tQ#t{lwCaOX+O_(w?odn2TC(m*i^vUpE$?Xn=zpqco?*BWs1lGi z()h95m6FxCt&`-F%xtk-ze)g$_NMz7Ns6)UN;~q8xo;{4_?A>gmE9l0WA9f&bk+=t z(PE{^?kt5*8ja$_-Fd_^E93Ea36b{KJ_EKX8QarcXvZ&{ulqIDFXEWYyi{KH0>j^h z!`tKi6py<=w3|B$KG?#UQexbO-<{oC+}$342Mtqo*hX|ltz1n`WW+r}Mak55^(OLj zU6leWbyu!2Fjibvj8L77lLr~eK-gLbWrVzy+UMwRTRI<1zbyvA=e#YG_>JO?AYesn zCB9xn=)8%$Q>FmtyCEubL^2TmglCO4chCpN_%s!WggQCw>x^s1h7L-mF%L{U-b?9& zF_{}_`(Ima2!Ou+{MyNxn`E&VP}2j>s$lF)qf>G!vM&HN0yXVQh?Eo7WEMnZwS z@3DKe1bw`Dd6`ab2)Xgbg8fQsi!dCofaf=3U`HJk{Tm^d@;wLW-9-cscN|nYD?weq5i=<&AT3G>J zWqYjJhcr1G(I4ZK^K_!9a;~JO{PWrLwcAZ9i1*51-_k8L4RZ{6d5>q|Zsc?_?Dzgk z*3EPhnme<3XFUH|_7J6#s9XJqcaP;WSbixd`b3Bhn6w7;ppmb1;=UYDc#C0GO2uky zXgkL@fir!)63cJ{`*=6H@_mV?6a9JmeY;;~`zn+Q8~GY@s^_0S$3KTHA?nZ8?{I4~ zECSC5x1PDAQ1K=IU{}K>-%3auUV!3Q*PJ551QjKNd@QAY)^}Sy_ie!C^PJ3~PsT1VnNDQXXdA2*lq=~EXaX`yULG5OJdk+or%SrWsi8m%c+&jQ(f&TS28C$! zx~jM0nJw8Y;hJ(?L&LrK*PQI(;w^>kcz=8>;N74qskGe9nuzehxO3vcSd3hLf^Q~r z^V{w*&WB27Lr?{li*6a{&0*rFVPSj|&P`2C`h}hG-1K(@P#;_tTJbzd|Gm%v61ty= zgrCuRuYoib20GwaYqgiX9R28B?1V!>fgyd%C-N5VDmMD7xCpYB?46{zUy6OHSk$lv z+lel&iYs-tWpNU2O7PoJU|W6uM#{fWY~0#Au+Q0(SyA}$BWI7qz=(iyG;>dBCR_Ht zB0GyOE+lYDo)E+)(9jZb`5dqsfk|hVKXCP}6ap*uRFi^9S3AFyRNBKFY#bb6t$*6S0i1{LINi+;*T7q) z58gvbO)c)Yq$n-j1PoXM=z*#NL5_J)LwU`%*Z`J3i(5%q+2%)0MNSTj?dtmaWT_d! 
z9X<{Y4xoYH?tTvZ=5*47qq8F`muhu2<}zR$`s0Vk&C(`6Q_&#a!+PXNdeKr8gxJQoER z?V~s3>+)ghj~};wNJ#L#=j-ievsL|igS?^t1Xw{(UTBs7cgOrS+{H~)N{&RdvQkKw zh?s+uGutPMaeaMV5f2naKA#07G`%_C8GggD4Tj2s2-zFphhsAcU+cv+ftqL{I*HCX z;D=s_>G_N7ZBFnfVL14PmJ{;fxCOdfl25k;FvqsH%hNTWa!x zmRw^;!9{h!OXHFh4OXB@ioYuxn0qp;NHFkAi|?@{;nK6UpmA2siTNus0u+VWu3{@n zPMO)Z1#sOG<0@DaN7+zxnbHRb2d#%i^=Bb09770WA5c-Vb3n0Qknk>5Fm&df8LgXn z&lCcbuTF%u7=nUChyrXYPtF zLi)5Z6|@=Nx%te5bm;|*vnlwqaG1%;qXK!i4S2H_m6jWG=bm;lE`buPZ)dx+6VONg z`m%*khX5B9UVHKRFmmEve0so$@OThVGi}CW00o%VhVz!A!UCxJ`g)zBrlDLrOPC=w z(O13kWLfIhDv1dGuE=cpj)KSy+PsLw*d6fn%DT z{zgO930_Fz2J2xR`Gsi84EJ)N?YS5Qz3 zR$H`zf%|h$)nWv2-~AEbOxC-dE3aA-mXu7yp@O;{(ILr|c5Bo%;%Ek;T{n zWb`Wo=?32r<1Vs$dElAR)YQ}fD)7u8Lci9viHzK~wc(RBEQpwX`Lipfexx11JIa1F zTE@g7lJGc*y0go5FZ_1D2r%K_o7Q6ikLj`z#Ieok9^O4&%YEu~$JUTbsZ{71+Jo~l@GsO znd`k&Z(mpRFI(+@_T7kd^~oVaOC9VpiqE6u!2wQh-|G$PY>{ZnjXl9ar(|HFPMJ_k(C#*Xa1L% zWmS%>bv=dsUR6K(K1lCfP>l3Any?_m*lrYq7-N}Sv$8rB*9GIkqbSG)Q92>R6BAtt zIV@qakE4NrWyhk1uCW(gpUBj=Z@2XPrCnTvg?Hefs=c#Uemi(7AzapIQ7rs}A^IXn zlk8<#(v&CYPz+B^JzAJg7(dwnkh&LaVQw~3k??*S<*}Hn5)&LMlEC|Da#vTvm{{mj zrt-%_|GWU4OV|f0pss^ zrm4bV^2-A=o+3|TxkA#@)f^n#PMP2x-Tbjuj{S-AD`94$QVQv+o@azizo4;WUq;Dg zsi)cr-cS?z;7Kd_FyNCi`!g^o-s0X&JoWsfeORcsf@?fwQ+af(8B;M}yzMDeYtKFV z)G8JD4d93p(=;|V2GDCKECax6wZFVL4G9SWq8IQYn*an2Pz)L}rws&-#Zf$OI zc2LmL{*(e0)Gk}ofCv{hi*kzo@Zm$I8~_qtd#=|k^QMf9XO(w`8`je$B4*4%lnMab zR4IGAx`4uI(LzAUtF>W3d;{@gM@Mt><*XWo-HO_!S;NtfPYB~;V(cA3KTLgc!vwi??XAo1TM>2o0zz(NBw zp8DP90}^;Lum{O;y!`z9vlSUgpFGqIqKeDL3^=xn;4}|n)0+Tt*X5Ej(AkMA4BBsy z%Py;fj9bv(=>M^SMpAMcWfr@)KbcEqBL^ASAeI`dwUv5oq)S7 zI#X))2;h7*??rES8hU(cZ*OPwJ!KEGXB3YG&MdFv(WdT9DuWC@-fp17axq|t4(W>A zJDUK9p=*~gIoQeqL^~Qw8Y>F2bkCrBh-rif8y8oOv4F5JYclRCRY8Xl7KjKNHAK7K zL!D?@E0^v}QhLc{M}gw#eKaL%hhJg;Jo>E?F&!=Sw96;QRhJe5kg;4fSb}xwiPa+9 z?;2>?8R$mu07dSW@NKp{90EJg(!i7|b1%&)pzbgo|M$_RgC=BR;(+@i4Bos_J4mY| z{SHv>SU5mlA@l_-rB_sG6<0YRJ#^-B9Py*o@t6;w2tncBv0e{{Zz^lo`Zb<Pm{kWN&F~tp>rFJ5$e@wG-#xJ3XoD0>SW;-lSq~Jk_?EWDp=O zh2W(}wlEO4C@k$T1;qcu#akl&hG=i^NP7A-+Q4%loqHOfDRIf!l>>sVmDq4e=CLy6 zSK({l_xP-t!16J4BbYTSA?yGf;WT5tDQMcW#;4w9jc(P-591GIzD!91@^`%bk8GRw zr)%BbNz=V;0<|zTIcs zVY6hc`@Mf}UI)C_uA{x>>e#Q4gQ7Fm$gD~b1nUCxToc>L(GkR@K8YA{>eUD^F`4|D ztVl^oOgzooQupBEJ{8ufcuD3D(6}kr#7>97Z{J!KJLv=7+V26!v)UP)?FC|XB2rph z3o0w)cY#IYt9Sg_4bpp#JD}OA_qb>NE1+Tyapx8mMz}i;j*oXX!IE)*A>W;%G}}Q= z>KTw}=-cjk$I96BUu;GSa$8t{&p(##aÐU6{+n+}yn0N*0Q)&QyoAe>T~Da3IhD z>K~qdv7P{VtasZ9el$*CM%zyh4>x<#0Y*Q&(6!b#&lF3NfteXg!@z&olbr)CAdze_ zu)wX==b;ESx{2#OUuF*v3yNyuAd9Qv97Nz!f<#V+=Z=3iK#vERT3B?jmT?9qGIhBq zZ4<=b)eBMRgo3HOFNgx2W-~GVz}W>i%IgZOAoyN8Rt8-ZiD`;SSCZ}O@jQc?s>%i( zs^Y|XR(=Aq@S3uzrDgC0&GWWzvA{*;!|>_??PexX^eK3JYsvxdD3=0&0o)fi>L*}f zyL>_~_G)#q;*&?bQ!qj)9QmKwU0Z}iB9HnbPD1f$>4y>HC;s=ox&2R*I!}B~q6rAi z|HG!9c6KW6(Nc!l$-Tv01SW9aBLs5-)_XTmHJC_(=Q}BFLVS}6NSlQQAqR|_2@ZWR z>5}(B?xe8{%#P6AD8s4{t7_SkCe0*V;q2G|X3(?Q&hRYL9wV}&D6^&}>51G@_d z&c9=>60+QHPqlJQoi)H1==Hj5Vq$XdVh2;0kyO1Rb2jdul~|YxwjxXXeLZTp0v6ak z!jf@uILYR@ua=jWO`+)G#e9PPkSK}qBIX%Y=6bR<#+lL>H>gq(?#er|X8q4=GK;t! 
zy)5%)Am)Ri9N^yW^h6}lT6%4cVaDOh`}YhvNPAhL1mC@N1ttFZQ_7Y2`!FC#z-j)g z7wEV3{2$KV0;&r2+a5KLl5{HqBF#n=>8>pyT_z~f-6dM$h&U|Ot-@#y0^E~^T3u=d1 z9`_D;6Ywq7woaxrBZI31Y#8<834FGB7ny7rxg~A~7$dv=V6=}Mv6o=Kt6xE|NkC^2 z#qnLMJe>#5P}~AJih#AiFHo#zwT^Z5s~`kNgiw<*{Q?mwDX+X5dTNF}1D!XBWPfRo zf(Z00x_HMUc^U`*i;Cuqt!|_A z#?~jRG;v|+`<&0CMNbvK5#_zew`yCL?U8h{hFEVGY~7*pCYNfeqjQnQn<`97!$F>EYlLLgxgmf)eplnK^{^ zhWkpGan5=_Nvh3ZWLFu3Q$FIaBI_Faue-lXeU8r3!d$_7UEHVA4$&-6uL+l&HXzK5 z;NcOOC!bY1bH5w+$uiq8E)P?A#;%b$?|dU!W`U6>vuE8dnwk7OrRfkc6(g-tKfbj= zB~mICKdjCv(v|xoG|ZHew{KH8UdPl?P|T6>Tu*>wiGxNl1-uktQ(>M6adkxeP>AD| z>ZW(AQ=@dgroRVqTTIQ2CY@%1VDhOQUXigemsP*wrO`P1BA4&|&^pNd?U=n;FC3G3 z{FzhYUYtW@sY>&sL^Rh4CY^&?S?05+!uRkKKeI);zSwkkkT?;L;xgJSG*cjKc4-Dh z-ku~jnH4TIw(D5^apFbg{##>EPX8r}Jq>u^Hh~_u(*Gj8{eR%fzb>;8V!H_|CJ(9d zCxr4hEB^OC7XK{KV_yJ>+2~;t2!rR(pQizzTd<^&NeSPI{|n^&X98<(YI>EMJEaG8 z1lZ$r?1A+nHka%t*R}60KM4y1tU!{>V#$;wE;jVF(Cm! z=CR2niiLJ_KtYMC7-_*~DDxTy$P3$4+Z3B0+*VDlUOiDvh~iu<0~_Ckn3dV5e<1sn zVSvp0MMcMQf#G^pfc#=Ll!!v~vP+G^J`;!&W)yfNH2AL{p=O|X2NcQ;*oYtLR!`$_ zc~3Nv;{u`b!?})!=YUPZqj-ev;bP=*O$Llg5OsO^{n^`Qi1LoXL7L@9h*zhcJb41Z z2?LAW<;-Oc8k}SfbP#;=kBEAln5-Xp;y?B|G zgq8d~I5=496Qeba*!JPoFB1u#RD)`>5f`h zuz7rDq`$;|9PB?z2-qe-dgOJL&fhYxD(ggo{Fb!1^7n6H+tKys_im|C>$TLrp}R&E z$44$m&M37e5E2+v%`MN`9b*>=T+hm@k z(bDvH%C`^uQj`C9Wi7X~aXr`;_9Cp6I{dbPG?_>9&j;?sGaLmJfGV2vDpgU-`I zCOV%Z&w@FT1ojiTH@)Z1Yo_5vdR>R~bnvjks zJ>YpnY-zhPBj0lI`Zp#5>~&44FT&51MC;{B^#mnti2XwK7EpvJCyWk^!z(mu6{(7R zjg`o64=&ZNjHe&E{|%ev#qXBiWyHixXpF2*ofIVBp!xn_{<4&=-pVm;`U*5ud4Yz0 z)socT?vkB_=C-yOJ})8gUVTNIezeHxN#dzxIo zy2XC3b<=fOct(Xexvs|V^mX}?aDU#JuxI52?d`=|bLXz((>d01mz+t=ThU%o>2rN* zi1a0s#3*@px;|3qGmD{!`j5-Iiy7HiPJXVRQ|Bp~l`lN&kCWHOtSGIbS1J=7AUQk> zi&D(K69fMIY{92d!@CCgxTF-9rB-}2#kkI9?3G?m4jf<;l3AM5`Jt^fD^)+71feP$i<|ERxiGBv4F1!u%I ztr#}>K;2u3ist-zFJRw5OdcbrMMnVLAlhC>l(wZ^{(^}irO;5P~$ z15Zw2?Ia>1$mNJX(^F#Wv6e!}QwTy z%lN#Cl*;Ceppwl;(%@DUZAnTawA6-l0F#mn415tV-MXk)dkf%ml15PO7(9f}odyrA z4noooy#MBCKV#(E_T+lq>;yDLbW$04bZji|d8V(>={LCEfdqS?q60|&hU*eU0O5c# z?NTQ;S}&*MdIZZ z{VF^&z^kRCp^~Kh3KTpvf)$_89>K=JG09i7u?6NPSR}4I<|RifV)Vh6ODFjv01r^! 
z_ekuP1tu#~MnYCYrg6`QR=AfnQb%tXCk2OySf8 zm)O{~Lr)eM1u3p?cET$Wzdbx#IARUY>>qUSUl5V^L(mPoR&!KI!@<$9AU~fnED>6W zCSpI_EWDAixXN3Cm|tWi4hoa z(1C42%G}4g@QFw&i3b6sW$)1CzJCudEGYd`*6+3z^3sqoH4%c(-AH%|+D@5Fzcykb zBQvRCYBB?egY_@~ke*l6iy=%*wZpqh5G8YgX>OzCBh8By_Zu2w~nQ|7D|s zg=22iyp1ZJJ>1tG3CvxdiJ$H&hRO^^m4D7^pG;>2YN2Q&sZ`tN+uiksz5LHIxQ$3w z9iNpu*iRQWQ?C*Jg>o4@*->|-a1^}v1Itzq?hUQ!df!stc+cye2-q*k{S1}Z8-8zj z3KblXp6RHm4QOEg0o5iH(wfCK+8!RFOb|N2r-eoj>?Z)&fgVom2e@Ve*ii}$Lf}JG zfD76pm;~l(m%+U&7>wF=><4)yp~?4SV+N{6Y~LF;lHj_{0;CZjNlTAHrMMW0v9e}n zW(G>Ce&eRRQ{jF;nyjtLZWIF{PFNoT6L|L!v^Q%xJ^&=W>jpe7oc6y5v^3|TkpJG4 z$Lis%*NHh+hw}0TnXBSV6Ok8GS_0Rnp5}U&0|L$?D&IOk0$c>)t~9M){PRDFPk)@_k|9@=AU6$xv=sp%M^q zhCwZ^0ui8CW8a`4_YCe_2IXWfdHWK84o9Sd$n6oMlQfPJjru#y)zwED!NgEmZ1j0< zE}$_Un>J|*W;C$-FmYmr*}wrn`b`||h;COd<>%zsI5|c4RCD7Kr=tCg*}&@{mztI~ zv>m;yoq&&*M7kn3D0hLK#>Ls0=M(eyECA{J8?I?;HXYSm!fc5qHUaB#JeIu_)Nqtt zRgrmqQ{eg+lHUHSqry4{EtTxnztg&Jm8XcatyU@4I2gWE&)N~9Qlax4o#*a9o~Ov7 zJVNgo&LD&j^E@Obu!;-ex4O;2Nj0H0lY$S419SZ>3CwL?q@PHXS5RoyN|e{sMbHMg zvB2DexV_mPrUtX_lws-ZmH+swY~3kOMek*L9IOeB=(bn7W~Q2QCFq(l8imHhP!Kl3 z&cQ2E860P&?|OE2;gU$*Iu$>>7yNb-qKPX!?5g_Xqy{aJzN%ByMy0%NmR73KE{i@c{t9&7@k@>P0~rnki3JV2*#BeN`)E#t zJEw%vgEaf^#|MBqr}V{Q%L`LfH=iJi?(q`1O7>)x8}2AlWZY+8Q2?-2YTZ>AHqb`L zJx0*Py5qdUsyT%OyHy!UhhH>wx#wAS{C+{U(B;d}T^*E3Q@c^#gG8$pgZ-0(%dNj{ z)T%+iRcmYOxUprnu>#vMHImSGU+_=bVaBNchOda?eH=XV*=AN+|b9_yoYGuEgHcQ*1~4 zYo(WmoSfX+>!?3PZfD<02ub9g$nI3yw9E67kp6lKwVg7Ki&YQGB69gqXZNaK7Rf}aMppQ z->waaPQp`Ax&XJL{nICLyKhHKBbhmLD``~qJlm7SVO+o*u(9F15dQ4hC*i>fm>B|R zO@E$D#_lJUBQQ74_3o5Q;)bgcIKLFRJ>JuI~1^ zEy;`Ztz}=vi5N@#-Z2o4l5zq<4DD-!Phpg-7G!6Ij(pGun`avE|9un%_{a!^?NQ8$ z&^r~U!cv4C1D3eCxh3=H-RxS$&B=_&+fu#kNbq16k0|ouD^|~7zz4h=Uek)<3Qj01 zHe&aT%H3*JO0vNAtL+pEm!n^6L~Ja0E`9t6J2wHHl69kU&was)I8;){;qr-0*Z>45 zg$7~cx&q#J4$4G3Gzmd}auxZ+1TB+KUZI6p=KzW5|XQY&n zUJTyrwCrs5_oSV**oiYepXdI1uYS@ga$L6a_b6FIpm`wYgnAATxmIqsb$BSCs0{lH zofNOG|sST5F#q1p#3Uc`C!)2+3t?G^uOkw4u$1B!y{8yOQ^jp{Sl7ld^YXjU_II@jhEiqJ41 z+*H!Co9n+L;*{PE+Qc~;Y^nJKrp|Zo-Yt>t>p0$eR39ntkc=N}gF({2bIrjM_cv5l zRpl8Pf2NY2p3Yne0AQJZQXnY`Lznh%!lgK`V{yiY#kO^ZA2Z6y8~Zi=Rg%!x z7h%x5DgyvYO2J1flG{DtQPzBkhIh!fQBOcoGo%&=p^ALrH;?MO!mKk{OTh|^7{X;^ zm~@E=xRCVbrxN)7o#RRzB2S(3;@==Wa6iswG~UM9>-{dum-wsjH7*~7FbKaLKBA&& zR-h_RzY#7!hRh$8OQNz(ehk+#zgXV$7F5VrnUbVup2I!DpJp43iPalb-XjpQnxn@3 zI>qlDf*q)0iiLCvEp7=_!=)`jVbsW-ubuel)<^0u>1^U z!b3LN)S~oq^9yGcmO1@a$-@c;E=^_X?9TiF0D>eQm!Q)_3bOp0sc#ak$XOuVeDud} zNpuPBh(lY1#hHL;L7t%yZvF!^ z|A#>${F?s}Z@~ZGRPleoX>U^?;Xq4G_K6c}S~zk4B8Iq7(@>Y*FJs()bI?3KT^sTn zeqJ(t9|rQYSIs%@mE_ruRGN81v&7E95&E~^o)83Ux5$Wyj;=05MJASXJdT}~|3ci{ zGD+h@(lJ!^)uYMF5ihS%`9~6(|aJXL^A{$L+nz zS+y4_xnKt-Y(OQ!#<4Rc3W{pm;1YuoivS`ApbOlweu=r7@6ukq3X+7mC#Oa7KP*UW zSEsD3{NBa6P8Tvw$6ga1gX19VD8l&G()+Z5j93BI{SROm6VX%o_^ef$`o>IcJA-b9h)&?&;0ZoY&5g99RD8t@GS7SX}E&@(6px!S#mA?@sQ3!0qfD( z5Bl-Ww4JsA>kvvr{-_BI=P;J0P(L{;3y~}~F;!?F{?NPr-M?2NJ7Lbv9oJa%5h#JQ z54dXBNs(cBWC2~*6XXFVZ;U5M6tFhf1MW`tGlG#8=q_T}f&J<&R_Q!{x5(Hl?3W|# zFRp;&wY69*=>wW%3?JENQOsOe9$`S37XwyQ73CFni{e8auW08xy+9U$ITV)GlK4+v z0mzk7#po{-$^jL=Ye#3N6yaB(xoaimyZhVTcXke#p#{G@`x!ky`I-c3lW4myz!8t9 z#KJ~}h@Fd~KBZvyqnl0D5U+Ra_&82a6;>#~${^Yh;&LhJ~% zavbcoSBx9tuL8Uor3mlZ)^t4aIf1|2!R^A5#*12SRzlfM>cf~~v>o@qx$gh>RcML=0%Aki*S>hi?cuD&oc;L^iJ3ONq)*gj{{)5;Cy9<iIxMPukN zNi2$XB6-rln)KSU^-+R`r(gFzA&Xrkg^7<&$+mf~;8Z*IR8aaleMd%mdGt~tzamYNLAb{aD=wIVp^F`(_4X{WPRRnxej7z~R!}Cc0N3e*&P>D1F1mOlBGLA6h;8#`1ol9CVq;|&QHJNTGOc)%li*5`;5@e7}_>ZsB0*l)#{;)AFej>J3sHvKYVO4=x*7v;U|#^<(4 zeE^P(f*T~Vt{RBc8`%&F%;}e@iCK}bULQ^Nmll99O8yl}^VDe? 
z0d+V+SF!}x+w*g)Pl+h#5>ySNM997JC%sQskeY)YIC{*bZgtE3U8=o zhGkH_uzUu-&S`2rzdB+**h^8M`@MS9+0~_8ofKZaxHwcH%EuS&eazQ07$Ni|3UwDj z_3yfHs^uqVAxz?q*d7R4_`Jbl|5%oQyAnwyI2bby{D`0B=L&WorD+IQ7BQTQPMuX1#qY?mVaI%A^{~)z0(`<#PZuyRSeJ z9N#0MlkjYUof|w}@4C0{?(SPDZLp})Odv*zVyb$1;bECQa^$<|;_U2;u)!)H)LwgZ z1Z;by4_@w+v;3#()^{Yk-O9DjM8deIuZE+q7LoT5UzcX7eY|nKnH|G&ujzZ0Ga^1N z&TMfms0!NRv~9RZ%Xk7l9tAqEU>93eyX~7qb46FkYlp89Yz3N;kv|C$PKYJbZ7fA7 z-D{~)Z0Mn6rP8`y_&J9&OhDVjdY`v$bEp-==qd;7oGoCfriw7(L`_;@;nR&8S9w|- za*U#i3h`(Fg3{K)!XRn4i$v%ORHF>O$J<76NFdV*)0-}he|~%M(j_-H0G4f%pFXYk z3OYTS1i8>ox^2Q0@E(|8rpkRbnDk)2ezFndn!jN{c4V13HCbJuM7Rp?K*OH|Q%JKq zJ0@6tT!z4i1b5qO1$9zq1%>Y|`v~7wC1cd<#sbC63;utW0S7(4 zs|EnJcrMIo0v!jnoHRu;aty&qo#cKQn4_Kvea$-8Bg7P@}BEJ@5Ql6B%C zjQxN!jmY>CE)|zoOZsu(gY%@|@6R5n|9aH;qL~d$xw3LpY!~+!l<9(&BPAK-9Src# zI`9Z?Km8Xu5T%P?z_X*_Tx(L+%aR(D0XuhLdEtkmT!9AALoW-x(J1k&+J;4|TBU}n zt+C*?wzh2!A4`NjYe$J?`aLN}f?|UN+46g4%PP#-jRZo%-zNcYZ$}3!yUO?A*te3d z^vDsa_%$=lRP`Cv>p2iwnqt1UCXD)s!vxTN0n0D<`S_QhL2yGVGKHsW5IDLXT3S2J zL@RH5Vq#qVPEw5`t1J-lhTcD?hr?fGG&jjjMACz%AR`^TzQB$ME8+c((+dhwop{PQzSLfG%H; z0D9Ch3O??z08=L6mx2+Ik*7$;;@Lx%#OCg=B=Y~EH@39c8BR2P^0ILUk{LmYxdqjl{v+Hk^qvwVed$0-O`=Io zkuzXZS40qmQM2UPE4aE^kXSQHuvVa#D)mwDve1pso6-*rzH;%4ozi$`$<7vN$fd-h zoHOcZ?Yfy~dU2p|WkGK@GH;*I_cU2s<~M9;Y&8ETG;Vi&D9_5mYbK9rAd`rri|}?Z zl$LJkr?L|kyyJ=3eydJH5f@P185T}pkkp#8ITO3|P_$L+&Nm07@jdw?mu^fZ9!l}c z+lrK*04x#Zqj~8)rI_>9-nY#&wB`}EHEPfHte#IOExm6xwE4)gmn-Q&_S&WEPhf^6 zJk`*pURTM36iPK4o<}tg&syb`?%o^?h+Od5v|B}O9(FI+OHQKkMAi1(mQ^MSHX#!L zOU2x98LEz*@4qQ29294659i2J%`Ge#NWhBlYFY$i^M)z=H zJH5uduBA5CJrT-{)rT zwZiVJ?DF%rNGUvqpqtUL1q!B=**|D~K4ypCXs^tr+ny@!Wx@NZ?-%8>_`7kBRKaOI z&hTi7`xT9bhKBvU8`_`jtqjf86v?)WCchtU9_}A*6_ZM+Y*Vm;IOCrM2fx>h+dd*L zmGnM<=z85!QhL1I*F8aSsA_XVajRrlV7cmylf$ad>J}{p{ z)Fz@v3~&8j)nJZGNU*T7D!<{DxPN`BwRI?4xQDia3a|!1pyxpu$wU4OCfZW)1vSYx%;VsV^@thx{9G@-+htmdhtFfEZ%i z6-Q84IA>D`=m79!WC?`8(ZR-M0jsZboL~~#DI%Q`JAIfsEek`LCgQPSwdO(p`)a9w z5pUzj@+=`b@5f2vO0_|@U7skg-FqcT>R`2E`*6MT*akvaN5>^CEt%XwVNJj)Kq}-p ze;!T-C=>L&)vAlwD#MOio#!v&0Xcy|fs7N%&@^}T3zX?0c0A3#C*2gpM{f^4B)!c#{>SaC%(-_h%&_uC zzJk*TJbD_Gw^%X}=5*LvA+IuT>eKR_YhY+qJU=@--bQot=1ne!?{6(lr4TRz2R5f! 
zG|)OHpSHHOH7u^I_v$)c5eGTVd#<#}ywRtBPX$*3Eg8@IzIY#YI9i<&Y@sD2zumGk zx&b^+zvVV;n?Q-%Ow&rGU<(00rnAn%($bu2jU;rt2MA&vU%$q8%3u-$Ea6*@!nY`M zp>>lbyhO4W^eqT3*$DLk^CO7qciS})_CJl_l@a<9dyqP(0!-lhnU$0lgZjwNtZ z=(N8?(VZtsKqtsMLi;BBnf?e_11k~Q+@8%3<>>9~~uNRV(okvF=K79BGl-koz zlqXAQ@+qBC^I6?rp9^-OcE^38N3N1yQSmGmC@6DlH5pknussG8@YaKj34&1IzF~yU zg)Uty8+;gEBZTKfw$LEJI7fi-5^^*#2@ow12KX`|Awei==*l#*{OtI>wx6WUkWIuk zs3t-YrXWBQ3%s>Z5R;Gj5`OnzeP(l6FImsWz4k!DP2IN()b52_$YVv4#yI*Z-5_AX zTLSp)4_S8vNZpY%E`Njpiam|QjR4|1RQj=Gw=21h#guYsF-t^|r{S_q0M<YpRZg9Nf9r(4K8d9%B@qTU2aBfth57%>ynUE%RS0t|re3d+Z1MqfY;&HqMT;<& z0-BVhOr?AfH|VFp!dCpGw{5Apl7`ENIf?E?uew5$$Ulwpnp+;4{(3$-UEiKq((i=O zzxroS@4meq04tUt8SlklCGinwa2;FWrBsX>$3|B zG1tS&0=v(-Y%Bh7rItuS3U%bt*)RynP!NzwS)Dqz>Eq1dDCog3C(J&>;pS~hVrt^- z?1)Hrp&Q;j|5Kv#mqP9@)niaK{wY_A|4aN6_K`RFA`AmM9G>42eTzrhBe-CT_v@dy zz9r(Tq?f$++#3`$ZPmJY_fFx8Z?!2>g)}t8eDeg*d(c5aOPiY`Kx`NNDLZUJ)D2*c`?NZp60_)J9zn=K#zkaSy zgLvuDU%wp$Xm6XCsDEqh$&fa@0wSi^Z@YpJcA5?^Oo@(rb@~+D*>iMsXK7Bql8*g| zd+g^XHM~0!dwb>UqoXfhmJtQ2^#ta^0@3F9n8AIsh1p};F|o0hTOz-|mHY&`WrDJW zV*XI2nrKg7`20Z8j5_Oj`2Bdfe0eXgeOT#*am)~1b3R~_^RTZiBO_A|JAjVoV>q*)72e zi^nHMSV!=jFYJ{S65AYfOz~VnkA_D_r}F~Ku;?f~%atk^USi`4{MdpK- z-y1Z{eEdmn+_<5g&{WO=6?cHzQQYff*G6dg%)&*QzD017(5^=ot=d-J2?Xw&1W5$20kvc6TV7=Epci!P!^+RAA#p+Xou5XXNtb1v48Y?yJDQfEe8x-&I>tQeh zm}GV>^iAhg69wk^RO+VFYU6oL?*`fkkk_Bxzr&#-6Vgt41;MQ~1&azP-aF=J!h`8{ z`?dYQPC9=jt;*${4D(mTesl|l;^dAkAps95ZC+jA6pTI3Ee2vk%6ICfRb^!ZR--B6 z#oY4SXoMZ=##QRFFYj!1$kBb^BYz^e{aoo;Mca}jP~_m#N));O1A-iESnPwt{A|eR z$t#GCdqpQX%Rl#g$KVBwoT{jKiM|kPwv>Py>4d$-b_f-j*-VT45R*nTnbP|!@YDSK zUtqv7IZbp!ODl0f@Dm{JOiUisK@s8M&jnJ$dg38dPKV$YtjQt17Fj1&RaH5I z{@y%~i-}@vfB)UuOjs~Hm=vty&O9e1I+f(;y-|)K?0WVgNAH++0R{OOS5d%nC_)#bQeuerD{Pemam55 z2E;jP30U<#4|GZ-a6I#6Ysa)`qYas`IHvO;FByKNGKHh`Y|ev!uoSsEPB zPAMUEa%^}`(|ZN8yHMe1xg+i`+&1t`t1D-3-*)|#2@CKnD2MU*$oWZ#2|d<2>Q)2d?fSejz4$2NAtwQ!lOA+ z!Av2+h%%ny6qo(=Qf0IvRsE90bM%FK7Tf0%qx27W&9M>tN(z^Y>uwG9j}? 

    V*PI@(163>202hP%MYEQW!Ph-nK zj<$=O^8m{G|Dch=gY5K^<}u~Jq)DMm8$+zo3GZj=Vy&5x`E8JLYQuV#A>zn^3Ra{| z;)GelTU|s2!7G+xYCi!+8BQcZJ+da32nQzj?Fp2gnq|P3p#ulpM6~k_>kvV7kYu6O z%hwRoY#8I$j(bW^BzLkXb9A%SBXU8p{qcs8-cQJdn0cSfi$dJb?9unmeK!x4i6qHa z4OAk~IT5GtU;Gfzr9|-z5YR@B!2NjYEKVuOpOQh=XKC_wOk^Ba1T$m&@o4w^Xes6_ zBVrB}1i8PBO~wx zmVQGjMIjNevqxuFBIdZt0%^>f`%!`gvfVza%>MD3un%+Q!}T{3t~L(QGnRd8{%7bJ z>Bmpl2S%Y5F}AoED@Nm^PIhRa&ATja&uDnm=ovotliO(PVUtioPJo(pM$(r4rB`2MpzL-K!OERfo!`t6AXiSR(hYo8|6X<6Bg z3knsuCPl7>g9LdG?9AOshPk`}zekszGXfZQx{5S2Eo4$V8IU#>Xrf0z@nXSo^Ne6U znZlT4On*jr8F%V1>k@soTN zKNRiX!ffEkL!RU?4w!u=%4Asly}dAKTmZ>I+by4Ls^i7j;-sp zNbbL~Sj|p`Gz`m}xFWh~p1*$1HR_2WKSwiixs`_n>iU_rlqgT$nx-dpw{P;trN07O zycdJmdY?fhnXJ+JRQ=FM;8d9#Ob z4epvWxDNY8uuy^{(ipX*I+FuUXr>2Oo~Om1TZD;1;_Y6T27Pp#rcUZF8*J!a&n~a_ zt~phf1V|=O{`?QS^Qd5~g3q*4UhFIXJ;oCmZw#)E%j&VzF{5$O<(Hd-sF|n2r{Ex> z@U5Bu(nVgzz@uDMJN88wyF7zv?Bm@|Fk(YQWcs00qz!J*c*91By=t}q6a?JwUSTiQ z({Awy$ZyooCXw~rRUIxEf+{eC8j2a)(T^)8Gd|GUFS0Km=?|AZb=PktO>8&maD1AjW*Bq4GE+6THKc7Nq-9yd>6Oe$ z|Ef>QBsV#wEsn+Ul>b9yK%Y+9_Gp4^V#gV<>_VibAc*tl{bWE%Nb!uE=1N9<^Bhx$ z6Q>Kb{bbGrM8Q%&dGLcjOGMk+Jk@S=DGKVWvb$qD5ZaO|C5nEe6$|t?lSpDBU^LvpO9mQUbCR(KZgphP1&%0^U3*W?p2y1BVvlDzLldn7y498xyWA|t z+jISekvENw!%8s&DY^-?LsEOZXz7^YcHmd6hLOffTA4}|A85Uqv{yLNHZd}$XK4$l zNedfJpSu}SV1-I8gbV*^@O4xI3YKB1eurQ$>HcT0^kg^y>tQtgV|#8bjS{lh*iRMS zFa9v+a@3iK-#DMLy5&)s<8FV#7hW=cYHU2{CR89K!h3N22E1aeg3R3tjms_Bilukp zj2XjTcc@U9!z_8~t=I#mm+;s5F0_4A4Y@(pI-8Lr>c>TZ9h1W91SWBq&_Q ztU-V_6MV-s)u2`^JFw`&R1bCGk`-xq`$m876<+mnq38}0xBjZPt2e@OM1)<|dQ3{J zTIWsZn9)NG81_5(3kMOGVPn}yJ6Va!pjB3j`-Hq2ygoa@hel`j7>+5lJ?=5ev!>0g zYsgJ5&Gk82^u=WRpMXrt54k&69xOA6Q^E{ozfWt}R~!7NV4CN%-_uvks=Jdxt`UBy z-O9z@L)ICFQq4-s*dYUSYaO3^v3|ccPQI^iF)@4;4`nh>!75@P7~z-uKiZiX$|*gm zEU4)6rt{nSH7G9KElr7Xffr~#y}9A3?&qB!Dd71~ron>B6 zSbX|~Vl?hYCqcejvA-Yb-^%y%f12lAPBFjo)_>YBv>f-Xe5~{`4w^m^`+5+~BG22c zhi0E}@*~dkJFf;l6Gfsyn$MbLKS<)v;7os;DccdkmDRRtmZKcRsjNmdA`UdTz#cWB zFl9#48n{R)eT>AmxN7hdFs=Ogv85465v`Ty8@%djZBzWZN6kd%@Jk(M49x&`T!ZWtOIL_iurbU+&Ep+&kI z1SyAxAp~gz>F)0TIbPR&KhN(u{x3Xy;epD@KKJbXU2A>TX(ansMJ-1dRi@*MIj;Nf zrr^m*9;VF|B@L?vr<@;u4Dp(!88$aD5~1~!Sew7#f5;-Mw$@y+O`4kI@p*@4Gr<#7 zq$u5#(<^=f7;o#+L)jvUgKW5MwOqp&U= z+-uqNL->cVP;8E^;i>yJKE0=PBBXlCc1k^tp|miLRfl7Y#aK1jealv?a@tNL-g;qo z3;M<@K+Q!`$m5O5DlG|0H{mzD+?;NU|2O67ddwT9=m3#s!nmhJ`V99MvW zupdg^Y&j{*r1jq=HK-i&-3`!0Qkq|1nx|ca0usR$%1E3s&yn1EF=Wh8lf|p2IcvKq zOysTve=vj<&m;F2ma$+GmRR?tc}YckRM zsN-mmKPgKBjVi)G6Y6)dsGn*VOeCy((tb*dYd)3i&j{L2#zI8RwH*8;C}3>2<XP#K)i2qen6KXYR0uFpmluJN!hi9b zs?*7492lxV_rM%4%M`HTJH_7oktJa4b^gBCXb-e-?E2=886o*EuG*;WftF zu*~bRuVOCy${2o?0|0m>-{eL3N=5AKByCa*Q36VXk<+s zdJt83B^uuFgs4*OvZ1kV5HEy#a|`S*sZ65fVWacqNR0ZZm|}O@o6C(yR6d&f&r#=i z?9sigO$;mPK2aGF7UU5x_}Na_-}O!*g&RIY>-6_e zDYbO~B;*U}5A*{$vwwslmrD(ae(6Q%yXn~|w6_?XT6=O+)jS&HUU6JW52O6+rO6QO z>qGx!4;{*k)-0^RGtmbZQH?*3@nR^!9A)VWf)Z;|P|4qag zHu(9ci*@)^iX+CVXuUg;?KW>$m#R+~NI#snJMz}4x4JG5hl6mHVx2uJugo&lYs;+f zoF;{2^_fPR34+v$IPcx6;|>Ed+Ht+M%_iF-Xf{m$7&6 z>iiaU{rvM{zdqdn4!|pU7W-~I>Gt`xx!L(tQbxVXatk1p7hu7iM2-|q?KDJ(+*xdC zv`*kotG16=_0@tr4l9Vj`p613N9cl}Dm-1ZUDujn%?4UsWMPXJS^4gp8KT7EF_A4~ zHm^#c-Z4>>eR~Ud0hLI*%&G*cvMU-{!c4%e6?vym!(v3UpPUsP-;q7B(;MAeIaXmp zZ0mP6lOo@Ha)&H>n!>zIn=R7g^2NaG^`sW)FmJQzF<;2Ai5sVHqOJ*#@V&J6*KSDU zU^o|Rczs8gpTTYuc~|*lP0dH>!@kNU7h>RUDPLI^aVB715|@ahTpE|C?utS zu!zuKB&EGulXaX$sieib90-d_d=gX|{zJ#y4$f6B(oTZ_)F?eS7yuUlwlAnBVjJjC z`jk9ho(2gXnm=ss03%y&GR&cDkc>vN4s4}shWl&S0(>#N43PqS2UJ}x9j@T#BcmOw zkC1)U&l*9ZI?JqQLJ`jK37oqO;ZD=EoOP5FEw-vcLg!Q+l4(X>?g$!=adxMw&H*h( z3qodFV2$PA6wkH;orf2riFPJF-1a_hNmhS!PUArnysh853b|FpRI%MM_}YAY1F_#y 
zB@*l$b}T`iZp0;FNs-Ck4*GjRF2G6)&=W2_6*>&R>^gI=yB9kWVu@atLylQKSyH1? zMJs&}JGpT5oafD=ZsgNDFDzpca+5bd0?rBU7NT$0qYFkG`aMo8{ysqa5*dIfsqzm| z5{)gs=Os`O%QR!=G@Db|4TzL1Q<`Q~&fjkY4fbOrQ5Xw;q{mUf+VLx~&&z*L09N1J zFBHz^&i0WMS`n{uADl`+2d8RMwZG`XwJzNGb-lOpm&G%loZ)XvGTgDXVSdST3xKcM zCmfa^RM#nyM(EBICnkH3SyPMh2P+hp#~Ygl8S#m9goXJ=m2x`?Oqd0b(ei|QlG2v?lV`JrwZRVt_#qZGIWE+#VC#kWNc2Ph%jw=?pjwauH_jw0Uj}sU?RjUR3Bl@Bx z9qhgjfebC`(-E*h?NVnO+ZI55&)=5-&-)*}=Gn>N2g`=UgyGc(u5T$Hgt0#ap0twS zG9F<4Xq(`C#RhAbh;LOo@E2D-FEPcSwmDTU}~=O<16~~8uKsa(Necw=rZs% z1HE&O+u;qqC5t_>E=)W+kXpiDssBGNlY6-aW+8@}Jo*-8W?h4e$GDm+g-P4?Kv9e= z+$zDTE;?!30N9?9fOR|1`t}Tr{An-6o&tKc^j|r)skN_AJ6dEFPH4-m>t}lK$^T?S zBfw{mY;`f5M-r&jI9)VrW6Mt)`PUTpkM*lNKDfc);DbcnP|#>UJzyj_h&7dlyS=9* z1Z4aX!0c7vic-5*&p1#g4E0ufvcjZ7bd<()b&t1KvJDHYv3q^_KmINUMUz@JB?jn) zhYaNNq^s(o@e#}ZESi^v#qeX8#X#+EWwE`!fi?$Ka{jOP#mYRpmfjSnTAI9X5}T>x ztzR(S{!&g&S7-VE<87jGu4>R@-(0bW(?cR#3+SNfa@8bU29 zzbrqAWEV4DcXQ<6eggKi`V5yDOEUbHZ{od@Ks9b7jtKL-y;!6E+?8AIzZ)97VU_&f z@#qUteeVVw2*-Dql@-59o#!4z%ys^WnjMQz^zy z0j5XHLdKJcsC8ObTH9K=rHT}WGpbnUhoRjVJ>RRHsNJYrI_H8=h6vwx7yor=MX;0W z7NR4CjcBccHzbh0r`w@PQ0SDzV`*TI#p}{gtpZ-OWRV+0D~*-e37#8?t>X>d+^NQi z9ktcxk6}#!-Cyc-(#pjsm1Lc+g?&ygyMR+pN;#CDp^V|3+#@3O?tv+K-0#9V_3r@R z&f~!+q4%mB8xPvE{RyNxtb;$e#x^z*b{56coHwB~OsAn=6S)Hi7LcUxBl%BLD#4Ah<%E`^<= zM}zWt>44et?6(Cfl*((lg^DntCmlYT(`?g>FkKSm4})SLRnn8U&BqPHcd$xcrc0F8Rdz zTfNdLpZIal-mWolvY5fiBW|~)k5z=&y>2$7EK7a+9vbjTf;66YD9XMpGH^YyPyXkD zL9qMaVV!e^jlH3Q64LgMLUUefXVTD)UeJlu@=4lvctEf$=R8IT!QFGY!|--Fg((ce!w?d{>r%KSa9Vt%O=m@l#8aP2ba{h@S+ zNg4wMjW1kT`=G?0{2a2$T@OSI8vsk1H9~cWfur1Tm2EqMG5K^jb05a)_3X!cPO*X( z#ok9!*KdeR`OzFr8`8ZNL$M}hv^BATc8Vd*A1KF#U7CwL(W=W7=D;^;xC3sUepTzb z98XbKT2cec{Z5sp%~V3N#@MJWHVyBvw4{X5nfy-V5uf33?r7)*(1m24p1Mir+C$Gk zDr?a6sPwqptCJ(Xqx^GPN=gs3@H>lu&SQ&Ck5M%1ei()hlN3{m4=m}cPxn(Dn`lA} z&mZnC-0-D!R6?@{2oq=aA~q7b3@tdw2j5a)=~$3*FqSpG|{-!UDH z0@Sx{ZuX!n!42Uz=e@?(5YOZF16TE@CCpp}g<-?Z^BMqGoHm$lSK<~kVv+UyKA@wuWOqzn_jtu4(fQEdOzNfm?+nR!* z%2SBJm9)0&j`-UU$$$)E7|J357UbK0n^>o!WFS!w7;{%&_EQ>}DZL{?sJ0`S#O&Z~ zYz}a%*k)j;C3e%er6Ob^b$2yBAT&UtAR$)oA^*6!Id|}6Y&J`uiM!9pIu=F`Jz|6z zDkR>$HBVR0Bv}iOI|c_+`4yZyM#YbHt?Dyu^QW~V$*&NHnKovtPR*iAgEmjmM5x|4oxPIx(@h(PzB zHY!)eXSHC%?jpt}I!L`b@998wNKOu6`%IPSoih<$V|iXUsao zak$Xe2#HKYOsvW;7H#4$&mWaEGg&B*38aIR#WTOnU9SqQRoKHdI;H?8>V=+Tpg_Mk z#`lH@yW*xdiajY~ta(>q?fre<<+<%%7J3O(QX3ue?&)g7m=xc$FD*}gd-KmS7k-MA z{voPOBR1bZ+v~eUa6>cZ7WoR2=vtmu2g6XOEdO^H`t*134WGr|u z;O;7!5C<0I7zP?6#2)h5)Xpr5sv<5Vmw7$9MD^ptw=xg6!$;*DA0bjmJ=~LsM@4f= z#tzcWR&E0n;73+dW!;e+3k(hzyrK1RyKK7)!^Mk3#Q6y)N0DN3%N{!q#03o zjqVCkffF|y#^5Jp<-)c#AIql=w+s!L`u}h;1qLi&tXEdhR0*(@NVqufFm8Vu6L@Te z3Ae;`#(;2dZH;iX|Jq189|5OQ9#Wzt%rz$9sFgwl5&WnofSj9W0Pa}8%Kk|Mqok}1 z$lBOu@}=C^qdc8 z4cHs>eY0vRv`cmY7{f!@>CPt1iQO)w{Or1kY-fr`woMUutKUhnt2cOF?F7fLZ}T8% zn)ZWuW}{(KS~;}?)QBr0riyoNBGvf?d)XuUaa=Evo3GeGrG~F3j4_tNysYI|?TO)U zfK%r~K}elFiz?H}*rAb!P)qF5I1>^Us2RU~Px+6waM(&L+X23W!B>x%=IKot45ADM z_VM)#vm89DPT7#5kH_V+!=W}d86>s{r3Sr2QY(`JNz>2eaQT+l5WmQSnD>_9Og%Wh ztwn5F7|zNs*-~lZoU>4^TjpG$mwNWHwb6Ou5?nZI%%vDMCm!JH&|{A+h5PNCVmx&cr*(h4Gdyr{lYw8hk5@1+{dM)PumL}^%;w>z$uX=EP-J|z_t9Yug(>{ zWJDj_6#Gl|B)$5<^pKZ$5L&u!z1M03%NuX|U_=!7UA#gF1ZNWODGy%RL7{&Q+6;300B<$Jy*b z08Eo%4>y{$rf4-!GV>`+wp53!;d`9pz2YftNCj7%gX6Flq+?P*8ZPUGNtqQ}K7sSs zbBe})fG|M4JIv6Nb4N5A0U zQt(e)8^$On>yJI+Fl4v=Op(&bcM3J@y$?C%*eiFn)fc<^F=sO*z?I|KlrSZk)Jl=7 zC63*}^Xg4`!^2+9oEuw#FaxrmhJ_EdL&DRtRhg>~dKNrQIbLNIxi{%Pe^DgA@%F@2 z55vh=#Gb=Oy>9HPE49qpqvm2lx(u zqIYVF-yDzPw|<_pu`zp!^nN>4qTD|0Vs=Bki_VkY2y^lF3FeJ4wU~a)Z^06InNUf- zSg$;lD6j+vxot4yUMD3?@x44a0wa?j_Bbs06I{^yL}DM+;14i~)=W1%4rWridYt#P 
zyt;m@CetA@M`&!TZ0*IX4da8QQEwmVRR})e zs!grld%SaHl2t?4R0p$s?k4Slz*0p;Nq}zmM>0XZRnySV)UGzAtLAd>{8g1r%eEoZ zOCs-9sPne-(uVsBU>?Y&R5oDF1`|N?-jwmP=Z16P6RWNe;sc3R!>6;T%HO^;Su2(H zwD!4`UEGidFL^%ZlGw4u0hmFFkh&M`cnzNnsmCRd!F2iNJIubcunY3Do5kv>_J#l$Vv6 z@)ut0fwiqt&RvQ|vmkJ!_(DlW5%Yp0M+hd8_nWLPhsPIMo#Abr)nY;3W|G*k%2*>J zCXd6m3M54x5qSSg#84^r8;$;4j2I`j)rz`6RQwXK?M*wPH;a418Fy)FP4QGKURw#X zs3#ES!~hmbPEqSdBU~kvv6rg}!}_#F;mlz3x+1wea)uQ9=nIbPsb*|oK9+O{GfvDV z{8^~EySj#r(w!Op#piuYUAPr6GR)Lm3Tuo2nY(n7P95c) z4VxI1zh%i_(0JweNI)qYlFhL1;mfQ9?xzxer@Wm(0q+3)*??xpObm7et-w`ac9#SY#UC1)ULZ{YTg7XT&K!yI2DUd(3Z``)Egkhk- z8QaN?!sunnMit(YG@~#7sDhL}`PT0$_Qw#xvWUu%)>Vy2O||&!;M9P0t16W|?fPbpJ^TNP}N3053TvO(lS zIRaFw1NFap20lLjjWUlqtW~yvyfYT&my0Qeh3_ZxH6@f%ajc%dK>J^_e+-1p`u1zfK^3Y^ciz_$X1{a5t_{k*P1@095FjQGrBc zCW>0XYZXuYvwDeBHhd8A`lZLqNcp2+WH<4VdrQ%gM%>naI7`s@WzA1YdZsEpKm^{cSp^`;Tw<@EIsO#f`QQ zuDM7`geVcK9SSqcj-+T!krI0FO*LCe;ywlU7FXSm6zy)ekh6J*zDv=EzSuSy6AV%g z2)`{;DY5HWjtZO1v)Q{pl8i{+33`bcO@5Uw&b|&uL#{&-%#bJXI!=buaKK?M2qItI7r6&+F7lUrDgHdB*Q17v@ zy>#lT{4c=N15^9*OHB@5tkLdxZO!hu6$KAX6pu`cz-4P+pL#6czZ_AMi1L-~->>CcU5>617MSJjuiIjdiC>idi=Uo4`I1+P-EA=Cq5#InG zE7I#^6GpkhvYV(<2i?#0tO`m|H;)4)eegsbX$G{?%{Er6>^9>nre>siHa7R;JxF0gO|tOQB^9D+g2=}N zDw3H2SG=!C)mhe#BvAdbD=7_1>aBbmpsC6`kMuLaXo*2}-hhCl6Oks+^82uvQW1Qelay zA_qJd*mz=CVASS7# zyxOR)5G#_?ZmlB9acoG>t1$x@B{}!625?`!!E&p+R?&XJ@+_D$P+Z_=6{fkSS)sO7z#pGGcC{%`fS|cG)`DTV@DoGd(YnMz8qbo@#*34>*w)_n>Dm`Z6J z7i=K%KSGeWW!_4@z)a=#Q0ou72ooPO5@CxeJCR>;()?oQv#7(h2^y7E>j_KOF(Gs4KD7dSV}hjg{USwgs}q=Wsn zqL9G|BX3(Dz%WnR9pRosbH8BvI;4e}2irQ97sP_()vWrx^xTDbHDMN~NL;E*d<;Cq zlk}mg57QX>BHnAbEe-!k$SBVh)pR#YNTdvmQU>1qXxv`C)s?_a)Svv@;)P6%Mo3cXaA&n%+MCTvuAacK4FVN)dJI?vGY>zI?rG9MhVNANnA{iuEeH((EjkG(9j$=9vi zDx8z8CMT@{c!?@`^a^@c)(bT)+`usjUy=nGbKMoq~nBkgYM5D(?!v2 z-hAUms5U(_<&R-@o$Laxc*VsS5inrKyng)l_{i~70QY%MrL>nP-W7h+U`BrzL>WGD zB}y&xVoqyMfo^@xOAf1AfgkeZ3zm}w4_lbz)4p!O90R>H_AEm0T3M6l-3epwpUW&W z$0(e${23JR?NW^}ZD;6-^3JwuZG`fSOqFTvb_#oFt^(fQ0>OSJEZJ~rsdC9sDemvm zq>kT`5NPyvA?@UCP)TQx^AeJQ$uEm7qBpJAGuQ^$#iXOb+ZuW!N#$V-NUFd~ z32Z->D$x3TiMIFEQ?Z{%W$|AGzn}b$2Q8g?mx-%b7Ivl8&9Pf`ebDt^x*2}OCnOd| z0^P?NmDx>>J3@oh6%z}l;>jkcVfeR9s8waDbLxKsRKL7FLEk_S&n#y`G2E^6@r!mq zpLq{5o=53fz!`oUh{^QclY?L39eJ>#qL}(~UK~7At^VgR&a?jAnQ3na+4OImXrZ@1 zo6kZohTZ20|6oG*B$Kd6fBx`uq~gM!p>Z5@u}#?5!5fZn-e7nDx9s}#MCCYX4r()Q zEDvv|QJqiBRJ;&r5|u7N7`#blR(suFbWhQ$MK2(Tty3&!+NOQ(`qiC*o(qqLb6pLN z3dp(4W{7<)xr*zdAkLp+h1A&_{lIW>Rysr?rBrM&QXreV;@;;?Yc1FmPCii|?QROp zTj^u;WFG17&DX{fD%1B48qkzP)@Okz#2Eta)R(w#{ZWej1E6jXr0%#41l7h8iv!B* zlrycjmaWxCrkG$1h>fN>de@(r9UpABh|_+>+ApG%YsIaDR=`PS!Z3#a+jV|j2$M5$ zR)y)`=IU37CNm1DCECwuk>l4?ISj5#CyF0}&?85j<&ue3KQokTJWjAGAmVGdFOsS% z{%oczg?I8za;NX{>dc!XP+&Z*|A2$HP>|c>p72(=m7ijU`Ffvz@<|29$!Wh{qD!$| ztIeDs}~J#M*UPCUVO7QWOT25R#$@c!VqqAbBft}@7bEcV#A4)^e01I=&&Bc~vo zY)UlEwQ$vg1D*TshU*m8H)15$Uqw>uL!u~_0tW})8ujb1ZJ z3gvMYf9L^{FVBm0aG6-CNFO+(msf)t&0v(3hI?Bj;=1Kko|=X@UxAKssjq<-fHO`d zyA1otic$_vJ%ba!!0Re_V@tUG@>ymR)+_wAaG7$>eGeZeShuuL#Gjt-H$ zllocIwKQHH$Rq;6|Lp|eV;Pqr?uh9*corkWXUcPk|@;48po?s#{^jD z^d3u1h#AlSF&=5y+QE=@0bk9NnF2tlvtZ2NtAoz@f?JJ5=o z>YYAT?8>bzDqFS(d6WVd$*91`9dE;d4o$taf)cx(-2?^eN=}sMsk*H)Ah9Q|X(Xzp zAKk*(lO#M)z%TSjfKm0I)Ka+#hg?7#Q)(h&RW8kctX50xJ?O8XI|rhQWI!Us$no>f zD;67ArpUZ#LuVzQwKo8PR3O60Pjw{QFIk=USXce2vMpgxsm3eOn?4C-WuF>g5T&|TxhV7uU+*e?Yk7hr5YYXb z(M5SioN)mn$NvFGR@%VlI1^OhKY-ET3G24;!1zZU zOjpx_y>mk-%ma#nlJfU?0HKVFjAV{ z`m{KtiM1?NHKkZL@XZP(2)6^ZkZF|0!6P8xR@8e-3rRd-7q7#vc3Tm(h%!Li1`h82 z0GgHh0fIHtkMGJiFg@xY1^-_z$T8*W19lVTRo6>q!XlbFJZuZQ%07Ac4>TCpkXjYd zDh<#(o~$GYeEWRD02^qH-4@{BAe~~H*&!`kyWh*N4)Kp#Q(N90jaM#etBc54Ji4Om 
zrg2lnI#Mw^BsLeyT-)@zyP9`q8k=~hdBcMIPi6uE)P}@n-W=+e*^|woR^7kxe)<1+ z`NZYC|Cg6fKEWy>T?HUfqmsjUW|&aQ{X&3MwE8XiL-2)4s{&9WG|n=0xv>I7c!kx7 zR-04TD9YdMyb>{(w;OXtT!)t5Ez!}vYi#`(C3Usl-x?ww-n60MDY3JBM0~dyZF-aK zyGPPM^6#=3SB|Id5L@7V4bUV|jOhn;kK(!LZq^kh#(o2=g)7&!slA|n9^@zmHr?(1 zr%}DD<|yDEKs0&w?fS64^Oeb}(NtQdy zW&3fTwC3?b`Vn$g10ceas-cP%0LRcXpk+&cIynfCf>30t>Og*gOa=HS@ZD0a%a!2* zXQlIZ{x=)ZE-XGPKq1*gzu9S(J`idq%c!c50%~wNzI@p89`c2Hy)gAV{;A+wplI@T zqRf;b(xO!^nAv02q#GA*_ip6-=1-K0F9$FGwiqVVyrG81n*KJH=;=rc7HN%in?LoP zNso3@bK+yI*W{*eUwYU}o)l^%2LPe{dKCu>u3uX~Z%ewDly^JzTt$w&O0l`74MxkK z54cR<4*NK2cYr4}_ju|0Xh=W8`z2lf|FQDh{k8JXU$2S%#eOXQFu&^`+Pw+~OC12f z7m|kR;eX!EIk&ZpHSsl(=&QS>sUd&Co$ja2o%($2@h6fY?;#LV=9M6Fv-5`m`wxrP zR1-CGNYsU&pYOL51pEv)dSYjH?)s|4_0 z$Ox?yM-XX{ZE)CzIBjx8lZ~hQoPjeh@W}csr0qzAJB)WIu-C18K>(B_u{Gao+@+hA zH8OvO=(#|#cu0Y#JH1A4g}KQzL30=1k7VX9tCeJ0v23R-$Ka$ip5|k%P6Ms%+%bZp zKDg8>w7R>4wOLDv#|`~$R)JHjsqk@0zs98djm)x$kl&Vux~PJMT^8_y0N2O}`X#3N zS=aD7hDv%?4d3Ze%nioR&_B=1y$4>IU5+HZ#(-d&mUIir!gr{iEqsYmgDY^gCcObv zJV(l|M;Dj7o~<+bXc_yb_mvC^n>@urKz3exTnHpb0(RfQG(dJ4EoOK7dZ^BdJXuT% z-}W0%;rn9(=@-WJNc_iO$P?0)HKZVfNZflLs^kGs4S1$d^nYA1pZN+N_)#r046r0P z>BG)ei!+AtUp)Lpl%SULL^-9RdsxPy;sxdqu3#@cvJqrXj&+nTb+Z9eqkD;4t^_AP zRHb*)SS?N(){ud}oYz&YN*c2t*c`&HR$eKtHN?*4r2+!vYS+iN54mqHxfD!2xX__p zlel;9HoU)^`SuE+iQ)e9fna;M8>8^V&^$TR5W>h(MP1Xgh_2yoiz;g z8d9}>1^&ufbJC5m7dXO)|e8D$Fo5IegeQu=O0ZA3hy-m zNUWymLaT(hyyvTnwen-}ggnLr&+e*+Bz=|Py&`~P=qM*(6`ZMrslr71p*!9Gas%g& zxxewHtheOd#gN7=-9?W=hfY5MUykvpCC(R^W6H}Kyq9dnwf8g>bKj_Q^gfq#%@$L- zByZUdx}N?ubo05+DudNs-M`q;ry{2`vlCHWr?gu7j9OX;XGw>ciRJA;~J;SL#mPj8ZV-!$Kv2?=ObjH-({Q9=Z> z^&;>({nlmaj404G0xm~iedbJ6&2uicogfl@HzhqH@D}P|M>X-{bxGk%!-m}ODT`J> zhS$4InML}a+Q*N!34X0&{Bm=Ipz@`F&Hv4448{v|O=c0GwFCD;5{3TJNIH>F#E2uOG?&!=y=0AoD)sa`whGRgep|Cn8f?Saz-AEQ|?Z z%fMh^({3A~W4lJB@>`w@GFv)2CYmqyTj(bKp0Hg1+>O?=LTd4Sv59na>v(| z*$z4wr8=lXgMdq~I)q$(hRDBT4O$I$LAc64iC2Tm+2>9WQG}d>${a7A2jl!{xHJkT z{<0YD6UC3Ahe3LV^NEw0uQbpwiy`X+rD@>xTK>GYQR7w(9)-#rPs}G;54b78v9OVL zXy(=I8V*ObW6}03-~6q*{D;_%9+B(h{q3K4{C){{m_9`n&@^fgpS}#cf~*mftpJt3 zO0XwZb^M9H!zXclzH`7PlyTh*&}(T51Oi2jb+VFNL%gdidEu&?P6F&qdV@ctl_%ib zIwY0l`OaSo+Z*xS<6F$aQfA+pn@K}3PTfMCX6*w=f5Zv=?3O&`)sdr;`jJ>F;QpSH z30?d1k0p}QkR)ggMxV;5Q~f+9t{HGL``e6;doGr5&T%-&f1j%}8PZ?KY#YXa!}P-4!mcI!nQKO>{*9PfnQ;R|JLT13-!egCxJGVC#bEjU_P zyKSNBaC@aV>Zv96`sm`uQC*?8e7@j#x@uHfoeHl?eAuU7N6 z$jPP)W*kq&JZPdBI<26pZyW`3>G1X8i4cm{%epQU_e!0s09 znk`V&H-A^_Wg?WH6fL@0aJLh1H4gv{UtpnthWMl~E?-eq;4%ofb?ah(0a~Lb3eQDk z`?yf8N}m~NhCLv9hz0Jl#R(MOBbtJf=Mh0GJQmaU*s(G zOGT`_N}yNE(K=PSIT5Eq^A`&;NW1)^FEhjAB|_+Kof|45*#;~LtM9-S@N&VWa)S9$ z@SMSLI6v7ctRX5j*u*e|Fn00Ny74fap&EI4 zr%ZF+*CMe}^V{6)TXvH6yyI6(&jrlm$Zu*Zj7VuAnD_MFZ6}_X?z1`Lf*MkY06c#(|8Zoy^FbJROYQ{wXAnL90i`O>U2KdIVsJ$11tE z2MkP|HA-dRL138(U5$v;tsnc;B(@#U>87+hIX5#5y>lOYz$YR&xeq5G=23Sw5KQjS zGpBkILJX!*Y-I7-z$mFN91ua{EMDW0BeM0=r5b-A(hG%MvEQeL-e&}#OWh6Kjp~!= z#3LtNKOZ3%X+80Wy>5cQ*>W!ds3eDOn*U3NlM{y(ERJq&GtVTCBx5zp>b zr>Ey$#_i2Hs}!a6SMXv36%y^-<8i0F69cW+zE2204`4JE?i;@u$7CyS+$`b+BuuVI zG=S5fIP(+vF~TUEf#fa%_Yr2A}mAmYv zyb?T{fKS)ld#SX^`tEW;p~dBjHqAHPb!_ z;Hdb3Y2hV3u`XQ#Xu{=r554Yv%;bXb)9%CxacqZ^`ZG08oohdJ_)SyP+e`K#42I`b zG=#Rtd@DRI4OrlGdUwmu=8@Q7v7!4A765+jzW5+_>e1u}b(?nt_z<(x85^fmvNntS z4F@!`KBWgZLW;LGtyGv4ufHhLS7L~<&?9>;epyPvrUpOkzX$R3gW^?ZeBN>J_bUr#k3Heo9-Wbn(OqxI)s!b9O?k_}M4Uhlv(k|>^! 
zKctJw4BLECMeFEUU;wrncbzs`7K%CP4Vgc!tM3$_VT57adxaIi_80ZJ`NXzw2S9y# z338HVPga?QCqWZW+x!Jtrm*5SYl6LB?Lo#h+%Oy-gpj%o-|&ii=@^)=wc6l;2f;yU z^?rQERcb_Ip5D%M6ZU5B2xx!Fy(ER2{OV?bc$~TZZb$Cv4}y;FC-z+W!qbt1&;L0f zOliM~rf5m5wPxy6?CR9QdJ}I196_!>{h}~3_Na)uyXc4{f1>Qjnc4X472bQ`^}Us= z|4JCn9*W^W>MjjGn&uM=oF;`qsg!Y@GK>K{S36&v zPV@_xWYGCn`Ch-rNOngmln9|G%Q0=I| zG^80uIr=Q__hlbKkbOWDJ+qGtQkJLEuR~>IBZt_MiF8!3ZH7&?Jt15@IgNhqL-Z>9BitCg%LMq^CJwF}qngzphx_;}B*(QK{I z&bdYiG%uxo0yw9703UbMHMRU%^4$%do1ee{FlBSL#B2;rQS*1&VwByU>NOn>x3YD;2a%}qQg)^F1^;0V*nk*x-(8p_yJsr4p9aZoXgJn8-$8Bw2LS$S&{{xwTV>sB#&EfxEhTN{MnA zveK6pgy`n_v8vOt+#y7z;_~5W_GKavb&LU0$H?7r`n&0wDhca*cC8Ua>lS>4^#Vq3 zxuhtP(SesSZ!n6~%D+?PsH_koZ*LWA3Zn!rL;}f*9HPp}mHbSn=++BA=cx zE{z}Pur&yneJfy*34pUk39%r1f_>zEm5F|!xH?&hP`-+FHo;|){LezYSEIk#0)+AGE5Bhq>J2St!2ygYd7=0a z%4u;h*+@L5L6@L4je(Jw6nL7(CWc}UM>Meu+Zz?YpO+AaGT<%hE8z4GnOz58bJ z@TD46?OzcYZJ*^v8p}ARg{42-QA_sZS)64Xj`lc4%?r74n&w`=Yhy0)wUOR8=+czSBH?pTjmU0q`4}%QaSEAKnmMz^ zrbFuB-P3Plta12}$Vef9rK4${wy=Is78TAiW^GE2=mOHB$3ZnPjgxGz`Voja(mjGQ z?+mmzRm}?Uhi$B>{~rLbN8c<%*Kpp@Kf`$xaC!h><4VfKG&ZPiKVy_2$nJX*=3DPX z9oe-?#PKe_e9EW7zx?RYa~G@3tUgjKNR)Ptfu-I216Q90d$%d8nRg+^?ivP|(K#kQZI@>~BM0E2sZg!l#%RG=m|9DS8tX3!a+6vNxAy22)L zVGyfg*?iwg5LN2^*H=I3n)U!U{&jf68KBrpm=p`lUGS$;%f`zMupJj+AuMU^3FLa!Y^Fyc&)1Kva9kA$mU*WCEv zIujV6Ev(^zi=nV&)YQ376-8wRzW%>dtR^mCI_Pt;kKLIWv}Uy8@D!Gu;f=DedK@;X zRuv|R&}X9vu(}>tfJIuYEdo>x471ylXZ``e{-wzS`Cf11FF);%59tYCi+kP^v>D8* z%tduhC4T2=OU^dKOiPXhzRSX-bhEBR02U1>9qoYZW*%^JlI9O|Bd0${4gTqTmg_y| z&iaFV;XAFrZ^{X-4}}P@_f#gm!1-9>iIA^0HX^Ylc$zvj&s2Gt{9Cn z>)8z~dhyMI>~r#7<}#}^&pB^OdTvyN)zxu`fJ71pe4S~$Bgb7}ufZ&zs*hfI)&GnyM--G47-`)hVO`26_Yt^QxwfJ=J=$UKe2sf&g(JXUq8Bn?0f&JR$0 zab7Eg-^rN1omnFI#*Ae#PD3(T>*Ws+bG6e5ZqsRP`ANLaz)(JH3UABh^#Zvq?((8G z+lq8%a{M3lUuQrG@3dOeIOrn*))5fIWO&#(T=YTVMSUS#-|O*TJA9Zym zfN0T?>ATx-*{=x7$vi2s)4c>>X|jWJAO6{OC-D$*uV@S3c=xj0#XFs%LCn4xvg`A% z8L@=~i>SBWTSFxFGo?1u|6}!VfQN(5C|7l{k4!!1-~CQ(u?!0p zKtYC2c_IENiR}#*e;pSjPow2{tDeSvsur)p`SPbJX#fz%Sc@;d6{|h63Ajst2P{Bg zuy-e^J{HK5C^P_PslAI>>9U70TGcXXI%p$n^mH@iSDpy6`^NW~Ce%E1>Elsy=FPU< zMvKSJG;{8oMXB>21eeMkce87f`4l0MsvUnmL+vGglK|GN!a-cKeY8NGHK2uHKSAFt zl$gd1$*c5U@Qc~o?FaVTo&12e2{&9S4dB5lZLn&w6B}`SV(|BPxa`+Kq+oyF4{rgW zPoWZXc@9mAKdKbJFgQ9c`kxe=bKNf|oyyp^ual*EC;{F?sB|i3-&5wWdC9_jW}ckG zk@OsVNuRV`B0dq6&HdQJqZ5>8x4)((p!(>+=H?x2m7gxtC2=G@ccw6nUGoCb>Z>C?t}NeTk5%OO_QHog=HUCfiQ@W zQ2mXNh-20xyG}2^R?A2UjQ43`;N6i+{JqJlFT!kW5W$MF`Z{idE(Js7j6ahFMiqFP zEFHyFGz4PSWSM3`B6e#}nSudr_lC0@{sM(a@ zEDkq)UBCSG69JB(!LhJSYGSq$gZnWO`r7ucgI2a6)3s0(ne_tBo6=*GGofy}v7jU_ z&55_6V2}m;@dCTb4|fJI>l+mr67CrSBY?Zd<%bHXJ=6FTeZ1GKVfH;8&Ttkhv)U}^ zmCyFvrtP~uZF;JX$XXo(e$18uu1K4>BB+~%VcT`aq~Ay9=k!{C8f0U3ZL}9^tmOT( z*k)%VcJ(^CJu=!t2A=vHmw3O3&?G8%N0#=8HHZ;X1tsaSJ!{>ojfM=e&|qr+lQ0H7 z|8y)IrtcbTiyhX}5G!@h%4ZJnc{#Yq#XpWPf!DOpU;(~+wE(WqAwHyDfu@k(IB(*v zF{LcKUqqqezjjKcN#CPxr{JNO5!SEFLjq;$`f=N`u_+l+KJ8VIxg$`{empQ0X?Ny5$>;xDrU znGVMev8dLB0|zOY95lbsJcRfnl`6=nXOG#yP%vl>fso9ZY((jCB;&@bmrA$yGxW`~ z2E6aP-@Y-Bb1YH4ebxbr>xNQN*D(S`HlqArXe8GoJM&68UG(h-vOp@{%vb~N(Bl02 z+NsAxrw=LcZc*3Jp0hqBf356LaMFG2|d7~&aMT3mq@Ohy`TzdM@&k1?8W z6-@g+IwbDVJ%(3z9O{|X1z#U9e{H$O$5<#fIc;XT`4J&=mYllx^Q-4iwTY!o%KFZh zYh}1Q^Wk|6 z{5CtAO7ssO96!{_FKemRxj%1&s>s|*_Eo5sJxQ0+#*tmv zB!zz%#6dRM#`isBr0E+}&E5XK)=bl8+5}t0bbsS&K7^d%?l5HG4)m>dbMZo8s%{Q+ zg&Vd=UCFv$pjmHcKFnS8sB_5r&LihB?R4RlL0WML!>FH(1hv;-zRxO;Y_RW7NB!;S zbPT%rUzlm|TuI^jC`(FHm34(g6;lDq2eAUBtj&ElwAn%riVAVxr1q(w68*qqnJ&sn zN>j1WjO+I4Vi9Dc$bBnDzFH#V`HD*2QYXSblrATe@hy!U`WPZzeuFWB_UR9H*0;qf zt=izP^Q&tFyygRj{{rDSMt9ysy1zOV_`&dlsU^jgcJL zCl-}Y;`I3<9jpF%M(PYAGshT-9DCEqRDhfU(lo 
zVd-Bwx}&(H684&pIW4PwhO-5?^2Z8&v)hL7jk7;n-6QwY71iVc(Y(*~fBDITj)*ff z$lqihPx|e9d!>Y4w_aIR(DG>^O;yfK;4U%)8Cmc%YcK=#i5{I|ff~Kvj1Xca;F0+U z3s!nvX^;68P&Vx`WQq-P*vN!rFp=Pk#V4DmrS~>~P+#!B8Ai_&e9ws!yHm5eckerQ zTd9E%$0zmESk|mKWLC@w+AmOB*B+5gXF_;V))9ub^F2adJ1kCjauW9~LG;xd-rg>109Hoq@=VD*V>5j;F+~!`-VjZ{GfO^Nw=f?Co+mSjX zsuoe=qxto5GJ7dw8w8q<%hBP1HkBF5H;883qLZ>DY{fF;aK}YbQlCX` zj2Eh2Dym0gs0zapWUdiU;EHdSrFDI$FdvCsmsM7AlEQgFS>oG6+3?K6Y8D+Wdky#l zjKsXE8AIzQTEbtcuiBn=`wXt91_g1$g+o4lep~827{emv8n>YDB~ZJ~U@)j*K}*YK zMt!;WXVW#eyW`Z83k^am6S?=pH9TMb^UK;p(#5PZ{tSK`Cc(4k3)aegszL=sH_bS< zaj74b2UuK(8Z3rztavKdH`Vci(~#q=gXOq!mY}f%t%x*IdzuF=kIC=T7c)Ht zBnmX_nN4h5tRugE2aQoMUFWc9M70hdzI+L6<$C#GPb9tfP?lbF6wem;U$|kR4@rx2JmJKBM*G zOkQgOd#|{aI;z@EK-2;^HB!9XM4;y!7;oG|7K<%=Pi#v~dK`U}dSe348`1%Ah(&dN z-6lk(wpZaJgCmu~6a`7*o&yL7=>i&jCP&WAN6A`W_P&;JA*D2Oj8Try!l9uRns6s; zwz!W_XG@z-lpb1+m|ue~PMt8w;FEyAeWc%Gk7U|@Njswy$9MaJBPKX@kQ470z5{Ft z*JswYF|^E7AP@1iDoCsD%R+^%ju()cRB`?>x~pEzxld`vUH{Sp-5*CltrnMO$k@pE zvZC&)Bq>tar=GW7`#s9wSYxVq^8jK1YN>Cu-}EGGD7u_fHaN^@&(p^tYX99RO_s1@A9=7 z)mGq?%%b7y;pv4ee4aAxXYt?3?!|n-cCWfO6GGtUE^d+<8}&?yMsF!S>K@yo@A629 zW<5B>(sApIL+6-(iBC)cr6-`&EzddukIe_UPeX@PyXT$xr3ULtnL#-h+d{L3pb>O9 zM*k8qx_{8}4xyyweetix;?-9XwzM-YA5*gCb##j|AtOFP+?5_kxl z%6S%pL(CgmH{Y5E;(TcIo6=%gRtHiDky-D+*(<~Qr-ANdD)9O@d!FDREbWg_*=3yw zFZ+2IN7x98S(5zcRvw)beC@8Tx|b@dp&N?Wq`CTr9K%ttH2TO8GjzhY6xU#2OsRSm z%R_HXSS^ZErLo+n<@C-Q?%_&5R>Sn- z+-)Gw>}n_}UJYufQk3hdZqCn8MOe%xv~5n7S(F;SIi6_{KI*IGWUnBG2zSV%>P6i3 z6*Qsta1il__K(=YYrV7R+ZEuo)Q6sHY?)4-cJNM4@xTLdz&pr&P~7VY;X1R1vK{4gOvtbT#+QCgKeCHI@C2^ht^KJGD;c#cDW z(M`AKD*7_$&~%9)vHSkb2K#GuN7IGx@~R(kB3;W?zd^{Clq~H&cbr>PjR_k&)b;&&TNG|VMp1+3+i(4$U*M4GMKGT+jCu;y zIDT9?51iI8v2zRDh)0NLm$VY;8KkNgSd&J2>PWX8I; zI;`;GRh&}JDnbEXU_7p3tHT6??0%=Un)`OqpRK80=V}{>T?kbKT%YUqRspmACd@#B zddBiFiZ>qTUD@&vBG?HSKs_sg-Rc&$ls= zr?O~2n4i3mDmp0siDpd#<0(^})7!eKiNkx{S*nN%;q5QVaoXIoq1NrkIz1*YR8l?| ztUV*t3I&t#j{qk<9x+0NMi_@FM#F~C9%6UhrZ1=3Rg~gpDF*O8+HjyewS_J6I=}G*)_#)HnEw3 zc8_EDn?sHPj_US{J|a%vA9qXyftFMaI^Y7->01&1H$a*ANllH7GI%oAZE8Zri6atx zFg&*|Hb9G4PlrNnyJG`kAw-(=$3P@7+LLdYuXYx6PH*cg7n17!J)=PsYYW>8P_qOG zg|QaK>xJ#QHNFzIw!vtCS6Xk_+wU0gpYwY=U&fGfldy$p#tlrmzIQTFqm*I)c-ac! 
z6ngx*`|%pPwqEK3m-A1}agVPb(04dqb2mXQ4eYV|XS1qU5y%T$>{>L?2&L8v;Y{aMam+#!V8&px`Z{m#c-guDp=iA0Sve*b6&Xz3x!IF|> zJ;@DY>(?!-)~zw9TE^aBX~qzwrzNp%oSxM7p{7h=9OioN2=3LXo(_csE^-rYiWbQu z)WdCNyQKx`b0aG*ZR^-uq$6Pw0u<;Nr$_Zp>XxL+{jvwc!fPIWUmHDLq zOC6_$2N|@80+n@S3*GM0))f`_2?1q)m0N;O!&e-+Kb>Q)QOt!9VA(uCtiX zq|+>V%45XeJNsevFu1OiehiU=pBC9t5O~Zhcm(p|iB1PzCqSsFetzw7(4_mL%Qv^v zaY7A_4gLdxoQ}t^W0M2iGR5X`8HI?2^j9{pyr;KDWPjG;dDYOdc)(0@Nm#z^75(eW ze}FGqgNYnoIohe6a4Yedj)Oq?q&I)cZ&fE!x%)1HGi@Nk*_LTCB`k^sDH4fhB zZmeru9OP^v-Bsl&F`_d+HzX$L^M_ocOV!y{uacD~SqIe0U$~CXPUDm_>@?aZPao|L zmKYZ-q=I-w`F2m|R&Vctai>{%H-1g%3gRW*J8Y@7{U>FUmlC^5{OjQn*AEXZJ|wBU zs_U<^UM?xM5}6^b^+lA{Rm=If*~529%OhN=<38dV*Z>8`Q^i2c}R8M7|`|#83*10y(2cSlz*dmT|ufF z9|}g^J2uSSSwT+Tw*iv>SIvkg^YB6NPxA*~Ms)hU`h2&t248WruhF`ej;dkuc?LLl z`VMe8I_JZ3SwWRNh1gGuDbE#!@aE>V;>pR@Ff421Brf=huTRyIF?dIROfcxHx z`vh>0QfU2I$@=vT(eC`k6eIwbe@TP>6qq?xEwQOv^R<156*?3kS$iQ^Z{X}_Lh;)l zB>B2%Yew$AhTIh#kg^RFDc<^%@HF;m@lVViM9Rd4HH72l+346c@((Kl|3Q^=bJ1xO z#KoR$&(5J?kn~?8;MkYz_IILKaAy9u>l?vB19BA+LW3+Bt>(+Ujt?Ynt7+tcn%!Or zDqetwnFE#nr^6E1Qxtz|76oPLU2-B@P15bV0%+O9zS)nx_uIlO`VN`-wi3eA$Kf%m zoKiKuB<^#Dc35_=Ke=496hB5&*E-GZRDf^a5$lAt{jPNud1mqcjnd%bHYxcrAW z?=2~>-7fzdYf*(FQzUdpl6x!^Tw9iPb9rNI0 zvu$OQ0`<7MuMO|b>L{KC2;yRYS2m3CXj_sCP-Y+Z?j$8Ty+J`Y+LxHA-L z{OJYk?V}U&tA6t2O5>dHD3fsO=v(L`K#aj1is8K?7oF9sb4Z2NW1QUIA2lbeWjpF z7h@&3eVRNm?G{6I`%=9#^@jqJYo(VHBH?Hbb#>c{EwY?O7g{8S%KC76GQ%IZ@1vI_ zq4b&1Gt7V%A7-*e!5-S^&F>Kfj9>2{dDjg+JWxZ7U=O-h@mypz%0GaGxOg~ln_(!z z7bEoni3YA90AG(Rx2ehM#g|aO6m8&oxIZgnzQ4}OI#(kBgJexv?wjaxMR{}NF&5P!Im9t!TPn`=*8>bOWtMPSz zrIR|-JaMddka@MT)}%7bdN(A|tTZFSGjyviG)Z*wNMLyGA^abT{PP$AT^{~Eo|05< zh(7-oWj&ssY;v&~XhSzuL_h|)mK^V24&Gw29U~vZE1hqG!WR?&#!&V!6Zh`j_xsI1 ze$}^B)uOqu6?{Hq9(87hBt=Z{kO7x(Z=bzmylMAvYlLJkd)WHXrRfx?%Vkx@y?a zmvm-BBF%vCVtT@plp)H`))fRWSN%>4uuEM(lub<*!Ne2C*k|?XPExPt=I2ecsM!sO z^?zv}Tu^X?RDp=GB9Rlhg^5*@%A#86HYfVKdfKSLhWg7Voi4tQ&H{9UE>SIcV!O&* zt}~^s+A!r`vmhQuV7p!GXWAZ99zuvVb9jTWKef%)p*x@Y{> zKQR&Y#Zd1-IG`N9$U{fNNXzl3*g9%cj*=MvjgLWld%4R`cl873 zI0AArtg>bDhv4htE-Y=PT^2q&K3}4uVGZ+qRwk*>|NX)vrO%#FG>sTx^MnValrEn1 z#H4NejG2Qx(x`hVgc)~FzZ|ecw(q!CEugdD%nJ=!2L#?Eq!?yob{{2ITR+cI-gA}( z{-I}w$UyW&UemQUuv?iWWqx=jyZYX{!{*JwmWGvPex*yr&6rJuY1x3z9r|y}{Tw&S>TK-j9)PZp1 zicyc_k$mBsZ6sR&Jb>2djk;>MWm+V{a3+tnfTX2AuD_r@A%to4*=C>}lw0oHH>I+g zCk;eu*dAuEh4z3<#5(=M5TQ&l=bhHP;2uljCrzBu+$3{0@Md7t7JAWNu!o``7h#F# z<73*jiRH9pi8D{pD!J?oU-%q_u=bSJmd;k>IgdZ{b$B9;^H1%>VN zQh=fIs*wF7>MA*&+c+QR7>yg<@MCAwMmH``ajyQXQp897J1^pJwD75R)oUqiPX7+&poRq@IBKogr6BPijh#d z9(&CM-j6?o8Z$z#~TM*~6;x59op0o*f+8_RFZ;^z5aqe*s zyFuwA_(y7r#67_4x5Z(Vi&meqgE#lETz+6j{QdwMtl1z8wNGIGsMn|k^SG&Gdyd4` zn}1#kaTNr#bS{`eC{nOASw~{1M=V6iij%F&i*wA<2iNib+G~oqs1X){AmV8~7|OL8 zi>N$0dmVm2R3AdWHNqGB*vRzE_~n8KY|&aDCgu#&9y~h7&17U$Xu;cjL1%sX=-58~ zz!LQ?!kCS8)WI0GSl=VL%l3j5hy~(6#76)Uzv2>Ypb2Fs)6H-%Te!=v@g-Yp*lSIV zwLH}-K3lPAlQCg3;^jBm#(TQ>>h&&SuK5PO6~7FCy@r#yXq9Y zwB62mb5s`ChGH+P8~J*0n;fqoVuQ}D7R^kowRtra*mI7g0%DZUKG#q)rY!koMuvW% zT&yn<>KzrIFDVJ?>s@Db)Ofz|VzyMpYX$vD6P)9=0^p-PWuhx?FkNd z!qdyQtfFc{lW|sT9?E=3c#SK1nW7!$!-%a*nTTDk(xFTr_q;*HYx#po=oD|N-5x;D zYw}Y#@%cNtSPCO*EE4bphKsx-r}+`6j;F{wLaT!&6nO4mdpp3X`dTwc#hogJ(Ade( zu<@;XKGvlohc-pOBl|t#Sr(}q(-=#tEAzgS(Q8774@9{Xf|$N8eIEQCgAqRvLAi!w z!&9K+HA+{fS%_*# z8~fs|)K&dn$*T=DMTtzeo;=4K{+-peb*Avz4e9v?4Q@+}HCQbf}n@#dQ4OentEV=>bKJBQT75V`~bw`-55MI}WPm&v#6F%`f zqV)!V-E+im$e(S&gDQI0$Kg>!A#sFA9WANZ^$uhp!Lf8Wuc)z=acZ!p>_Syp?Xzkp zrxEK*TgT+YCA6UeSF!vUuI%rcrJTQE`kk!iC$ z(H9QQ&K=&g#>smhiHu;cJv2zR2gduKHJg}6LnxvoBRn?fk>CnY5rta#0jFdFFdaF@ zlXR5XUJ}}7^nk59oXR{GWS+hLhH)By{|)2d5SU=N-CcPDbZ0YjjHYXm_$^FpQQ2u! 
zVV>|V1 zF!QtvH;L5HG7MkMs#)`Vr&JRh>s&TQt|t<*lQFP@4k9-_VrIj zu|KxxTOS8<)@Pi$<;B+b^`Y``_alu`%s!)~?)Q20!gJ9OeusObF>6>QZ<8WtLV60n z1Hzp#XEB8e4oAj^iXvlWIE~+;stdPWJ6X(pM1ns)M3|Zpfdx8!W7agcCLk~%5Lh#{ z**$uoow^<=k9*&8(n_lFTh|&{HN^dGAlh6o5Ip0AMp#>FD>hMW?>+9Jl~cCpgozEh zclf#P?B;b1NWP%z(8GL$DRi9jn=ilJ*0O-vjY_VdmITY)&^*U;*{D@wxw#iqx^?DpIDJygBz0t}?s!<%6MrR3$4ZpKNoPH3BM{Mcv$ z9lip3{^*A#)J}G+l<#q&qK_eMv@Um(B;MgbZPg6N2LeRw!y%V0CU#^l!j>?rkN2mt z2ceOao*ac~rVQ9<_AxcJd4g5Z7Br2-$CF_rJU%}VYq;vPM_<(l{;)#1;GPdB1Y7KJ@?$#JkRBnI@dCDYIUa;;7x?= z>fhXx6j81qPZBbSL#N-}#R4RRlnn0E~fm;CA@#R`A!h^CDkM zBO6#{SDh+0^sVB2H#So55CqnATQadI-08vZ{Fdo+Cj6?=*aI}K*arI|8pY*eA^N_! z(M%ICN@&qL?2dzne&3#`_UYb9Xblx9iXYK3U(NGE!If^1DKlO7%rjg_i+<2jtW|oA zj%j}%OosMz*XSQlR!WK0ueuEv*uTq~NUEqgFmb_YcNNS1HK$n(eo8!0IBDpchMjAF z6n(MtgixSYb965)ov2k|HY!5}_j}r=`te2T^xjDvZe3sEv0F|P83YC)jbE_1+ZF&f zHV&JVaSOYL#s7Q2_{@09Zm%TG42Oru>nRc@8PA;H%%}zR$W?quLrE`2tc3~?lD{=r zB;N-{F&Sm;vfR-A$tcI(qg#0XNzha7b#>Fv{YbSBF=Bh-tnmiBk>-l<{^RA6%7D4m zV7LAT7IEdTn=5V;Mhu837IoUGaLVeJn8#Q0Xnvj-3FvoGjBoxF;%%?EXGopOKD!po zB(crc54casI90OT@Y-^>>w5Az|KjR^`#lRCBrUZuHF9A-shuC z8EXRvwe64;f-SZ!V)4@Xl@tFWFaixF;-`Utg1`uMrEQmyzC3)g=IO$?lu?-M&bZu2 zX?WVWo;puw!F~K<8|_S=W0y5h;H6lBjZLJ26090 zto0*48c_=D_&6_RjcWZ`oZXz3vLbRaiyVsl9rr38Ee(xnvWy9VHrOWBp1FYj`fCwx z%C^Y0?P~t!=47~MOWMAhK#AL=*!y%#J56{&G+Rdah~DK)cf-Vo*qqGK0+c1&NWjh_ z$!N#mXe-xMaYzoLf-1(0q6te=&#S>_@fZ1IP>;ZzlJN6^`#}~%QW2~%sc=~G5YZq*Zx>0#DSfIU#P_f~bR?gtKvIT;YZ!(>O5 z4^<>_BLJhCM8}J!VBiV<9rR2XDmrLCxBb-=zEAv}rP$S{d5T;7!(Mm@nPGp9XSPbL zjiv#Wa?GUbOIAr`Lp!*k#!_XHdbM#r1hLj|B&X4@#uIyZQUY6q9B`A*R21`A#iZ|P z;A7Ua)h*gL*wkn;DlQia{|<;%5dOoTS^N{5@BIAGf8>-H2oPx-fucP zi0s;TT$Eu#+-9}a4ANb=qyGo)<kmkRwrz&gi_>8@-VzZy0=aj%=VNAe(W%qFmEO^2U1GA!k( zM-+Mw4{m?+=!14$GyBmjwCItK=TJA^UD2+S^~+97p^rth4y-FxZN=&$JD`7i64#xv zC2$VF0QntvqC>X#J-}q_c&kiyLrnsbJ38?wh62KhP%$rACO4}4vaI~OH1;(ZHZY&~ zyhzRxVU0(P6Gj$_BWSb1T`4de!c4@7=uZQ;rwkzx-^l|u7j9dWhxHT@qI_@bf`h}) zKM#hb->SlH*1F6Cynp@J{`xNXC{d1$%^ZzKlDBNUZcdc!0=u>j-wm-49CnY>)nZrg zyTcdn{@AA1BmMSSBB~vM-y!Na1L?3u$`LACXmrZSIpf(6^TX^-P7nyyXR|pj_g$|1 z^3Xlp&E$>mb2^x4eX(K1B8In=jV4^qbnPI9%rygq9XAMV9pmOuf@zlXx{uW8{n1YL*F3WGQwmmjv zRa15*gV?s%te67639tH5^E>=BnHBIx>94%h;alN-NkWd)=*rAiJrzH`(?0o0DTcHE z$SwqHiJDT9$RYN;VcFg3DXev-(k6w6LF0@QwckJyzJD~Y-+mB8)??y4JcR_S0>7hX zEDxL%-(!HElf}OqP?xP8ge^Xr_8T{5!kT`d9jao((^)uCdklUXoy~IrWNEf?c#?lve^Gkp^!DlG0RgfuOc6yVF7uf%C z!DZb0L6fe!e=H8cVek6hZTvlzyV7S==v)&^H)fJvKoaNNX9akm&4fuEn-N)d2k7El zEZM*e1)iG_)kiVHiyAg&>}&TOgzR*VfUqMs@W7ai_IJLOjp%HjWvf+>`aR=|oj$Ov z#BfZ#w;jw-{LruJ-*gTbD#9sZR<8T>VZJNhJQ;&G?YpI86Yp%e5$4YWC!W||)AJ8s zSax&3dU34VEV5g;|f4 zhv}XJr6)nsy>2D;`5H3WV*`;hc`MaabNcUD*%$L)kPM|YHCr{P<=P}#(G+`{NW3un zQVn+U{@W08Hm{!I3EEDDPVTe8bJa&%xSj z=smP{L%8AEPo8E8ua+you>uZ76n?u48^N__fvn_UDKy+r`cFS!%$*mOG5%#H&Md+G z?js1Ez6FW0@mZ{T)c1jJx+ewmQfsyO2^rj>U5;d07XnaQ=WyJ?ZQ=gCg>8y9&Fu}H zhZ)pOLNLSL)hf+eECS4WX=3C(W2oEGTqcCuprcew(AFk0x$)KP`HLK*)adr3d84iB$8{XcrH)9$(w?RlRF8XH_FeI( z_Ld#7R1VwgXKSQ4FG#QxOZutrev_#x%^9q>iJi}PdUeP>2lG8ny(8gkEYK&xHo}C` zYC{G8&E!bvM_zP~r=+(zUa>g;g~xg8jcRy|g8rkAKt~<~>|gh~!^D}|?`Y|x-{2h# zaEY!r(l%pAHxCtTGJfG(fkX5!;fn*vY4Z36j1-65a7yMh*A^+M7*p>(ZgsBb6P8p^ z=NKikJyWurt7x~W=jc-z~ret4^mi*j^FzzyN_C*k5eihybDZTWpYS+8bhh>N>`qv`|EC`a-6*)KIZ zxWUw8X~o!ExXTQ?(Xx7JPd4iY`EKv}2R~q7GC5BLn55^|g2Q(%NkM#U@pe_0S0#TQ z_@~r-j2n^dd5+ay#ovR_T#BhH99Rg5m9K1EMA(MfYSFbe?wjUg(w@n%4v(sy5{RkqRBYPd=YAY@c85sY4?N|HrD~iNj3XG4NYaa1)2tVg z=cZ4f@t8XmPn6^!*tDPNCxb+rbkAA>;KOO~;9Iy}- ziuG~wPZIUQMnjorv0lgP zHI7TyB^di(XfAyS%2}a#_h!jxl|sU8t5N}e?A@oJzwCbV+W$Il^{y=;hQCg)6||gR z4Ki4c#=?OYLg^kCUCr!}o`;6{1)d>my7%z_A^f;(e3(yX0SqM`1uCPm^;(28gw^r{ 
z&LwccK8nKzkQylRpXoYDihfwZ*1IcGf2oc_avnj{KQ@H5D)G)JsC%%f8Ve8qu>GE6 zUTZs(U{8|hN%bgwaQ8Qp{CD;wNbk3Dx$-OJ@WoJBkUoW|KGT*( z8z&WkR1_?hWV(Da78`H$wPu#cW}3`h70HnoGM0<=6<>pfzKjM%7!3wJWlJC8ke5c! zep#?<5un-QcIiki zJuB`yTrSQESh?&vm<{v_H*85#dv7=6%mvM8(yLu%`Myp(=hhu{;Q@Te0?Y4_Z|H28 z@gc!;E!ZXKZ?(%LS@JMdC~;o6EAS`juLdDEiV;asi1QVb(NBXi{ldz z9rXFJswRtdmFJyV5(l5w4N=$a!t!Eco1<6MY}*-W$)sqkCD;(_kJddk$~_;r`L^PK z#f4_YCHBaSi5BE6Xuv7>4^-&o3YuWj7#rJa>G^mN?CSkd#H| z!0d2eGgdC{&gIY?r}6M#kn*I5WkTToTR?dLiI@MF96HD6d^zCFT+CjQ24;*aF~&$F zo=g&ds?=ZpOKv2E#&U*|%f)hOs$9)D(Vu7KJm2;~c40KKj z+U|V8EJIMv1H4RI5_n)Z9G%hyCsw{y_2`lj$?F*6-seEGVCq&)pdl4ML!^2EKG|n1n^=Az+zy}f=ZvJS6_j4KJVkYxuSqSW4-V8K$Me##X?%3GFP~D*?N3>r%64giqsHc zTEwTadp3z`X`)vk`%ZtyT>UOITgJfzU8~fxfbjj*7zxsK6yD-4g5!V0S+Bqg>O)`B z@KVT@9EJReY>iaCT2aH~nvMr1|0E|zKKPaDoa;Ua^S!Dd1Ozh$&9w*)0WZ5PpSa4E z$35lSMKG~PHn@&83l;r!Rh`4r4p&b|jr$dzmW&If$wZ&&LK7;_RQOYGjK{hJdiGlU z?cuJ!1)x=?A$tb8D8i|zLtlK|%;!(V9fCa>?Jy~Bt*W;W5KI9+ByN8qH_sDq)$|_~Ds=^mV$+z{TTGaK$4BK~xC9&B< z%*V=?4&^3Ck@Sv#zuMsE|W5AGZD2}FG)6~Rn*Dz$mBDqKY>UGcX-7Y)&4#Cd1V=|n@1iH zu8t?rpPQtzY&HR1V1+wyaA%P2O=twJ*x)VvllGz!9S%p6e@F55L29@drtb;WxCvNH zo0-;4wsQZ#!+G1$)a*c5+KFf|B~KWE3H0K@N^qObtiLexf6u`-asNUC9$#1$2VP3? zl`mZ84h4Qe!+fNSns1O9b&nrqT9wtkWPwFD4IaosVvdm`d55*IVuYpL`jqA z9r@5(7PJrN`C)HCq`Nd zZmufUy{K~g8N;TFl9@zrol%Y6*Zvz`UC3tIc{?CXmEwk_aodOL%8vcmdh_L>v6@Az zH-@eO1vDJcUMs8QD?e_RtFm^fSMzpKv&1iU8=Ud%dSi%wTb(wW%XT%(vYIH|%$85L zSr;<$e(8qd9XLZ5(NbS!wGkT5Wqt?9mwIOt7nI$xsqOHgf2wazyN~QNX+hzb?{?&$ z1J%s!DYOO+S=77{=xAIB`UAa1p!B@%N!emsd>?kpOkW&PzyK(j1dN`{a<~fh59kWY z-z<~Ue@}8~(PvnW%!9nXb`l(DHt<>q3S2(Ml2lYr(lE>L&0PNYy+srk1d}bDMH(Va z)%WYunqv76eLJ?zteh8iR8kevy8X7CPwO!x=M;yd7I(gATts-c*^7Tzvq0oa+!yky zK7DoSkZFh6l!<=E$E5ynOY5t&X6e;?SeWuFbaIW!&NTXou@u#*U;kR9C2H@Fu$ZFR zDT?U38T1z8rbne2?cgdIwIk!K0MiozNgOLlMKK-+(MUjoU%G1a-fO$}yx1HZCQ7Et zw2-^a1?=YP+`>fjahEOz8IJ!3kzdZ6&~5?)if;JPQ0MXo8@hr!%?}tmV~FN1-KEQP|Ztb)@&*atR5hSKf%^k1&^(GW4iJLo8-;IbPzod@SX>DYB;kJ0~~js@H0 z&oZ%Igr9>hh;$mdf4#W=IsX;jvLp01Ktw=D?8l?!Gw6-<-F=5KU{va-fOb4P@(=BJ zpqLQpb@8{^B-62&9^ZTgg@RbLYKO4Uso?0`Fu79XKv`+M;@c3%@oxq|)Js z2d_PeYdFccX;^cSIuCihKo+eLxOUCkn~!6kY7`0 zabM*>1Pncy6DCX|zY?;7$!az$L`oKCSeB+V^E#}*dR;ozd}E~nic@eE zrCHyMZ_n$+t%weTWq!3KlT+OqHf36>Z{}O*&v|P25>$jc;$JXn)ra@Gw?YOD=>95*BdTuJN@qs1C=Rf~N z-TJQ#?em0x#W0@=D0XIQIf~W%_^TE=OwbsP5fbW%^%H|p83Q8Qa=Z8X6JbQii&nj; zSjcS*Q|Pm41c=kx&ge8FZY;o~Y~Q)+<}SCtBg+|{i%+klRF4UA&JU)#Xd7;J!d--# zj1aT#@RNn#z2rz*3fhf136%o?E18oam!BO!Q*nMl?#WzkRyPgm{_}0JtDKQX}-Nd_hpeuEScQ7xza})GgY|K-U2d5V)VS z)`Bj!@=I$~L`ABlKe6%+TN_q7I9Y8})Gg!FP+~$fjqTuce9gylg|uFk4$tAPrh~hI4f+jq*!B{9cR;8;Orz4;mEaszr+gGB!iWR}?B*Y@LV8L9pJ ztKDV&U#Q{O5ghm{fY+q_Hi3uW zqJb7!|M?|^n0)?!pFRsf{|qTfuaP`Y-ZiN$8cZYsVgS(3@yYQkQLk_{jh7pT>^egG z3@F>ZekRH0>bUN)NCRH`R5mU_LVaLTQJq9QzHWctd0h!G#oNJI}iOM#pTOK$#id^3V2Zt>WZBIuS z+v5I-+PxBOzE|nS8mV0mVCPZXRfNnz+3+!`DG3dk`n&gS@TnA00hluU{QuPT7C=$9 zegC(LA`JoxNFyn=G)Sv-DpI1r5=%--w=5;yON&TJ3IZZ24ZDCyx6~q`bl3k_^}6rt zd7k%m#&O1R0M{139y!4!k6e1i~ZJajR#N~1DunjF9|yPn zU*<`Nx=42^HG)&~^U`(%ZM<#Q%F^3j^Jukh@PWC2|+e# z=9d@ZI^4r6dw_KuQ`2pfeuTT66eN9|n05o5bul9?+{abF+-QXNr)%{0;`D)x?@yVZ zR%o&5``VD&mq_!tNMQ|P2>1ycDITY9p)exbYQVz<0G$j0RB7VdKaL6@3%QO_p+WI8 zUjNvY(Wz4`=A(dF3Fgz*x{aIX$yw8EQ~&xfL@X$CpXZ;=5% zycFGrLw0b5h9Ra%IRsM0wGSddCJ**VDQ2cfLDXc%pAy8z?G6Kv?*H?;f4)}c8_4W; zcx*p&u@*ApvNwBvbi#Y%pYyo!65K}r*0Tj9chb2Px={~8nzw008IVz?PZcxYqO;(G08Q;)F(f1NfYTrlMH14UxJsn#dM(`(^+#1%N zhxSUjYdJ7dRN+k>mQYJ?1`y+{lYb3;>wU!;VWm%Q6G>>1IWi_?SkW3uLF3OCKT~A` z`oO-9eFJAn3OP+-83W$BEQC=#%m$|S8X~xXW3e_QB^VrT_z`-*q9qZk0A3TYC}*U`FPXO?8gZcaP{#ztGco z{>Y|3{eHc*P}skfSo&0s*K>f&vuWvpPDh!J#>J{`)6+W=fapMcFsV-&aW8TcWg+D} 
zf&A#aHyh6LS<&q5paq{-^)w$KVmhVIw{W8at{s&$1UG1-FVAcSixj9_@eS9(wYOk^ z+j|bCeKdP|_$H7jmEXWFFl#4$Ig~gPREwDgMjyBI73{fMMY6|p63Aux(0MGz`W@%{ zJ>^LPGTsS@;Db$y)WR0NK@_IzP%XhG7idL-;2s5ND$M331A6Y(LKC>*6ZSi?^7rYk zOxj|=_-aGn+g@xZ7St@@2vkMH`5hsRq;^mOk61~)nYT045!#DF09~@WGCVD%*m9i! z+CkMu52~a-UHz4voVEWkf6m@h;@O1Xp12dKPg$Aoyiu}|?-Bj^9DV8Gu4UK(3v@rP za`Yy^_Jm6T{Ek4WAnS0!v9-0f6ljr)yY#?!J3dQQ{BEwRJLt-L??$&5By^;Dr^dhr zPNz{)QJGhNzE?Kt2Bb^t=OHQd2$|>JFjW)eU*!@kz^)}jzozSio?}9;tLE?5MSITA zsL(d_6xNM5Fz=MW#tl|*GPGKto;;j8@%ezi3ZT1C^}A#1r(8vm3tzmY^8gO;b<^A;{GDKwVdIW*`j#yilV+bur1wXL|??IJc1KD)n0q!EAy^5n-&|ngnJszPzz7bgR(|xTjc)hbhU`-kmSo>$) z4)Gjd4Y+ic>0ZI$L9SSopYHTpFf`ts)UY5U@C^ozk;IZVA9^P$CbQYIPi#3x6Z3ba zoLL`E)q3}~lba+2`T1*TOm>tIM6#q8#TJtIDze5E;y|j;&+`4O_|}g_{(=M&abF@^ zr=B0*w8=Vvjf(_*igY$-FV?ue6dPEsG5m$>6vFjnI+lGgpjlD_;7x^H+wHvsjrV%CfP8>B>HSyO4%KRafx#fwTj|>q%R*mYUSL*P2J~8-oGK<5hz!0#d_i^oO zu~B7NL6qb1HM9Y@4(oU|?j6bTNCF(Hfo_4dtxa%`8%q14K|cn1Iv zij7l|WdAU=t0GTf7_cVal{EP*CCPh{{@NQ2fP=lh^hjvysvWe#<;DPiH{Cn^?<#r? zm!Liqfs0N?Rl}dg=5KO9JKoL(d}42jQB2AmmYraBt$qFORpjoL|568PTWlp6AN9yz^1Z_W{MfOE2by^2*2qtu$<_dp2YZQvlIIN2oyA zw738t?lJyguR?}DY_$q}W8?@><{@sYaAsRj)$BzTp({ELN^_Z9PN}m{icw*E70(IE z@~m=*{?6I`hI)h%1YhE zY1T!hzq@HOSvX{r4{ho9LLQDOb(EoXj-b*sFMF{5!8rl|1{d-%z$%p^fLIS|P60SU z7ZR8_C2{GsqER%cj(wC&FdzAJLCLu2k;asfv72MB(C9zo12u$_eT$1~f?HNOdHUVZ z31xAG4sMsF3Nw6u5!o1R?l78bgh%?4n1_>ERgL+3B_)IA9k!6yS!C0iyuw8{l{&)< zb@<>bsZ3@H_%$%8i05SLd|$g{Jruhit(dCuQD%~%JyI`2N6h7JV70>rBzat;@|kei zGTqY{kYB_?2cDocFVdNEa6=LffE%gM0 zt{v$&3sEIGYR^XwxkMi$j>2rLmuu}e?>hXnF)kOiFdPFj@6@OQZy#DRm~&(DN5e@z z!JuirvP(aGRI#@ok1^ozVY7%WLsovymGO9y=a%T<| z`dJIBR!8}=Q^}KGQ0(i51dP}>@}8a1_hCs?%flB?qYF97XY8rmN5MIXKVx_1< z8a&c({V`@5E#!}88Rsn*-XZ!NK<{}Fv4Iu*_Y0X7CHI`mfe~eUXW){pHQp5yvnd)O zxOw+2(UPpM^-45^iCx1xRLI%I=^x{IKh?V#J>*St?n`KW6ACN6DkhuF6LT` zC)^hHxd_Y`y>6goJ>XT+DzIpxCB&c0#A-1*@*u;`TtuA56;aJ*-Hh-fJG(}+pQ)22 z%pbJHYyAP!9r=Nno|$Nc7V`}0+P~ks&-`kUc{Np+z8fx4OA`HgjOc6TBlx^9R?9QF z+^`JiQE>TN@ZtJt&;viP8xBEAt^6=#pw8=neReSiD?N71r# znIj$BSQ;}*2>1Inz!l$>Loj-=L^@V!Zic|}n#!W#f*YH&@rT?@n)2(X{0y+L?Hwk7RFp8*G(4&-1eMb-;zqi~9BfbEE5F4;HZ-q#01k zuf|lno7P<4y_;8$H}Sc0P3^UmnyDtm3+*Y<;CT>5!=4kIsgu!qTl*fxM_=x!iL3mf zSYqjxO6&1wCg}cGCnH_^PFCEVijID-Cy^1=?enQ;`$H)4dcM_X%zdi4oondIF{kC5 zGi&g1t#4dm2lsD6``pVN7X*CKNnczO^Wm4MSHA&M(J3cfU(_dANY2yf2KsY|KED57 z<=g$_kMga7^@<;a@PnDZ56OTn+3ex8`j^)ji0BG>WW%rN{ACBj^$fGTayICXvCex( zi}5e4LTeb?mZ2=BA|{apYA)MwzTh^r-PuPOBehStP}m1syA~&_VEy{glm{w3np9># zw0B#m1+h(*Z1oPylE0pp7Q3U%_0!l=Db}5i7^&&7*jMy9*h>;#FCTg_HN>a;iIOva zxbUoz>w{`~QoY+M{*>BfFWKdJXNJ>lSX-CnLrGz6us+s=nYK-sPt{ru^kzq2`s>hc z-HSxDKl4A3QS)C=Ulb+*hKX}+-+4!x99F`G-QAvn|4JZdMX!BNGrH^omf+F;$3^{Q;cQYzXKn>a}Lwz_BTuqI%$%x-nV-+!CoN@ z%s5N3NAq|F)3|cCnawh%xsD94-#8snuY^PL-Jfclus6Aqu~rbj<`%K~5fXV-`0YN9 ziMIMzyaNHca-lgU#Z+}Zi}H>Bor zr8?%pN@|OGh-};fHn?wEwhE)H@+E^-Zswc!m3l_(;Rxwkk( zSO?`0Ec^G2*MZiFyiK1I-?VEesL+U{(o5)fObKphwC{l47Kkt5!}T>8bH5%`*&l~z zee?yN+Okv|$GMt#_tk!XHV`sR`yZspA@&nzn@K`NY>O>?cO`?GwhTl(A1nj`LD?)V`sJj4MDdTg=@C?SWy( zPYbniE9EcdUR*r!^E8jaI%41&A{jb{8IY6>3=Ap5%qUf}3%^a$3JxHGm6r_QN2c^A zh#I-(gr7}6a($PW=;d~cJuaKguZ-xn-Uvz1_*Br)%v`QmUdq>kvg6;id$X%e`oyUI z>QQ!BYoMw_Ut7*EQaH7<9-FO-9+bdsSU*qtz)~RfjBTY#>1+g@au+Epial(|ZA9Vp z)ss~8{+ml3K2MwVlEWrE3a;X- z#rghJ=UZEg*4t#W#L*moCOaV{9aAqsH;t8+>Z{9%Rhx43LVJBxT>6?$4se&pWjBqC zO^+U)6Pb0XCX<*Ds4lyDW}i}&y!V6R&m+AAV%-9Mn25_siw;Ib&wrhE|M^z^SuBLC z^UPY4E|Q+U{(BR15;Lm;^hS0D@|)_7yN|D{@fq?tGSOZU=Bbt_O3h4U%!c=*^lj@$ zl_@)xR2;YLcYd2-C;TC+d#Hl2! 
zUhsIJ?k9MT)^7~Aj5&2M4!uxyng47}n5k!NE&XT$+-qZa zg(C=!D`Lx8kZ2Q*+JteH9xK<_A1}E|RoIO3HnbFPv5on^x70I`@Zh*5hel3}B`ugcFvCUQ=vpQ(Fjp6F-s|ibNZDkzq>tYTB6V z)G8hzkoWa84y~t%ahwkwjty^@9*r`i_G@BL?*#eR{U`|&p-aXQNpxd&%A)Uo=@MWm zUNIHKMYE0z$uvODb9sb!$cM|6ant$XWvt&=)wZcDr+@x(ud8=IDxqL-B`-`~Cc<%6 zKLvc|ph3<9O1|!yhUHB9DIZ?I1{B};lHxY~$zbuU$@fQ8)@)W2nd|e5EOj?gi_X?> zq?M%ry#Z($9`u)}?qJ*Ur`S~po=h1;Q+suCzki0)t?x6sE1%ZDs|`g3wn;v3_eNtBRPm`&k*$4Y$}DkD^RA8J*a&dh4&vui_*7Z(F`#Uz?`9qaG^B_@`!wo5FKl~ZN!r&DfROThJ#QMM^#qU)pks zwSmyq^$>k940n|qBVxgk8F^xZjn64fVNW?UZ+16f$9tJ{_LkApRB(ZKfl2g|T+To! z?We$bI900NT*h|Kpx1C=fKLR;^tW$i6pS71>Dlg1LC6i=m-df%Pa-20M$?i7gGrqk%n062GirNvSa_pN#sC@YPjyVyfH5M}Q$=9}rsnje|Q~ zw*%?J>JKib#ZD6KN2n$f%#Ddcs>58;Q+S?HC<$XTL;O-ELi3-=Dso*)nU4%fM|~;M zKc}Rw644K_z#H%5A}j0#aFxH3NEh1}KbFFP~eWyISmShnR?iQ@6amzL{h63Rmx`V&)+w zQV0```J+EyXC$aCa0*iC<|1j~`U7WYKmj;AN%J2#`#}0{ID40Jt7-!QO7~F-rk8Dj z_qQN^-tN${|qQf-Ex|%iA4Tsv=~AIdJ(= zP~!|JdWrh}_Y^a>4+s+2ohkz44c~;i>I8Q#m0%ZAah3#Pn&$r~6k#>b9iO+Ko(32} z2mE%XC#waZ!ncupR@c_w4K0ze{{mNQEI!T|XZrTy`v@#j%t-T(s)|%tN%DEsHQyan z*vT7;N8;o;&bCM(0Oi*iuZ07J5c3;#t*c0^H6268q@W%kGNJma-c&uZl1!wT^Ftbc zWkBynKcl0k4}q2PdAZOlsCs}k+9>u3X(?rWa)ljZ>rHBD7b-SAc2&Vv{<13%tnzo_ z&7-4gPb&O+6T8KJ`5A*Up}(H3VO@pf+RNdy>)cnar@7nGN)!Fqm#YZY3xZ6pa%R#{ zXFnESJ6?GKFMVNL-QaL3SmcvXf6XUf6jwFL2FLt^0ibjJ{a_ygN#g3?SnP{M;@%|v zzkY|xPFO&=K(dmnAPS`mdd4o7DGfmEOE6;WTuE?dhe2Lw!!Awf)i*El5(zh^@wz;+ zAv;*zV9>GlVF7n1QDg@2V$0}#7pZGzx7gS+oO`tW3@(m=St_o~AY{rxlY;VU?X|h` zR-(yEP~o}{y~0HLU7{+|l1{cCMGI{E%ZM5@Ma7d)imatAGzLEeP}jNsk*wtZdLvYP z^d^L|2Jv&>h&msg-3MBZ*x6l-)(*N9aN~Jjs>gmXe~AkeB_OmIs3Tmm(NV5$C(QxY zJtC&&;^ZBW=Z+7uaVzVR*XV$_xJt0YEnRDXp%Se$vY}KD*5*4etIEWUuYyQR`SWD2 z(_7?SXd!~}rqwGl`vUzQea>$W#Ku@DX1IsV7xO$Uf36U3?K%laR73Gx$_U~S^N(+A? z*W=paF{}@@5wjhQ7ZC-F? z=mx9B?*#*tviLHL2 zV6SVhbU)^Ri5Q_f^r*3jtxGuFBaCW#K zc|*5n_S^QOK@kFuh56_n7nGVJZG6!u`xmR&mcvMp*6>wsePAo7m+? 
zs{Aq4CVH5HA){MSoQb?&E;bq$!4uw&?+;;iE_K=p?N+T?zsA7O1z zN-+cAZ~@eJ^+E*jHk=;y`!)VAXyAv`>Ksz{)*b38_QdNi+5OiE3_z8=E{R%ztqLvJM|b;Ftyb&t2G3?p3Xk@Idi( zN~|bp+^77iYw}i6_gM0vw*Tg(yX3XGJx=kYmq&2y{Z1x9H>+nmr{Nr+MzNsoeP(uQ z@&vL}tqj6(Wm$5=L%AT;_wndeI-lKvo^`em8Yi4=;VlAvmUu+tak_Kg4K0^PcPH^y zf7R^lKOt>;ob#lKPMP-@aD2>AIf7+o=Syr+yrthXTP|mvo)L>2Zkof=kptl`0S&9~?VgxhXKb%sU3rBt`^ zW<_NEPR0i6!LVIl2WR}3G;zNHG*-8Lq84wcV_7~dmch;S6$g`KTHo;cFwQ-<%t@By zXK$&lbp!^Wr31hG%#7n+3*Bq<*;y?vg!1Ur8%O_ip4)W-SWoAAGYS$kjWITST-d}4 zp%z%+gwbJz-7y{!^lo@?if78)1DqY0Y6T`(`ge`m=?L&5(N=lW8HAHzk?8>&!&!;H z^%PM+dpxK3PElkr-m`nEWwF-b(lFtWHbrG_Fh*z+=;*Pw32TfR`!GnL0BO#a^F)p<$9?hi!kf)5mlT6ko& zX~UF0%r9~9!I&DZIvKEsEmYkmSz!7#$U6Qov@QC{yHtwjF9-PPTvl?EmHkPS*9Tgz z{!n;Pz|kJ*!Z+f%l&Fj5XuL}ts5i1_)z5Av_v(7Ct3`%O(qT&l>~fea)^Uk?Axt%3 zfD%r2P$c02!3&Yi7b3bfSKHAIiXRVV4jOgqi}Si?wxZn3(!WdG`{=aKwdL+hD7O)M z2ztj*zX>Fw^3V-5Ybx_%7 z{%DM#LlK%JJ`U=GdKhtzAdY4RZ6Db#g z_7zBA6)PObhf4&NbYaQ1wqmPGmI%hEidM`zJ(cE#2vG9CxN*Q@)zQcB&jfDiln!=c zcObP{^!r5)cL$h5-tkr3QHok+4XA0{D|x7T;fXYuxt~XlAcJ*2B-0p^-0r#)UU4rt z**!rVo)RVJFO%-RN^Zo$s)-jQKao}o-;Kn=*;;^7(FI7Z@Wh)!zp+Tz} zVS#$>4&!)yb`Xc_>ATmoo)0>>w{DLqkveoYCccnNpy84T8u9A2G1Lla9)8<5X#L>I z89!aFBCqdOIMOB+_JxnGD!YSqyfNwE&NFsNq~3E0-G&yc)zu%y`jyNVW{-osf$m4W zoyW9t#T~c^ei_O6t}50lB;#UPLX8H(m`&rKO>A3Ri`jgfhX|4H6cs4>+-1~#jL$lr z!((tCF}}lQC~&OI@(FFMSN=xlQV>T!lpL{==b=D6Axy>NG|hr_hdXEC?G_0!(aHM%6@0m2%8);jNZ$Fao z_;O$TiI}3OG7OsK;y+HdMkxTk^Jgk8H0sT$FA2LfCs4K!1e>UWSqMkU__HzEuiuL@ zG4s*cGAwkL?2cLsrzg;wxK4QSy+&?}O`q#EW7tCx!cZRX`8BX?D`frJc(c$ib-c~X z)E_2&fSmQ5=w1%H2p5pva+5xM^n3A#dvxA_y}CNS1?YGLgGNps5tQwr`4LQxeYno> zX_-YJ-Fsig%fjh3&!lZpMY8!{3^j$8!|^WXB?6g=DOUH&Yx1}To(FcOOnvNpuyh;( z>LC}gvsW>59MC+sZOdy!@T{!}i-$OQ&DrFMkMR#*Xp1e9 zn+18$=~n_>s=NgS@4Ja+IDuUzF9$e##76e;AKdte>(_OTG@k6&klY*M0KIXb3}34m zNLnlX%Yo27do>}!hHBy%0~f&=u%YyrB*NJ-HnvPLdG|egW*z{>n`#3oUIaia++eB* zwJUp}E&iaQ9&7*6L(feeyYD{@3i?1k_;Wxlcs=6jALHu|PB-M4c(y;50A3kdUnvQ< zF;DJN<{ORiYkDqUsw@;`9hX=6U3TQEe)^er7IIOyGAUwz9+uZvRMzM@?)MYku*kwY z4$|dOv3Pc;YzD*@&us^OmQ1NGp?}c==if%{@^!b_a5leWL+_aRwA?}-Ev}fhM3jHm6Kv&;G59R%D?>iRL6>fYw3s*G? 
z+kzf1IzA`?D_vnDMvO6TJ4zF86dLVrp@ zugL!qdQZK}w3~`Miwq`7+WK@=$Eo;sFYZnOGKmEC#E2mePYwYtWWcPoKB9B zfExR!8De>#myVypJ@8`rx}(BvG5u9a!V?mb%>AokMn>m_Wc5jKIA4!?>)oXQ4yVYcfiD)e8lC%!jGy*xw!!k_FM$gSOsg_ZxV zofd@7IKoq6Xv-uXew=6ip)3gJB!!`qKNxXSiNI43l!UJHKHLLhCVUfs-#(7EbX>McfSZ)-eCcb{-y+e3Qo``G+XU z_!(Y~z-{EA{u=kfqsJY})e#->{RP>u&?qqJZiT7Hc3OY@=<%fj7C@7laqAl;jt3{w z@cYo+XO;n{3`auh?-wGV-K(K4(+)6lLL32}cSZ?}XH{>Y>XF#btM5S#ogwcegH*iq zQ!Z84M?iiriVo9Xn#Vx7XT7CR47y3^(U8s+l9$>go|UehB+{PWkID(4bcDfF;2$JX zyPp76IeE`zB?-E^tKz+ql6W_300MT<>inSi(wcwIYOFALT$AfhX_5Y&K2L0eadLt>ED#st%iOorP-SOT7|3?q45aTdvsINO9 zyj8q6VnDI#Q@a>qzgSXjI_TxQ2^UV9bM+)pVC_Zovz;W4Gx|a|DwQ;e3`z9^Xhm6e z$?U^PU-Q-Qs~{}+FS@ThCcQ|UsW**NtG`_ssz|p<@Q@F?)XY$^Bzvq{F%r#O{f+Zo zoA(b6C7eom;VKpjS61iPwZ(W1oid4bT?5(bGjeUDcnc$|(V1>4c%J`VldJUG{Q7(F z`Igx47?@wz<^0@sDrda5Qv7xfN-%vAUhU8%` zrKEi!<~Tc+5ERbNVacZPH(xsckSdnw>UIXzN3N}=#O7>)4SYQ;pn*&pjds6G)=zUOZUUG zoic}~t-g#MoFDGJFHU|jH;{d7bl5^MC1#hQKIlPMem2GKKUeh0TeVI+^*?RBw&NBb zd(d$zjmIzeqWCZ!q^D;u+Hi%yUM)eIB=yc;?& zkcH%aCwJ$&`<2}5DiQ^(TOtYmYsGhOeu14oWR^t_4%R=;)2Af1`DqTP`YK@*U;7R0 z)bN|59b@sOKk`Uz+1TL4YXK*Dr=yK+xgqx>yOW>W!C%sK0IqF@87si3+YG-7HiDynWjJ_yfwsD zZL?EPFnTW@B-_p$*c^YgLIpG80NL#R#LE%;w-6jQDVooLRH>qaQhhJMy zy&8VLUV~rE?TeQ$(a-plZMOTlRhGA+CPBBzZ)56N&{qT`_nO|?UOTpT95*E$+fHw@ z;c3f#tB)jRK7H3oscJ|mA~*xB8$ij%O@*?yE7bA;;{G@ez0A?sOG<0|G|&3UL?6k} zAu)BUF(Oqy`SGKR)i=YO;qk>{rT0yR2hBN&upc}j8-CE~BVs%05ll@_ndUdpfyHt0 zoD%DrvXt~Q$r^%S7q_-s^**_(r4VwDJ`2X`8Y(NP{M#^g!b**6p-QgxW|QHv2xp8P z2!P+SkdmkZWz~wGMyKM?s4oY2AnNw-=Ck<%jkO|qqgY5d^koB@3f6qqV(Q{yB9Z&u z=+m#=+B^yCRpy*nVar=%WtS7KDrv#>-P;9F97>knP{SMOCE=R;h)Cr8PQkVW-x7EX zH>-AZhhN@lPlpIdsEGD*a;_{H7t_aBAT%1#oPM4Ya#!dZ&(mHa!|H$LODiDkkGiL? zL;ft{nBBGcM;fp|bwEC}Ok-OL-jB}L8Q0e$<+!9|npoJ1n(GF`Lkk>%YMPd3>0%aO zMy?Fw3eLUDIr;~g{xZoi;p{{JH7dn3boyuju3t0u1%YV?E3*2?ef4I!tWmgP5o`rU zfg^AwEMuf2>_s$xcZWdii&TRx&G!?aYD45i2`ziqvh$2$jQ5U(_cwVJ-@3~!=}!xE z8z(FUx33}`1SkpxGy5Dosi3gYCt+0;#4fJ6&Yl;A(D`mqG~R8@@C7w%82d9g4`Xd3?R`P%P3Ao{ zbGG_86?@@9FLFN27H*$X?mX$2XnKa%bC-JaMxa;`@jZWKnRMT5T;bq?WLhEMRD!1@ zMNJ|9^3@GF$@4ONOOz2p4X}*!T1#tvqI+RC_~T7Ou791r*BWsNf3vb3!RR^x>`}>~ zABz|-n=X|90tub0kb@qYtdFwyBiFCu>ux)@M7)3<3n;TcckCPLqkA{U^gxv|D@7$EoKh0{oh%2%` zm(BX`9Rkvia_&wgcW(0Vju+Vcf7T7zg0eqHLhx1|O9HzM^tj(U!{4B*>FK(BKk4&s z4eXTf_J@Y3u&u`dZCbf_ShQRnDg*T4OPUYAzrG}v;IEUU)ch*hHiNC0B8*`y5Ar?o z^OT>Y!8nJb4;Z#r1cvL8dnh5)VTBm?K*P1Wh!z2&i44A&1Va7}q#Zsm6G{ zqp$qO#a%Wlj7idA1q*20?91+z9ia#)=FjdikA4mpV;yg%6NihX*J674RcR46xz8$q zvT6geTsMksa-X)=N8?mR5*~En(tN^bx4#ov)R>s6AT*Qy>3)h(?!P-zbbkRp)cz}_ zYH2M=W8@M`nv(D$9f&3*AKsHb`P8d5^{*-LA7`G(28&ludbn^h+GCYT+}6W7n)W+L zkFI~<29(iV)DjC&VEng=$1|0LW4xS8$Xirf0dTD}|EEnZc)a(S{)sq5Vh-tP>%VI& zItNHq?UQ;~P?jJMGFG91!SV=nyZWc_377YPF=V(oj)7}%O0Wl}w<7O<&LYv^e*Ujjdc5ga%)(kAyd7;gH`{5fHNvs_^Ky~ot{ z2_XQ25DrO*&-d5>IcV!J5er4)Weca$GhaYO@rgGR2%=m{5(H2~$T6oB0FrjL^=>5; zStJ1e`6Omis)nHV1n|e11LTn;D*AMP)^{Iy-Hjb8Jf0HG1LpZa5ZH1tnlCasdq25< zl}@W%*VsikSw^25JF9 zJu3byx>JmkwAgll%%bg4^`xD1Ft}qx1*!@D0@cJ@Xv$0`Z~Z8(A#(}f(31GleR z7W6(x6tT-~em_3jO8^v1oAg=(#jI~XZ#5%SzMoFy3&@u+#|BwQEKr#kp3YNDe%k>D zjY$|8%pU6mbdqO+aE;GlEB&o8YrVJU--*v@@NiLPTyVU&aKSoDZa1GQl{g#_1~ z&3SJid8Bp}h(q80o<^+8?NXty-Ua0wPRA_&mB(2l{jcE|WTu&cUN(R&6NtB+XW!!& z1&Q_LyD*h{FGNkY$!HD`}PJNJNOvs#riLjE;WGqU(2|Ng?5MqZP`pH@4Yn}l+ z7blVA$gtBEZs~`aen)8`)%i-oHri8TFLn5?(|d1{Wp2WMq<^8y>=HxSU8jAiIj)Wj zi4w#XW6Sd1#^g-mjzLC+Um)iqbuaUDkxk12H*Gs}J0U*;cV{kyzLe5Q^!#u^fVS?{ z#)^jD=f`Jr^yk^Ot=t+CTWzLMVtMNuC$5($jnye#Omq237G4>5)&-r5b%1TfLyYFDy z%*)*Y1>TRSRJ}?H+wd+mES=@24I{(+#h)-+lOE0LV@tErrgwM>AR-1-rR-xT(ba++ z>+czU=SPG4qmobf5sPw?xinFC@6~+ 
zb-nqx6VOJkCPiwQqT8^l{o0~{i*uODB0cszPW5|jzmMGwm?>+@GtZUWu=@GeoF7pa zr_vYw{UEKwQ@cpoqD+R?{t8x|pS$?Cc2QFKd{AVIoN384$zDZ&`BLE^Z&&YQm~BzE z0EpJ7m?^ZbQ||0O<#5Pg5b>>{J~I)3yCLcYcrc*lZ4>90hG*11ZB*4%2^&ag-uY^T?cy(b=k>&z=#`+5es@ zL09%X*x0}DUN(Z^fAC&+FbIY2-(o$8cihnXmsqcU?{}90mTHAA8_HgK>tWBTi+N_& z`pXvY-eFzopBFo9e}t{yS0qCPu|t(5)Zo p1h9!ni1YDH!b&fMzWv3;r6YUkMz^r|&P(8zlAQX(()-4*{vVn?mJ0v? literal 0 HcmV?d00001 diff --git a/doc/source/dev/gitpod-imgs/vscode-rst.png b/doc/source/dev/gitpod-imgs/vscode-rst.png new file mode 100644 index 0000000000000000000000000000000000000000..5b574c115a2b7a87cf61143822da161e83605f7a GIT binary patch literal 16443 zcmbun1yCHp-}i~z;<8AP;1Cvv;1b+DxVt2H@L*wsy9Rf6cf#WC?(XjXlK=C(_uk!m zbyZhaHMP^zJ<~QVpWnBqH&{tQ5)Fk21quoZO)oA%1oQr@S!F{91tkn4 zE%rst4f=Sd3a#rSF4P3B*7ev1b#>NvXcGA71Cl%o=q}G@lGAu6!LciCJg8>_H{!lJ z5L7Em9*5V)(^VRqbPNLX^4-sS$OowWEijZ^4*>e_zk_lU0g4&!zYbrEw#vKq?>c2O z0BFr?a9LdtU7i~`)R@~wWFu2(x2`i9Fm^KgeTVYD%`q6%5*&BCxy z*HdfifT&njWpA-nvmyI;=}!KglHUfMMk9Z+pKF3pHupO|#C(Lq630_f!;Hce#sdq- z6k7{LtB?l=jb|5klX^DIE;0UH!svkA9ciQa%gWBxq=ZBs;K{VDz(OF#KVa(Eq`UZR z%0^BY+7m^AestsE<-$uMh8HXrML|Iv*+;c^_2Aus5!eM5*?90`0s>;D4z8Vfm1!}N z&KGPOxKhvvjgMBm1Tz#0kmWASYnrsnREltCTlFg(vr66*GS4MoiXF%#^XTj}G}H@< z4I(Fv+;eQo=*R*|26jPjU^%y}ES=oiN`wGHd$NPqnlQJ_YqVm$_(@;9GYp~udg4gr z+am1erurd%@2NPDzx)xn@n|ECuJU@h_h{44QwdJWvE*o8I(a>Sc(lDPfN?8ktQYZL zxdJdnn6`H;awdfRG|P0E*s)%gz{BENPi?Vu-_7W7jc4u_z-qgeZTdJVF5`_+X%s`^ zI^SDXtR)UfMUcqxBP1xN0GA#+-W^M=UEnhH(6NPY=H1e?eFU*kZxbg~G^YK*x5H7f z{xjC`okd+>VXS9Ye$v1$^0_d;0u44hQ64VrSK|?>714hM&~MKaXl9iZck$~513}`w ztyn!erlWi8ist>kYi9*~wOj*K*vQ7fD7vdLi;{e#&t`}NF_YU2@hD~kp8TZOrGy7u z?=^A#CK4wNFBxNd>Da^_(=p#7N{nRm5jDs2?TT?Z)A%nQK=>CeCqdHPB|Oiqt8?2j zNm1Qx#F7XU(VHPGlnxTq>LvWYQ#~fsw~ZXAO7NO&OztR<2KGNbTlSzKn2C zI!Odj_4cV4(mb)I0BULGA)rjmKSwnpIZ@N7`dJGQ_ z%MA?=w-`yVvv(zvaIl+jo12^8IwbwM7R+0XC^tWL+9LvP+QyeACm*IC3M#x@Y1{?J zmr5M)Je@b~W%wKx6{NfG{uy0pk?ny@PO7-N-tC}O9S!GXuck;M+TNu<`7xdr-8yic zLB%DvH$BIW@bb)Z*M^N*XSA1v>*~%-@VnsU{1^b0xZIvA&~sPq2de)V;coKO@(HIP z2rgcCws{!`Q!B)}FK}~{*pW)S35H@*%ea^-2BZnzod~(AK5ELR%Bz&I%8gS5}^Ilt(_V8OV4nx+xI%Hn)lYEIRI> zcxvi((zD}{pg2AUU!bj&pe$3&*YSKQ8`qtBS$8x`t~)7qhmLB{dtU5eZnk~%;q#zs zHb$P#3tA&caD5@0)~(jT_e-e>IeD2@jYQ}m7Zvtfp5cb=2T%8KrEIZ*I~k(fxD-6%GuE=)HmI+Fh zn3j7G0sw=UyE2S(vVZUj#;?VZX09M2n{wx+p%KsV5cK$jh>&=srBnZ1aa5>XM6*zO z+i$wo5ET@-?nN$<0h&I4_vzq(cMgL9-j6X8W|a7 z5rP7`UfZ2=Og4ts-0EKQe+ncFR<%_0T)#q2lMNPh?$n>5&hQC@c=)>qH$iP8ifK>x z<4Q7Dc(@+Oa<6HMHdn*2TfRPrt6_O2r_qL=c=@s0OinlOgDG_<6RBnG+Cm(TZqv3c zD~+DY1#~5j6_G<_$AkKN6#l;*jGO-VRnm7t%#%5U| z+_P7wPsnY}(cS#3J07muCG~vI?RhW70+WFq{44aSs`m>T0Qq+WOUHf};rix#P5XIB zU4LN@b^q8Fg=j_M5Gvh&EO_P1gqyLKrqOr(y@}yQ$C*ORLTmhkBvV#}h;nu}lTUK% zqnsv}RcyBQ&i+awh39~u&++26{^ydW2jV|ZvXMZqOxFiJuI{8o#~UGkUe*|v*pv!G ztCXKo$v1R0QBu;2M=Fv!7Gw#xD^#9azjv+zjL-n=i{+cTTCGd_=x?6@Z8C>3(R)?Y=ynHt5+_S1P!m5u>&U_&@dY`vORkO4FJ9Yy3)n)1LqeQ zp;>W({!zm4CT@Mm;qmsGMvnYZ+VLL=mc-4 za&sk#lxwziT4O}CF>@qRq#|5AD+;js0O{MWpnc6!LvqZ1jxB7bVbJbDgtS*<-cJ;M z@PTEBgTCA8q77nDHp`c=u-^($tzR=s6_m5#>V?af5;&t5yvwW(q1F6`kr$;S!pqw; zXEs^~85csxMiXF|=yT=2LaFp{*zAVVZUoX>hNS0VhHwuo2pRlCNUM?{ zEs{C?pJ5-(D=TyJ`vw^HbH>`eF7gtKw0~%yDuaIR%*;ImuX4EDfbk8qYaB`>4dK#5 z7nn;63t57}A2W0>%R8wnNWm*ynE8X$704e6RmJB5G(PA{J9DaR`2vixyaJ@UrhjUE zX|5b^rpUzufeI~g_{0L3pn{P~gGA}J&Ad1$;Loc3!B-K!X{Wx>w%d=kP-9Az@XDED zSC=B4%DyNoUCc;)`xHSGiWk=y(LZINFydm`n~T2GYeHWHnn=5Lifjhb9a-nz{Mws< z52V1zC8&y;@MZXjf|xH+Xb!}A8Ws5bq;koDh>HfmPsU%yeU|VuyCkp1y}X4Y6xXWD z7BeEJ<~XT@=Q*EWKY1*A>i^*Ivi zKO8GJVwTNDTkD~;jS=DTAtDI15#UzDEG6K0j|nLt@AWraK%dNtO7b{SMx7frL}hI@sM>Z{{3q2Iun8o 
[GIT binary patch payloads omitted: base85-encoded image data for the new
doc/source/dev/gitpod-imgs/*.png screenshots, ending with
doc/source/dev/gitpod-imgs/vscode-statusbar.png (GIT binary patch, literal 4492).
The encoded image bytes carry no textual information. The readable remainder of
the patch adds the new page to the developer-guide toctrees in
doc/source/dev/index.rst:]

    development_environment
+   development_gitpod
    development_workflow
    development_advanced_debugging
    ../benchmarking

@@ -302,6 +303,7 @@ The rest of the story
    Git Basics
    development_environment
+   development_gitpod
    development_workflow
    development_advanced_debugging
    reviewer_guidelines

From de084798dd7cf4646d9b17d5302b0dba93bbbc4b Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 23 Apr 2021 13:56:01 +0200
Subject: [PATCH 0968/1270] MAINT: Add `void.__getitem__` and `__setitem__`

---
 numpy/__init__.pyi | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 08050a524679..b9909b2f05e0 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -2963,6 +2963,8 @@ class void(flexible):
     def setfield(
         self, val: ArrayLike, dtype: DTypeLike, offset: int = ...
     ) -> None: ...
+    def __getitem__(self, key: SupportsIndex) -> Any: ...
+    def __setitem__(self, key: SupportsIndex, value: ArrayLike) -> None: ...
 void0 = void

From b24aab0f7495bcd482c77f4ab644d3f1eb1c571f Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 23 Apr 2021 13:56:26 +0200
Subject: [PATCH 0969/1270] ENH: Add improved placeholder annotations for `np.emath`

---
 numpy/emath.pyi | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/numpy/emath.pyi b/numpy/emath.pyi
index 5aae84b6c1c6..d0d4af41eb0c 100644
--- a/numpy/emath.pyi
+++ b/numpy/emath.pyi
@@ -1,13 +1,13 @@
-from typing import Any, List
+from typing import List

 __all__: List[str]

-sqrt: Any
-log: Any
-log2: Any
-logn: Any
-log10: Any
-power: Any
-arccos: Any
-arcsin: Any
-arctanh: Any
+def sqrt(x): ...
+def log(x): ...
+def log10(x): ...
+def logn(n, x): ...
+def log2(x): ...
+def power(x, p): ...
+def arccos(x): ...
+def arcsin(x): ...
+def arctanh(x): ...

From a4ae295828c73480882a0385822a20a296393a30 Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 23 Apr 2021 13:56:41 +0200
Subject: [PATCH 0970/1270] ENH: Add improved placeholder annotations for `np.ctypeslib`

---
 numpy/ctypeslib.pyi | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/numpy/ctypeslib.pyi b/numpy/ctypeslib.pyi
index 125c20f8975a..689ea416408b 100644
--- a/numpy/ctypeslib.pyi
+++ b/numpy/ctypeslib.pyi
@@ -1,10 +1,14 @@
-from typing import Any, List
+from typing import List, Type
+from ctypes import _SimpleCData

 __all__: List[str]

-load_library: Any
-ndpointer: Any
-c_intp: Any
-as_ctypes: Any
-as_array: Any
-as_ctypes_type: Any
+# TODO: Update the `npt.mypy_plugin` such that it substitutes `c_intp` for
+# a specific `_SimpleCData[int]` subclass (e.g. `ctypes.c_long`)
+c_intp: Type[_SimpleCData[int]]
+
+def load_library(libname, loader_path): ...
+def ndpointer(dtype=..., ndim=..., shape=..., flags=...): ...
+def as_ctypes(obj): ...
+def as_array(obj, shape=...): ...
+def as_ctypes_type(dtype): ...

From 02555acfa39aad2802e98b5cd440d111279c5648 Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 23 Apr 2021 13:57:10 +0200
Subject: [PATCH 0971/1270] ENH: Add improved placeholder annotations for `np.char`

---
 numpy/__init__.pyi |  42 +++++++++++++++++-
 numpy/char.pyi     | 107 +++++++++++++++++++++++----------------------
 2 files changed, 96 insertions(+), 53 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index b9909b2f05e0..4b93ce0f398c 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -408,7 +408,47 @@ class chararray(ndarray[_ShapeType, _DType_co]):
         strides: Any = ...,
         order: Any = ...,
     ) -> Any: ...
-    def __getattr__(self, key: str) -> Any: ...
+    def __array_finalize__(self, obj): ...
+    def argsort(self, axis=..., kind=..., order=...): ...
+    def capitalize(self): ...
+    def center(self, width, fillchar=...): ...
+    def count(self, sub, start=..., end=...): ...
+    def decode(self, encoding=..., errors=...): ...
+    def encode(self, encoding=..., errors=...): ...
+    def endswith(self, suffix, start=..., end=...): ...
+    def expandtabs(self, tabsize=...): ...
+    def find(self, sub, start=..., end=...): ...
+    def index(self, sub, start=..., end=...): ...
+    def isalnum(self): ...
+    def isalpha(self): ...
+    def isdigit(self): ...
+    def islower(self): ...
+    def isspace(self): ...
+    def istitle(self): ...
+    def isupper(self): ...
+    def join(self, seq): ...
+    def ljust(self, width, fillchar=...): ...
+    def lower(self): ...
+    def lstrip(self, chars=...): ...
+    def partition(self, sep): ...
+    def replace(self, old, new, count=...): ...
+    def rfind(self, sub, start=..., end=...): ...
+    def rindex(self, sub, start=..., end=...): ...
+    def rjust(self, width, fillchar=...): ...
+    def rpartition(self, sep): ...
+    def rsplit(self, sep=..., maxsplit=...): ...
+    def rstrip(self, chars=...): ...
+    def split(self, sep=..., maxsplit=...): ...
+    def splitlines(self, keepends=...): ...
+    def startswith(self, prefix, start=..., end=...): ...
+    def strip(self, chars=...): ...
+    def swapcase(self): ...
+    def title(self): ...
+    def translate(self, table, deletechars=...): ...
+    def upper(self): ...
+    def zfill(self, width): ...
+    def isnumeric(self): ...
+    def isdecimal(self): ...

 class finfo:
     def __new__(cls, dtype: Any) -> Any: ...

diff --git a/numpy/char.pyi b/numpy/char.pyi
index 0e3596bb278b..4904aa27a3e0 100644
--- a/numpy/char.pyi
+++ b/numpy/char.pyi
@@ -1,56 +1,59 @@
 from typing import Any, List

+from numpy import (
+    chararray as chararray,
+)
+
 __all__: List[str]

-equal: Any
-not_equal: Any
-greater_equal: Any
-less_equal: Any
-greater: Any
-less: Any
-str_len: Any
-add: Any
-multiply: Any
-mod: Any
-capitalize: Any
-center: Any
-count: Any
-decode: Any
-encode: Any
-endswith: Any
-expandtabs: Any
-find: Any
-index: Any
-isalnum: Any
-isalpha: Any
-isdigit: Any
-islower: Any
-isspace: Any
-istitle: Any
-isupper: Any
-join: Any
-ljust: Any
-lower: Any
-lstrip: Any
-partition: Any
-replace: Any
-rfind: Any
-rindex: Any
-rjust: Any
-rpartition: Any
-rsplit: Any
-rstrip: Any
-split: Any
-splitlines: Any
-startswith: Any
-strip: Any
-swapcase: Any
-title: Any
-translate: Any
-upper: Any
-zfill: Any
-isnumeric: Any
-isdecimal: Any
-array: Any
-asarray: Any
-chararray: Any
+def equal(x1, x2): ...
+def not_equal(x1, x2): ...
+def greater_equal(x1, x2): ...
+def less_equal(x1, x2): ...
+def greater(x1, x2): ...
+def less(x1, x2): ...
+def str_len(a): ...
+def add(x1, x2): ...
+def multiply(a, i): ...
+def mod(a, values): ...
+def capitalize(a): ...
+def center(a, width, fillchar=...): ...
+def count(a, sub, start=..., end=...): ...
+def decode(a, encoding=..., errors=...): ...
+def encode(a, encoding=..., errors=...): ...
+def endswith(a, suffix, start=..., end=...): ...
+def expandtabs(a, tabsize=...): ...
+def find(a, sub, start=..., end=...): ...
+def index(a, sub, start=..., end=...): ...
+def isalnum(a): ...
+def isalpha(a): ...
+def isdigit(a): ...
+def islower(a): ...
+def isspace(a): ...
+def istitle(a): ...
+def isupper(a): ...
+def join(sep, seq): ...
+def ljust(a, width, fillchar=...): ...
+def lower(a): ...
+def lstrip(a, chars=...): ...
+def partition(a, sep): ...
+def replace(a, old, new, count=...): ...
+def rfind(a, sub, start=..., end=...): ...
+def rindex(a, sub, start=..., end=...): ...
+def rjust(a, width, fillchar=...): ...
+def rpartition(a, sep): ...
+def rsplit(a, sep=..., maxsplit=...): ...
+def rstrip(a, chars=...): ...
+def split(a, sep=..., maxsplit=...): ...
+def splitlines(a, keepends=...): ...
+def startswith(a, prefix, start=..., end=...): ...
+def strip(a, chars=...): ...
+def swapcase(a): ...
+def title(a): ...
+def translate(a, table, deletechars=...): ...
+def upper(a): ...
+def zfill(a, width): ...
+def isnumeric(a): ...
+def isdecimal(a): ...
+def array(obj, itemsize=..., copy=..., unicode=..., order=...): ...
+def asarray(obj, itemsize=..., unicode=..., order=...): ...
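As a quick, illustrative sketch of the kind of call these `np.char` placeholder
stubs describe (not part of the patch; it only assumes a recent NumPy build)::

    import numpy as np

    a = np.array(["numpy", "typing"])
    # np.char.upper maps str.upper over every element of a string array.
    print(np.char.upper(a))         # expected: ['NUMPY' 'TYPING']
    # np.char.add concatenates element-wise, matching add(x1, x2) above.
    print(np.char.add(a, "_stub"))  # expected: ['numpy_stub' 'typing_stub']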
From 2946a212a456cc087c9baca090ed691aecaadfc9 Mon Sep 17 00:00:00 2001
From: Tania Allard
Date: Fri, 23 Apr 2021 13:06:23 +0100
Subject: [PATCH 0972/1270] DOC: Add favicon (#18841)

---
 doc/source/_static/favicon/apple-touch-icon.png | Bin 0 -> 23752 bytes
 doc/source/_static/favicon/favicon-16x16.png    | Bin 0 -> 843 bytes
 doc/source/_static/favicon/favicon-32x32.png    | Bin 0 -> 2292 bytes
 doc/source/_static/favicon/favicon.ico          | Bin 0 -> 15406 bytes
 doc/source/conf.py                              |   2 ++
 5 files changed, 2 insertions(+)
 create mode 100644 doc/source/_static/favicon/apple-touch-icon.png
 create mode 100644 doc/source/_static/favicon/favicon-16x16.png
 create mode 100644 doc/source/_static/favicon/favicon-32x32.png
 create mode 100644 doc/source/_static/favicon/favicon.ico

diff --git a/doc/source/_static/favicon/apple-touch-icon.png b/doc/source/_static/favicon/apple-touch-icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..e6cd574260aab3259b7f050552cf1d7789ae86f7
GIT binary patch
literal 23752

[GIT binary patch payloads omitted: base85-encoded image data for
apple-touch-icon.png (literal 23752), favicon-16x16.png (literal 843),
favicon-32x32.png (literal 2292), and favicon.ico (literal 15406); the image
bytes carry no textual information.]

diff --git a/doc/source/conf.py b/doc/source/conf.py
index fdb9f926ddc4..4b60800d250a 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -160,6 +160,8 @@ def setup(app):

 html_logo = '_static/numpylogo.svg'

+html_favicon = '_static/favicon/favicon.ico'
+
 html_theme_options = {
     "logo_link": "index",
     "github_url": "https://github.com/numpy/numpy",

From 7d79aecb0fe5a9ef1465f8ba46ed2dd3394bf90d Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 23 Apr 2021 13:57:22 +0200
Subject: [PATCH 0973/1270] ENH: Add improved placeholder annotations for `np.rec`

---
 numpy/__init__.pyi | 12 ++++++--
 numpy/rec.pyi      | 71 ++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 71 insertions(+), 12 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 4b93ce0f398c..b0cfa6a0a253 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -463,7 +463,6 @@ class format_parser:
         aligned: Any = ...,
         byteorder: Any = ...,
     ) -> None: ...
-    def __getattr__(self, key: str) -> Any: ...

 class iinfo:
     def __init__(self, int_type: Any) -> None: ...
@@ -530,10 +529,17 @@ class recarray(ndarray[_ShapeType, _DType_co]):
         aligned: Any = ...,
         order: Any = ...,
     ) -> Any: ...
-    def __getattr__(self, key: str) -> Any: ...
+    def __array_finalize__(self, obj): ...
+    def __getattribute__(self, attr): ...
+    def __setattr__(self, attr, val): ...
+    def __getitem__(self, indx): ...
+    def field(self, attr, val=...): ...

 class record(void):
-    def __getattr__(self, key: str) -> Any: ...
+    def __getattribute__(self, attr): ...
+    def __setattr__(self, attr, val): ...
+    def __getitem__(self, indx): ...
+    def pprint(self): ...

 class vectorize:
     def __init__(

diff --git a/numpy/rec.pyi b/numpy/rec.pyi
index 883e2dd5b7f2..198636058a27 100644
--- a/numpy/rec.pyi
+++ b/numpy/rec.pyi
@@ -1,12 +1,65 @@
-from typing import Any, List
+from typing import List
+
+from numpy import (
+    format_parser as format_parser,
+    record as record,
+    recarray as recarray,
+)

 __all__: List[str]

-record: Any
-recarray: Any
-format_parser: Any
-fromarrays: Any
-fromrecords: Any
-fromstring: Any
-fromfile: Any
-array: Any
+def fromarrays(
+    arrayList,
+    dtype=...,
+    shape=...,
+    formats=...,
+    names=...,
+    titles=...,
+    aligned=...,
+    byteorder=...,
+): ...
+def fromrecords(
+    recList,
+    dtype=...,
+    shape=...,
+    formats=...,
+    names=...,
+    titles=...,
+    aligned=...,
+    byteorder=...,
+): ...
+def fromstring(
+    datastring,
+    dtype=...,
+    shape=...,
+    offset=...,
+    formats=...,
+    names=...,
+    titles=...,
+    aligned=...,
+    byteorder=...,
+): ...
+def fromfile(
+    fd,
+    dtype=...,
+    shape=...,
+    offset=...,
+    formats=...,
+    names=...,
+    titles=...,
+    aligned=...,
+    byteorder=...,
+): ...
+def array(
+    obj,
+    dtype=...,
+    shape=...,
+    offset=...,
+    strides=...,
+    formats=...,
+    names=...,
+    titles=...,
+    aligned=...,
+    byteorder=...,
+    copy=...,
+): ...

From 350d7a55d75e916159e3e31417e3a164f6dab73c Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 23 Apr 2021 14:04:59 +0200
Subject: [PATCH 0974/1270] ENH: Add improved placeholder annotations for `np.fft`

---
 numpy/fft/__init__.pyi | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi
index bb4fae9037d6..3c191a35f8f2 100644
--- a/numpy/fft/__init__.pyi
+++ b/numpy/fft/__init__.pyi
@@ -2,21 +2,21 @@ from typing import Any, List

 __all__: List[str]

-fft: Any
-ifft: Any
-rfft: Any
-irfft: Any
-hfft: Any
-ihfft: Any
-rfftn: Any
-irfftn: Any
-rfft2: Any
-irfft2: Any
-fft2: Any
-ifft2: Any
-fftn: Any
-ifftn: Any
-fftshift: Any
-ifftshift: Any
-fftfreq: Any
-rfftfreq: Any
+def fft(a, n=..., axis=..., norm=...): ...
+def ifft(a, n=..., axis=..., norm=...): ...
+def rfft(a, n=..., axis=..., norm=...): ...
+def irfft(a, n=..., axis=..., norm=...): ...
+def hfft(a, n=..., axis=..., norm=...): ...
+def ihfft(a, n=..., axis=..., norm=...): ...
+def fftn(a, s=..., axes=..., norm=...): ...
+def ifftn(a, s=..., axes=..., norm=...): ...
+def rfftn(a, s=..., axes=..., norm=...): ...
+def irfftn(a, s=..., axes=..., norm=...): ...
+def fft2(a, s=..., axes=..., norm=...): ...
+def ifft2(a, s=..., axes=..., norm=...): ...
+def rfft2(a, s=..., axes=..., norm=...): ...
+def irfft2(a, s=..., axes=..., norm=...): ...
+def fftshift(x, axes=...): ...
+def ifftshift(x, axes=...): ...
+def fftfreq(n, d=...): ...
+def rfftfreq(n, d=...): ...

From 2e60be21905d2fd53d712ffd90daa94cf073e3a7 Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 23 Apr 2021 14:07:45 +0200
Subject: [PATCH 0975/1270] ENH: Add improved placeholder annotations for `np.f2py`

---
 numpy/f2py/__init__.pyi | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi
index 50594c1e3667..3f7231fed1d0 100644
--- a/numpy/f2py/__init__.pyi
+++ b/numpy/f2py/__init__.pyi
@@ -1,7 +1,18 @@
 from typing import Any, List

+from numpy.f2py import (
+    f2py_testing as f2py_testing,
+)
+
 __all__: List[str]

-run_main: Any
-compile: Any
-f2py_testing: Any
+def run_main(comline_list): ...
+def compile(
+    source,
+    modulename=...,
+    extra_args=...,
+    verbose=...,
+    source_fn=...,
+    extension=...,
+    full_output=...,
+): ...
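For orientation, the sort of call the new `np.fft` stubs describe (illustrative
only, not part of the patch; assumes a recent NumPy)::

    import numpy as np

    x = np.array([0.0, 1.0, 0.0, -1.0])
    # rfft(a, n=..., axis=..., norm=...) and its inverse round-trip a real
    # signal up to floating-point error.
    X = np.fft.rfft(x)
    np.testing.assert_allclose(np.fft.irfft(X, n=len(x)), x, atol=1e-12)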
From 4f1390e9d31edda13890e230d23dc1651310050a Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 23 Apr 2021 14:21:31 +0200
Subject: [PATCH 0976/1270] ENH: Add improved placeholder annotations for `np.testing`

---
 numpy/testing/__init__.pyi | 147 ++++++++++++++++++++++++++-----------
 1 file changed, 104 insertions(+), 43 deletions(-)

diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi
index 7dad2c9db0dd..efb6bcd911b1 100644
--- a/numpy/testing/__init__.pyi
+++ b/numpy/testing/__init__.pyi
@@ -1,46 +1,107 @@
-from typing import Any, List
+import sys
+import warnings
+from typing import Any, List, ClassVar, Tuple, Set
+
+if sys.version_info >= (3, 8):
+    from typing import Final
+else:
+    from typing_extensions import Final
+
+from unittest.case import (
+    SkipTest as SkipTest,
+)

 __all__: List[str]

-assert_equal: Any
-assert_almost_equal: Any
-assert_approx_equal: Any
-assert_array_equal: Any
-assert_array_less: Any
-assert_string_equal: Any
-assert_array_almost_equal: Any
-assert_raises: Any
-build_err_msg: Any
-decorate_methods: Any
-jiffies: Any
-memusage: Any
-print_assert_equal: Any
-raises: Any
-rundocs: Any
-runstring: Any
-verbose: Any
-measure: Any
-assert_: Any
-assert_array_almost_equal_nulp: Any
-assert_raises_regex: Any
-assert_array_max_ulp: Any
-assert_warns: Any
-assert_no_warnings: Any
-assert_allclose: Any
-IgnoreException: Any
-clear_and_catch_warnings: Any
-SkipTest: Any
-KnownFailureException: Any
-temppath: Any
-tempdir: Any
-IS_PYPY: Any
-HAS_REFCOUNT: Any
-suppress_warnings: Any
-assert_array_compare: Any
-_assert_valid_refcount: Any
-_gen_alignment_data: Any
-assert_no_gc_cycles: Any
-break_cycles: Any
-HAS_LAPACK64: Any
-TestCase: Any
-run_module_suite: Any
+class KnownFailureException(Exception): ...
+class IgnoreException(Exception): ...
+
+class clear_and_catch_warnings(warnings.catch_warnings):
+    class_modules: ClassVar[Tuple[str, ...]]
+    modules: Set[str]
+    def __init__(self, record=..., modules=...): ...
+    def __enter__(self): ...
+    def __exit__(self, *exc_info): ...
+
+class suppress_warnings:
+    log: List[warnings.WarningMessage]
+    def __init__(self, forwarding_rule=...): ...
+    def filter(self, category=..., message=..., module=...): ...
+    def record(self, category=..., message=..., module=...): ...
+    def __enter__(self): ...
+    def __exit__(self, *exc_info): ...
+    def __call__(self, func): ...
+
+verbose: int
+IS_PYPY: Final[bool]
+HAS_REFCOUNT: Final[bool]
+HAS_LAPACK64: Final[bool]
+
+def assert_(val, msg=...): ...
+def memusage(processName=..., instance=...): ...
+def jiffies(_proc_pid_stat=..., _load_time=...): ...
+def build_err_msg(
+    arrays,
+    err_msg,
+    header=...,
+    verbose=...,
+    names=...,
+    precision=...,
+): ...
+def assert_equal(actual, desired, err_msg=..., verbose=...): ...
+def print_assert_equal(test_string, actual, desired): ...
+def assert_almost_equal(
+    actual,
+    desired,
+    decimal=...,
+    err_msg=...,
+    verbose=...,
+): ...
+def assert_approx_equal(
+    actual,
+    desired,
+    significant=...,
+    err_msg=...,
+    verbose=...,
+): ...
+def assert_array_compare(
+    comparison,
+    x,
+    y,
+    err_msg=...,
+    verbose=...,
+    header=...,
+    precision=...,
+    equal_nan=...,
+    equal_inf=...,
+): ...
+def assert_array_equal(x, y, err_msg=..., verbose=...): ...
+def assert_array_almost_equal(x, y, decimal=..., err_msg=..., verbose=...): ...
+def assert_array_less(x, y, err_msg=..., verbose=...): ...
+def runstring(astr, dict): ...
+def assert_string_equal(actual, desired): ...
+def rundocs(filename=..., raise_on_error=...): ...
+def raises(*args): ...
+def assert_raises(*args, **kwargs): ...
+def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): ...
+def decorate_methods(cls, decorator, testmatch=...): ...
+def measure(code_str, times=..., label=...): ...
+def assert_allclose(
+    actual,
+    desired,
+    rtol=...,
+    atol=...,
+    equal_nan=...,
+    err_msg=...,
+    verbose=...,
+): ...
+def assert_array_almost_equal_nulp(x, y, nulp=...): ...
+def assert_array_max_ulp(a, b, maxulp=..., dtype=...): ...
+def assert_warns(warning_class, *args, **kwargs): ...
+def assert_no_warnings(*args, **kwargs): ...
+def tempdir(*args, **kwargs): ...
+def temppath(*args, **kwargs): ...
+def assert_no_gc_cycles(*args, **kwargs): ...
+def break_cycles(): ...
+def _assert_valid_refcount(op): ...
+def _gen_alignment_data(dtype=..., type=..., max_size=...): ...

From 0388434a18de5dc02e0438a49e31dde44a59fe2b Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 23 Apr 2021 14:38:56 +0200
Subject: [PATCH 0977/1270] ENH: Add improved placeholder annotations for `np.matrixlib`

---
 numpy/__init__.pyi           | 40 +++++++++++++++++++++++++++++++++++-
 numpy/matrixlib/__init__.pyi | 11 ++++++----
 2 files changed, 46 insertions(+), 5 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index b0cfa6a0a253..6899343c00f9 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -475,7 +475,45 @@ class matrix(ndarray[_ShapeType, _DType_co]):
         dtype: Any = ...,
         copy: Any = ...,
     ) -> Any: ...
-    def __getattr__(self, key: str) -> Any: ...
+    def __array_finalize__(self, obj): ...
+    def __getitem__(self, index): ...
+    def __mul__(self, other): ...
+    def __rmul__(self, other): ...
+    def __imul__(self, other): ...
+    def __pow__(self, other): ...
+    def __ipow__(self, other): ...
+    def __rpow__(self, other): ...
+    def tolist(self): ...
+    def sum(self, axis=..., dtype=..., out=...): ...
+    def squeeze(self, axis=...): ...
+    def flatten(self, order=...): ...
+    def mean(self, axis=..., dtype=..., out=...): ...
+    def std(self, axis=..., dtype=..., out=..., ddof=...): ...
+    def var(self, axis=..., dtype=..., out=..., ddof=...): ...
+    def prod(self, axis=..., dtype=..., out=...): ...
+    def any(self, axis=..., out=...): ...
+    def all(self, axis=..., out=...): ...
+    def max(self, axis=..., out=...): ...
+    def argmax(self, axis=..., out=...): ...
+    def min(self, axis=..., out=...): ...
+    def argmin(self, axis=..., out=...): ...
+    def ptp(self, axis=..., out=...): ...
+    def ravel(self, order=...): ...
+    @property
+    def T(self): ...
+    @property
+    def I(self): ...
+    @property
+    def A(self): ...
+    @property
+    def A1(self): ...
+    @property
+    def H(self): ...
+    def getT(self): ...
+    def getA(self): ...
+    def getA1(self): ...
+    def getH(self): ...
+    def getI(self): ...

 class memmap(ndarray[_ShapeType, _DType_co]):
     def __new__(

diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi
index b9005c4aa4c9..e4b5c19a2bd0 100644
--- a/numpy/matrixlib/__init__.pyi
+++ b/numpy/matrixlib/__init__.pyi
@@ -1,8 +1,11 @@
 from typing import Any, List

+from numpy import (
+    matrix as matrix,
+)
+
 __all__: List[str]

-matrix: Any
-bmat: Any
-mat: Any
-asmatrix: Any
+def bmat(obj, ldict=..., gdict=...): ...
+def asmatrix(data, dtype=...): ...
+mat = asmatrix

From a70cb5490aaf1bcfae9b2554c3a48f33739c499e Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 23 Apr 2021 08:31:46 -0500
Subject: [PATCH 0978/1270] Update numpy/core/src/umath/ufunc_object.c

Co-authored-by: Ross Barnowski
---
 numpy/core/src/umath/ufunc_object.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index d29a7e01f4da..bf8bd1a8c690 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -1113,7 +1113,7 @@ prepare_ufunc_output(PyUFuncObject *ufunc,
  * is possible.
  *
  * This function only supports a single output (due to the overlap check).
- * It always accepts 0-D arrays and will broadcast them. The function will
+ * It always accepts 0-D arrays and will broadcast them. The function
  * cannot broadcast any other array (as it requires a single stride).
  * The function accepts all 1-D arrays, and N-D arrays that are either all
  * C- or all F-contiguous.

From c55bb45d564d63e292a3a004e6f7c4b2dd0ce6ef Mon Sep 17 00:00:00 2001
From: Adrian Price-Whelan
Date: Sat, 24 Apr 2021 10:32:20 -0400
Subject: [PATCH 0979/1270] move module target location

---
 doc/source/reference/index.rst | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
index 6eb74cd770a0..f12d923dfdb4 100644
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -1,5 +1,7 @@
 .. _reference:

+.. module:: numpy
+
 ###############
 NumPy Reference
 ###############
@@ -7,9 +9,6 @@ NumPy Reference
 :Release: |version|
 :Date: |today|

-
-.. module:: numpy
-
 This reference manual details functions, modules, and objects
 included in NumPy, describing what they are and what they do. For
 learning how to use NumPy, see the :ref:`complete documentation `.

From 5e8a1efd4b7cf618a0c7731b82d1a7c79be3fac9 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Sun, 25 Apr 2021 13:30:25 -0600
Subject: [PATCH 0980/1270] MAINT: Disable pip version check for azure lint check.

If the installed version is not up to date, the terminal notification of such
is treated as an error.
--- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 3240b5fc9e76..01749145008e 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -29,7 +29,7 @@ stages: addToPath: true architecture: 'x64' - script: >- - python -m pip install -r linter_requirements.txt + python -m pip --disable-pip-version-check install -r linter_requirements.txt displayName: 'Install tools' failOnStderr: true - script: | From 3df50530c415cf2666f3f07a9780b8994fd352f6 Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 26 Apr 2021 11:23:33 +0100 Subject: [PATCH 0981/1270] Update doc/source/dev/development_gitpod.rst MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Melissa Weber Mendonça --- doc/source/dev/development_gitpod.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index 21b3b5de24cd..5bce71b9a43e 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -19,7 +19,7 @@ Gitpod Gitpod GitHub integration -------------------------- -To be able to use Gitpod, you will need tohave the Gitpod app installed on your GitHub account, so if +To be able to use Gitpod, you will need to have the Gitpod app installed on your GitHub account, so if you do not have an account yet, you will need to create one first. Head over to the `Gitpod`_ website and click on the **Continue with GitHub** button. You will be redirected to the GitHub authentication page. @@ -165,4 +165,3 @@ FAQ's .. _NumPy repository on GitHub: https://github.com/NumPy/NumPy .. _create your own fork: https://help.github.com/en/articles/fork-a-repo .. _VSCode docs: https://code.visualstudio.com/docs/getstarted/tips-and-tricks - From ea8d1e2425ce0288cecce8bd5d73142c6c9efd86 Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 26 Apr 2021 11:25:57 +0100 Subject: [PATCH 0982/1270] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Melissa Weber Mendonça --- doc/source/dev/development_gitpod.rst | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index 5bce71b9a43e..d53bc802c977 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -49,13 +49,13 @@ Once you have authenticated to Gitpod through GitHub, you can install the `Gitpo :alt: NumPy repository with Gitpod button screenshot #. If you install the extension - you can click the **Gitpod** button to start a new workspace. -#. Alternatively, if you do not want to install the browser extension you can visit https://gitpod.io/#https://github.com/USERNAME/NumPy replacing ``USERNAME`` with your GitHub username. +#. Alternatively, if you do not want to install the browser extension, you can visit https://gitpod.io/#https://github.com/USERNAME/NumPy replacing ``USERNAME`` with your GitHub username. #. In both cases, this will open a new tab on your web browser and start building your development environment. Please note this can take a few minutes. -#. Once the build is complete, you will be directed to your workspace, including VSCode and all the dependencies you need to work on NumPy. The first time you start your workspace, you will notice that there might be some actions running. 
This will ensure that you have a development version of NumPy installed and that the docs are being pre-built for you. +#. Once the build is complete, you will be directed to your workspace, including the VSCode editor and all the dependencies you need to work on NumPy. The first time you start your workspace, you will notice that there might be some actions running. This will ensure that you have a development version of NumPy installed and that the docs are being pre-built for you. -#. Once the build is complete, you can test the build by entering:: +#. Once the build is complete, you can :ref:`test the build` by entering:: python runtests.py -v @@ -63,14 +63,14 @@ Once you have authenticated to Gitpod through GitHub, you can install the `Gitpo Quick workspace tour --------------------- -Gitpod uses VSCode as the editor. If you have not used this editor before, you can check the Getting started `VSCode docs`_ to familiarise yourself with it. +Gitpod uses VSCode as the editor. If you have not used this editor before, you can check the Getting started `VSCode docs`_ to familiarize yourself with it. Your workspace will look similar to the image below: .. image:: ./gitpod-imgs/gitpod-workspace.png :alt: Gitpod workspace screenshot -.. note:: By default VSCode initialises with a light theme, you can change to a dark theme by with the keyboard shortcut :kbd:`Cmd-K Cmd-T` in Mac or :kbd:`Ctrl-K Ctrl-T` in Linux and Windows. +.. note:: By default, VSCode initializes with a light theme. You can change to a dark theme by with the keyboard shortcut :kbd:`Cmd-K Cmd-T` in Mac or :kbd:`Ctrl-K Ctrl-T` in Linux and Windows. We have marked some important sections in the editor: @@ -78,7 +78,7 @@ We have marked some important sections in the editor: #. Your current branch is always displayed in the status bar. You can also use this button to change or create branches. #. GitHub Pull Requests extension - you can use this to work with Pull Requests from your workspace. #. Marketplace extensions - we have added some essential extensions to the NumPy Gitpod. Still, you can also install other extensions or syntax highlighting themes for your user, and these will be preserved for you. -#. Your workspace directory - by default is ``/workspace/numpy`` **do not change this** as this is the only directory preserved in Gitpod. +#. Your workspace directory - by default, it is ``/workspace/numpy``. **Do not change this** as this is the only directory preserved in Gitpod. We have also pre-installed a few tools and VSCode extensions to help with the development experience: @@ -89,11 +89,11 @@ We have also pre-installed a few tools and VSCode extensions to help with the de * `VSCode autodocstrings extension `_ * `VSCode Git Graph extension `_ -Development workflow +Development workflow with Gitpod ----------------------- The :ref:`development-workflow` section of this documentation contains information regarding the NumPy development workflow. Make sure to check this before working on your contributions. -When using Gitpod git is pre configured for you: +When using Gitpod, git is pre configured for you: #. You do not need to configure your git username, and email as this should be done for you as you authenticated through GitHub. You can check the git configuration with the command ``git config --list`` in your terminal. #. As you started your workspace from your own NumPy fork, you will by default have both "upstream "and "origin "added as remotes. 
You can verify this by typing ``git remote`` on your terminal or by clicking on the **branch name** on the status bar (see image below). @@ -110,7 +110,7 @@ The documentation is pre-built during your workspace initialization. So once thi Option 1: Using Liveserve *************************** -#. View the documentation in ``NumPy/doc/build/html``. You can start with "index.html "and browse, or you can jump straight to the file you're interested in. +#. View the documentation in ``NumPy/doc/build/html``. You can start with ``index.html`` and browse, or you can jump straight to the file you're interested in. #. To see the rendered version of a page, you can right-click on the ``.html`` file and click on **Open with Live Serve**. Alternatively, you can open the file in the editor and click on the **Go live** button on the status bar. .. image:: ./gitpod-imgs/vscode-statusbar.png @@ -151,7 +151,7 @@ FAQ's #. Can I install additional VSCode extensions? Absolutely! Any extensions you installed will be installed in your own workspace and preserved. -#. I registered on Gitpod but I still cannot see a **Gitpod** button in my repositories +#. I registered on Gitpod but I still cannot see a ``Gitpod`` button in my repositories Head to https://gitpod.io/integrations and make sure you are logged in. Hover over GitHub and click on the three buttons that appear on the right. Click on edit permissions and make sure you have ``user:email``, ``read:user``, and ``public_repo`` checked. Click on **Update Permissions** and confirm the changes in the GitHub application page. From 775d243be65f36564580ed5fcf038f384c61ce4d Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 23 Apr 2021 16:57:09 +0200 Subject: [PATCH 0983/1270] ENH: Add improved placeholder annotations for `np.linalg` --- numpy/linalg/__init__.pyi | 43 ++++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 39b061969d0d..5080019f4de4 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -2,24 +2,25 @@ from typing import Any, List __all__: List[str] -matrix_power: Any -solve: Any -tensorsolve: Any -tensorinv: Any -inv: Any -cholesky: Any -eigvals: Any -eigvalsh: Any -pinv: Any -slogdet: Any -det: Any -svd: Any -eig: Any -eigh: Any -lstsq: Any -norm: Any -qr: Any -cond: Any -matrix_rank: Any -LinAlgError: Any -multi_dot: Any +class LinAlgError(Exception): ... + +def tensorsolve(a, b, axes=...): ... +def solve(a, b): ... +def tensorinv(a, ind=...): ... +def inv(a): ... +def matrix_power(a, n): ... +def cholesky(a): ... +def qr(a, mode=...): ... +def eigvals(a): ... +def eigvalsh(a, UPLO=...): ... +def eig(a): ... +def eigh(a, UPLO=...): ... +def svd(a, full_matrices=..., compute_uv=..., hermitian=...): ... +def cond(x, p=...): ... +def matrix_rank(M, tol=..., hermitian=...): ... +def pinv(a, rcond=..., hermitian=...): ... +def slogdet(a): ... +def det(a): ... +def lstsq(a, b, rcond=...): ... +def norm(x, ord=..., axis=..., keepdims=...): ... +def multi_dot(arrays, *, out=...): ... 
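The change above to ``numpy/linalg/__init__.pyi`` trades module-level ``name: Any`` attributes for unannotated ``def`` placeholders. Parameter and return types are still implicitly ``Any``, but the stub now records each callable's name, arity, parameter names and defaults, which is the information a static checker needs to reject calls with the wrong number of arguments or an unknown keyword. A minimal sketch of the idea, using an invented ``sketch.pyi`` rather than the real NumPy stubs::

    # sketch.pyi -- illustrative placeholder-style stub, not an actual NumPy file
    from typing import Any, List

    __all__: List[str]

    class LinAlgError(Exception): ...

    # Unannotated defs: argument and return types fall back to Any, but the
    # shape of the signature itself is now visible to a static type checker.
    def solve(a, b): ...
    def matrix_rank(M, tol=..., hermitian=...): ...

Against such a stub a checker can be expected to flag ``solve(a)`` (missing argument) or ``matrix_rank(m, tolerance=0.1)`` (unknown keyword argument), neither of which was detectable while the names were bare ``Any`` attributes; validating the actual array types would require full annotations, which these placeholders deliberately leave as ``Any``.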
From bb34344242097b95db2be2e938c0ce14db225db3 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 25 Apr 2021 14:17:42 +0200 Subject: [PATCH 0984/1270] ENH: Add improved placeholder annotations for `np.lib` --- numpy/__init__.pyi | 62 ++++++- numpy/lib/__init__.pyi | 350 ++++++++++++++++++++---------------- numpy/lib/_version.pyi | 19 ++ numpy/lib/arraypad.pyi | 5 + numpy/lib/arraysetops.pyi | 12 ++ numpy/lib/format.pyi | 28 +++ numpy/lib/function_base.pyi | 57 ++++++ numpy/lib/histograms.pyi | 7 + numpy/lib/mixins.pyi | 62 +++++++ numpy/lib/nanfunctions.pyi | 54 ++++++ numpy/lib/npyio.pyi | 104 +++++++++++ numpy/lib/polynomial.pyi | 19 ++ numpy/lib/scimath.pyi | 13 ++ numpy/lib/shape_base.pyi | 24 +++ numpy/lib/stride_tricks.pyi | 14 ++ numpy/lib/twodim_base.pyi | 19 ++ numpy/lib/type_check.pyi | 19 ++ numpy/lib/utils.pyi | 19 ++ 18 files changed, 726 insertions(+), 161 deletions(-) create mode 100644 numpy/lib/_version.pyi create mode 100644 numpy/lib/arraypad.pyi create mode 100644 numpy/lib/arraysetops.pyi create mode 100644 numpy/lib/format.pyi create mode 100644 numpy/lib/function_base.pyi create mode 100644 numpy/lib/histograms.pyi create mode 100644 numpy/lib/mixins.pyi create mode 100644 numpy/lib/nanfunctions.pyi create mode 100644 numpy/lib/npyio.pyi create mode 100644 numpy/lib/polynomial.pyi create mode 100644 numpy/lib/scimath.pyi create mode 100644 numpy/lib/shape_base.pyi create mode 100644 numpy/lib/stride_tricks.pyi create mode 100644 numpy/lib/twodim_base.pyi create mode 100644 numpy/lib/type_check.pyi create mode 100644 numpy/lib/utils.pyi diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6899343c00f9..ed489e35bd27 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -376,7 +376,10 @@ __git_version__: str # TODO: Remove `__getattr__` once the classes are stubbed out class DataSource: def __init__(self, destpath: Any = ...) -> None: ... - def __getattr__(self, key: str) -> Any: ... + def __del__(self): ... + def abspath(self, path): ... + def exists(self, path): ... + def open(self, path, mode=..., encoding=..., newline=...): ... class MachAr: def __init__( @@ -550,7 +553,55 @@ class poly1d: variable: Any = ..., ) -> None: ... def __call__(self, val: Any) -> Any: ... - def __getattr__(self, key: str) -> Any: ... + __hash__: Any + @property + def coeffs(self): ... + @coeffs.setter + def coeffs(self, value): ... + @property + def c(self): ... + @c.setter + def c(self, value): ... + @property + def coef(self): ... + @coef.setter + def coef(self, value): ... + @property + def coefficients(self): ... + @coefficients.setter + def coefficients(self, value): ... + @property + def variable(self): ... + @property + def order(self): ... + @property + def o(self): ... + @property + def roots(self): ... + @property + def r(self): ... + def __array__(self, t=...): ... + def __len__(self): ... + def __neg__(self): ... + def __pos__(self): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __add__(self, other): ... + def __radd__(self, other): ... + def __pow__(self, val): ... + def __sub__(self, other): ... + def __rsub__(self, other): ... + def __div__(self, other): ... + def __truediv__(self, other): ... + def __rdiv__(self, other): ... + def __rtruediv__(self, other): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __getitem__(self, val): ... + def __setitem__(self, key, val): ... + def __iter__(self): ... + def integ(self, m=..., k=...): ... + def deriv(self, m=...): ... 
class recarray(ndarray[_ShapeType, _DType_co]): def __new__( @@ -580,6 +631,12 @@ class record(void): def pprint(self): ... class vectorize: + pyfunc: Any + cache: Any + signature: Any + otypes: Any + excluded: Any + __doc__: Any def __init__( self, pyfunc, @@ -590,7 +647,6 @@ class vectorize: signature: Any = ..., ) -> None: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - def __getattr__(self, key: str) -> Any: ... # Placeholders for Python-based functions def angle(z, deg=...): ... diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 02ed56c8b8c8..2904b6a844dd 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,3 +1,4 @@ +import math as math from typing import Any, List from numpy import ( @@ -5,6 +6,8 @@ from numpy import ( ndindex as ndindex, ) +from numpy.version import version + from numpy.lib import ( format as format, mixins as mixins, @@ -12,10 +15,74 @@ from numpy.lib import ( stride_tricks as stride_stricks, ) +from numpy.lib._version import ( + NumpyVersion as NumpyVersion, +) + +from numpy.lib.arraypad import ( + pad as pad, +) + +from numpy.lib.arraysetops import ( + ediff1d as ediff1d, + intersect1d as intersect1d, + setxor1d as setxor1d, + union1d as union1d, + setdiff1d as setdiff1d, + unique as unique, + in1d as in1d, + isin as isin, +) + from numpy.lib.arrayterator import ( Arrayterator as Arrayterator, ) +from numpy.lib.function_base import ( + select as select, + piecewise as piecewise, + trim_zeros as trim_zeros, + copy as copy, + iterable as iterable, + percentile as percentile, + diff as diff, + gradient as gradient, + angle as angle, + unwrap as unwrap, + sort_complex as sort_complex, + disp as disp, + flip as flip, + rot90 as rot90, + extract as extract, + place as place, + vectorize as vectorize, + asarray_chkfinite as asarray_chkfinite, + average as average, + bincount as bincount, + digitize as digitize, + cov as cov, + corrcoef as corrcoef, + msort as msort, + median as median, + sinc as sinc, + hamming as hamming, + hanning as hanning, + bartlett as bartlett, + blackman as blackman, + kaiser as kaiser, + trapz as trapz, + i0 as i0, + add_newdoc as add_newdoc, + add_docstring as add_docstring, + meshgrid as meshgrid, + delete as delete, + insert as insert, + append as append, + interp as interp, + add_newdoc_ufunc as add_newdoc_ufunc, + quantile as quantile, +) + from numpy.lib.index_tricks import ( ravel_multi_index as ravel_multi_index, unravel_index as unravel_index, @@ -31,169 +98,136 @@ from numpy.lib.index_tricks import ( diag_indices_from as diag_indices_from, ) +from numpy.lib.nanfunctions import ( + nansum as nansum, + nanmax as nanmax, + nanmin as nanmin, + nanargmax as nanargmax, + nanargmin as nanargmin, + nanmean as nanmean, + nanmedian as nanmedian, + nanpercentile as nanpercentile, + nanvar as nanvar, + nanstd as nanstd, + nanprod as nanprod, + nancumsum as nancumsum, + nancumprod as nancumprod, + nanquantile as nanquantile, +) + +from numpy.lib.npyio import ( + savetxt as savetxt, + loadtxt as loadtxt, + genfromtxt as genfromtxt, + recfromtxt as recfromtxt, + recfromcsv as recfromcsv, + load as load, + loads as loads, + save as save, + savez as savez, + savez_compressed as savez_compressed, + packbits as packbits, + unpackbits as unpackbits, + fromregex as fromregex, + DataSource as DataSource, +) + +from numpy.lib.polynomial import ( + poly as poly, + roots as roots, + polyint as polyint, + polyder as polyder, + polyadd as polyadd, + polysub as polysub, + polymul as polymul, + polydiv as 
polydiv, + polyval as polyval, + polyfit as polyfit, + RankWarning as RankWarning, + poly1d as poly1d, +) + +from numpy.lib.shape_base import ( + column_stack as column_stack, + row_stack as row_stack, + dstack as dstack, + array_split as array_split, + split as split, + hsplit as hsplit, + vsplit as vsplit, + dsplit as dsplit, + apply_over_axes as apply_over_axes, + expand_dims as expand_dims, + apply_along_axis as apply_along_axis, + kron as kron, + tile as tile, + get_array_wrap as get_array_wrap, + take_along_axis as take_along_axis, + put_along_axis as put_along_axis, +) + +from numpy.lib.stride_tricks import ( + broadcast_to as broadcast_to, + broadcast_arrays as broadcast_arrays, + broadcast_shapes as broadcast_shapes, +) + +from numpy.lib.twodim_base import ( + diag as diag, + diagflat as diagflat, + eye as eye, + fliplr as fliplr, + flipud as flipud, + tri as tri, + triu as triu, + tril as tril, + vander as vander, + histogram2d as histogram2d, + mask_indices as mask_indices, + tril_indices as tril_indices, + tril_indices_from as tril_indices_from, + triu_indices as triu_indices, + triu_indices_from as triu_indices_from, +) + +from numpy.lib.type_check import ( + mintypecode as mintypecode, + asfarray as asfarray, + real as real, + imag as imag, + iscomplex as iscomplex, + isreal as isreal, + iscomplexobj as iscomplexobj, + isrealobj as isrealobj, + nan_to_num as nan_to_num, + real_if_close as real_if_close, + typename as typename, + common_type as common_type, +) + from numpy.lib.ufunclike import ( fix as fix, isposinf as isposinf, isneginf as isneginf, ) +from numpy.lib.utils import ( + issubclass_ as issubclass_, + issubsctype as issubsctype, + issubdtype as issubdtype, + deprecate as deprecate, + deprecate_with_doc as deprecate_with_doc, + get_include as get_include, + info as info, + source as source, + who as who, + lookfor as lookfor, + byte_bounds as byte_bounds, + safe_eval as safe_eval, +) + __all__: List[str] -emath: Any -math: Any -tracemalloc_domain: Any -iscomplexobj: Any -isrealobj: Any -imag: Any -iscomplex: Any -isreal: Any -nan_to_num: Any -real: Any -real_if_close: Any -typename: Any -asfarray: Any -mintypecode: Any -asscalar: Any -common_type: Any -select: Any -piecewise: Any -trim_zeros: Any -copy: Any -iterable: Any -percentile: Any -diff: Any -gradient: Any -angle: Any -unwrap: Any -sort_complex: Any -disp: Any -flip: Any -rot90: Any -extract: Any -place: Any -vectorize: Any -asarray_chkfinite: Any -average: Any -bincount: Any -digitize: Any -cov: Any -corrcoef: Any -msort: Any -median: Any -sinc: Any -hamming: Any -hanning: Any -bartlett: Any -blackman: Any -kaiser: Any -trapz: Any -i0: Any -add_newdoc: Any -add_docstring: Any -meshgrid: Any -delete: Any -insert: Any -append: Any -interp: Any -add_newdoc_ufunc: Any -quantile: Any -column_stack: Any -row_stack: Any -dstack: Any -array_split: Any -split: Any -hsplit: Any -vsplit: Any -dsplit: Any -apply_over_axes: Any -expand_dims: Any -apply_along_axis: Any -kron: Any -tile: Any -get_array_wrap: Any -take_along_axis: Any -put_along_axis: Any -broadcast_to: Any -broadcast_arrays: Any -diag: Any -diagflat: Any -eye: Any -fliplr: Any -flipud: Any -tri: Any -triu: Any -tril: Any -vander: Any -histogram2d: Any -mask_indices: Any -tril_indices: Any -tril_indices_from: Any -triu_indices: Any -triu_indices_from: Any -pad: Any -poly: Any -roots: Any -polyint: Any -polyder: Any -polyadd: Any -polysub: Any -polymul: Any -polydiv: Any -polyval: Any -poly1d: Any -polyfit: Any -RankWarning: Any -issubclass_: Any 
-issubsctype: Any -issubdtype: Any -deprecate: Any -deprecate_with_doc: Any -get_include: Any -info: Any -source: Any -who: Any -lookfor: Any -byte_bounds: Any -safe_eval: Any -ediff1d: Any -intersect1d: Any -setxor1d: Any -union1d: Any -setdiff1d: Any -unique: Any -in1d: Any -isin: Any -savetxt: Any -loadtxt: Any -genfromtxt: Any -ndfromtxt: Any -mafromtxt: Any -recfromtxt: Any -recfromcsv: Any -load: Any -loads: Any -save: Any -savez: Any -savez_compressed: Any -packbits: Any -unpackbits: Any -fromregex: Any -DataSource: Any -nansum: Any -nanmax: Any -nanmin: Any -nanargmax: Any -nanargmin: Any -nanmean: Any -nanmedian: Any -nanpercentile: Any -nanvar: Any -nanstd: Any -nanprod: Any -nancumsum: Any -nancumprod: Any -nanquantile: Any -histogram: Any -histogramdd: Any -histogram_bin_edges: Any -NumpyVersion: Any +__version__ = version +emath = scimath +tracemalloc_domain: int diff --git a/numpy/lib/_version.pyi b/numpy/lib/_version.pyi new file mode 100644 index 000000000000..20b049757f2f --- /dev/null +++ b/numpy/lib/_version.pyi @@ -0,0 +1,19 @@ +from typing import Any, List + +__all__: List[str] + +class NumpyVersion: + vstring: Any + version: Any + major: Any + minor: Any + bugfix: Any + pre_release: Any + is_devversion: Any + def __init__(self, vstring): ... + def __lt__(self, other): ... + def __le__(self, other): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __gt__(self, other): ... + def __ge__(self, other): ... diff --git a/numpy/lib/arraypad.pyi b/numpy/lib/arraypad.pyi new file mode 100644 index 000000000000..64e3e133117a --- /dev/null +++ b/numpy/lib/arraypad.pyi @@ -0,0 +1,5 @@ +from typing import List + +__all__: List[str] + +def pad(array, pad_width, mode=..., **kwargs): ... diff --git a/numpy/lib/arraysetops.pyi b/numpy/lib/arraysetops.pyi new file mode 100644 index 000000000000..029aa147492f --- /dev/null +++ b/numpy/lib/arraysetops.pyi @@ -0,0 +1,12 @@ +from typing import List + +__all__: List[str] + +def ediff1d(ary, to_end=..., to_begin=...): ... +def unique(ar, return_index=..., return_inverse=..., return_counts=..., axis=...): ... +def intersect1d(ar1, ar2, assume_unique=..., return_indices=...): ... +def setxor1d(ar1, ar2, assume_unique=...): ... +def in1d(ar1, ar2, assume_unique=..., invert=...): ... +def isin(element, test_elements, assume_unique=..., invert=...): ... +def union1d(ar1, ar2): ... +def setdiff1d(ar1, ar2, assume_unique=...): ... diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi new file mode 100644 index 000000000000..4c44d57bf827 --- /dev/null +++ b/numpy/lib/format.pyi @@ -0,0 +1,28 @@ +import sys +from typing import Any, List, Set + +if sys.version_info >= (3, 8): + from typing import Literal, Final +else: + from typing_extensions import Literal, Final + +__all__: List[str] + +EXPECTED_KEYS: Final[Set[str]] +MAGIC_PREFIX: Final[bytes] +MAGIC_LEN: Literal[8] +ARRAY_ALIGN: Literal[64] +BUFFER_SIZE: Literal[262144] # 2**18 + +def magic(major, minor): ... +def read_magic(fp): ... +def dtype_to_descr(dtype): ... +def descr_to_dtype(descr): ... +def header_data_from_array_1_0(array): ... +def write_array_header_1_0(fp, d): ... +def write_array_header_2_0(fp, d): ... +def read_array_header_1_0(fp): ... +def read_array_header_2_0(fp): ... +def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... +def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... +def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... 
diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi new file mode 100644 index 000000000000..da24ab21dfe7 --- /dev/null +++ b/numpy/lib/function_base.pyi @@ -0,0 +1,57 @@ +from typing import List + +from numpy import ( + vectorize as vectorize, +) + +from numpy.core.function_base import ( + add_newdoc as add_newdoc, +) + +from numpy.core.multiarray import ( + add_docstring as add_docstring, + bincount as bincount, +) +from numpy.core.umath import _add_newdoc_ufunc + +__all__: List[str] + +add_newdoc_ufunc = _add_newdoc_ufunc + +def rot90(m, k=..., axes = ...): ... +def flip(m, axis=...): ... +def iterable(y): ... +def average(a, axis=..., weights=..., returned=...): ... +def asarray_chkfinite(a, dtype=..., order=...): ... +def piecewise(x, condlist, funclist, *args, **kw): ... +def select(condlist, choicelist, default=...): ... +def copy(a, order=..., subok=...): ... +def gradient(f, *varargs, axis=..., edge_order=...): ... +def diff(a, n=..., axis=..., prepend = ..., append = ...): ... +def interp(x, xp, fp, left=..., right=..., period=...): ... +def angle(z, deg=...): ... +def unwrap(p, discont = ..., axis=...): ... +def sort_complex(a): ... +def trim_zeros(filt, trim=...): ... +def extract(condition, arr): ... +def place(arr, mask, vals): ... +def disp(mesg, device=..., linefeed=...): ... +def cov(m, y=..., rowvar=..., bias=..., ddof=..., fweights=..., aweights=..., *, dtype=...): ... +def corrcoef(x, y=..., rowvar=..., bias = ..., ddof = ..., *, dtype=...): ... +def blackman(M): ... +def bartlett(M): ... +def hanning(M): ... +def hamming(M): ... +def i0(x): ... +def kaiser(M, beta): ... +def sinc(x): ... +def msort(a): ... +def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... +def percentile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... +def quantile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... +def trapz(y, x=..., dx=..., axis=...): ... +def meshgrid(*xi, copy=..., sparse=..., indexing=...): ... +def delete(arr, obj, axis=...): ... +def insert(arr, obj, values, axis=...): ... +def append(arr, values, axis=...): ... +def digitize(x, bins, right=...): ... diff --git a/numpy/lib/histograms.pyi b/numpy/lib/histograms.pyi new file mode 100644 index 000000000000..25a33e3aea90 --- /dev/null +++ b/numpy/lib/histograms.pyi @@ -0,0 +1,7 @@ +from typing import List + +__all__: List[str] + +def histogram_bin_edges(a, bins=..., range=..., weights=...): ... +def histogram(a, bins=..., range=..., normed=..., weights=..., density=...): ... +def histogramdd(sample, bins=..., range=..., normed=..., weights=..., density=...): ... diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi new file mode 100644 index 000000000000..4763a071026e --- /dev/null +++ b/numpy/lib/mixins.pyi @@ -0,0 +1,62 @@ +from typing import List +from abc import ABCMeta, abstractmethod + +__all__: List[str] + +# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, +# even though it's relient on subclasses implementing `__array_ufunc__` + +class NDArrayOperatorsMixin(metaclass=ABCMeta): + @abstractmethod + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... + def __lt__(self, other): ... + def __le__(self, other): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __gt__(self, other): ... + def __ge__(self, other): ... + def __add__(self, other): ... + def __radd__(self, other): ... + def __iadd__(self, other): ... + def __sub__(self, other): ... 
+ def __rsub__(self, other): ... + def __isub__(self, other): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __imul__(self, other): ... + def __matmul__(self, other): ... + def __rmatmul__(self, other): ... + def __imatmul__(self, other): ... + def __truediv__(self, other): ... + def __rtruediv__(self, other): ... + def __itruediv__(self, other): ... + def __floordiv__(self, other): ... + def __rfloordiv__(self, other): ... + def __ifloordiv__(self, other): ... + def __mod__(self, other): ... + def __rmod__(self, other): ... + def __imod__(self, other): ... + def __divmod__(self, other): ... + def __rdivmod__(self, other): ... + def __pow__(self, other): ... + def __rpow__(self, other): ... + def __ipow__(self, other): ... + def __lshift__(self, other): ... + def __rlshift__(self, other): ... + def __ilshift__(self, other): ... + def __rshift__(self, other): ... + def __rrshift__(self, other): ... + def __irshift__(self, other): ... + def __and__(self, other): ... + def __rand__(self, other): ... + def __iand__(self, other): ... + def __xor__(self, other): ... + def __rxor__(self, other): ... + def __ixor__(self, other): ... + def __or__(self, other): ... + def __ror__(self, other): ... + def __ior__(self, other): ... + def __neg__(self): ... + def __pos__(self): ... + def __abs__(self): ... + def __invert__(self): ... diff --git a/numpy/lib/nanfunctions.pyi b/numpy/lib/nanfunctions.pyi new file mode 100644 index 000000000000..447770a54494 --- /dev/null +++ b/numpy/lib/nanfunctions.pyi @@ -0,0 +1,54 @@ +from typing import List + +__all__: List[str] + +def nanmin(a, axis=..., out=..., keepdims=...): ... +def nanmax(a, axis=..., out=..., keepdims=...): ... +def nanargmin(a, axis=...): ... +def nanargmax(a, axis=...): ... +def nansum(a, axis=..., dtype=..., out=..., keepdims=...): ... +def nanprod(a, axis=..., dtype=..., out=..., keepdims=...): ... +def nancumsum(a, axis=..., dtype=..., out=...): ... +def nancumprod(a, axis=..., dtype=..., out=...): ... +def nanmean(a, axis=..., dtype=..., out=..., keepdims=...): ... +def nanmedian( + a, + axis=..., + out=..., + overwrite_input=..., + keepdims=..., +): ... +def nanpercentile( + a, + q, + axis=..., + out=..., + overwrite_input=..., + interpolation=..., + keepdims=..., +): ... +def nanquantile( + a, + q, + axis=..., + out=..., + overwrite_input=..., + interpolation=..., + keepdims=..., +): ... +def nanvar( + a, + axis=..., + dtype=..., + out=..., + ddof=..., + keepdims=..., +): ... +def nanstd( + a, + axis=..., + dtype=..., + out=..., + ddof=..., + keepdims=..., +): ... diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi new file mode 100644 index 000000000000..508357927b72 --- /dev/null +++ b/numpy/lib/npyio.pyi @@ -0,0 +1,104 @@ +from typing import Mapping, List, Any + +from numpy import ( + DataSource as DataSource, +) + +from numpy.core.multiarray import ( + packbits as packbits, + unpackbits as unpackbits, +) + +__all__: List[str] + +def loads(*args, **kwargs): ... + +class BagObj: + def __init__(self, obj): ... + def __getattribute__(self, key): ... + def __dir__(self): ... + +def zipfile_factory(file, *args, **kwargs): ... + +class NpzFile(Mapping[Any, Any]): + zip: Any + fid: Any + files: Any + allow_pickle: Any + pickle_kwargs: Any + f: Any + def __init__(self, fid, own_fid=..., allow_pickle=..., pickle_kwargs=...): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + def close(self): ... + def __del__(self): ... + def __iter__(self): ... + def __len__(self): ... 
+ def __getitem__(self, key): ... + def iteritems(self): ... + def iterkeys(self): ... + +def load(file, mmap_mode=..., allow_pickle=..., fix_imports=..., encoding=...): ... +def save(file, arr, allow_pickle=..., fix_imports=...): ... +def savez(file, *args, **kwds): ... +def savez_compressed(file, *args, **kwds): ... +def loadtxt( + fname, + dtype=..., + comments=..., + delimiter=..., + converters=..., + skiprows=..., + usecols=..., + unpack=..., + ndmin=..., + encoding=..., + max_rows=..., + *, + like=..., +): ... +def savetxt( + fname, + X, + fmt=..., + delimiter=..., + newline=..., + header=..., + footer=..., + comments=..., + encoding=..., +): ... +def fromregex(file, regexp, dtype, encoding=...): ... +def genfromtxt( + fname, + dtype=..., + comments=..., + delimiter=..., + skip_header=..., + skip_footer=..., + converters=..., + missing_values=..., + filling_values=..., + usecols=..., + names=..., + excludelist=..., + deletechars=..., + replace_space=..., + autostrip=..., + case_sensitive=..., + defaultfmt=..., + unpack=..., + usemask=..., + loose=..., + invalid_raise=..., + max_rows=..., + encoding=..., + *, + like=..., +): ... +def recfromtxt(fname, **kwargs): ... +def recfromcsv(fname, **kwargs): ... + +# NOTE: Deprecated +# def ndfromtxt(fname, **kwargs): ... +# def mafromtxt(fname, **kwargs): ... diff --git a/numpy/lib/polynomial.pyi b/numpy/lib/polynomial.pyi new file mode 100644 index 000000000000..7d38658d0202 --- /dev/null +++ b/numpy/lib/polynomial.pyi @@ -0,0 +1,19 @@ +from typing import List + +from numpy import ( + RankWarning as RankWarning, + poly1d as poly1d, +) + +__all__: List[str] + +def poly(seq_of_zeros): ... +def roots(p): ... +def polyint(p, m=..., k=...): ... +def polyder(p, m=...): ... +def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... +def polyval(p, x): ... +def polyadd(a1, a2): ... +def polysub(a1, a2): ... +def polymul(a1, a2): ... +def polydiv(u, v): ... diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi new file mode 100644 index 000000000000..d0d4af41eb0c --- /dev/null +++ b/numpy/lib/scimath.pyi @@ -0,0 +1,13 @@ +from typing import List + +__all__: List[str] + +def sqrt(x): ... +def log(x): ... +def log10(x): ... +def logn(n, x): ... +def log2(x): ... +def power(x, p): ... +def arccos(x): ... +def arcsin(x): ... +def arctanh(x): ... diff --git a/numpy/lib/shape_base.pyi b/numpy/lib/shape_base.pyi new file mode 100644 index 000000000000..09edbcb6cab4 --- /dev/null +++ b/numpy/lib/shape_base.pyi @@ -0,0 +1,24 @@ +from typing import List + +from numpy.core.shape_base import vstack + +__all__: List[str] + +row_stack = vstack + +def take_along_axis(arr, indices, axis): ... +def put_along_axis(arr, indices, values, axis): ... +def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... +def apply_over_axes(func, a, axes): ... +def expand_dims(a, axis): ... +def column_stack(tup): ... +def dstack(tup): ... +def array_split(ary, indices_or_sections, axis=...): ... +def split(ary, indices_or_sections, axis=...): ... +def hsplit(ary, indices_or_sections): ... +def vsplit(ary, indices_or_sections): ... +def dsplit(ary, indices_or_sections): ... +def get_array_prepare(*args): ... +def get_array_wrap(*args): ... +def kron(a, b): ... +def tile(A, reps): ... 
diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi new file mode 100644 index 000000000000..5e49eff6d791 --- /dev/null +++ b/numpy/lib/stride_tricks.pyi @@ -0,0 +1,14 @@ +from typing import Any, List + +__all__: List[str] + +class DummyArray: + __array_interface__: Any + base: Any + def __init__(self, interface, base=...): ... + +def as_strided(x, shape=..., strides=..., subok=..., writeable=...): ... +def sliding_window_view(x, window_shape, axis=..., *, subok=..., writeable=...): ... +def broadcast_to(array, shape, subok=...): ... +def broadcast_shapes(*args): ... +def broadcast_arrays(*args, subok=...): ... diff --git a/numpy/lib/twodim_base.pyi b/numpy/lib/twodim_base.pyi new file mode 100644 index 000000000000..44e15308e943 --- /dev/null +++ b/numpy/lib/twodim_base.pyi @@ -0,0 +1,19 @@ +from typing import List + +__all__: List[str] + +def fliplr(m): ... +def flipud(m): ... +def eye(N, M=..., k=..., dtype = ..., order=..., *, like=...): ... +def diag(v, k=...): ... +def diagflat(v, k=...): ... +def tri(N, M=..., k=..., dtype = ..., *, like=...): ... +def tril(m, k=...): ... +def triu(m, k=...): ... +def vander(x, N=..., increasing=...): ... +def histogram2d(x, y, bins=..., range=..., normed=..., weights=..., density=...): ... +def mask_indices(n, mask_func, k=...): ... +def tril_indices(n, k=..., m=...): ... +def tril_indices_from(arr, k=...): ... +def triu_indices(n, k=..., m=...): ... +def triu_indices_from(arr, k=...): ... diff --git a/numpy/lib/type_check.pyi b/numpy/lib/type_check.pyi new file mode 100644 index 000000000000..7da02bb9f196 --- /dev/null +++ b/numpy/lib/type_check.pyi @@ -0,0 +1,19 @@ +from typing import List + +__all__: List[str] + +def mintypecode(typechars, typeset=..., default=...): ... +def asfarray(a, dtype = ...): ... +def real(val): ... +def imag(val): ... +def iscomplex(x): ... +def isreal(x): ... +def iscomplexobj(x): ... +def isrealobj(x): ... +def nan_to_num(x, copy=..., nan=..., posinf=..., neginf=...): ... +def real_if_close(a, tol=...): ... +def typename(char): ... +def common_type(*arrays): ... + +# NOTE: Deprecated +# def asscalar(a): ... diff --git a/numpy/lib/utils.pyi b/numpy/lib/utils.pyi new file mode 100644 index 000000000000..5a1594149d59 --- /dev/null +++ b/numpy/lib/utils.pyi @@ -0,0 +1,19 @@ +from typing import List + +from numpy.core.numerictypes import ( + issubclass_ as issubclass_, + issubdtype as issubdtype, + issubsctype as issubsctype, +) + +__all__: List[str] + +def get_include(): ... +def deprecate(*args, **kwargs): ... +def deprecate_with_doc(msg): ... +def byte_bounds(a): ... +def who(vardict=...): ... +def info(object=..., maxwidth=..., output=..., toplevel=...): ... +def source(object, output=...): ... +def lookfor(what, module=..., import_modules=..., regenerate=..., output=...): ... +def safe_eval(source): ... 
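One detail of the rewritten ``numpy/lib/__init__.pyi`` above is worth spelling out: the seemingly redundant ``select as select`` import style. Under PEP 484, names imported into a stub are only treated as re-exported when the ``from module import X as X`` (or ``import X as X``) form is used, so the alias is what makes these functions visible to type checkers as part of ``numpy.lib``. A small hypothetical sketch, where ``mypkg`` and ``_impl`` are invented names and not NumPy modules::

    # mypkg/__init__.pyi -- illustrative only
    from mypkg._impl import select as select   # explicit re-export: checkers
                                                # accept mypkg.select
    from mypkg._impl import helper              # no alias: treated as private
                                                # to this stub, not re-exported

The same purpose is served by the plain module-level assignments that appear in a few of the new stubs, such as ``row_stack = vstack`` in ``numpy/lib/shape_base.pyi`` and ``emath = scimath`` in ``numpy/lib/__init__.pyi``: they publish an alias without restating the placeholder signature.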
From f71c496ddc7caa2a47b2750cce455f6129def261 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 25 Apr 2021 14:18:52 +0200 Subject: [PATCH 0985/1270] MAINT: import `emath` from `np.lib` aka `np.lib.scimath` --- numpy/__init__.pyi | 5 ++++- numpy/emath.pyi | 13 ------------- numpy/typing/tests/test_isfile.py | 1 - 3 files changed, 4 insertions(+), 15 deletions(-) delete mode 100644 numpy/emath.pyi diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ed489e35bd27..09f214602438 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -184,7 +184,6 @@ else: from numpy import ( char as char, ctypeslib as ctypeslib, - emath as emath, fft as fft, lib as lib, linalg as linalg, @@ -343,6 +342,10 @@ from numpy.core.shape_base import ( vstack as vstack, ) +from numpy.lib import ( + emath as emath, +) + from numpy.lib.index_tricks import ( ravel_multi_index as ravel_multi_index, unravel_index as unravel_index, diff --git a/numpy/emath.pyi b/numpy/emath.pyi deleted file mode 100644 index d0d4af41eb0c..000000000000 --- a/numpy/emath.pyi +++ /dev/null @@ -1,13 +0,0 @@ -from typing import List - -__all__: List[str] - -def sqrt(x): ... -def log(x): ... -def log10(x): ... -def logn(n, x): ... -def log2(x): ... -def power(x, p): ... -def arccos(x): ... -def arcsin(x): ... -def arctanh(x): ... diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index 569f05435c3d..b617b3873b64 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -10,7 +10,6 @@ ROOT / "__init__.pyi", ROOT / "char.pyi", ROOT / "ctypeslib.pyi", - ROOT / "emath.pyi", ROOT / "rec.pyi", ROOT / "core" / "__init__.pyi", ROOT / "distutils" / "__init__.pyi", From cf68ebfaaa3d1f6d84029da033e78c785322aac3 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 25 Apr 2021 14:21:49 +0200 Subject: [PATCH 0986/1270] MAINT: Import `np.lib`-based functions from aforementioned namespace --- numpy/__init__.pyi | 333 ++++++++++++++++++++---------------- numpy/lib/stride_tricks.pyi | 4 +- numpy/lib/twodim_base.pyi | 17 +- 3 files changed, 200 insertions(+), 154 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 09f214602438..5c7a3c7e4dd1 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -346,6 +346,69 @@ from numpy.lib import ( emath as emath, ) +from numpy.lib.arraypad import ( + pad as pad, +) + +from numpy.lib.arraysetops import ( + ediff1d as ediff1d, + intersect1d as intersect1d, + setxor1d as setxor1d, + union1d as union1d, + setdiff1d as setdiff1d, + unique as unique, + in1d as in1d, + isin as isin, +) + +from numpy.lib.arrayterator import ( + Arrayterator as Arrayterator, +) + +from numpy.lib.function_base import ( + select as select, + piecewise as piecewise, + trim_zeros as trim_zeros, + copy as copy, + iterable as iterable, + percentile as percentile, + diff as diff, + gradient as gradient, + angle as angle, + unwrap as unwrap, + sort_complex as sort_complex, + disp as disp, + flip as flip, + rot90 as rot90, + extract as extract, + place as place, + asarray_chkfinite as asarray_chkfinite, + average as average, + bincount as bincount, + digitize as digitize, + cov as cov, + corrcoef as corrcoef, + msort as msort, + median as median, + sinc as sinc, + hamming as hamming, + hanning as hanning, + bartlett as bartlett, + blackman as blackman, + kaiser as kaiser, + trapz as trapz, + i0 as i0, + add_newdoc as add_newdoc, + add_docstring as add_docstring, + meshgrid as meshgrid, + delete as delete, + insert as insert, + append as append, + interp as 
interp, + add_newdoc_ufunc as add_newdoc_ufunc, + quantile as quantile, +) + from numpy.lib.index_tricks import ( ravel_multi_index as ravel_multi_index, unravel_index as unravel_index, @@ -361,12 +424,131 @@ from numpy.lib.index_tricks import ( diag_indices_from as diag_indices_from, ) +from numpy.lib.nanfunctions import ( + nansum as nansum, + nanmax as nanmax, + nanmin as nanmin, + nanargmax as nanargmax, + nanargmin as nanargmin, + nanmean as nanmean, + nanmedian as nanmedian, + nanpercentile as nanpercentile, + nanvar as nanvar, + nanstd as nanstd, + nanprod as nanprod, + nancumsum as nancumsum, + nancumprod as nancumprod, + nanquantile as nanquantile, +) + +from numpy.lib.npyio import ( + savetxt as savetxt, + loadtxt as loadtxt, + genfromtxt as genfromtxt, + recfromtxt as recfromtxt, + recfromcsv as recfromcsv, + load as load, + loads as loads, + save as save, + savez as savez, + savez_compressed as savez_compressed, + packbits as packbits, + unpackbits as unpackbits, + fromregex as fromregex, +) + +from numpy.lib.polynomial import ( + poly as poly, + roots as roots, + polyint as polyint, + polyder as polyder, + polyadd as polyadd, + polysub as polysub, + polymul as polymul, + polydiv as polydiv, + polyval as polyval, + polyfit as polyfit, +) + +from numpy.lib.shape_base import ( + column_stack as column_stack, + row_stack as row_stack, + dstack as dstack, + array_split as array_split, + split as split, + hsplit as hsplit, + vsplit as vsplit, + dsplit as dsplit, + apply_over_axes as apply_over_axes, + expand_dims as expand_dims, + apply_along_axis as apply_along_axis, + kron as kron, + tile as tile, + get_array_wrap as get_array_wrap, + take_along_axis as take_along_axis, + put_along_axis as put_along_axis, +) + +from numpy.lib.stride_tricks import ( + broadcast_to as broadcast_to, + broadcast_arrays as broadcast_arrays, + broadcast_shapes as broadcast_shapes, +) + +from numpy.lib.twodim_base import ( + diag as diag, + diagflat as diagflat, + eye as eye, + fliplr as fliplr, + flipud as flipud, + tri as tri, + triu as triu, + tril as tril, + vander as vander, + histogram2d as histogram2d, + mask_indices as mask_indices, + tril_indices as tril_indices, + tril_indices_from as tril_indices_from, + triu_indices as triu_indices, + triu_indices_from as triu_indices_from, +) + +from numpy.lib.type_check import ( + mintypecode as mintypecode, + asfarray as asfarray, + real as real, + imag as imag, + iscomplex as iscomplex, + isreal as isreal, + iscomplexobj as iscomplexobj, + isrealobj as isrealobj, + nan_to_num as nan_to_num, + real_if_close as real_if_close, + typename as typename, + common_type as common_type, +) + from numpy.lib.ufunclike import ( fix as fix, isposinf as isposinf, isneginf as isneginf, ) +from numpy.lib.utils import ( + issubclass_ as issubclass_, + issubsctype as issubsctype, + issubdtype as issubdtype, + deprecate as deprecate, + deprecate_with_doc as deprecate_with_doc, + get_include as get_include, + info as info, + source as source, + who as who, + lookfor as lookfor, + byte_bounds as byte_bounds, + safe_eval as safe_eval, +) + __all__: List[str] __path__: List[str] __version__: str @@ -652,154 +834,19 @@ class vectorize: def __call__(self, *args: Any, **kwargs: Any) -> Any: ... # Placeholders for Python-based functions -def angle(z, deg=...): ... -def append(arr, values, axis=...): ... -def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... -def apply_over_axes(func, a, axes): ... -def array_split(ary, indices_or_sections, axis=...): ... 
-def asarray_chkfinite(a, dtype=..., order=...): ... -def asfarray(a, dtype=...): ... def asmatrix(data, dtype=...): ... def asscalar(a): ... -def average(a, axis=..., weights=..., returned=...): ... -def bartlett(M): ... -def blackman(M): ... -def bmat(obj, ldict=..., gdict=...): ... -def broadcast_arrays(*args, subok=...): ... -def broadcast_to(array, shape, subok=...): ... -def byte_bounds(a): ... -def column_stack(tup): ... -def common_type(*arrays): ... -def copy(a, order=..., subok=...): ... -def corrcoef(x, y=..., rowvar=..., bias=..., ddof=..., *, dtype=...): ... -def cov(m, y=..., rowvar=..., bias=..., ddof=..., fweights=..., aweights=..., *, dtype=...): ... def cumproduct(*args, **kwargs): ... -def delete(arr, obj, axis=...): ... -def deprecate(*args, **kwargs): ... -def deprecate_with_doc(msg): ... -def diag(v, k=...): ... -def diagflat(v, k=...): ... -def diff(a, n=..., axis=..., prepend=..., append=...): ... -def digitize(x, bins, right=...): ... -def disp(mesg, device=..., linefeed=...): ... -def dsplit(ary, indices_or_sections): ... -def dstack(tup): ... -def ediff1d(ary, to_end=..., to_begin=...): ... -def expand_dims(a, axis): ... -def extract(condition, arr): ... -def flip(m, axis=...): ... -def fliplr(m): ... -def flipud(m): ... -def fromregex(file, regexp, dtype, encoding=...): ... -def genfromtxt(fname, dtype=..., comments=..., delimiter=..., skip_header=..., skip_footer=..., converters=..., missing_values=..., filling_values=..., usecols=..., names=..., excludelist=..., deletechars=..., replace_space=..., autostrip=..., case_sensitive=..., defaultfmt=..., unpack=..., usemask=..., loose=..., invalid_raise=..., max_rows=..., encoding=..., *, like=...): ... -def get_include(): ... -def gradient(f, *varargs, axis=..., edge_order=...): ... -def hamming(M): ... -def hanning(M): ... def histogram(a, bins=..., range=..., normed=..., weights=..., density=...): ... -def histogram2d(x, y, bins=..., range=..., normed=..., weights=..., density=...): ... def histogram_bin_edges(a, bins=..., range=..., weights=...): ... def histogramdd(sample, bins=..., range=..., normed=..., weights=..., density=...): ... -def hsplit(ary, indices_or_sections): ... -def i0(x): ... -def imag(val): ... -def in1d(ar1, ar2, assume_unique=..., invert=...): ... -def info(object=..., maxwidth=..., output=..., toplevel=...): ... -def insert(arr, obj, values, axis=...): ... -def interp(x, xp, fp, left=..., right=..., period=...): ... -def intersect1d(ar1, ar2, assume_unique=..., return_indices=...): ... -def iscomplex(x): ... -def iscomplexobj(x): ... -def isin(element, test_elements, assume_unique=..., invert=...): ... -def isreal(x): ... -def isrealobj(x): ... -def iterable(y): ... -def kaiser(M, beta): ... -def kron(a, b): ... -def load(file, mmap_mode=..., allow_pickle=..., fix_imports=..., encoding=...): ... -def loads(*args, **kwargs): ... -def loadtxt(fname, dtype=..., comments=..., delimiter=..., converters=..., skiprows=..., usecols=..., unpack=..., ndmin=..., encoding=..., max_rows=..., *, like=...): ... -def lookfor(what, module=..., import_modules=..., regenerate=..., output=...): ... -def mafromtxt(fname, **kwargs): ... -def mask_indices(n, mask_func, k=...): ... def mat(data, dtype=...): ... def max(a, axis=..., out=..., keepdims=..., initial=..., where=...): ... -def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... -def meshgrid(*xi, copy=..., sparse=..., indexing=...): ... def min(a, axis=..., out=..., keepdims=..., initial=..., where=...): ... 
-def mintypecode(typechars, typeset=..., default=...): ... -def msort(a): ... -def nan_to_num(x, copy=..., nan=..., posinf=..., neginf=...): ... -def nanargmax(a, axis=...): ... -def nanargmin(a, axis=...): ... -def nancumprod(a, axis=..., dtype=..., out=...): ... -def nancumsum(a, axis=..., dtype=..., out=...): ... -def nanmax(a, axis=..., out=..., keepdims=...): ... -def nanmean(a, axis=..., dtype=..., out=..., keepdims=...): ... -def nanmedian(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... -def nanmin(a, axis=..., out=..., keepdims=...): ... -def nanpercentile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... -def nanprod(a, axis=..., dtype=..., out=..., keepdims=...): ... -def nanquantile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... -def nanstd(a, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... -def nansum(a, axis=..., dtype=..., out=..., keepdims=...): ... -def nanvar(a, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... -def ndfromtxt(fname, **kwargs): ... -def pad(array, pad_width, mode=..., **kwargs): ... -def percentile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... -def piecewise(x, condlist, funclist, *args, **kw): ... -def place(arr, mask, vals): ... -def poly(seq_of_zeros): ... -def polyadd(a1, a2): ... -def polyder(p, m=...): ... -def polydiv(u, v): ... -def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... -def polyint(p, m=..., k=...): ... -def polymul(a1, a2): ... -def polysub(a1, a2): ... -def polyval(p, x): ... def product(*args, **kwargs): ... -def put_along_axis(arr, indices, values, axis): ... -def quantile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... -def real(val): ... -def real_if_close(a, tol=...): ... -def recfromcsv(fname, **kwargs): ... -def recfromtxt(fname, **kwargs): ... -def roots(p): ... -def rot90(m, k=..., axes=...): ... def round(a, decimals=..., out=...): ... def round_(a, decimals=..., out=...): ... -def row_stack(tup): ... -def save(file, arr, allow_pickle=..., fix_imports=...): ... -def savetxt(fname, X, fmt=..., delimiter=..., newline=..., header=..., footer=..., comments=..., encoding=...): ... -def savez(file, *args, **kwds): ... -def savez_compressed(file, *args, **kwds): ... -def select(condlist, choicelist, default=...): ... -def setdiff1d(ar1, ar2, assume_unique=...): ... -def setxor1d(ar1, ar2, assume_unique=...): ... def show_config(): ... -def sinc(x): ... -def sort_complex(a): ... -def source(object, output=...): ... -def split(ary, indices_or_sections, axis=...): ... -def take_along_axis(arr, indices, axis): ... -def tile(A, reps): ... -def trapz(y, x=..., dx=..., axis=...): ... -def tri(N, M=..., k=..., dtype=..., *, like=...): ... -def tril(m, k=...): ... -def tril_indices(n, k=..., m=...): ... -def tril_indices_from(arr, k=...): ... -def trim_zeros(filt, trim=...): ... -def triu(m, k=...): ... -def triu_indices(n, k=..., m=...): ... -def triu_indices_from(arr, k=...): ... -def typename(char): ... -def union1d(ar1, ar2): ... -def unique(ar, return_index=..., return_inverse=..., return_counts=..., axis=...): ... -def unwrap(p, discont=..., axis=...): ... -def vander(x, N=..., increasing=...): ... -def vsplit(ary, indices_or_sections): ... -def who(vardict=...): ... # Placeholders for C-based functions # TODO: Sort out which parameters are positional-only @@ -807,7 +854,6 @@ def who(vardict=...): ... def arange(stop, dtype=..., *, like=...): ... 
@overload def arange(start, stop, step=..., dtype=..., *, like=...): ... -def bincount(x, weights=..., minlength=...): ... def busday_count( begindates, enddates, @@ -848,13 +894,11 @@ def lexsort(keys, axis=...): ... def may_share_memory(a, b, max_work=...): ... def min_scalar_type(a): ... def nested_iters(*args, **kwargs): ... # TODO: Sort out parameters -def packbits(a, axis=..., bitorder=...): ... def promote_types(type1, type2): ... def putmask(a, mask, values): ... def result_type(*arrays_and_dtypes): ... def seterrobj(errobj): ... def shares_memory(a, b, max_work=...): ... -def unpackbits(a, axis=..., count=..., bitorder=...): ... def vdot(a, b): ... @overload def where(__condition): ... @@ -865,17 +909,6 @@ _NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) _ByteOrder = Literal["S", "<", ">", "=", "|", "L", "B", "N", "I"] -# TODO: Move to `np.lib.twodim_base` -def eye( - N: int, - M: Optional[int] = ..., - k: int = ..., - dtype: DTypeLike = ..., - order: _OrderCF = ..., - *, - like: Optional[ArrayLike] = ... -) -> ndarray[Any, Any]: ... - class dtype(Generic[_DTypeScalar_co]): names: Optional[Tuple[str, ...]] # Overload for subclass of generic @@ -3165,8 +3198,6 @@ def empty( like: ArrayLike = ..., ) -> ndarray: ... -def broadcast_shapes(*args: _ShapeLike) -> _Shape: ... - # # Constants # diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi index 5e49eff6d791..d2e744b5a7b1 100644 --- a/numpy/lib/stride_tricks.pyi +++ b/numpy/lib/stride_tricks.pyi @@ -1,5 +1,7 @@ from typing import Any, List +from numpy.typing import _ShapeLike, _Shape + __all__: List[str] class DummyArray: @@ -10,5 +12,5 @@ class DummyArray: def as_strided(x, shape=..., strides=..., subok=..., writeable=...): ... def sliding_window_view(x, window_shape, axis=..., *, subok=..., writeable=...): ... def broadcast_to(array, shape, subok=...): ... -def broadcast_shapes(*args): ... +def broadcast_shapes(*args: _ShapeLike) -> _Shape: ... def broadcast_arrays(*args, subok=...): ... diff --git a/numpy/lib/twodim_base.pyi b/numpy/lib/twodim_base.pyi index 44e15308e943..79b9511b88ab 100644 --- a/numpy/lib/twodim_base.pyi +++ b/numpy/lib/twodim_base.pyi @@ -1,10 +1,23 @@ -from typing import List +from typing import List, Optional, Any + +from numpy import ndarray, _OrderCF +from numpy.typing import ArrayLike, DTypeLike __all__: List[str] def fliplr(m): ... def flipud(m): ... -def eye(N, M=..., k=..., dtype = ..., order=..., *, like=...): ... + +def eye( + N: int, + M: Optional[int] = ..., + k: int = ..., + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + like: Optional[ArrayLike] = ... +) -> ndarray[Any, Any]: ... + def diag(v, k=...): ... def diagflat(v, k=...): ... def tri(N, M=..., k=..., dtype = ..., *, like=...): ... From ee78c44057bca7c330121958d26335695d07f626 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 25 Apr 2021 18:29:09 +0200 Subject: [PATCH 0987/1270] DOC: Add a note to the `np.core` stubs as to why it was left empty i.e. 
because it's a private module --- numpy/core/__init__.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/core/__init__.pyi b/numpy/core/__init__.pyi index e69de29bb2d1..4c7a42bf3db4 100644 --- a/numpy/core/__init__.pyi +++ b/numpy/core/__init__.pyi @@ -0,0 +1,2 @@ +# NOTE: The `np.core` namespace is deliberately kept empty due to it +# being private (despite the lack of leading underscore) From dd2c077119a3624eb0f91ea0ee9150a04d320f04 Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 26 Apr 2021 18:57:13 +0100 Subject: [PATCH 0988/1270] DOC: Break lines --- doc/source/dev/development_gitpod.rst | 163 +++++++++++++----- doc/source/dev/gitpod-imgs/NumPy-github.png | Bin 16266 -> 0 bytes doc/source/dev/gitpod-imgs/github-gitpod.png | Bin 22137 -> 0 bytes .../dev/gitpod-imgs/gitpod-workspace.png | Bin 86700 -> 0 bytes 4 files changed, 118 insertions(+), 45 deletions(-) delete mode 100644 doc/source/dev/gitpod-imgs/NumPy-github.png delete mode 100644 doc/source/dev/gitpod-imgs/github-gitpod.png delete mode 100644 doc/source/dev/gitpod-imgs/gitpod-workspace.png diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index d53bc802c977..282103cc57c3 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -14,18 +14,25 @@ This section of the documentation will guide you through: Gitpod ------- -`Gitpod`_ is an open-source platform for automated and ready-to-code development environments. It enables developers to describe their dev environment as code and start instant and fresh development environments for each new task directly from your browser. This reduces the need to install local development environments and deal with incompatible dependencies. +`Gitpod`_ is an open-source platform for automated and ready-to-code +development environments. It enables developers to describe their dev +environment as code and start instant and fresh development environments for +each new task directly from your browser. This reduces the need to install local +development environments and deal with incompatible dependencies. Gitpod GitHub integration -------------------------- -To be able to use Gitpod, you will need to have the Gitpod app installed on your GitHub account, so if +To be able to use Gitpod, you will need to have the Gitpod app installed on your +GitHub account, so if you do not have an account yet, you will need to create one first. -Head over to the `Gitpod`_ website and click on the **Continue with GitHub** button. You will be redirected to the GitHub authentication page. +Head over to the `Gitpod`_ website and click on the **Continue with GitHub** +button. You will be redirected to the GitHub authentication page. You will then be asked to install the `Gitpod GitHub app `_. -Make sure to select **All repositories** access option to avoid issues with permissions later on. Click on the green **Install** button +Make sure to select **All repositories** access option to avoid issues with +permissions later on. Click on the green **Install** button .. image:: ./gitpod-imgs/installing-gitpod-io.png :alt: Gitpod repository access and installation screenshot @@ -35,52 +42,82 @@ This will install the necessary hooks for the integration. Forking the NumPy repository ----------------------------- -The best way to work on NumPy as a contributor is by making a fork of the repository first. +The best way to work on NumPy as a contributor is by making a fork of the +repository first. #. 
Browse to the `NumPy repository on GitHub`_ and `create your own fork`_. -#. Browse to your fork. Your fork will have a URL like https://github.com/melissawm/NumPy, except with your GitHub username in place of "melissawm". +#. Browse to your fork. Your fork will have a URL like +#. https://github.com/melissawm/NumPy, except with your GitHub username in +#. place of "melissawm". Starting Gitpod ---------------- -Once you have authenticated to Gitpod through GitHub, you can install the `Gitpod browser extension `_ which will add a **Gitpod** button next to the **Code** button in the repository: +Once you have authenticated to Gitpod through GitHub, you can install the +`Gitpod browser extension `_ +which will add a **Gitpod** button next to the **Code** button in the +repository: .. image:: ./gitpod-imgs/NumPy-github.png :alt: NumPy repository with Gitpod button screenshot -#. If you install the extension - you can click the **Gitpod** button to start a new workspace. -#. Alternatively, if you do not want to install the browser extension, you can visit https://gitpod.io/#https://github.com/USERNAME/NumPy replacing ``USERNAME`` with your GitHub username. +#. If you install the extension - you can click the **Gitpod** button to start + a new workspace. -#. In both cases, this will open a new tab on your web browser and start building your development environment. Please note this can take a few minutes. +#. Alternatively, if you do not want to install the browser extension, you can + visit https://gitpod.io/#https://github.com/USERNAME/NumPy replacing + ``USERNAME`` with your GitHub username. -#. Once the build is complete, you will be directed to your workspace, including the VSCode editor and all the dependencies you need to work on NumPy. The first time you start your workspace, you will notice that there might be some actions running. This will ensure that you have a development version of NumPy installed and that the docs are being pre-built for you. +#. In both cases, this will open a new tab on your web browser and start + building your development environment. Please note this can take a few + minutes. -#. Once the build is complete, you can :ref:`test the build` by entering:: +#. Once the build is complete, you will be directed to your workspace, + including the VSCode editor and all the dependencies you need to work on + NumPy. The first time you start your workspace, you will notice that there + might be some actions running. This will ensure that you have a development + version of NumPy installed and that the docs are being pre-built for you. - python runtests.py -v +#. Once the build is complete, you can :ref:`test the build` by + entering:: -``runtests.py`` is another script in the NumPy root directory. It runs a suite of tests that make sure NumPy is working as it should, and ``-v`` activates the ``--verbose`` option to show all the test output. + $ python runtests.py -v + +``runtests.py`` is another script in the NumPy root directory. It runs a suite +of tests that make sure NumPy is working as it should, and ``-v`` activates the +``--verbose`` option to show all the test output. Quick workspace tour --------------------- -Gitpod uses VSCode as the editor. If you have not used this editor before, you can check the Getting started `VSCode docs`_ to familiarize yourself with it. +Gitpod uses VSCode as the editor. If you have not used this editor before, you +can check the Getting started `VSCode docs`_ to familiarize yourself with it. 
Your workspace will look similar to the image below: .. image:: ./gitpod-imgs/gitpod-workspace.png :alt: Gitpod workspace screenshot -.. note:: By default, VSCode initializes with a light theme. You can change to a dark theme by with the keyboard shortcut :kbd:`Cmd-K Cmd-T` in Mac or :kbd:`Ctrl-K Ctrl-T` in Linux and Windows. +.. note:: By default, VSCode initializes with a light theme. You can change to + a dark theme with the keyboard shortcut :kbd:`Cmd-K Cmd-T` in Mac or + :kbd:`Ctrl-K Ctrl-T` in Linux and Windows. We have marked some important sections in the editor: -#. Your current Python interpreter - by default, this is ``numpy-dev`` and should be displayed in the status bar and on your terminal. You do not need to activate the conda environment as this will always be activated for you. -#. Your current branch is always displayed in the status bar. You can also use this button to change or create branches. -#. GitHub Pull Requests extension - you can use this to work with Pull Requests from your workspace. -#. Marketplace extensions - we have added some essential extensions to the NumPy Gitpod. Still, you can also install other extensions or syntax highlighting themes for your user, and these will be preserved for you. -#. Your workspace directory - by default, it is ``/workspace/numpy``. **Do not change this** as this is the only directory preserved in Gitpod. - -We have also pre-installed a few tools and VSCode extensions to help with the development experience: +#. Your current Python interpreter - by default, this is ``numpy-dev`` and + should be displayed in the status bar and on your terminal. You do not need + to activate the conda environment as this will always be activated for you. +#. Your current branch is always displayed in the status bar. You can also use + this button to change or create branches. +#. GitHub Pull Requests extension - you can use this to work with Pull Requests + from your workspace. +#. Marketplace extensions - we have added some essential extensions to the NumPy + Gitpod. Still, you can also install other extensions or syntax highlighting + themes for your user, and these will be preserved for you. +#. Your workspace directory - by default, it is ``/workspace/numpy``. **Do not + change this** as this is the only directory preserved in Gitpod. + +We have also pre-installed a few tools and VSCode extensions to help with the +development experience: * `GitHub CLI `_ * `VSCode rst extension `_ @@ -90,76 +127,112 @@ We have also pre-installed a few tools and VSCode extensions to help with the de * `VSCode Git Graph extension `_ Development workflow with Gitpod ------------------------ -The :ref:`development-workflow` section of this documentation contains information regarding the NumPy development workflow. Make sure to check this before working on your contributions. +--------------------------------- +The :ref:`development-workflow` section of this documentation contains +information regarding the NumPy development workflow. Make sure to check this +before working on your contributions. When using Gitpod, git is pre configured for you: -#. You do not need to configure your git username, and email as this should be done for you as you authenticated through GitHub. You can check the git configuration with the command ``git config --list`` in your terminal. -#. As you started your workspace from your own NumPy fork, you will by default have both "upstream "and "origin "added as remotes.
You can verify this by typing ``git remote`` on your terminal or by clicking on the **branch name** on the status bar (see image below). +#. You do not need to configure your git username, and email as this should be + done for you as you authenticated through GitHub. You can check the git + configuration with the command ``git config --list`` in your terminal. +#. As you started your workspace from your own NumPy fork, you will by default + have both "upstream" and "origin" added as remotes. You can verify this by + typing ``git remote`` on your terminal or by clicking on the **branch name** + on the status bar (see image below). .. image:: ./gitpod-imgs/NumPy-gitpod-branches.png :alt: Gitpod workspace branches plugin screenshot Rendering the NumPy documentation ---------------------------------- -You can find the detailed documentation on how rendering the documentation with Sphinx works in the :ref:`howto-build-docs` section. +You can find the detailed documentation on how rendering the documentation with +Sphinx works in the :ref:`howto-build-docs` section. -The documentation is pre-built during your workspace initialization. So once this task is completed, you have two main options to render the documentation in Gitpod. +The documentation is pre-built during your workspace initialization. So once +this task is completed, you have two main options to render the documentation +in Gitpod. Option 1: Using Liveserve *************************** -#. View the documentation in ``NumPy/doc/build/html``. You can start with ``index.html`` and browse, or you can jump straight to the file you're interested in. -#. To see the rendered version of a page, you can right-click on the ``.html`` file and click on **Open with Live Serve**. Alternatively, you can open the file in the editor and click on the **Go live** button on the status bar. +#. View the documentation in ``NumPy/doc/build/html``. You can start with + ``index.html`` and browse, or you can jump straight to the file you're + interested in. +#. To see the rendered version of a page, you can right-click on the ``.html`` + file and click on **Open with Live Serve**. Alternatively, you can open the + file in the editor and click on the **Go live** button on the status bar. .. image:: ./gitpod-imgs/vscode-statusbar.png :alt: Gitpod workspace VSCode start live serve screenshot -#. A simple browser will open to the right-hand side of the editor. We recommend closing it and click on the **Open in browser** button in the pop-up. +#. A simple browser will open to the right-hand side of the editor. We recommend + closing it and clicking on the **Open in browser** button in the pop-up. #. To stop the server click on the **Port: 5500** button on the status bar. Option 2: Using the rst extension *********************************** A quick and easy way to see live changes in a ``.rst`` file as you work on it uses the rst extension with docutils. -.. note:: This will generate a simple live preview of the document without the ``html`` theme, and some backlinks might not be added correctly. But it is an easy and lightweight way to get instant feedback on your work. +.. note:: This will generate a simple live preview of the document without the + ``html`` theme, and some backlinks might not be added correctly. But it is an + easy and lightweight way to get instant feedback on your work. -#.
Open any of the source documentation files located in ``doc/source`` in the editor. -#. Open VSCode Command Palette with :kbd:`Cmd-Shift-P` in Mac or :kbd:`Ctrl-Shift-P` in Linux and Windows. Start typing "restructured" and choose either "Open preview" or "Open preview to the Side". +#. Open any of the source documentation files located in ``doc/source`` in the + editor. +#. Open VSCode Command Palette with :kbd:`Cmd-Shift-P` in Mac or + :kbd:`Ctrl-Shift-P` in Linux and Windows. Start typing "restructured" + and choose either "Open preview" or "Open preview to the Side". .. image:: ./gitpod-imgs/vscode-rst.png :alt: Gitpod workspace VSCode open rst screenshot -#. As you work on the document, you will see a live rendering of it on the editor. +#. As you work on the document, you will see a live rendering of it on the + editor. .. image:: ./gitpod-imgs/rst-rendering.png :alt: Gitpod workspace VSCode rst rendering screenshot -If you want to see the final output with the ``html`` theme you will need to rebuild the docs with ``make html`` and use Live Serve as described in option 1. +If you want to see the final output with the ``html`` theme you will need to +rebuild the docs with ``make html`` and use Live Serve as described in option 1. FAQ's ----- #. How long is my Gitpod workspace kept for? - Your stopped workspace will be kept for 14 days and deleted afterwards if you do not use them. + Your stopped workspace will be kept for 14 days and deleted afterwards if + you do not use them. #. Can I come back to a previous workspace? - Yes, let's say you stepped away for a while and you want to carry on working on your NumPy contributions. You need to visit https://gitpod.io/workspaces and click on the workspace you want to spin up again. All your changes will be there as you last left them. + Yes, let's say you stepped away for a while and you want to carry on working + on your NumPy contributions. You need to visit https://gitpod.io/workspaces + and click on the workspace you want to spin up again. All your changes will + be there as you last left them. #. Can I install additional VSCode extensions? - Absolutely! Any extensions you installed will be installed in your own workspace and preserved. + Absolutely! Any extensions you installed will be installed in your own + workspace and preserved. + +#. I registered on Gitpod but I still cannot see a ``Gitpod`` button in my + repositories -#. I registered on Gitpod but I still cannot see a ``Gitpod`` button in my repositories - Head to https://gitpod.io/integrations and make sure you are logged in. Hover over GitHub and click on the three buttons that appear on the right. Click on edit permissions and make sure you have ``user:email``, ``read:user``, and ``public_repo`` checked. - Click on **Update Permissions** and confirm the changes in the GitHub application page. + Head to https://gitpod.io/integrations and make sure you are logged in. + Hover over GitHub and click on the three buttons that appear on the right. + Click on edit permissions and make sure you have ``user:email``, + ``read:user``, and ``public_repo`` checked. + Click on **Update Permissions** and confirm the changes in the + GitHub application page. .. image:: ./gitpod-imgs/gitpod-edit-permissions-gh.png :alt: Gitpod integrations - edit GH permissions screenshot #. How long does my workspace stay active if I'm not using it? - If you keep your workspace open in a browser tab but don't interact with it, it will shut down after 30 minutes. 
If you close the browser tab, it will shut down after 3 minutes. + If you keep your workspace open in a browser tab but don't interact with it, + it will shut down after 30 minutes. If you close the browser tab, it will + shut down after 3 minutes. .. _Gitpod: https://www.gitpod.io/ .. _NumPy repository on GitHub: https://github.com/NumPy/NumPy diff --git a/doc/source/dev/gitpod-imgs/NumPy-github.png b/doc/source/dev/gitpod-imgs/NumPy-github.png deleted file mode 100644 index 63326d12e0294f69ea34d6337c418af38182d207..0000000000000000000000000000000000000000 GIT binary patch [base85-encoded data for the deleted screenshot omitted] diff --git a/doc/source/dev/gitpod-imgs/github-gitpod.png b/doc/source/dev/gitpod-imgs/github-gitpod.png deleted file mode 100644 index 54a78b016f3ec3019e959f9d8e42f4814b2b88c7..0000000000000000000000000000000000000000 GIT binary patch [base85-encoded data for the deleted screenshot omitted]
zZXhH&__QZTQ``;&sH8qhOG({5$b+fV>|MqGq=OcCo=`i-9hq$g78ddapS>;7;K}X` z6|3+BalFR68b?1x6G*^rPcT zRy_o2WL|`N!b94>3yAg|v3&a>Z`$koB~{OX)5*m8;*$x9?CNYBH%0E)+aqs6+Vm)| zwtY)D{}yB^pAHz_tMNpE5Gq>@f!v$r(GST|ft;4fXg5WH(FyAtkK6I8Nk?bwJdWn~ z76YsH{>2b2%r60Q!!NYCkF0<%1oFcqCZ6Pm*HA|R(Fk|y?d@%W0h0f2lo(2TRekrq zFH~ur%sgIzl@AOI^hmeZjT-KMT3FS@K+*IZWQ zMDU7)=Njc;c?e#D{?94fzQ*;x+qjy;QD4tUm+|kDhcd{2Z}~_j_20w!|9|P@|LsJ! zT<=FQvA!Lt=~;c5aRIO6a&)zRa$3`MQ86C^DsOlcitSYb7Z_Bv2Zx85Z9YQe1I;K2 zF)>=3Gy`$wKUN~f4$*3K_X&@l(FjBD=WpLW05A>cv7;Y?91?JAcjzEq~mnN`L(LaiS>&dc2lRfW5ZOR%C$iLl1Oi%X&{&)WT zd2p{uQ!JIPQ-nm88GQWw9I52wo`A`7jXF{} z`eVCWPD>0_KgZIU=5-d~<8Z+j9Kb_>)O1nai~nFSLV@lJ3`?^ zE@c7|%T)@uRLIDSBoL{^B&&Mob2enc;mErdOxUC5(3e!D;d?((g+8n=6qPYl_9Z8BZRAo%S!YU zx2B- zhpsLpDbYOP_MQ0Oxe70zCnd+}M*HLo`K4dxHqBjfK}mB??cI-}&@@nZsykdu%@e2- z=3^Jl(N|LNCV|+Eqd+amw~@Nhe&VVft-fV*q7BPjihI=af>^9T@Gn%R2#)VjCf_6E z&)8W?#m&mD3zd1NDjV;VrV5G8J{Q5oImASI3(M_QKk@o7k)1ncl)6rR;#c~KONXC% zZ@lT;8z*{dks8-0_sxZ?a&NsooBvTA3O+8AkCb{sMR0mV?&qXJUyEQF-@`;3srxsa zgn%x;Pv6nqP+w7J4ub{Kl$`A)jfW9#Cnokep7E63rQ@Mh)umI_wQ8fL#u@58GQ-*V zggd@l$mv^3K1F-pC@wZ}@8Ts^A1f)oTZ!gKb7Z(t7gSlbq`qw&?5=2B(oI0aU(>1f zIK2MQmG%eP zj_hOuz15k|fgh28!0UM_UXQ}xlctp+KPMU9qGhP|sG+t}JQv8z-SHqQFUFn4+lu$$AKriY0A1<5wdMfe3;t@2vT3TK-bg26 z^&Zy{{*cuMQ#KiM-vSfl?myKryGB-)lQc~iB|RCYxTh<_>AHed zikzr@etp?3fRXLqqUZn-;p#=XU%%`I&<+5q^%0w^gso`qH=B6-PQ^sXA^w%V^CI2; zgrkAN{k6u^gGk3q?X*4bt|zp7JnXymx|t_;^lDX@cKHmF{eW#>T$4-FO8p!uVr}}@ zr?-iv5^pNS@7JdL%rY*(LsK%ByDGlq*fcvg9e3Tte5zRdJx%Zn%?dHGONlM2r*B!2 z<(^fy>C9iw2xT3zh6i#+#=5$MLn$#S|E|D7>%RVCuM@L#p^(rS`YA)}H4~TGlTJ~e zClV(2i+(M@+xn#W2kY1Rp_+n>4^Xn+jj}e;GuYNx}983yDE=$elNFa2x6T&c>VMyjLf;hs?PUm=gufg_Sz473bzrqe!Dw%!A0d(%KhJ8 ziSCQcUd3I!U=w++o~qx)=`B@3f5?v$L6=@<#X0i0ZB2wyk&i$0QcDbTzLf54F_X!c zbQn_XsdoO0Sx`xd*BA59NU2e2O{=sOHi;COQ#>V(DCs&|n*{U;nw49lw`b|XJ&-@Z zAfS3Q6VOt1wsU92&OKRcdL-2T6J)cO-#N;-PrYF>FBCWJ`2JW7r~Bmfj}n>UgtW7_ zW$6*F#C=->Vm}Pt=*yUv+0G?2-EBCAct_wDYpPj8T_LdFm#tMuG?jo}SjbHi+7K7? zS!fV=bYDjlSFOcO*4|z1iw~+|&-8 zs3VWqoqwpx_3yKuu>(s(23&wBeD5_g7RF0JDjy1W_)_eJ8;VP*KmX!^~b z4Cvq=rCpM~fEaTa-m;=}6Ns6IheL{@+BWun8Qa)D&7Nx)nz-%O_^g4H>&1HXF_-^` zwX=?j@@xA&B`q+FARPk?AxL*9Foa5@gi5EEyVl(e*TgK#$f z;(6Y))>-dAXO?TZ77Tmtd+xojeeEkg-?^r~#(j7G2qe!=q3&I>ueOv+5t+o+lg2O^ zir#ry=6F44U1b(!w%~^QB_I~? zGCTFndcJMcyS779qG`3Ze$kbbx8m)hhsVJlHwdEzsU$|9Et1{Fo&>gl`E@Ev3HCo_H)mw1fJ2fTBv z3p&DgwfP~gZSw+cBDcyKvpL~1XC{5YBP!y`yy2=+^u{y#nK=$x2Tyq*w9a=W%+{yP z`f=b3qa#mXPy*Vk=G>O66lp9=0jN>q_cMmO!FzX`A12~%W1lP*w-8yuWu|Pu{sOI$ zzdnECB1e=FB`C_#g%f3%!))nW7s)8kN)EajZyxTgWOY9tw||XuGt5_-Pr|jPvM@H7 z!;|f=842e1tI~nWEkm$XURc1=ysl1>*v)dhC}8v74`1zZUuhJ=yq+7{HY7-WPX9i8 znmU_LUqvi+@*cmDh4%aDIe3h~Y8Q8skR-YTF!F+2Tlt7(ee7JVHnTHai8*9h%?Ji| z=Gq6FN9ZqBAW{sQCFfgTi}sXlE7ufNB@bhCcZbkuuvrQ0!;LN0E#|=PZK3h~LsD$F$C+4^BBAK;h0W&z2f1B!YXvn7EcA)<>8vIEVgtD? z*3bAMFXs6rOd!xoFB&L{M>m(y0#w4G#bN$Fc03KPYToxNXbUyJ!+xt-4Ts#{m zA=tuE!2zR#1aD@Z{a4coLBkF~8{)!H_rc3h;N}dV0Lu#N0tUbSZg2@sC#{O~46F*? 
zA`{-woLKS}l~BR=HnZ#`Ui~gBx_5rrd!}{EOcFJuL=ap|v61@r2_CBUc+0)Q38$y* zxnhmSPp4GFh(dN~{wP~lfCfK=pG< zn9sPzQrf3^A;--;m{i?z#tFzOT>X4hg)jB`=Vl)M1jc*uk22*`BcBGpJz)|qug8eE zE6L+pqrDVZ?@@$}y<;zK1S8N>zXflmCgQcLdOlzgS)~pyoiI;!qjJYypiMuYUa>BK zr<5&|VRah9?$-uY@Mcru;peUBR{|e-APqDW3aV7!h!|Mj3ElY-8sGCtbZ3UK zA}2p~tL;-h-=rryMV{cOfl_O3#VeLiPf8|BT}a8Q^MnL(cMk~3L~DBSKMX#h)n|ea zJG%zbmbf2!r}Cm4+;`t-9P;VSsF5xDR)ZBAiy0g(1`lTn+X88I#h8@GZIku^^;#O4$^f2tSM*caZSHFBu?^b#j9Qtnm-MI_p4ru7jBk0n)|=>eTl^Ik zf4xu=+Hg4jriAL3R3K%$K>I*Y&{q4dYtwGEFB_+h6`e-%hEl$1^N`NL#=$ts8 zd5e_rJnrEqcv!$x=3(_Tv~KB>j*=-z5FRJP*1tsPqsq}SNhU8d5}a7RhR=W8l(?{Bb5(N?%ktXoYd z`wu4Aeq_J0OiZ%XZ3vmht@QHZgS>+#7d+&wTBFZQ((6u20lQI3dC$#+PU-<~9{yu| zpM>H^|2XOpi}2>|q3G!3{tk~MF8OPh{@%$YF*@X`e*g3K=h9;3S`ktHxj+dAjCd38 zK^yPKbb~40%$p}_6|PhS-0@WlA$KCg*4Z3jPm71*)1Eat$&z0=7JXIgt%s8s zid3R=KA2Tb$0gqubSxP$U+kgGt8cH58u(i`{SrzpLd>yD$8=Y za6q`&b$@^MbI!_V2b;#yhQTE{gX6M(6uf>BAZ3`i>nIUnU~qTteOyzS za=hy29Ro55y1V?tYe#EuR1Q4~mjM&8(*{3j)ClHhT7McRRD6U}$@ao`cJDKvW$vmk zhVx)lyDh%#d+Mr|5G4dgY$G4D$2t=#4mVgbuj5IjM6A+y+k|N0zD=BT=4zm^7S&cs z%@Y@5Cx@isN1`sGV|3|k-~ySi&wiPfzhKXdAP&J5`4bCTGUn5kV~EeU=#(|DpT&0F z5t-6bw+Wq2-=3075B}48k(Aq36Q?2=Nro*1Yv}N1^~@>t85#dZd}(Z-M_eLjt&YBs zCp`*uSi`OJ%`d(C5ZL&N_H_A2r~a$awvi^VK=oCyi?s;016ncu2ChRoB}(1{|JX9U z2Wh48qs;!nd2QqUSm&+oznmCwJS!gysl=tXAyAU|>u~zYr!(dH>X$~jYlD!}o@*;w zL2rZojN193NdD`x2>vVq3#J z4l##e-WUwh{pJ2X%mk*A@o}n2nziFe{D4!o)B>{xb54%3_FI3vr;l^_HwtWMjfn#v z-a8nS1=m%;8VKC0Kj%zf_M*oVBkK>Fp?02!{&o+Ed&H=eTB5_Ih?+m*>R6`SUm6ozx8ZH+!?Yy_eKiOuFeJ_5+hTFW%4PRK*>NDKWV$Lc3 zrF$+Ctd?}GXV;|;*HGsmUn?dMdkdSqF-M`JiiJM^)VPlL1$Bln_;mZlaN)S)kYBBh zxYLMRz3Y4^-qG0j!C-Rh@xn~i8$I?fP`aW0aV~hS0;-9M|D4Xjt%LJ(XJy0g50lET z@|(TCwaZqt8RNWd(qs~F-Sq>4S?+PEB_3>t_JxL%}D4k4sy&#b11<)_-%BI$GezF!-_%z|Lfhb!LBW zxdc&L-y@)-RB=`&_-4+_Z&m2KKIZgGb0g)U&KWLt&Bfl8$!sAiC%9Q8V(V=%ZBz+* z+9K+nMomRot*K&=^6e5Z#o&tc(xz z_ywesu4RI2t7n~8E|Tr+-e9Ut!8Mek$BoC1U%iBd19gk*5l~qhg?4FfXWjF;^T#`& zOl^BJ_4TV;UEhzdyk|SERp5TF+}*G};kav6)pFMsuW%PDRC(Gb>OJp5t7qnufn2AI z{tx*t9Q(^K+}GXTJ~;gKY;5G+kF%wjbx^N%cyy#P>U?5vM$RLV88TYY&Am!33m7 zxibIp=7;{6$uIRN-l_MoMb>ZUtyID|EO)eYSENMYnSMd0wVY|~3`ymTtLAD=>6N3# zC3hAL8`6%nHWv|>LYN7`dpBDO?^Qk!2iNoG1z$+$^sI0h(oN`WJLnDyeaLB0e^Ivn z8%zUG-Igi=Z8`-chAiSRAf85rOQCE-Gr+22JSnp&=nl6zm4lA>>(|4H8d; zvhrULYN?ubp8|$RZP8R60hD=SKSsa^PTzy{Y5kC_tJ+`Gs{vJ-; z+ey&Z(5bG?PRg2lkidbi)LQfHm{|6P8tc_WjiV?3cM5Czlm_K{s--hRCx1PN=5Ihg##BZm8ZHU<5 z!bM!*ckU#k3Fj{-1i$R*h-eaL5M($& zTMY>00V>f8(BT6lOhny}2!jM)DIypJ@^z={?27&Bg7$YO2FF`XW_^8qE z--Gnvp~Q>R04oJJy#dJ1n0&weBta`Naq;b`?{y=>{@Kb>L{3#NM>)m_`IxEZQ=gvL zBda=NDgiSE$N(mE`nMxY1BFFI4z6b)9un{JdF1uABxAhR6Js5k|f1l zZ`cyZs&UsJkRFQ$Dk>pv@F9nf>NDePy|@!7>!Q_(Slk+_I7pAxgjyG!)_sxIl2=!i zso$L^EN-QUk7(tpa%tZD&N=F-%|0wB&@D;`Ro9yNNgvJP+|GAmNMj;yl#U|0oaT}@ zrO(|)fG)wKvA8kNx&@KBvFN&4(sA;8`~P}|Yqr~%!JBE`%k^oTbGlE<;;=0M@sa+6 z&y6UDjY1ufy34Z-pl!y*#g$@XZ5muj{>z`CqVd5!`99&H!PA6*}j%!C6g$s zVa^i+M6Y%X`5^J)PKVuOarQTF9<~P&JAjrV?z$7wPfcKz?Y#YvC>>yJ|C?@u~h2b!4;$93CAiTo~iXK!tZ5j}hMG+PWpL-W#XAY-oH z&r z4o2MhybRsfdkZ!MNRdbyiB~%yqQlO{xnxp-0kPWZJxvvJd#A0aqp;T#P!(4CIQ&fP zp!$~@Hf`DSAfoko=>bZ!%~6&IVoVQ>RstQj+L2-5d1~cSWlyr2q{zhHn`jUDeV4O% zr~UhYJl0LEqo{qg?Nxxgq{*5tZID@hgl+~O@#=gr`IFlltrck&n~r)U8*W60BXC;^ zvPJsUy|7MUCVh*_wPwJJ%hFC|oJX$izEli-7%k}eB!0E^ki(f}@xa`{II&;KnBHRK zCT)ZKVfiDQ(UIQyLFrY*JOWDxaNaQnU0ct;Ejv10pr0U zu!g$%8lsb+Yw5;dCgAo+j1XNG`}aFQUZQh0_AfM?6dGdr5#DDlI6^uUFZZ{0bU;5a z{$iLEby^PNfxtLhgFZ3;XhH<^0FlFlFzduQlsBK^v3 z?We|Si_?GO7HN1!#oXXa}Rs?lSo76}YaFou*$wSr{4<_WG$LXGc-Ol{}0 zrg5-W7X#&jNz^oD*zC!fuuKMt!MkfvCbbj|ouPFJZ;6Iy-?|`xE?4P3OTXuxRuYKQ 
zj9Nr|Bi>RBRJ-VJ;$*n-+#i=nS^TzDJ<0Zea-CdcfxJ>DA$l*MeBZXq3U2Bu%oU`Cnhz)0zs7FzStU`v3-fJqtqHtN`xKl@jMVxh}ke#yne%_%7=9rdclw+CO^+? z>#^^pwNJx^Uuf|bP4cDuT6}C0QbN`~y;j2THbcHURej`xD(f`&t?s4DIOlVu$lpGW zXRT5zFKWFM#P!dHhPwty$W5Yf<0kg{jrCQYJbqmKF+MvMRE%jU8`Z-V@soV~%=8g+>w zoH)1{2&-U4oV-PF`CID@E?fbl*@3{t^EI~23CCRk?7XxXP`bS$9&fG1n!%W0_j5lxJV z7qu`(hkIiO!~LlqPOZOeeuUQysFv(X-F^H6NUnXlrN67DZ zt%dda<&pRaX7lghKwxhl0ECc|uOW#KZU)WMuxbU##-ZmK)dotL`12G83lDtB9#21g z(M}+fe`n-->FZ&6J06OG?aVW*lhLX5o9v!NHECHzE9(WaG*m%&{4#UI&GLvjYW7He z2uocHLd9tC_X)!?a&tQ-nOe*pf-zr6 zp%J0otSf6zCWJDMPi142s9jj51iP^nwNCEPjmMjv)d#@Q@6?u=dL|?Whe97<(c8aE z$Prb-(ve2D(NR_G{N}LelC@3sJ(Bv}yXY?c`KTKb9#I$siZq5W@h^7qUo=LBb?A+) z?d<#$0@?!u&v6*4Tz50&Lgie%qSNjd<8jJrb=lRP!Xf&%AKq09EE+9!K=u2J^o57i z6&Bv*=SM6C7#rP6m2`@8#%_izB7_VIZ>V}mp2BEf6Wt`3d(Rmysnuf$>*Q}n6k*CUCfMVHH@Ju)wcA>X>*j44*h**=sA4@S>m zk2dDZG!o#D3~>?7^57@EKrf48e5lw-o7IX8-o;!%%!#CFuB)WFDZ zhxL&SAM=OwKq@5_&2))p5VN1dcTa1jR^S2udGUh9JDF5tr*BC?Q{~NMh)X`Tsf3PY z^a!+xAN1qnlMK3RbFQ1R_xpuEd84mSmnH=_=2OW&;D0rBBlSvjJ)A6s*nsdiLJz3w=exrl;tXIQJ zBr(_ohEL!EKk=b%;q1{xCvB2mvrX!K9A-^h`m=?r)FVyQ1TMdT2u7bDH-SC-qGtG` z(=7oPr7-OJ_*Km2hVMypKh^CUv;3Ay$gjbNrdrN%G^9ab6aM@e(=F=b+g!U`6&fF( z*60K}IY2F0iXpe7hE{(YyTeP%WD=6wrT9L`L1-Yg36`pYrx)nL$&nY8ro5%3yp7he zN!C1(MOMjsIq6G6M)B(q^pW9LL6_3ILVX0yDo@@_ggO(9I$Dmh9@!V z;prY39QNV1xiKAcQgAIvS8LS>C$k&;4weQYm(|U+Q4tvsA)~fVp-&AiJQf~h(VLej^%oZni;wQJqC3DYybL2ejJ|%Y zA!HA3MPz=OM1EmVZ(GX7UEYB)++B@E6rs^KB@~lOWK2@OY7tyeP<)D_YDOr%-k+un z56(tQMI$0%!wT`0{cbaP@KnC^Cr2GLs5veN5gcWO$;gf9)ZaDP7+l^$qWHf`Th(+tUb&6?hy@Rr& zuoi5e_1!K0MC>m6ru~_k17o5!5I2Jw1-_ugR+I}%&od;)>eubQbroLZkT9hyGOl+fJIAOat$I zJ%fXlnxGKkHbx~2SJT#VT`m0x)?5)>L)eYVubpo5NS;z{+@hXeo+LNfANdv&t>xL+ zpU|t2;otEMixOvgwHQgf5^~Qya4v528)H=9dV@PpvMFWQ{{EfT6#_gKM~-1`=c}y9 zCkS|{K;g|&Lz>n^Nrdi-EX#O)Omq12fpM+e1+52rp|=Ckw@K9FY!sxT#j`p+P+t@) zGw_WuA9HBLS+s`n$jhmO?A+caYk24)Z1rlloRq-gL3opN632t1u)T*?`!+K+&0a`2dQQ7 zcN~!2GlUH~bs+wZWu?$SPel@fAr$lbY(}uGbgPp6U~4VD^@ANg2x?fOpUaWy@4$JN(+2WP>AoRTE#`y^74sQS8tp3U-qsB z3;79>kYdoU*`&^i3T3)aeyihGHwj4WL{m}-h;wxlXpz0e?rPsE%yiX}(K`pB?V{YM z#ca=K9#H&}MwSK4=CYzO40}-0rTyQH7S+WEVHL8BT8bGcaeZ5o-7c}|Sj}JBNrOv# zpSQru*^D1%5FE673%>A7n-$KyccPzwJGDAKDey~29m$f%YzR?}x@E+4YzAxfOfV#w zneDCSHPWNKZ%YWBNd3r4^uUxlx2AG;ZiBKHaWrEsoc4ip@z4I`Ks;)Bkv$61>2z zyv>?vt=|-?@+m$VqX@*(O1W~1B)??Om(W^cF1bx672i$v^reF0?=j)(vypWoj6Xh0 z7t=nPbzo6VyAxS6W10SSuW0Khp%KH?tB-T;77u^>SMrmc3xs3bb(|}6mFs>R?$@H~ z;nTHX6!wKfUq<;gI9hwik1bzVT#Wu&eDbdD@xxR6FAkYuo*_1t&urBRyHiW12_ybJ zI*Ee#aPYg4m0;va{25y#Me#S`8+Y!OxHE%KOkrg0b9()>3WVOm5{OiO5xmNbT52p2!URnT?8X|hp5-Hx_*$fyJNe)+7b*3!}V zOcG={|Kp@M4T$ukUF%&(lii2zLRF!nKDGM@DNffvKfji|_JK#USi&&=c^?Ky8niK0 zE~p#O&JL)Y)**f17-&G%8KgV}Al`HnLkOvvCR7EGlm1Ht+Y&abLb&f;>(+y`z!{13 z4OHDZo|t=jaYq)AX2EK1)z zGn3X1CB&xn#<;$yKQ~K?l;5DD=8L>+LSOkE79MWU;5E{p`{4uVkgIQK*scv)Ur%yB zrW2uW?(Z*b=a-b+Piku5`b%Zp2i`oHSXj7a(xKu}1z$KjQEsL6`0}5zOKWPxctzLf zo(eA3YQRvSvn0;1841nZ+b5{NH8VEWSh;iO&K7=rPkfP(aOEo9*UuF>eMyR%nq^Wz zU!}CF>YJyLtn6YU|H^@;q#!?k)*UK;+fSc90d93zGeSjWYk67gOafB(T(~O8*2&=Y z*@14|`eX%liB2#7-mhOal7;Cn(P~C)U>RzFLL=_1*MMpcx7blRLjU*nOTT;(k`!O0 z!^bI_c`G{vBzqpMGo+^KeZ>CxGqxtracF$p^mz4cX{lq~mzo-_@9}YQuWXeO`7)F; zW{4FKSg)DT&$M_V%c;}4h(|_}x+;054Vs)@&&m+6THI0xpguJRTevM8ru1JctrU4K87s_K`JPoq@A9+Q7fsc7Fp~N{UhUru<+^aX$uY#<=g!icPr-U|M$*PpVFh;(`d z&~z{QItP(MgfY#{Fe@1TU1#2V4fml+e96J)@vFb_Da9(a*YXccTwO&xyCMrM@sXa> z?ks3(kgq|M(%f6>!@MHukVw6Ujw2 zY$px<=aIMGRyGAf?9R4Azk?B3plOx-*KCGN#;r`+`=G;wRZbc~KYDvw?`Z{fuo;2m zVJSSbn(m8|yp0S$;#Hw=!xyCG0OUq!NcYF^n7%$`y>c7P3hiKLG?3*ODTi=n4jGn* z2V;bM9vPAwjWXoP!($&qd~_)h8@5&e+s3U@m>AUwWL3kv58Ik$m&4t6Rx)LW)u7K?vVE{E8BA4U_A-%%S`M7 
zdN=HC@Y#P|LHc&d`tUbG<@Y<~(cfA_^2733#2+%+@#(dlk{sB%)w8#k;VQmU6JQ?D zJj0ySETWM3XHsS$T}QutdpRGbyZ~(1^=QCoK7tO(#E!)YO?^1v$FPKfxETO@b=a|k zys#=W>A>~71`*)hrB$GS=j;Hqlo|Y<&5#W;E*a-H75}B82hsqSq(FacvLdO06ucW) zV~jabcMEiyw$kGvW@ceO)~c^xr(Pe2ddxaC6=GM;QQ-ryfD0>@3k>|CMZ+z3xYnrC z7Qkuxw!hsPUHg@|mOAtG_4(%2>@QyxEXHP_Xe}k>8WlC`MTGY|u!Y-7>vD4>cL204 zEd;Fax$T*HdwX+No}8Tla9zy6gA@`OT*iPXCzR>v@-zxWz>Q%4*;)@@9^eF zH7SElTPNo81^$=K&CMqv!B9H<9>rYcRD z0M9~&83t>nW0*6jQj56ZL<-2)c<$zR?l&?qVpsx_fqoS8gPA;9!kQkY?JXMtbiyRR zx8RR)8J7@Chyd3O6mS%cg4YO&;8EK=z<+idVB=?&#gp3=Oe5I_JLpp@At!=Xwf`J zIVV#}Nf{O9T@e&WLTZ108mkVT)hOYcFE8HJK1T;?C{nN2V^~{}f4)v!QHUr6d@I<5 z6mtL|VaB9)E`$WXYmY$zW&&qcO2)G)$^*`gD}l?SPln$YK!&i7HxwfIjyJ_fpe z`+(y&kqcM(Ggv3G6G=fQ>E$s%)~YS8-wMExAn&<+GRp*qe*mnT3%_-eW+x{L(33%yD4-XHDpULtgOYhD2C?OSl0VgV_^pFDLYOpigTzNDli z0R_q!dXUUt!dhi4!aD*9|p8U)PNM;zjG=;bTp$S z=(B$Tba(j~mL%o&DZ&(aD-5mzK;9h(clR#}`;JF9T540L9I0ce1?8IPw zPrFa<1SkN}S(Y~#KaEI3SBY84$LuZRZ(itXZtT1H%~SAO@4KR)~LDB0a9`_M@7^!zG!kJ$U9rA$yO@Q>9= z2~Oz*3HAuOd*-yD3(IAM;&$UjNWSxrBTS&ue;K<7?#s6v1OE>HA@rKS5?4 zbeGQN`GQtIkrWDHTbjOWBDC2WaX*HU@BOCFYIZR=@;-d>{*SIjIQjPc-aKysiBWbm zJ(*`}<7MuVYZR4P_tU#nGv0l;2k@oOX>PQHN8S2xlOsrvBUdR=hEi7Att*-(1{pXn z1~h|LYzG!#nm5v6= z^COQqsS&ZTB+yzOpDmL!@b7A^CCbBs|ANr- zYJQ+HnSqVh?5@5QmeTpC`3JW_ead)B@Z6)=B3ka9!@~z@y)^o}J)tpo^^&}0sREwT zV{1Ll`C!@=ePs6ULm$(rs}gcd-7gUr>#pX7zTQN;aYwr?4~JCVo?J_Eh#VetgZ6#` z=8a>9+XW-aCl73>vvWiYLgqr_U0|!k-5(tMLbGH@%QK?ilWyF_s^*!>miB{+_RQF9 z!0>>8R}LwINk5SAo(Ohl_1ogj7kH`)HY629RZvkXz&h}?zMv8;#+Iy7;dPWn; z`y>euGr5A%d8_$tQ#t{lwCaOX+O_(w?odn2TC(m*i^vUpE$?Xn=zpqco?*BWs1lGi z()h95m6FxCt&`-F%xtk-ze)g$_NMz7Ns6)UN;~q8xo;{4_?A>gmE9l0WA9f&bk+=t z(PE{^?kt5*8ja$_-Fd_^E93Ea36b{KJ_EKX8QarcXvZ&{ulqIDFXEWYyi{KH0>j^h z!`tKi6py<=w3|B$KG?#UQexbO-<{oC+}$342Mtqo*hX|ltz1n`WW+r}Mak55^(OLj zU6leWbyu!2Fjibvj8L77lLr~eK-gLbWrVzy+UMwRTRI<1zbyvA=e#YG_>JO?AYesn zCB9xn=)8%$Q>FmtyCEubL^2TmglCO4chCpN_%s!WggQCw>x^s1h7L-mF%L{U-b?9& zF_{}_`(Ima2!Ou+{MyNxn`E&VP}2j>s$lF)qf>G!vM&HN0yXVQh?Eo7WEMnZwS z@3DKe1bw`Dd6`ab2)Xgbg8fQsi!dCofaf=3U`HJk{Tm^d@;wLW-9-cscN|nYD?weq5i=<&AT3G>J zWqYjJhcr1G(I4ZK^K_!9a;~JO{PWrLwcAZ9i1*51-_k8L4RZ{6d5>q|Zsc?_?Dzgk z*3EPhnme<3XFUH|_7J6#s9XJqcaP;WSbixd`b3Bhn6w7;ppmb1;=UYDc#C0GO2uky zXgkL@fir!)63cJ{`*=6H@_mV?6a9JmeY;;~`zn+Q8~GY@s^_0S$3KTHA?nZ8?{I4~ zECSC5x1PDAQ1K=IU{}K>-%3auUV!3Q*PJ551QjKNd@QAY)^}Sy_ie!C^PJ3~PsT1VnNDQXXdA2*lq=~EXaX`yULG5OJdk+or%SrWsi8m%c+&jQ(f&TS28C$! zx~jM0nJw8Y;hJ(?L&LrK*PQI(;w^>kcz=8>;N74qskGe9nuzehxO3vcSd3hLf^Q~r z^V{w*&WB27Lr?{li*6a{&0*rFVPSj|&P`2C`h}hG-1K(@P#;_tTJbzd|Gm%v61ty= zgrCuRuYoib20GwaYqgiX9R28B?1V!>fgyd%C-N5VDmMD7xCpYB?46{zUy6OHSk$lv z+lel&iYs-tWpNU2O7PoJU|W6uM#{fWY~0#Au+Q0(SyA}$BWI7qz=(iyG;>dBCR_Ht zB0GyOE+lYDo)E+)(9jZb`5dqsfk|hVKXCP}6ap*uRFi^9S3AFyRNBKFY#bb6t$*6S0i1{LINi+;*T7q) z58gvbO)c)Yq$n-j1PoXM=z*#NL5_J)LwU`%*Z`J3i(5%q+2%)0MNSTj?dtmaWT_d! 
z9X<{Y4xoYH?tTvZ=5*47qq8F`muhu2<}zR$`s0Vk&C(`6Q_&#a!+PXNdeKr8gxJQoER z?V~s3>+)ghj~};wNJ#L#=j-ievsL|igS?^t1Xw{(UTBs7cgOrS+{H~)N{&RdvQkKw zh?s+uGutPMaeaMV5f2naKA#07G`%_C8GggD4Tj2s2-zFphhsAcU+cv+ftqL{I*HCX z;D=s_>G_N7ZBFnfVL14PmJ{;fxCOdfl25k;FvqsH%hNTWa!x zmRw^;!9{h!OXHFh4OXB@ioYuxn0qp;NHFkAi|?@{;nK6UpmA2siTNus0u+VWu3{@n zPMO)Z1#sOG<0@DaN7+zxnbHRb2d#%i^=Bb09770WA5c-Vb3n0Qknk>5Fm&df8LgXn z&lCcbuTF%u7=nUChyrXYPtF zLi)5Z6|@=Nx%te5bm;|*vnlwqaG1%;qXK!i4S2H_m6jWG=bm;lE`buPZ)dx+6VONg z`m%*khX5B9UVHKRFmmEve0so$@OThVGi}CW00o%VhVz!A!UCxJ`g)zBrlDLrOPC=w z(O13kWLfIhDv1dGuE=cpj)KSy+PsLw*d6fn%DT z{zgO930_Fz2J2xR`Gsi84EJ)N?YS5Qz3 zR$H`zf%|h$)nWv2-~AEbOxC-dE3aA-mXu7yp@O;{(ILr|c5Bo%;%Ek;T{n zWb`Wo=?32r<1Vs$dElAR)YQ}fD)7u8Lci9viHzK~wc(RBEQpwX`Lipfexx11JIa1F zTE@g7lJGc*y0go5FZ_1D2r%K_o7Q6ikLj`z#Ieok9^O4&%YEu~$JUTbsZ{71+Jo~l@GsO znd`k&Z(mpRFI(+@_T7kd^~oVaOC9VpiqE6u!2wQh-|G$PY>{ZnjXl9ar(|HFPMJ_k(C#*Xa1L% zWmS%>bv=dsUR6K(K1lCfP>l3Any?_m*lrYq7-N}Sv$8rB*9GIkqbSG)Q92>R6BAtt zIV@qakE4NrWyhk1uCW(gpUBj=Z@2XPrCnTvg?Hefs=c#Uemi(7AzapIQ7rs}A^IXn zlk8<#(v&CYPz+B^JzAJg7(dwnkh&LaVQw~3k??*S<*}Hn5)&LMlEC|Da#vTvm{{mj zrt-%_|GWU4OV|f0pss^ zrm4bV^2-A=o+3|TxkA#@)f^n#PMP2x-Tbjuj{S-AD`94$QVQv+o@azizo4;WUq;Dg zsi)cr-cS?z;7Kd_FyNCi`!g^o-s0X&JoWsfeORcsf@?fwQ+af(8B;M}yzMDeYtKFV z)G8JD4d93p(=;|V2GDCKECax6wZFVL4G9SWq8IQYn*an2Pz)L}rws&-#Zf$OI zc2LmL{*(e0)Gk}ofCv{hi*kzo@Zm$I8~_qtd#=|k^QMf9XO(w`8`je$B4*4%lnMab zR4IGAx`4uI(LzAUtF>W3d;{@gM@Mt><*XWo-HO_!S;NtfPYB~;V(cA3KTLgc!vwi??XAo1TM>2o0zz(NBw zp8DP90}^;Lum{O;y!`z9vlSUgpFGqIqKeDL3^=xn;4}|n)0+Tt*X5Ej(AkMA4BBsy z%Py;fj9bv(=>M^SMpAMcWfr@)KbcEqBL^ASAeI`dwUv5oq)S7 zI#X))2;h7*??rES8hU(cZ*OPwJ!KEGXB3YG&MdFv(WdT9DuWC@-fp17axq|t4(W>A zJDUK9p=*~gIoQeqL^~Qw8Y>F2bkCrBh-rif8y8oOv4F5JYclRCRY8Xl7KjKNHAK7K zL!D?@E0^v}QhLc{M}gw#eKaL%hhJg;Jo>E?F&!=Sw96;QRhJe5kg;4fSb}xwiPa+9 z?;2>?8R$mu07dSW@NKp{90EJg(!i7|b1%&)pzbgo|M$_RgC=BR;(+@i4Bos_J4mY| z{SHv>SU5mlA@l_-rB_sG6<0YRJ#^-B9Py*o@t6;w2tncBv0e{{Zz^lo`Zb<Pm{kWN&F~tp>rFJ5$e@wG-#xJ3XoD0>SW;-lSq~Jk_?EWDp=O zh2W(}wlEO4C@k$T1;qcu#akl&hG=i^NP7A-+Q4%loqHOfDRIf!l>>sVmDq4e=CLy6 zSK({l_xP-t!16J4BbYTSA?yGf;WT5tDQMcW#;4w9jc(P-591GIzD!91@^`%bk8GRw zr)%BbNz=V;0<|zTIcs zVY6hc`@Mf}UI)C_uA{x>>e#Q4gQ7Fm$gD~b1nUCxToc>L(GkR@K8YA{>eUD^F`4|D ztVl^oOgzooQupBEJ{8ufcuD3D(6}kr#7>97Z{J!KJLv=7+V26!v)UP)?FC|XB2rph z3o0w)cY#IYt9Sg_4bpp#JD}OA_qb>NE1+Tyapx8mMz}i;j*oXX!IE)*A>W;%G}}Q= z>KTw}=-cjk$I96BUu;GSa$8t{&p(##aÐU6{+n+}yn0N*0Q)&QyoAe>T~Da3IhD z>K~qdv7P{VtasZ9el$*CM%zyh4>x<#0Y*Q&(6!b#&lF3NfteXg!@z&olbr)CAdze_ zu)wX==b;ESx{2#OUuF*v3yNyuAd9Qv97Nz!f<#V+=Z=3iK#vERT3B?jmT?9qGIhBq zZ4<=b)eBMRgo3HOFNgx2W-~GVz}W>i%IgZOAoyN8Rt8-ZiD`;SSCZ}O@jQc?s>%i( zs^Y|XR(=Aq@S3uzrDgC0&GWWzvA{*;!|>_??PexX^eK3JYsvxdD3=0&0o)fi>L*}f zyL>_~_G)#q;*&?bQ!qj)9QmKwU0Z}iB9HnbPD1f$>4y>HC;s=ox&2R*I!}B~q6rAi z|HG!9c6KW6(Nc!l$-Tv01SW9aBLs5-)_XTmHJC_(=Q}BFLVS}6NSlQQAqR|_2@ZWR z>5}(B?xe8{%#P6AD8s4{t7_SkCe0*V;q2G|X3(?Q&hRYL9wV}&D6^&}>51G@_d z&c9=>60+QHPqlJQoi)H1==Hj5Vq$XdVh2;0kyO1Rb2jdul~|YxwjxXXeLZTp0v6ak z!jf@uILYR@ua=jWO`+)G#e9PPkSK}qBIX%Y=6bR<#+lL>H>gq(?#er|X8q4=GK;t! 
zy)5%)Am)Ri9N^yW^h6}lT6%4cVaDOh`}YhvNPAhL1mC@N1ttFZQ_7Y2`!FC#z-j)g z7wEV3{2$KV0;&r2+a5KLl5{HqBF#n=>8>pyT_z~f-6dM$h&U|Ot-@#y0^E~^T3u=d1 z9`_D;6Ywq7woaxrBZI31Y#8<834FGB7ny7rxg~A~7$dv=V6=}Mv6o=Kt6xE|NkC^2 z#qnLMJe>#5P}~AJih#AiFHo#zwT^Z5s~`kNgiw<*{Q?mwDX+X5dTNF}1D!XBWPfRo zf(Z00x_HMUc^U`*i;Cuqt!|_A z#?~jRG;v|+`<&0CMNbvK5#_zew`yCL?U8h{hFEVGY~7*pCYNfeqjQnQn<`97!$F>EYlLLgxgmf)eplnK^{^ zhWkpGan5=_Nvh3ZWLFu3Q$FIaBI_Faue-lXeU8r3!d$_7UEHVA4$&-6uL+l&HXzK5 z;NcOOC!bY1bH5w+$uiq8E)P?A#;%b$?|dU!W`U6>vuE8dnwk7OrRfkc6(g-tKfbj= zB~mICKdjCv(v|xoG|ZHew{KH8UdPl?P|T6>Tu*>wiGxNl1-uktQ(>M6adkxeP>AD| z>ZW(AQ=@dgroRVqTTIQ2CY@%1VDhOQUXigemsP*wrO`P1BA4&|&^pNd?U=n;FC3G3 z{FzhYUYtW@sY>&sL^Rh4CY^&?S?05+!uRkKKeI);zSwkkkT?;L;xgJSG*cjKc4-Dh z-ku~jnH4TIw(D5^apFbg{##>EPX8r}Jq>u^Hh~_u(*Gj8{eR%fzb>;8V!H_|CJ(9d zCxr4hEB^OC7XK{KV_yJ>+2~;t2!rR(pQizzTd<^&NeSPI{|n^&X98<(YI>EMJEaG8 z1lZ$r?1A+nHka%t*R}60KM4y1tU!{>V#$;wE;jVF(Cm! z=CR2niiLJ_KtYMC7-_*~DDxTy$P3$4+Z3B0+*VDlUOiDvh~iu<0~_Ckn3dV5e<1sn zVSvp0MMcMQf#G^pfc#=Ll!!v~vP+G^J`;!&W)yfNH2AL{p=O|X2NcQ;*oYtLR!`$_ zc~3Nv;{u`b!?})!=YUPZqj-ev;bP=*O$Llg5OsO^{n^`Qi1LoXL7L@9h*zhcJb41Z z2?LAW<;-Oc8k}SfbP#;=kBEAln5-Xp;y?B|G zgq8d~I5=496Qeba*!JPoFB1u#RD)`>5f`h zuz7rDq`$;|9PB?z2-qe-dgOJL&fhYxD(ggo{Fb!1^7n6H+tKys_im|C>$TLrp}R&E z$44$m&M37e5E2+v%`MN`9b*>=T+hm@k z(bDvH%C`^uQj`C9Wi7X~aXr`;_9Cp6I{dbPG?_>9&j;?sGaLmJfGV2vDpgU-`I zCOV%Z&w@FT1ojiTH@)Z1Yo_5vdR>R~bnvjks zJ>YpnY-zhPBj0lI`Zp#5>~&44FT&51MC;{B^#mnti2XwK7EpvJCyWk^!z(mu6{(7R zjg`o64=&ZNjHe&E{|%ev#qXBiWyHixXpF2*ofIVBp!xn_{<4&=-pVm;`U*5ud4Yz0 z)socT?vkB_=C-yOJ})8gUVTNIezeHxN#dzxIo zy2XC3b<=fOct(Xexvs|V^mX}?aDU#JuxI52?d`=|bLXz((>d01mz+t=ThU%o>2rN* zi1a0s#3*@px;|3qGmD{!`j5-Iiy7HiPJXVRQ|Bp~l`lN&kCWHOtSGIbS1J=7AUQk> zi&D(K69fMIY{92d!@CCgxTF-9rB-}2#kkI9?3G?m4jf<;l3AM5`Jt^fD^)+71feP$i<|ERxiGBv4F1!u%I ztr#}>K;2u3ist-zFJRw5OdcbrMMnVLAlhC>l(wZ^{(^}irO;5P~$ z15Zw2?Ia>1$mNJX(^F#Wv6e!}QwTy z%lN#Cl*;Ceppwl;(%@DUZAnTawA6-l0F#mn415tV-MXk)dkf%ml15PO7(9f}odyrA z4noooy#MBCKV#(E_T+lq>;yDLbW$04bZji|d8V(>={LCEfdqS?q60|&hU*eU0O5c# z?NTQ;S}&*MdIZZ z{VF^&z^kRCp^~Kh3KTpvf)$_89>K=JG09i7u?6NPSR}4I<|RifV)Vh6ODFjv01r^! 
z_ekuP1tu#~MnYCYrg6`QR=AfnQb%tXCk2OySf8 zm)O{~Lr)eM1u3p?cET$Wzdbx#IARUY>>qUSUl5V^L(mPoR&!KI!@<$9AU~fnED>6W zCSpI_EWDAixXN3Cm|tWi4hoa z(1C42%G}4g@QFw&i3b6sW$)1CzJCudEGYd`*6+3z^3sqoH4%c(-AH%|+D@5Fzcykb zBQvRCYBB?egY_@~ke*l6iy=%*wZpqh5G8YgX>OzCBh8By_Zu2w~nQ|7D|s zg=22iyp1ZJJ>1tG3CvxdiJ$H&hRO^^m4D7^pG;>2YN2Q&sZ`tN+uiksz5LHIxQ$3w z9iNpu*iRQWQ?C*Jg>o4@*->|-a1^}v1Itzq?hUQ!df!stc+cye2-q*k{S1}Z8-8zj z3KblXp6RHm4QOEg0o5iH(wfCK+8!RFOb|N2r-eoj>?Z)&fgVom2e@Ve*ii}$Lf}JG zfD76pm;~l(m%+U&7>wF=><4)yp~?4SV+N{6Y~LF;lHj_{0;CZjNlTAHrMMW0v9e}n zW(G>Ce&eRRQ{jF;nyjtLZWIF{PFNoT6L|L!v^Q%xJ^&=W>jpe7oc6y5v^3|TkpJG4 z$Lis%*NHh+hw}0TnXBSV6Ok8GS_0Rnp5}U&0|L$?D&IOk0$c>)t~9M){PRDFPk)@_k|9@=AU6$xv=sp%M^q zhCwZ^0ui8CW8a`4_YCe_2IXWfdHWK84o9Sd$n6oMlQfPJjru#y)zwED!NgEmZ1j0< zE}$_Un>J|*W;C$-FmYmr*}wrn`b`||h;COd<>%zsI5|c4RCD7Kr=tCg*}&@{mztI~ zv>m;yoq&&*M7kn3D0hLK#>Ls0=M(eyECA{J8?I?;HXYSm!fc5qHUaB#JeIu_)Nqtt zRgrmqQ{eg+lHUHSqry4{EtTxnztg&Jm8XcatyU@4I2gWE&)N~9Qlax4o#*a9o~Ov7 zJVNgo&LD&j^E@Obu!;-ex4O;2Nj0H0lY$S419SZ>3CwL?q@PHXS5RoyN|e{sMbHMg zvB2DexV_mPrUtX_lws-ZmH+swY~3kOMek*L9IOeB=(bn7W~Q2QCFq(l8imHhP!Kl3 z&cQ2E860P&?|OE2;gU$*Iu$>>7yNb-qKPX!?5g_Xqy{aJzN%ByMy0%NmR73KE{i@c{t9&7@k@>P0~rnki3JV2*#BeN`)E#t zJEw%vgEaf^#|MBqr}V{Q%L`LfH=iJi?(q`1O7>)x8}2AlWZY+8Q2?-2YTZ>AHqb`L zJx0*Py5qdUsyT%OyHy!UhhH>wx#wAS{C+{U(B;d}T^*E3Q@c^#gG8$pgZ-0(%dNj{ z)T%+iRcmYOxUprnu>#vMHImSGU+_=bVaBNchOda?eH=XV*=AN+|b9_yoYGuEgHcQ*1~4 zYo(WmoSfX+>!?3PZfD<02ub9g$nI3yw9E67kp6lKwVg7Ki&YQGB69gqXZNaK7Rf}aMppQ z->waaPQp`Ax&XJL{nICLyKhHKBbhmLD``~qJlm7SVO+o*u(9F15dQ4hC*i>fm>B|R zO@E$D#_lJUBQQ74_3o5Q;)bgcIKLFRJ>JuI~1^ zEy;`Ztz}=vi5N@#-Z2o4l5zq<4DD-!Phpg-7G!6Ij(pGun`avE|9un%_{a!^?NQ8$ z&^r~U!cv4C1D3eCxh3=H-RxS$&B=_&+fu#kNbq16k0|ouD^|~7zz4h=Uek)<3Qj01 zHe&aT%H3*JO0vNAtL+pEm!n^6L~Ja0E`9t6J2wHHl69kU&was)I8;){;qr-0*Z>45 zg$7~cx&q#J4$4G3Gzmd}auxZ+1TB+KUZI6p=KzW5|XQY&n zUJTyrwCrs5_oSV**oiYepXdI1uYS@ga$L6a_b6FIpm`wYgnAATxmIqsb$BSCs0{lH zofNOG|sST5F#q1p#3Uc`C!)2+3t?G^uOkw4u$1B!y{8yOQ^jp{Sl7ld^YXjU_II@jhEiqJ41 z+*H!Co9n+L;*{PE+Qc~;Y^nJKrp|Zo-Yt>t>p0$eR39ntkc=N}gF({2bIrjM_cv5l zRpl8Pf2NY2p3Yne0AQJZQXnY`Lznh%!lgK`V{yiY#kO^ZA2Z6y8~Zi=Rg%!x z7h%x5DgyvYO2J1flG{DtQPzBkhIh!fQBOcoGo%&=p^ALrH;?MO!mKk{OTh|^7{X;^ zm~@E=xRCVbrxN)7o#RRzB2S(3;@==Wa6iswG~UM9>-{dum-wsjH7*~7FbKaLKBA&& zR-h_RzY#7!hRh$8OQNz(ehk+#zgXV$7F5VrnUbVup2I!DpJp43iPalb-XjpQnxn@3 zI>qlDf*q)0iiLCvEp7=_!=)`jVbsW-ubuel)<^0u>1^U z!b3LN)S~oq^9yGcmO1@a$-@c;E=^_X?9TiF0D>eQm!Q)_3bOp0sc#ak$XOuVeDud} zNpuPBh(lY1#hHL;L7t%yZvF!^ z|A#>${F?s}Z@~ZGRPleoX>U^?;Xq4G_K6c}S~zk4B8Iq7(@>Y*FJs()bI?3KT^sTn zeqJ(t9|rQYSIs%@mE_ruRGN81v&7E95&E~^o)83Ux5$Wyj;=05MJASXJdT}~|3ci{ zGD+h@(lJ!^)uYMF5ihS%`9~6(|aJXL^A{$L+nz zS+y4_xnKt-Y(OQ!#<4Rc3W{pm;1YuoivS`ApbOlweu=r7@6ukq3X+7mC#Oa7KP*UW zSEsD3{NBa6P8Tvw$6ga1gX19VD8l&G()+Z5j93BI{SROm6VX%o_^ef$`o>IcJA-b9h)&?&;0ZoY&5g99RD8t@GS7SX}E&@(6px!S#mA?@sQ3!0qfD( z5Bl-Ww4JsA>kvvr{-_BI=P;J0P(L{;3y~}~F;!?F{?NPr-M?2NJ7Lbv9oJa%5h#JQ z54dXBNs(cBWC2~*6XXFVZ;U5M6tFhf1MW`tGlG#8=q_T}f&J<&R_Q!{x5(Hl?3W|# zFRp;&wY69*=>wW%3?JENQOsOe9$`S37XwyQ73CFni{e8auW08xy+9U$ITV)GlK4+v z0mzk7#po{-$^jL=Ye#3N6yaB(xoaimyZhVTcXke#p#{G@`x!ky`I-c3lW4myz!8t9 z#KJ~}h@Fd~KBZvyqnl0D5U+Ra_&82a6;>#~${^Yh;&LhJ~% zavbcoSBx9tuL8Uor3mlZ)^t4aIf1|2!R^A5#*12SRzlfM>cf~~v>o@qx$gh>RcML=0%Aki*S>hi?cuD&oc;L^iJ3ONq)*gj{{)5;Cy9<iIxMPukN zNi2$XB6-rln)KSU^-+R`r(gFzA&Xrkg^7<&$+mf~;8Z*IR8aaleMd%mdGt~tzamYNLAb{aD=wIVp^F`(_4X{WPRRnxej7z~R!}Cc0N3e*&P>D1F1mOlBGLA6h;8#`1ol9CVq;|&QHJNTGOc)%li*5`;5@e7}_>ZsB0*l)#{;)AFej>J3sHvKYVO4=x*7v;U|#^<(4 zeE^P(f*T~Vt{RBc8`%&F%;}e@iCK}bULQ^Nmll99O8yl}^VDe? 
z0d+V+SF!}x+w*g)Pl+h#5>ySNM997JC%sQskeY)YIC{*bZgtE3U8=o zhGkH_uzUu-&S`2rzdB+**h^8M`@MS9+0~_8ofKZaxHwcH%EuS&eazQ07$Ni|3UwDj z_3yfHs^uqVAxz?q*d7R4_`Jbl|5%oQyAnwyI2bby{D`0B=L&WorD+IQ7BQTQPMuX1#qY?mVaI%A^{~)z0(`<#PZuyRSeJ z9N#0MlkjYUof|w}@4C0{?(SPDZLp})Odv*zVyb$1;bECQa^$<|;_U2;u)!)H)LwgZ z1Z;by4_@w+v;3#()^{Yk-O9DjM8deIuZE+q7LoT5UzcX7eY|nKnH|G&ujzZ0Ga^1N z&TMfms0!NRv~9RZ%Xk7l9tAqEU>93eyX~7qb46FkYlp89Yz3N;kv|C$PKYJbZ7fA7 z-D{~)Z0Mn6rP8`y_&J9&OhDVjdY`v$bEp-==qd;7oGoCfriw7(L`_;@;nR&8S9w|- za*U#i3h`(Fg3{K)!XRn4i$v%ORHF>O$J<76NFdV*)0-}he|~%M(j_-H0G4f%pFXYk z3OYTS1i8>ox^2Q0@E(|8rpkRbnDk)2ezFndn!jN{c4V13HCbJuM7Rp?K*OH|Q%JKq zJ0@6tT!z4i1b5qO1$9zq1%>Y|`v~7wC1cd<#sbC63;utW0S7(4 zs|EnJcrMIo0v!jnoHRu;aty&qo#cKQn4_Kvea$-8Bg7P@}BEJ@5Ql6B%C zjQxN!jmY>CE)|zoOZsu(gY%@|@6R5n|9aH;qL~d$xw3LpY!~+!l<9(&BPAK-9Src# zI`9Z?Km8Xu5T%P?z_X*_Tx(L+%aR(D0XuhLdEtkmT!9AALoW-x(J1k&+J;4|TBU}n zt+C*?wzh2!A4`NjYe$J?`aLN}f?|UN+46g4%PP#-jRZo%-zNcYZ$}3!yUO?A*te3d z^vDsa_%$=lRP`Cv>p2iwnqt1UCXD)s!vxTN0n0D<`S_QhL2yGVGKHsW5IDLXT3S2J zL@RH5Vq#qVPEw5`t1J-lhTcD?hr?fGG&jjjMACz%AR`^TzQB$ME8+c((+dhwop{PQzSLfG%H; z0D9Ch3O??z08=L6mx2+Ik*7$;;@Lx%#OCg=B=Y~EH@39c8BR2P^0ILUk{LmYxdqjl{v+Hk^qvwVed$0-O`=Io zkuzXZS40qmQM2UPE4aE^kXSQHuvVa#D)mwDve1pso6-*rzH;%4ozi$`$<7vN$fd-h zoHOcZ?Yfy~dU2p|WkGK@GH;*I_cU2s<~M9;Y&8ETG;Vi&D9_5mYbK9rAd`rri|}?Z zl$LJkr?L|kyyJ=3eydJH5f@P185T}pkkp#8ITO3|P_$L+&Nm07@jdw?mu^fZ9!l}c z+lrK*04x#Zqj~8)rI_>9-nY#&wB`}EHEPfHte#IOExm6xwE4)gmn-Q&_S&WEPhf^6 zJk`*pURTM36iPK4o<}tg&syb`?%o^?h+Od5v|B}O9(FI+OHQKkMAi1(mQ^MSHX#!L zOU2x98LEz*@4qQ29294659i2J%`Ge#NWhBlYFY$i^M)z=H zJH5uduBA5CJrT-{)rT zwZiVJ?DF%rNGUvqpqtUL1q!B=**|D~K4ypCXs^tr+ny@!Wx@NZ?-%8>_`7kBRKaOI z&hTi7`xT9bhKBvU8`_`jtqjf86v?)WCchtU9_}A*6_ZM+Y*Vm;IOCrM2fx>h+dd*L zmGnM<=z85!QhL1I*F8aSsA_XVajRrlV7cmylf$ad>J}{p{ z)Fz@v3~&8j)nJZGNU*T7D!<{DxPN`BwRI?4xQDia3a|!1pyxpu$wU4OCfZW)1vSYx%;VsV^@thx{9G@-+htmdhtFfEZ%i z6-Q84IA>D`=m79!WC?`8(ZR-M0jsZboL~~#DI%Q`JAIfsEek`LCgQPSwdO(p`)a9w z5pUzj@+=`b@5f2vO0_|@U7skg-FqcT>R`2E`*6MT*akvaN5>^CEt%XwVNJj)Kq}-p ze;!T-C=>L&)vAlwD#MOio#!v&0Xcy|fs7N%&@^}T3zX?0c0A3#C*2gpM{f^4B)!c#{>SaC%(-_h%&_uC zzJk*TJbD_Gw^%X}=5*LvA+IuT>eKR_YhY+qJU=@--bQot=1ne!?{6(lr4TRz2R5f! 
zG|)OHpSHHOH7u^I_v$)c5eGTVd#<#}ywRtBPX$*3Eg8@IzIY#YI9i<&Y@sD2zumGk zx&b^+zvVV;n?Q-%Ow&rGU<(00rnAn%($bu2jU;rt2MA&vU%$q8%3u-$Ea6*@!nY`M zp>>lbyhO4W^eqT3*$DLk^CO7qciS})_CJl_l@a<9dyqP(0!-lhnU$0lgZjwNtZ z=(N8?(VZtsKqtsMLi;BBnf?e_11k~Q+@8%3<>>9~~uNRV(okvF=K79BGl-koz zlqXAQ@+qBC^I6?rp9^-OcE^38N3N1yQSmGmC@6DlH5pknussG8@YaKj34&1IzF~yU zg)Uty8+;gEBZTKfw$LEJI7fi-5^^*#2@ow12KX`|Awei==*l#*{OtI>wx6WUkWIuk zs3t-YrXWBQ3%s>Z5R;Gj5`OnzeP(l6FImsWz4k!DP2IN()b52_$YVv4#yI*Z-5_AX zTLSp)4_S8vNZpY%E`Njpiam|QjR4|1RQj=Gw=21h#guYsF-t^|r{S_q0M<YpRZg9Nf9r(4K8d9%B@qTU2aBfth57%>ynUE%RS0t|re3d+Z1MqfY;&HqMT;<& z0-BVhOr?AfH|VFp!dCpGw{5Apl7`ENIf?E?uew5$$Ulwpnp+;4{(3$-UEiKq((i=O zzxroS@4meq04tUt8SlklCGinwa2;FWrBsX>$3|B zG1tS&0=v(-Y%Bh7rItuS3U%bt*)RynP!NzwS)Dqz>Eq1dDCog3C(J&>;pS~hVrt^- z?1)Hrp&Q;j|5Kv#mqP9@)niaK{wY_A|4aN6_K`RFA`AmM9G>42eTzrhBe-CT_v@dy zz9r(Tq?f$++#3`$ZPmJY_fFx8Z?!2>g)}t8eDeg*d(c5aOPiY`Kx`NNDLZUJ)D2*c`?NZp60_)J9zn=K#zkaSy zgLvuDU%wp$Xm6XCsDEqh$&fa@0wSi^Z@YpJcA5?^Oo@(rb@~+D*>iMsXK7Bql8*g| zd+g^XHM~0!dwb>UqoXfhmJtQ2^#ta^0@3F9n8AIsh1p};F|o0hTOz-|mHY&`WrDJW zV*XI2nrKg7`20Z8j5_Oj`2Bdfe0eXgeOT#*am)~1b3R~_^RTZiBO_A|JAjVoV>q*)72e zi^nHMSV!=jFYJ{S65AYfOz~VnkA_D_r}F~Ku;?f~%atk^USi`4{MdpK- z-y1Z{eEdmn+_<5g&{WO=6?cHzQQYff*G6dg%)&*QzD017(5^=ot=d-J2?Xw&1W5$20kvc6TV7=Epci!P!^+RAA#p+Xou5XXNtb1v48Y?yJDQfEe8x-&I>tQeh zm}GV>^iAhg69wk^RO+VFYU6oL?*`fkkk_Bxzr&#-6Vgt41;MQ~1&azP-aF=J!h`8{ z`?dYQPC9=jt;*${4D(mTesl|l;^dAkAps95ZC+jA6pTI3Ee2vk%6ICfRb^!ZR--B6 z#oY4SXoMZ=##QRFFYj!1$kBb^BYz^e{aoo;Mca}jP~_m#N));O1A-iESnPwt{A|eR z$t#GCdqpQX%Rl#g$KVBwoT{jKiM|kPwv>Py>4d$-b_f-j*-VT45R*nTnbP|!@YDSK zUtqv7IZbp!ODl0f@Dm{JOiUisK@s8M&jnJ$dg38dPKV$YtjQt17Fj1&RaH5I z{@y%~i-}@vfB)UuOjs~Hm=vty&O9e1I+f(;y-|)K?0WVgNAH++0R{OOS5d%nC_)#bQeuerD{Pemam55 z2E;jP30U<#4|GZ-a6I#6Ysa)`qYas`IHvO;FByKNGKHh`Y|ev!uoSsEPB zPAMUEa%^}`(|ZN8yHMe1xg+i`+&1t`t1D-3-*)|#2@CKnD2MU*$oWZ#2|d<2>Q)2d?fSejz4$2NAtwQ!lOA+ z!Av2+h%%ny6qo(=Qf0IvRsE90bM%FK7Tf0%qx27W&9M>tN(z^Y>uwG9j}? 
z7cHco^bmK@P@RZaAJrG>cC&mPk(P$JoXBgC_9FWO?@}8Oxo2hs60JXCvc9;t zd^o`2KB51te0nr+L0;*7e$yG$;cHb1@HkHpHg=_gNBDlzl9luB`{_*7{>*08;Xc5c z9wk`A<0E=czD#ByYb9r$F8EJ}2*=d9y(WKqd91V{OXHEmot-bGZoSvvcn#*=H9fHI z3whnMor-9jX?3aE)xJEa7h1PWtM}yAThsIz4!{vxT6Tqpb4ob4w3$u{7_G@`#ny0&S@M$)p=C{J zJ?6MoBn@_mRO$wRqa~%=4l8M}qjZreOl&2{^yj&Thz^OV@PT~pO$DcS$p)^1W~DVC zGHfL-G&L4%6C8HX&hy4+J4*FnDG=4|R$qSh@L-T0lXob2qq&e+QCUaS!U z$dwqpeYImQRc@lJIev1ja;&>vg;XThFCiu-D{~qha8?>(HZ(38l`Bte9yMJ(JCtWI z<8xd4(?_wxBCY%0o#_g9-^L)N5X=1_ryE9>`{qrT#(mvHtW5*dATddrbhJ6x6k|+$ zoZGZdPPQ-n(Zs#EDHUL%LxFG^Y0?B_)BV1gY@7c^5>E?H6?WW*`wO zkT?&P&XvYqL91S-Iq~AsQ|+RQVnY)`mo6!7@^Umj=n|$kdmrS@=$7&8Q%#ch_D(() zxWNA6=&{KoWGBeIy3d;u1!}Xf^fcGisoYgy7C_$Jb#wa=;9~gX){xQ#aC{RMzH>*W zxz?mWd!u4;aNS2M$+zi=rA9vu>HdtG&vn_31fJ}7BJ@8$T)3<5t&Ecy{!%x^-$x}y zn+aKs6sZJ?ybjF%SdI$1VlHhtb}<+$pG?ERvn5r?$M+^&dl0-rgR6|d&Y=76|R)LAr zv^$mO!()vjhX*c+pPZX-sEOc3Bo31bxlF4MyuK-GU}zXGG=9m9Q94sKAC!hh_5wuz zF<(e`xf366!3cHMaS0(86QE$7)p1SzWKzZ=*7pHTF9Ms9D&t!%3!cP8J~XLn#2 zrHaEBL3g;-ShP?MT;JQ^yp65RYVL^3gQ4bq?B#tKJNzu=)KAynC1G(yrwL8r8^m=u#VV07)$|-6lSA3hhebyNh;5*D8Zwi|1=HeC|A*WU+ zpmhgTwcFXB6~v7yw}Lq<`vwQCghHtKEiirr{R)bT)T1)Jl*5XL<38ITb+erAZ8wf= z_RHjW8HeW3NO+bU?C6Mkfqla|34a;TI)NON39^eK(`7tNOwulqHfCl*pjMnofW1E| zb-Vt4en}F+AW~)yOiOqrH+?@-KkoMda$T7W}%D?XiAcsI!Yzj8-O^S81Rauy(z)&;RMf?V-)}Tpw*8uLOK}{eh3oxPox`pu7O& z<7rF`l)=C`GE;rxfaaIS`SmOW{Q?3uMD8-9P`cbwB#-WpLpdXM=K2ZuiKim#*x#~=4|6%WXooSa71uE^9T0ne|wA^V{&35;17ad z|HNMXgJy|cY7gDDk3m5}b~cQP=WfvK?e7m+F~WLr>!;=~@zA!G@{x;+z(FBJ@s)pp ze8Hdk4dmmy%p&}TrxSn)2Bnk*8^r>s?O+f0k%T6yG;eN@*nCUSAB#*20>j5{9YsTBK9 z6FoBIZmBu36qx*Jha4qkx~{j(%i7<9H@FHOkemwKOU)t=vHTKa}(hn1w=vtf;4ySfM*1LsKOclIa_u93vPu{+| zZmlL5@qz#8we^jM74P2dXT9GF5x_caU_0uWLwZw!a_lYg9NeR0@N zz9plZ(4=Ks{?2*bVxBz-<+I)S(#HFdpsA77*YfRFAM)a&B4l0t`r0entel*U?vz5q z-3>@>UVExRVZmQ~uC-y1X;t?ny}jY!7#faGeUVvNU0rcFtgjU2QV8?2-`f*b^)ImA zT_^;WApGftzcvmsb}kk!(kJ$}iKr3tJlh!eF+Fzwo`Z!&*5e1T;@4PTPjU9FR)vzV ztNA?>D&L!5we zX|qKgcCS|XY|VA8R`F)4zTS*~8WEqIqu{l%FucBA@BR2?`h%u&tL zsHmz+I829^)9mhUuWwD+YW1&t&rH#&@Qk;lvt1l0wowS>w;$~5VpVHdyHdG4Ih&iA zk?uqh%+$w>Mjs(Q@?QUhEHRyc)8}ekpFRDl4Qp3LDrGh?f4ZpN0U#dU3?BkJy5#31(Ts9B&|tj&oo`Vyj$h` zy~Rb>i{&DXh-)>Gw$vmWDXCzu+*4y*T%6A`uei=(iZrd2o;$(=a(s)HA0J4W=qA&1 zS#5Y?&Gqg@AZdG?$d{g>2Vo#=27s(Za$X?B_4)NOwFx;^Xt=`r|Xk!jDhrVGPVKXAFxU zAUN9TeTjIQ=->E7F#FZYT)J}DjA#rbrLwvdGp=W%S5=8oL_9WJ;wO>uI<}j^Vyc#c z4DIxg?3Y&DccdcWtt(pk;L>h$P2Qqf?fX?fHYuK$GUYPHVn~wHy{{PV zMPJy-4qjLWhjucx+w>fvbfO~0V=~7P57$hiE+VCh1U#q1&kqz`42=r5)+*BTYBDn7 zB3E@0)|XvcDjRPMDYCMuaTn(%@7C{+Ze~Babv(0Pt$@iT+P{|gfv|%^&g-u?*}s0d z6w67#T6b5SyP)yt1!jNbZoPQit-cCJ);2X!q`_+vEBpb2^=cY=*_*zS+Q$(`9}JXu zYnK@^+C|}K)W_jx&n$21>UG7urMMht`vsR4<$l`c)Ww&~th{^%FL>5Uzq~Y!E ze(OMTe0JRTTdLFa`kt_(`h3j%nK=6WBQJOvaVUIcgP7#*vt12S7rZIK&UbqLTMbRm ztGeh?L&l#M|H<6Y@z3i`vA9cFmvf5qispx^eub`|e^0iSXCE#;GyS2Ae3r-=<o<)mk2!>wE%%C9w?d08z9 z>iubl7d-V$Xj!j2>yMk`rr#TheHZiXerdO@P0>gym&_KA>2zGWo{5)^aJ6#Ksr{Lk z7tsfC59Bj5Tq-UZ)!9EqmKp{;*C6(58d~t2a*p6UweHX9dDJ+g55dd&DF59YvqO2J zxl)^3+kFzVAI$COAVPw3`KvE`>B8yH2(0Y4)5dR?9_*)hw)Jp&hY$;kOx)UBA-q3NJe?}@HyVexrw8gnex9K9#ZRIr;HK6OWC^^aLU9#Ihdw%`#)H9daMqO0X z-i(X-#X?;q`32gqJI#W<6Ee&-8kyo>UenL)Z9d$rLP-eeqe!S`hLkC4T`+~C0V?vx zyq>(#F7zxa@n2x*LB93Cge4p@j`=wR@v$ZII5hiz(=!qe=cq<%uHIzd>nBb--ZHY$ zQ|xyq|rB9N>w5_v3XZG;cuZhRUXe!B)Uuk|XmGhpkoAk>(&^KGC zEVbT}@_$Xpiph!-Im?RC=rAuo{qD$Bx>D9KUIz2ZfF%*ByN;2wZA#j620U&79~|>c z`9?d!JkyI^%hH6zY?d1Cx?Jrl)y>cd(ujSbMY~shJWC3BBE4^z>l0L&IgR3z9rK?C zyFG@+7iR4kjrs2wAF^n$*Ey!F(g?q!5-AW|YdSW6UyIr%#JwW*joePv)QkKbe#sn{ zXO^3fj^*8dNoZ&@n>)u5`AYLtuIu=$u8*?dL5*Nl^Ap6W$lY0!iJ;D=A404c#smpP 
zWDITdDL++pD#yzc?z5j0FfQdo^DAXS>279tC8LC- z3cYV0n62?L!n{vWVq~k$rCj=cbV5m{5V@#L|Mt20Xc4B!&h}e8)z?OCefO^@^f$li%p>y&KAJ;8X=DMUOj*QRc=_#Hj*E16|ZH`E8iHkpxZYj!o*kUE_k@`?P zoG5SmhMwBjvu_uU%EvTiUGcxx`{p6zXR|X$)8F`J8)Q`6U6Lq}2$k=hlA*lug6;*I z9_y7iSB`12jLeu^wxGs&EUuihO%WyLVI@qy|1J43G53PVc3mr)^7*-uC0y%+^<2mE zTN7}*o=e%$l`VRGyC&Q&f}oXA)vnkj#ShJ`G!)7o&r6xF95p$nR#denyVrE!*lsTU zX~AP5Y^?8~tYx##usOmT?pIK=S&JW!iu_F%Q+gmdu&5-%pb&kuqaH-0Oo9Es^iJXiGj6mk*TU~#_>5qC*#w_c26i9 zUi~m~!;kWeVmk$!(tN2fNoRz8$0h@ZQ`aXOPs6*Ut@Uk9YOO`4XM!mBU$km2kkt`c zlrglC*5hy|c5a9}+^7h>KED3psMgU(A6xm<9dC4}L|6M>&khp$e(AP!wroQ9Eh|pq zq6Bvb)a7ahzLX*LB>eifS3VXEr(L?){)WxnWhHyfj$gOrC`g|3Ydrqve`Nw463eUsrDoWI?fPxyt^(t(lM zCNAyu{i{6jIO2on+zlrrRmr(|m1(wq-0}|!?|eC-f9tTAHC^4Y-F2ty?)BP|v*W(C zs87q!4Rli)Dm2J;Ckn<^tt>MV|XXHs)&I zzisqxpU$vu;MhAKvn1T^9&-xAV38kxT}_g8{JMs=d!p7*WVb(uKKTHHWnzH^by1{k zUK-h>>OFo-P8mzr~N zMTkb&7s37ck<7)Qo3#WF)Qu{hk&So^@7v5XqE(eX1uJUafB!`C;m!9_O$4tc7C*EZ zZ>LpUjzkO3tJad~Gzy$4ntds)_}ZPsCdNtyx7Fe!t!j++75`|}HrJZh-gsjrU7vUN zS7Iw~njDSBUMZ!!*B{`m)(~;aUO5>xS|wP~@ud3dN0gK8qW1##C6wmrjO06O^iMyk zk9}fx%eo%cB;(2QOdK%Ht*uO$rh{$2rNXmsu6l zF&BUTRc!*HA5~HP_?1U74XSSCfw$i0Sk~2|Lzp_(g~`Y9F*8Y~65}%$aZ6cU?+g^g z-rmluQ3p}y(?akH4JF!`CbucoV~M9kHRo<~oi7C*3_Rb`&a zs$7ziD_}q!4%4$Th8j*G+S?X=r^EiA02&(Q+VwjSpsPeIBHMl;*ZDtN?D8 z6X^wO(P*A3j#J4vPBpI*C>^h+>FAfrRR1bADlZp;e6ZE9D?MgXTQ}c*sx;foa;dcCa z#PFJXXNZb3AW!w*|H(;ije(t60?Ejgv6#n+U^JM75Lg_c#G%Re-cau>O1pPP!|PZC zIy$|P8>+i76a)R=B*Usd-LD+4?wN4;_-|tr3rBKR#g`(vdih~P*kc{Ve$>J^VUDDA zIT1{GNh1T3W-?Ay$;e@OnWVSzA8t*M(Lb&2+5wri5=oOZ~fMyf?5 zd~g~LO1??tuMnF%^+2skl^W3x?J*yQscasLo{|e5Sr0)&4EBSzxU&w65m*#XWKiQt ziEx?0iJ)w5e$$)Oe=&6Sb*n=`0hOuATW_ycl%i|3cUr9|_ZtqIUFERQXqUknLO&)> z{a0^O!qW|Tt#@VYLYu<4xlHFnN-0!PIuIDjgLFulj5c630M4*REK6>wxCEFGo~Jzd zNwqVpd$RbFmb9cLElGsGe=mIg^Ff}}GEGa3b5kj8oJfmd*xxd&9NH*qQFyVrbw8!e z`b_6U0vKZS6L&e{bd0Adkg~06pfXg@?wzS);Gl>ua{&m2K4=3}6JvtKfQYheqbP=$ zQHE4w&Wi6)a&b>BC(rVs0Yk6CP+5Rhd@Ju_h*zPe7AUrRXR4~xIg!H8u#oq< z5I5k>204+t-sw)f;NErqbDwke_hMkbK*3%jY52916(_Ry`WuD~v3Ewm$RH=O{hg1Y zLh^g}?RVYz(BmBZvGt>`j&LHj%+%xd%h-KmuT3{o2|J&8@y-VxS((|1vq4V8WEKWu zc@_njMRjR}6UpLR_sWs}IOEDewNB(jN}%IQ6&p1Q@*RNN!)ZcR?)Dp$OsRb{HvMUdLlhW`Cu{>hTB-k8UU zgiz;as%?cD%CUaVsPJ>f&lAZmr1XYh$Zp9DL)-r&XT)%>ii4;`cu*S&q14Qlwr9fS z3#J&{SpYzEJx^Q%t5YW#Yi}_K+})B!y^&@b&F=Ix^iwzy;z1zWOpjN^Oo0(jWJKz8 zX?HeRc`$H;L#A7*MY?xz8V*W+#kJZ16|Qqiz=9^Xb4o`Qca~ml`vE6acZOWctZ6k~ zEww3a1}CC_u!qNt8vPHi=-xn#>%S#76=8&zRFmEf6`qsmmEuH>To1fJRC*ghEiSi| zXHi1aQ`IdmMYuzRE?|y498yZrQf5JIwDh257LW9FW*8GgR%ftLanZ^3p#&DqIQGsH zAv|3jyQC#8X-P{G62f=o)G>l=pU6bOgGJ*fBhcH}I>3f5Ev!XR1J%*DY;M6J<|6@& z6FCvd5C1LvHVD6GG_Zw~tEyU1x#dJyt^~_6o9DhK_s{rOjqa3XfZ z2nIQNf6Y7Z;ZzZsRe}RboYlJf`WtmKy=5bu$Z7uXf5BoR!UI8AnvhXjvJ)INDTZG{yylR9KVUToSon30ij`YlkcWSMb((FC5aZV&7?xcfN+x#?H z#nwGfq&Zj8;IhXU_lcZ{R$JjuCNEma#%`Pw36WRZg0c{8EV_PmsZOAwJw-LApWp=| zA-$xMJk_q?u_X#rPVZtA2lvj@!5kHT(epOjg`J=c3msE95fWSeHZh*aiFm5ih^K5- zXosv#gVS)8}^c3_8tHOwloI?}_OE%f`2yz_ORR_{B<$i+OPt`Zce9wf26Sn~JT|@P>Gr znn+-&pbDC^05vY9(MY}7B*dfDnK{G`)1hSgr^|_yI)brW3Yny(*`wn3-8MZ=B!IzsfHeS)cMHE~r}m|*syf1n6zGC& zMmDf`QCXY>kS2nLJK!obQV53?^J-gH{BAjsaYVX4C*su@5Nmh=eo#;WEskM$u3^;F z0L6)%NG!E~y92>1>A}Koj4Eji=$)M`mcTsXp^z8H1t((qwDOQhY7%wwIb(JABag2y z&Y(zqnl~{c{YM`AH(Q7Q!l@$r&p6%wqy9|Lhl4*H3sRg&co6qEHzdE^u>HALxEaUc z_x@AA{&(P*3HL^z`Q%2n}`HT0`E5ur&+qhZN_PKen%BqxF^%pp5ag0V1gc`6o^v}8A_ z9?r3#I#7ivZa?CWsTGk>Ps#ALjvrfMok4+FE?^fGm}IPT13kX#JP}sexbyK$4ds-# znXxv$t;ytOW}iah4=dTEFi#!GQU`*x{ScbNooeXqb)HD8@#H++YSqX=!Np$aEXrbO_8YuZLCt_%5gP=xGd*MWuw4^01>5z%Jv8%ei z7mJa%4cvFi215ni#^^B63=kv$QYIq14mGEuV{_Xx3S(l}qa-LRoJbGj(dVn+_og_J 
[binary data omitted: base85 payload of a preceding "GIT binary patch" image hunk]

From: Dorothy Kabarozi <dorothyk98@gmail.com>
Date: Mon, 26 Apr 2021 20:19:15 +0100
Subject: [PATCH 0989/1270] DOC: Update snapshots

---
 doc/source/dev/gitpod-imgs/NumPy-github.png | Bin 0 -> 5368 bytes
 .../dev/gitpod-imgs/gitpod-workspace.png    | Bin 0 -> 118160 bytes
 2 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 doc/source/dev/gitpod-imgs/NumPy-github.png
 create mode 100644 doc/source/dev/gitpod-imgs/gitpod-workspace.png

diff --git a/doc/source/dev/gitpod-imgs/NumPy-github.png b/doc/source/dev/gitpod-imgs/NumPy-github.png
new file mode 100644
index 0000000000000000000000000000000000000000..010b0fc5ea33da5ec6913ce3b0da8eeef5097250
GIT binary patch
literal 5368
[binary data omitted: base85-encoded PNG payload, 5368 bytes]
literal 0
HcmV?d00001

diff --git a/doc/source/dev/gitpod-imgs/gitpod-workspace.png b/doc/source/dev/gitpod-imgs/gitpod-workspace.png
new file mode 100644
index 0000000000000000000000000000000000000000..618b7063b8ebd7496a7d7975e2e8472192d12cc9
GIT binary patch
literal 118160
[binary data omitted: base85-encoded PNG payload, 118160 bytes]
zx*WVVPzrwAD$JHrb;n%TQEm{LedotzLMQqa&(^`OybrublCBbT98};WO3H|o(%{Lq zxsQvnGI%S5LCcVc4446A6P29#tx}RpLqh{ufKZF)9FSr=CkhJ>kIWN9SvJ>ceKgTy zJa7Edg&$v?Z4$DBf_Lw-ARK()$|ha2CTu?mv(~SqUzQG+l0Y zm|Et$hJoc!J?n<{>+S^%q!%IjieC72vkgC1d_Y>wb*e2@L8*2_IL`pHL`<&&m%hcQ zkiOKkF*`zs5nH)8xD?2;&eflW?9B?Kbq~dFY?a_bW?))01+aCjp{iPxZln#Y!T!$l za_tx1VPGOpd!u|z{$JabIxfKcfSFfR&7VGGoy4&)>$@1;)$6trSA6@kB9hG#biXXI zr0%@dU4nGb_3}YDN|JNm5D(KT;uc$5l9`xd?BBodm)6|ArPkGj?4{ei^u9A)HRUJA zeI%lhiV`I&v#!XXZ!nu+bB$aag-8ddKe{+HY_YKjE|Jg?lapr zM?}Ku8X_zol=g*d;F|w*XNyB3c5}><3BRS8LLz@;n$fk1Gu;n`?;=^oA*^B#WhLn? zCGW07jJE?@q~Uazh01dC^Upcc)>?P_J|FAKvAd`koUxkGxf9e$q+as-RJlVb==ske zvBUiRHDQcaQuJXNVuZuuP^V=qv``?2Hhu1{)pK(E5W3b!{mn^J6CV?YiHB7_b(%p+ z8>1=SW!cr?lqQ#?H}RT6$UZgBjG56XJbdWss;_DBD%@T*XG0MgNRiDwzdE?nhoi2K zqBXT4p<1pkD@Gh>QpR5w-btMJnRq4^DJpPjh)bb&8-s>DYjI~8`=*+olVj#T?RTQ8 zWz1eG3K@mP?Ju%c3HPPRBMK2TU-2;{n1k4g@^aApVRd!&>!}dV@1M;rED9mzwRu#% z=!sT%^2(_kX~CKhZU9O){>%m;roK4LYSOrn@NTI=^dxMRl&N;VB;b(h(bTH9RF?td zt*_K|0An8Po;jw( z1W4r3CJs={QV;EAy2T~f_7@xhVr?5QnAWgXv_wb9?hGG1c|%9nw2(Ix9&*Bz4euK* z<)t{8GlUijREn5|Uq7!iqjPH*^GkTt4DbNrxi^C%-d^(mJ46x;$v*@8X{#nl@RRG& z{UlW}{_L_=INeWM7po7r*WZMVGx=8-lnO-A`tr}xzh5lZA6_tu4+EQ)oQlenM2sh_ zsHnLF)lJcxwgDx-Gl+=wu`2SuTU0|L!Yhl($QR?H70NtHkBo1Fem6*^5?u{Xtm0%( zn2E09)nIjb>Z6K*WqIn%*ncR~)M6RnFw=uLD2(|PF!{`LQ%h#s83cukn1`QRlY^0h zbuUB+-W+(L4X;19)4hRP%eY_+LVK@Y3Je)^^Y)Jc1D3qgEfUMu)*!QXJQ3At|?~^=Xd8AM_zdz$W+7S~^ML zv$de7L8kWMc2ecM`N6;-U~pS;`VO^u2#gYR^-ipDt4mE-?SAEl7dJ>xSJw%{cn)B ziz1k-cM0)eCGE4dAzy6mjb^z&Y6|Rp{lEdABZ%~P=vO?QZ+VAqa&pq&l?OENd`z4a ziyj+G2{sH**%M1c1f@K3PMeFQp4~cU-oJ3gvYVQc&Ee_X;ty!q_99`>j0p9y4FNTw zVyfGK0_IpD^5WB%M`35(M~!{3f{omBFU34-W8J6C0$$0YJl69BlTV6_tSXe~#nTWQ z@VO6(SN!uH@54l+)r z7teA=x9B=#vPDE5W?T2#+!qJG8tYd4U`>ME^CBaNCgM)wU7sFFFhjSz03*BOIUA{^YZdm?D)kLCMPFr4R}^m za0k_axWAeku<)a>8PQ=1T{E+%NS-1rFxv0q+=FI;?Khc<5^*?SeS+w{>KXC~9=@!! zvOCznI(gVE@{5Tqn`O`qyLxE|{Y%XNa5^KWoA5D8FCaIt!nMm;svp*6n9@nn-!J+z z+{QP(i^n@N+4l(4SIWdu|8q~?w8yt=TxA1MAQ1_W-|rz z==sQ(FPHZi(~RlD8+t%~oQt)T{=F}+VN(6O-wP82UXZo9q_7Q8ng`!=@yar(qPN?<6R+lv@G9eDZg<-H|^N4a@0~ULs0wpG6i7y)nsL5tJ+8X#8H4FyrUn~5{j|GL+HhWz=|zILWkTB4-dlusZG%< zT9Le9KYJ?&2kvW@r|kfdtag5BB?sVEE65pIwH9@&AP~w2kx+>saIqBRy%fZ>1%%nS zPgplX1-jM+AAlbpfZKw(N!J3q1Uv1&pJEUJuEB0X?7LchFf;^k*v!^eH;r-@&<^bU zHttW6FX1-ZM3=$!zjuQ;*!wDX#B~~d9=fvn)`1)S-gWfiZ14^YC>=1f;Rq5Y3wJw< zjH{M^%Gz91W4K>i2MaKAKVKv`4{z!sUfKq8tN(Zgt1Lp0|6zD3? zcI@u0s!k=B&K zi^5MzRS1mq-+PJU&E}vy$=>TI>+bpF`iQI)MY1Hdz7zk?l!(QtFc5W!AetmoDmWOt z^G#{NI%4Gpc!n|EB1;$^eli2kF(=@3p5(a|^dL*{zyIF}uYH*Jyrw)t67o3kY=5Y8 zh7~dy`Kp}&po_$2%?49Qz;zs`z@%zsAA#mP;oKhV?=$qGFb@7lz>J?SbmQbHb8d`2Gj`xS96)) zD{OaW{RRC5-BZvKe|Mc#1}$R7SQiS@v!IJz9=5{J)A`s~%mQ{&Mq)14y-`D{2{f9F zd4F#YfJy;cHJtA)i2P3b_X5*aEUWbIki_oTuapp+laRKiBG~);n{#!rVi<(dLPszF z5XH|9`ZUy@pE(hck&-$<`(S@o8LWh(wI(h*{3#*VHh-qCAu~MW&w(fkBD1p5J+dA9 zHMGm#SF*)=S=5!x$HxbhgaAC+1c7{8n|6nwpyCgfs0Rbl2DdsOG5DKil?~6oODc(1_1wv6PDRN)}jM0z6W? 
z4<95Ay>EzLep0FPy)7~ToC+Cf8PwpjbV*3z_oDi@h^qEH?BR%r!7BgHVX@j$%^+L@p?i+m#M@bVtZrx zi%NKco>Ura5rTxbudzR*GBl)ZW#E6>$zmRt*lAV^_S*FHbX%Lm6(6f(=@f+hRueJl z591(g>FsU-1zVsZ0xMy>JMt7SvckV9cjQ|;2GRGUQnQtlPY&-H>%Nrgy8gDMgmn=# z)}a4VX|pVEO_&uSzE2`m5^g6?t8WXlodC?iR?oB1rv&s(2*Q>5Iwu92X#!}--3{VBb`(-#rTndpg$45m zNvXUSkgctQm*QD9*b`%QK_^ouqH5*|iZT5mRzOeg1~I2Sv*gmHk#gGgCh3Q*Bmkf1 zynTYST)5Zb;;yD24vpsvc}Ma4`g; zqGU%h{npRZ>{UK0zKU%Caskt}4J<&sfOArHAApXj0Fz^CAkUr#!#uJ+fG2x;;#AiQ zx||4i-rUF(-Hg8tI?%r-P4x3P^xmbiIE*eu(9$M>;wy#l^l}bzzVU*ysQe!2hAJnj zWGSLi-do}3_!#t(PM{q^k8XQtWP~IT46DFE_hqz5Tl{D|f!;maT{t-bRe zcGzLKGrDDsV39abj^c{hr;!Dtu( zb~<)xYh6#tsHpH{n_=cACXDnHAi-!8w`hNU=JMtJIwcbF-NP$Se=4oMZMTm z<*Wzw!8#05^xWP!Rx54V^2bOcVsx_h(fjE;m#< z6Rg^JMk;u<#l*x^fu6GDpm!$J+X&)BKtRwOea|P*=jT8qSI7}%A0{x3$CLft#?Qy{ zbaWb5JDY8Up&n{q!E^#i<{Vz@4zeyz+jBQ83Zv)e?!f6jyu#25t5(D3&MMwV-Pv-E zok>K(=_Wz>j>jfQ4paKf&(C}M`}qY)yn}MY^f)qE;^v&pqb6I;@(l@ZSC9OuYpbphi-dV!UX8~2Q9nZ%bz4u8VlU>VEe`$H^}_zAPh&mK;h zDM-b2+KZ9?a(1!EK=y1T0cQmrEm%DQZoM9@>r~BW0k-!kBWDkczNkoim3l?DVY_9P z5&>W|hUqOWu}|yckasa+({~=eey^YlsW@K=N18xJ`E-v)9u_lv3h7!V5M}G?=qRAB ze^z}P)8bF$0TN#!j|jB6^!ERGy~3|8jXLhL0>G;G_XwW@hvv&?3c_ynQNjC*C10gF zVj3A^x>29mKb(;Zw3C<$)C}L?n@LMJ2H0_4aO&?71jb!e_OzlaEog1!%kYabWw=uG za^9H7ly>gn8(F_K5e~p3SH|_w1`<@bbP?21bqNbYzo(E|m+3bZEcQeH(PPSfJm)Bj|;CdwTY8Umr*5F8KcBa@hIc ziGN-Grv&Hly)k^6Gf@{vua7p94=0+295BSHTR;v6++011mfVp9^Uc~;8@Ho4<#{p1 zh=Q6L8Ic{}k6y#yImlO637+tDPN^rmKSF^KlT0PTiUuRdsuPzxhuIQ2HzL^7pv4Hn z`!&6Jv?TD5r6TP~XO}C%ZwZ1UeP8lYAO%ApJv$YFd=7W5&9_IMX$)xlGZU&z9nkZlHJ;IaP)L863?Nb1|Z$Kb?2V-af( z9WcsFLe+o18qsL~Jenm9Eh*ON^J;kk3Hq}5;V{v$%Xje8PZt92#V;qdlhgSocQGXU zcuFm-C!%Orjo;)2{?WrcvD6fUlg$yZb$g-LaKGMIR5{P*;|D6_pIt)v>1f_0O8eER zg@M2}SuBy{V!$(Ew!#W{%3dUnI<=%B;T4!La2`S~o_H%h{?LrW4#P(4x$_+{|V?*rqjXG#?aL z$0Xy)j+M9AQ?&ADs-aSHf?k_tN8XK$5_yP@(;9r>O!^sXFcbkBcosuz3;LFfv;4Ja73n?xmu z{IenVup7+?NX(S$p7KW9Yu>X8D#_~-sr?5>UTuYCda( zmDRJ6Ao!Id_Amyc{vg8pV%!c+$iT-KPeJoIPHy)mi4~BcZ0gw9&M^9Cd;R-DatGNq z>u636HG3f&4NCdv(z=a&cj2NY8YKK|M7s631O`&WYHq+$&9ei=lW9f5mT)-J=X<8b z$$nSF&8SfY{FsCJc4B!>x-UW811B~)y7V8wqhmTrx?jz6`2cjf=MH8z-F0t`G(V>l zp%11QlL5{<_8rCi>$SpKETtZ8$Hv8>CY2r#=U!_V-2#ZnmrQo2O%-AtNf#HA^kUq- zdWi{H%$Foh%1rZugL6zvjEzmO;Hp08wK1j{e_q*2vaCXW49*+;i_WTvtH4 zL@>54aX?A6FG}ovenvDB?b*gn^5gX40WJoUb9%}Y7^fM{}7y9`FJ@hEpzi&UZOTAZ*>Nq3okp+@KR{M4l>@LD#ZCbR2yPFN;7Y(`z&WcJimFji)9=MdYC z0E?_M06=QT>YIMQ^Z0;BJso;>t+{jms{N)sxV$v?W*nyo;TCK;moKNF@E&lB zx*YF~1Nx(_*=C@0f%OS+s=rtPaQEuftIY-p0Dhspb#mei1Kv%a-O=&!4UlObT?Fa+ zx95Q53AjZaU0sw^RQrMV@891O3cVkiE*p>S)d4yxF4K(k^!(rfFdD!P@mwABrWd%S zQDxtYMmrvE>pNn**N#A9&84KJ?-LPO$2550fl}T>Z8|5JOJB{BK^g~S9Kau#N0@`> zH=41W7aG-3kGnvSr^0si4H-ZQLesLdR{<3P^~}%Fi3u5E9{+Aoz|m_LU~>hKHi@DE zLN7+p&6Op*4GJY371aY3M-x7)jRt)HyYfSn%0;+gy3CAd!vb{NrF_ofKLm-r1KL>D zd6D=gq&z8FOc82<-u!0}U-63&MF^{D6-7m1-Zns(Lwa(!7$2M}#zuX^i(z>T8nn9) zy2osb-=QX8yD3dT2yA88Nn}>TyQ8QBiO)H(Ycqgiz}Ny^;^ie=1HA5N)6qcWy|u~O z+N>;ZRw-6>98g7I6m!wms0|NqsrXHf%FE%YA5$PO1+UVlhNZLn?x6jloSZbI;#wsN zKm7?t-&@n@LB9Q+rcu}^3MVo$a-&6Rp78=kY?%K5*K_Etk3Ud0)MnX+n^|=Wc4S0x&{c9tDA!ARkTQg%X?J(tl1QN|RE{55<^5RLxs|D)efL=FqMk+r07snlu>?Mc`uJvZR zQ~2t{^5`bIx|YI1R$;@RKa)mL2fIIoX#)8lQvAB+FMwl-ewB)+ds{)Rx<*DySH~JW zxncl~e)s!D{`V2Mf$+v})!8Oj&hESs_-6JJ57e{J*I>kqmlG1$!Ey7TE=|8XPuJCJ zP97c}t{PL%{BlF@2UP(=F}IK(#7D1hGIoI}^f;Z@f6w>$ZbcNu8uv0m5#sMr0>{d1 zmg)_HMp3jd>6sWzpt7=(sjwHWt1pr|%AduKxfepdpLSWY<*Lc&IS9j0dRl-FQxomk z2yHim{A8pD#d!ogg{4F}!EU&9&%grGT32${DiSTL_r&s~CGMLT!~gDuYXI1MqCJSC zhl7^%(L&u;Z)tZohfD(=f}u+7YY_L^f8~pF?|9EudXnMk09_KjPkQK#!RpfYHhOYnJ2z;)c7A zpFBxkzehxL?P|i8z?A3PdMV5R>r)5ds=Z>8lF^+uvw+vcmG;V$S^ft*Oimv@PQ&h9 
zvpj){824V@h05(-Ab2!)^V0>9?$^77m4mU25@lq?@X0P}W>iSP9V(?Z>c~c$z5N|w z{2l%aZaW%|dmX#KRRNYXq`}i#S!r-`xd4vgdNh8u(amjalYKv$32I=qW44{$Z;GhYZ)d_bL+OBea0d>pQ={Hhrsvh(ofNw?{l$Z`9O?3T} z9-XP7NzU=^PR{nQ{?;^>+$OWb;~R(Rc>d;ju*$f5!<$)7rG=tz!?Fhf1iIUxxJjo_ zBY$>7hIwW0%%9Y=@bw{UFdm}h!jE$yxbv^|z2TOs+@Rl_=vWO$DGtBF^;;Wq;Q;0`@C{THQxCvCHk4erz`YpK zH*P5}!M&Jydz}j4v9NuOu{TpxQX00Ue7D~b(l_V6=l@&VQn6siMjJoB6-xHs`c*eb{xFQSC!*JU9cDPli*6 z8C#T-H4s>apK5}BXvzNh(}~iF^Bi%|m96!Tc-Q7z!@@nl zhiI@DbC=DbiL>+%4;;SW>n=)*v9;A70t-1RU-_6junyfHDSOQg$^tmrbY1WF)?wty2|&q>-Qj zM)Nne9F@Y)xY!tK>VW*acj)zEUlP`OHmL}fPyInbgdY-OuuU2M4ElNzHb*IcniX1X z2nIM?@s*gg1!gb`1L5l-LfFR@%BUVqj0@P3o6m*5R7k6;TxVP95S8CsTORCPuC}UCS z?c2J?UfPuA)8HRY2xHYhb}kF6s`xg&bubZl29Pp1CXCI^tpw45E7VTw3Bm)hWe2Jt z+~cx8{s99o>lKN!pKA#Ob)R>D{&(Zv7?l7tNx}$%s@x((lOMR7Rr^AxnDAXs@&QHV z_3`3GK<~5}6)gNLkfx)1y$C@A7_<3pX_i`Xm4*ifq~Zlm0g-zwcQL3YW1NYedqsLh z2nDFI@`~Mp4!_vUG~O`RLn}sgP^?qvVHVFg0!_9fSDURI$!hs{GGSJW@FV7s* zV%fDZCji+`i%;sthHALY>QI5P#Uq7qaByg4wshP7`i$HHh{ri>Jzgw8;KwMKQ7;>D zwrO_&gXNQC3F0ZacwwU)!w8HLL1_QX1WUUG+O8zMvX2Q1BU+&i`+NzTThJ&*WT4XH zT^(BjXgY2$P6A;4i5VTg?0tX!y%C^CxQ)!sk-NZ(ndQN@8>qd4*fUGrv$+eP`}bZ5 zwgaO_cnb|XyJKht^H(x6H_;iN=RgIR>Fq%te@)VZAY6|32Xk^HUDroS*@Xt5`mh5D z8OvMoTvO$3j5o;xn?2PCA>pZYumaYM=aQO7gz5vM%&liHXYj6>R{`quemw)}4{ZLV z>}fkiMFyQ@Q5R2c&|?=miNuU94}lAs7iC0q$)rn1yW`FL&~~)8S9Wo!PUS7H(8G8R z-%|a&IeRbK>&@(sIzA0=xSp^H>CI7?Fg+nc*kk&-+Z+39*I49&x%9PqZXtV>=y)*+ zmyAi8ZH#3Ar3l45`?cAK)@4w~P-t_M4osWN^7eatX0sqbZ7so`2@EP^ucGc-W3T0U z5g!dM=cRzubPv_?UWtT9H$&iGLCf$p!2-cUTih)gOvJ;SCv9|NVBZxXM9d+FZeD}) zhhHQLu?p9F6j~2B)4YTlJ6U&r$UD~>_c48I;bkl6_FeA`s)JtI5!B$rTj&jo;MfoM zfIF|34)>U(fQpmk9hSFJMpL_yhH+bjYi~W_DTV<$xHjDSaS{_qg296U1zGKxFeX)l z#{=f|fh2u=W_0uH9#LK$W?AnSV?S*BF$>^v`cVl|4z}o-Oi+xX$O&HIjKcg)^rTUM zv@am&qIphE#!np4QIuLB{0cabu-tzR89~z?`IUsL!EjxY&gOkfbHZeWFZruDpWC${ z#w4}d&N5{p6todR@L7b|LRmNpt0{E^H<4Sav{Oma#NMU2B0ZfH`l07L6o5u1}S0 zW6y^92d3aZLFnj1NL5Qdl!)fz-s0?o00pdH(4-uQJA1Hz%%<0o;1&AuSRw7_Dbp9G z2?@!ER-pTzZxZcAQQFl5`UwaIrs-?Fv(V{GC!s5rp7E2gY5M`UF4vj9I*I$6dn5vE zrb{d#b6ycCLhcXa>_7L3EfNjk>uKP1YKc&$2_7@GGU;73W7JRL@R8j1E8x2g-vQ;Z zN`*Sq{y%@^yTzKl5Q+6-*+>jT{-6ql9UcbkCRjfDd9HsG=V1ezYFd+lhMH-<|4DENU#gbnIg=4RP@5HAI|vl?AUWxeO4Iw;{L zs<0u;kAB;_)}aIzubdQY`!BG)e-FcXd|zbm05 z@WV|yNR8ir#(E>G_Z!{ihj;xB;dt-{U0xV3+m&CE4kNIMz;74VMCdRPykP60fQvaT zaPVrGck-}H{lmD(n5(F0Gib|{V`zw}3JH6<;N~nV3Q4`|Yh`dV@ezh24Wph_YCgMl z_hFy1HsSSjcCIPa$s?jb$3*&9r_R&2G;wyuZ2F}aq*WrnDV*GCzC*`ENNPtvi2Ls~I#aPWKFt-9NYD1Esl!5%D{}PZgSAMI zuw(0ZVT&tYU6L_*L9iW9(poh2g!;ipQs0%-WLk`8(39Od7L1rIo8iM1)$j@%I>h3S zm-m7^YCWA#JC}s4XA#f+DEZOoO~IgXWbmk}+PuqL#JQ7#9T(wFw&tnHfEEwp@+hk2 z3-)Mi!$%<(p#$Oth;pbOg{yQWd`?5PjU~&IRq7hXNT0`9Ap!-RxtQFnO&89A>@{>2 zRcUwGhnx}Z*uYwRX{}2UHkU4$4`j%dF5Y;*O`S4zMnWCv@oDTw0fN9cKp*lRt7gS4 zM>S6eHPs#FLzQ3r0JO`e`R7OZo)zsnknoUj)kiu`$CA^uzQ7((t1)|A>vFe*F1+Oh zln%eo~V zcdeuwl>90ll$aTj5w^xyTIfNfl_NJ^hFGZ7ej=dMm5}+QP=>hH3oi-FU{y?hTk(2p zD)e9>lFHiV>+%?~LH zcLYWfy%T+lOGnYNf8`BZx8!W@d)y~=lwr*GtE9}RAN#I6>QSWrB2zP)s>F9G{Ys}p zcX>)ORSRtW*|$$L-vLK!fyc+-xTRa!b0qo|HmDBJ001&j^v=Q{C}j)V10)p<)v zS7ab_Tw$3n4XIDk1WD%610RPuLKZV!=aFJ$;H4Y92}D^n`x8#fR0*&&xgGw~ET!z0 z{#rx%Tfly#3x0C_HrZFzsug!g-tms0Q}=Mq_9u*c^`fL^`JVszQrm~B*xe7c$KxlN zW9`Vm+7W5Mr}>UF&0?g$PuM9mJRCk&4$Aahn{F=PJ)3H}m`?xfG4qbu7-86pPhnm8 z__-K+uZX0g%!F$jbRkcD=eP`XEVz#ZPxllCd*`0{Ei|qGT9>&$%hLS#E*SSKneQ*!->AO=)tZ6*$k$AH0P(vBM z0`pJeGw2t(V~n?H;qCXm=Xxf~f*Y0n)J19^yS4ry6W73erP=HS7!C-*)RB|Vma*V% zeRP?G(&D?IZ6!#QM+U6dOeUP<*`-!YAf&Gr?-{hH-H!9I&^SbrU|NOT~=i9}%Z5M`5enq0aSI=g;%a7vrt2_>(CQ1+sSAexF za-tjoARw=fM5@alJpFX(@erQ@D+cyvYNFV}Vtds{r-Wis{CmL7I@St3?J+H+pMnQ7 
zi#R>ZAGdL-V*eu1ITEK5yAlKv4eyJGeldr+I%U=X9&67N$qhcCi3DMl( zQQsugHoWMrVO>W5LD3se{-sJ!&t*Oe$A%{Iaeer$Yy%eQnF8$wwWqN69iQ&?Rh)iC z2A)H>4T#*z9=wi)$injE%IHT>U++A3YLCCzdtC@=rA3GJZ=5X*9Zt&b_d2BPx?q-8 z0t&+fnJ;V2k)0pw?THv*Q|z>~_y zuGU+_ueC{mUKZ!e@kfaWQ`$kJd@6LwUTp2rlexqA^Fu1v{Cz+8+%Stkfhx_&5OQ4y zPFH4=&6fGuyTn|#-n9AuVe6~IqWZqCO+-Yc!$Fh|=@>$g9=cmfItK)$yK|71Mmj}u zXjHmeY6uY|q#KcV&*0}QFnfBRb2+m>403UOI{ zeirQz`_{!BK9jaJzm6Qay{2fm$@m+B=NPuGA+wyQNp(y1qW!y{b-5oVmN9c=!-zw= znct#FHv2`HNo}ak#i+6$%|bbjsol=lPPp8xLv#)-$9N^Zc{Ypir#cGES5@8{97%up zgvI@$E_=iyDn_;=Dkya32XYu|n~1*?PBrSp8_895tXKi(a_;sx6Wz?8Z;_G?X5GZM zF%}I*wZVfHyX|VggawT#CJ@qq*m(^*6O_?CNAbx+Pn3naORuy!!B>Wx%7b5&dXP=D zI5G2kOG}_l-|Z`OFRg0x!;k$2OYC{+6%$C3fPy2o!Cm6-KHH(pX@<)T)gAL}}VE z^ZIyclNPFrS($=howC9Lz6b$?vnHJGlmxczZ%2BeBS}OHF`i1LQKud5Z%lh+NiF?$!Q_m8D5MJ*oCc=FxZ#z zm>nV~n+U(z#A~*(g=;urqO{3N?c=`{sB&|2t1_Zm50KF!jjS-z=;e=cyCRGsx25pb zNH@I}L7{xXEU7_5{z*N&s{0Q`PSiklppHA_g=5;v-qfxK} zMz(fwpey5f3vvXT2mP=FcQ-kAlz{`V`EoCX;X|L{rbZDZE`Vp0)mz>Iv=c zw9tv5;#ubXkDB^#?uW{`(U4UT+hi-gM}91L?03ezyql#z(?Zr-MizD`J^r$Wozv#0 z%|-9?Hf&;}cA9$)eyIk+i}tD!w|ERyUWaU-|Bj)|Po-2H9hAr%(3r}4h4XW(-WoCf`qi!sZs`*14b z8^y$}KN~)1y(Wt-nT5W@O#ke!#i(LK?u>y-oN3`gMt@BWEm~qz`5Z3Y(AD;c{yZsTU9WQ{=>Umq^9ima+5M_MY>-u9tAOiCj_44@xbNyP z+pi%w)vKQ#?qm-Ma@niK;l^VwGd1p38Tj(o3Xho|ZXt1pX2y%L<>2Yuic6cv7UC}~ zYaL(MG#VWFOxaMaA;qr^N9XJ>h&)LlzdccX<};}neC{)Jv-u|8qWcmDXS@mI4ipVM z^VOyb-&45|E*>lH^0^9UYv}#11xv9l-oM;&KB`~F&v@F7ch{0@_NZ9>6YA5|8wLRj1AZZwbvT%B`Dbg9V)A`aSSa`>5VY$5$!uS0)N(?)o}o3WAX zb%B;P*>>h~6YTk@9P^s*K_%xUc3plikTD-nun*D;i#iy+qug<<1cA-Q5&dg> zTD~u)GP++l-5WH0!c0zGg}|1so7mIRGx_kQ8AA1fpWs^Oq@4T^Avh=!`>^p;Q z$)9GPW&=L;F6aXru$O>TfP1CT^5xO0o-;wLg~3>E!Rh4PVrUEQ(g+q?tQm<57~;iFei)h$bab zwnYXuMAk01>afozsM@YMeKHxR!@_r^sI=73Q}6m!CoS zJ^fJSdfzB++1fS(MpS}0EG0yAqzTT2Qp-lce(?XM-uX^`P4*auoUk)L668G2W-N0d z5gdb&uKHvC-Z~NW^%IO)LZ4n(L1v^D#IA6%tI1d>M zSh6Xa1S&nwj~nzh7)Lda=lqTF0$zpq@h+wery5~1cz;oQq#1o5jf0v{ECB%GY+(Cr zfc30})nn^Th8TnXez&5yyNY+c!&LmX%k%da7u%QVuJ6yJXRMLs>p^4B)_;K=%66lk zx!T6~VT+w<779;vU#?U57iHyqxPB;+IW{~yiIrm~*VG)t!G%BNxaS?U8C>%EYxQx5 zJZ!3TyfA*R^Z_}xN)+vLbBt#)OqrH#VE$c-$ZRFNE^MP$VfD+B+X2TnhYk@CAl%h|A+Axk?14%<7=a?ysBQ;m|Mmc zUo$7*e6%4itFDeG=z7((yGCTOw(i2g0d?+Hv){x6k)LP?PIJ!EJx%KA%yj?UmnZKH z`>qv_6@5sJ@MK0`(GeYc@5uSq_Qlqx3!qjr^za-_B4Qm1?13(?+3x>Fm8fY#F<8=r44@ zr&3z~i}Av9-ud%S$%-C7!a!LBoxKJ_OLq^}FHc$FwAFChlp6SfU^b{* z0B=Heg{hl{U(Z~NQ&OhJzHwC5dr+Kx~$1!CU>k7l-Dinwvj4WOGNYgFOlq= z|NWsqu;$CH4_1dYzr<@!r?Xw0W*#yxteIOqE3Myp$q`A$S21Bj75*Vf(8kK>8u`a#5hC}iS%Dgze8J`;61 zEp+C&=dmjG1YKTt{wWwct*xSOTOQ`p@mjDmA>kX*LRa&XctXN*c*Kf9C*J6?g(@#- z3l;?D=~akx#R9OQg5&@vrtpu-?4dgY2IujLn#}Li7V~8cT>cDR^!?uHe1=YY_ugpE zHMH>pN->gQ@E&oW`A3*aR7CH4W}3jKoxt$AKjn`=r=G!2)7)7NNeTi}{3-@rWVtzd zQvZt#5d}i;&!DLA`KS`ZVFhj?7^whBgi*CI)a%0P_0pd&18O zJwyTP2S>+4p5|Qy#I|-JZ0lrIQpm0IvGdrsZ@7~oYm22IHCEEmF%FA*1q**207$nS z#crKPn?%JVO+(Ro*^laGOI4nt-yM{m{`=i=3QmAWpWRf&?S)$4B$p``eDMvyV4u+Q zb!PB<#_oLF?)-fI@;n5Wiir7TfR&B4wXtRru;8}WWJST2ZQsWyB+|Dr!=8U((mxQ@ zL`hw+J*;`1ZIC5A)j$a-|MMic7D8YqS?t!l zMudQA)7LQygcJ2ZF$_>Tk2j`9hlju3vdVg-$9>ccKj0=)MwAj*7ZnT8xJ3isXbp}4 zhGvay^z{7C!|iH^o<52s58OE(W?#71j6|gRcm+IoE-1i{poRolUczj z(C#7^aM>A1n@M54N5u3YD=dsK?VM;_!c5A_c?MQ-2}29;3(l50Ge3-RCGcW64?xaH*ea_aL`bZ zx;2Z0>pV=**4CCz@;LfGL>}yeXMpuYZvwfx@vGU>1-+O|I25XM6w7csZyUdV69ugZ zS8$hfKj@e#SPyub(% zRGuYoM7Gt}*XOw3n}jX**@EH;AVF~jy$!=+Ro(zjJ;2(!^E241ynYqt)3bRj5kAvk z(yfi6EBf?_t`%QVK*TcV&Ao7Cp!F4PHa0%aaXw`yZMy(CPK9xyBTOva&-W&c-wi|U zK+%M7U11N?;PP~$8$)d@N&Kju2-F`)-IW7 zcZ21mBGvO~Qx(((c!rw-;0}$Zy+k8v1*6cCdExgj_PVH!M*EAh)jAO#Wv(W&O9mW5 
z2SZ|Ut`mCB_Hs=X2+ymLMt`e7aK3Z;uzz~8-f`|8WR_}Y96=O=8%B)u8+ROt>rqKYE|G-ylyd?* z5WI5B5QzuP`)S(&=RR)0d)T~Zs|kn?2H%GenSIdj9U2>JV@Mu(^7REX;JiQBf5Uw+ zRy$zBeJT?d`CPU0`Th8{siB`P-K?H#vgtQcDfxe=ikr?kq2T{}<97yAFPb)WPAjaa zyUKsGm0!2~7p}iu({G=$%0rZ>#(KH$4|E~dp1yuJI#p?oZ;J#{toQDt z=le!cVd_xDEMPrQ$MEgWO+tSN+WQf`g{3~S4H1?F0RMYc3`6TGWNz;5Zx%26nN#kv zj*;NLNBafOi()^naY?G8`@JTbbempHmx9}n1x~dTfG`2?y4arO(;xmzNyjk7Pdn#=(AwHowA0+4YA6xi;gIO$u5Xs zzM_V(B&y~K#XiBbz9cJ(XV`~XI#Ar!qEoN~Em~LE>80rw(+m@d;Zj)tPlEL=*PzIw z2ky7F9D0Ma4!Y!vOLKDC?AWQshlZl^0%>am#c5%1d&H_R)^X; z)M=9|c||00Zm>Y-Mb^uj@^_SfZwKtR_Se|1*(?HL=1nvx^i&Od_xr;e{ z&{H|BHlq9i(Dx@x+54bAfW!NiO?V%J8FPPACR`lXAwnPw`4TEg)Kj>+`DS)40g)c+ z)L7o#HCLX;G+GsPmEy(yRmXVvJzZ1@vw&hO8pO$5c= zv$M7LZ@q#IM$R3I7R)ABab8=Oq*Tox9Rujn-rHfxmgRT&#j>1LvLNdI=TV>%?@5%| z8lO#g=-aRtY97(7#K`F4Y;SQgF3r~bEk@7(}knhVsrcF?DbL#y#;)?s$7Ekc-4imVz7pwmaam=ICoj(pL5*g3 zVYgu^lP6oxX!D1Xm93|GC$mr-lO0P$M1}p^93vK)w9(&^=gYAZ1Jj3y+P{}>}r!?8lZQDO#uw`qb7gsT4=q;JRwnRY>aD42<}Nifw23YOsC_X_eD`Jr6HP_rMJ>;ZszR`i4OOA@njnn=G%AiMC( z@M;ULK%@=L4L5s@cGw!|%4pwn=a=mLJ%!P1yQ&myW|dgK9)tO<-N@TU&V~W%`{+|! zI$g7nW3sy#s*Aq!S38fk*&MBSBK*mQL(h@Lk@Jw~*}pL~jHult`TGhFs5YbP8~Ifc z0*@FN;KmNe%Z8AzKd@H%lJITgTh))=DX3=ODLu_&_pvtH64VD?Av;fuPP@SQjdIx;`Wm#HcXj#Un{GB)Sc4%f zT&#;h1Az}<+vT0)5Kc2}O)Bk0cIVR4r|7{A0QLuOtaM%qfx{|+)K~&Lua}`Qy*ldm zbTi(MphHMk)K@5N4IARR_$%$$RsONW-zB97QhGsfCS$%)RL9a*e>b$kAL01mW$ch9 zr7#}`t#*FlFr1@o_ntLt3(F@>9&9v%n}&9kP2!{3fzCi@bC}6hk>#xD zj2pAkH{YS7h(3~>@b9f83W2CnLWsgZ?gx&i8HPE~aV@nAq2=^9Zngtv?R~L_cpjs8 zJ3gIU$I00xMMYRO3P&AJKPF0nuCH#3AIvEnDGZgP)6>)RYNH29z8d@HcL~h_Av~E# zA>&1w_!Uz0>Z=X0omAv&Fo6ZQ292feW8JM83f-0&Zj$TS`gM_K`-n-)a$vYek~^h6K^v6{SoW$?mQ1d8oczKpVVFVgJ-KDQR0Gb~rJ>^umIU^yC4O^Cq7_nk&1} z-QnctSi&cUMfH1~4K)+r>9&A0DgAI?O*ZX$Yf=T-US$z2fAx@hPYyk(*kA2KS}%9i zJ_!Ro$b2w<=rB&g2J(l^|6Xg(VK}q=-@Nv$GZN#6`}1Z-NYulwV{h36CbSpwd?*bd z1TF)9j|n6kq>dmI0p~OM6iCXEA8$WBiN^jLs!M%Z0K%tEvR}Zdn1sSzHZ&zvNfaGc zjpw-2lJ}1K6ZNV*hZb)jlADW*?R(wiBizEcFFn(qIVf$7kfXAr8uYpkxgH4!!P&1y ztU(J(_N2n!xSZgtl_wP}f^D-MX$E8xZK69FWZ8l>EY434sVlhND1Hk+%ofIeB}%UG zo#J7{^LO0XCDkWAUU10TZx94LLzZKr7X+C7T3qOR2a!8qJ%0I`*>om!1gIFXhyl62 z!`rA3lLLZg!ntZ|;^pYK$V{qnbRl9KZj#;jr(XCHrS8YopfBigQg*dLp;?1jpm>Ge zIB%E-_@niSx%yo}&`&QF`wmPbPP4wJkWj#Q2QDAjn(j;baWsAoKv#YEY#l_#-zFw1 zaj0=U#N`2rC%S9gihu(Il2}VDkq^muqeh^Q=%)O zKUeRt&ipY%Hyp&KjwsWsxh3+drVnT)$*DSeA?<;ArC1O)DYw;Fjt>ai2TIYFh@b%? 
z1N(a1UlKJSVwSJ|la?iNm|{JM+*HLf{SqBb5uh_O&%VIl*k(!3BG14vT~&-vEB7?| zds~4plZhM30*Bvx0bxjagPk!cst@Ez?Dtrh`uwHtoAbo_N$wM9Rhr!om6B8Zx+Ot4 z?5j+EO3`C%n1c?yOeaB`Mn2Pb<9sQYg0?WaGuA7dJF(cZXaEHrNt)IFsYfE{R%NCl z3D!~+;HEOc(9;t<>@u}l#>!VjS`(YL%n1k*!N|MU-X>WzV@o>&^u)9`LBe5(W0Jte zZyVG<%tSm0{KTo!5Zk=8lWa}!gS9!0itP{mStw*WRq{I^wo`>I&I*dtTKO@>0fu{x zeZOgP>+AWO;4{sh4e??p)PYYG^$gs|ogRxfCS!@0?rwT@0|r?DpEq8 zr*CCY7EY|Ef)=s0f`e!}XYO@Fa-#6Y5Ad9w9U}3MfpK{S{LUz#!aNpu{nkr|P#{t1PV=rSX=HDN-O5pDU=6*qT&+~Frd9$~s&FK@bSvQEka9chHC#}^&U&i|LMB?r zMk14{IFl;d>@?V}f58Z-KpA)&GUc?Y58gytLpeLMw`7or=BnIQxi>Y4ZuJ#Ah_Mzt zR(8Xybyw`6p!kHg+4xuLz_dUG-+!&;f6<72MLE5_QMFL4&r~8@6@{{u2Y@()#P@!)?>43 z#9HyzdD2(_`<}vY22ZKw{B*aX+J8P0y>nbKm%n0LoPFmU6u{4|?D`POJfma*nZn^q zCf@j?8m#IGj!l2QjUw5GRg(X5%PWY5h+=hh$s4$DP2!$wCuoo$5ees7I5 z%VN((5n_m;Fw<{rP#8lNUBbF06EY7xjhWh0dWiVVpv;57`W8@42aPk|`-F;Nj@UFj z``2~NJz(ii7euLc!Fm9%Fnj=w!+;?{U(prLinQm%SqgD8e#!EyUxh6q{bP1hD;~RN z2y=}wEm`OV;G-u?Jjkm;fW9lI=e=j;DKdWWDw(fDVq8%CpUE&6g?4VF{f)F4twglsXVL5U&Xg8gt*k4`x;bV)&TuD(1+4o?12x@#7ab z`MQ*p%l$TuCUcV!pTmd$?=>>9T09t7h)b-dhwQ#{;d$e|m5<}Z@;RiDlXw}4>reWT zb2UmTB6GIai%O64L}4^&LNw_X+ceP#uJ{@Ivl(u((wV!zP!L*K>oDW0O)65FQ)QrqK4)|ITVR zjj8Ef*AQB*#+^V^(8H8JXJiU1gK53Ya^=d~S6Cj-O)5+RyVY>L>9$RaR@+zol6Y{0 z6tsAk*$5}lRWof0ikIQSXkL(207t{%Xuh%Wc5@T=v-KDQl=D`X(qWX2?Zxh(r|o{8 zysOY+tfE|YR^YwmH#3Tcz3qYx&Xq?)Kj9sxj_a$^az>fh9=?hg?)RT?1jK#v*bOJ! zMm1=j|IMhdu}^5aoajMLCqY%=^#kPoq3j#Uz+6zf99O7|Bf8JoOiy+hAn1;;M83#7 z)JJUFUto-Rrjpx)?$V=CroLSi0@WW zjO3>v)m9d%X)cZ2#Gy`%&ZLroVy?$j?~fQ&TE)}PnPPN`soy^uk%}k0Si-$HIre^M zGnyxt3D1!niq+K{oqm+7g^B=WBtxuuPE8H92rv!FeUXpv8>u;O&q*j8P|gA~w5rUS zxt1n9I90ZC#V)$=ad|O2e?~~U)x53{xvu#!%*NOu7r>4yELc^l-t(^6g1LG1J*qJ& zGW>R=c6SJO$(6@K@jGKrD_5hhw%A)#-ev_L${hGQRk3(4+k0dX28>GKXtz=z{ck=O zp5~zOO>@d|O5t^S?hURfS-=SQAd94{q>`*vE5ua71{NizFn+Zl%A^JCYHb511C)i@g}La)-=13Vax zQ^Tpuu`zAS(HMZ`M)gNepW7o8%s7I* zg9Lm-BIpQF9d@7pB)~~y{mYt@=SDL1o~I53UjCVn>y9lP5NVt8zS!k~wScYen>4fI zuc7Ol)%LAH#OaM-9t`^;005HFXO6+9jC4<}-I$`mOq+aF*% z341ISjBSQ&8|~J?xE>ADrlPVqZ&Tc`*KTVL4 zED>hz9H_i_0}PD}ybslfp&a+UZcGWc2RKkYhjO$WR@J>INBm@!_((*YlmWwcS@pfc zCHgvx>0^CXX5lYM*UL2L6D_Y%-}67~iWF_mUV;dC_TN$JAVI<2&~Bp&UPDI$j7@g4 zBn%<(DRK?N`56S9$uyO}9z4l>CP>WiB?W357u++KG6~~jnt1{lZ9CMGz%cM!%gut5 zaax(^mf6kgd-`p~z^Mn5k=S|q6Q2#{YXd_>VR`atF=?4L#sv0}aprgXlaENp7?qQw zjuQG)EPG=I(l~>0$;jXVRB%dm%g-lgZ>sz!^u0nw4vGoViHZ_M$&t}Hc3G;Hv2}ee zJ6k9__FAi-J=w?bzGi}W-Kty;Wb)@W8UqmsLh8ANMbV@QhVFoM{%#YxW+h#;#q!*)BIc$_ri((Ubv;R&3rmEMHXge?-OCX9m3JSJJv1xOo4E}xIlXR`uQ(LFsHAkHuz=( z=}&5~Gj_0c+Scgp*bMoJiWlNGSf!#|_J*HutSY zf2Wt31-^xQswyS4c|9rx&}ew}e@(fL(NOr}AP@#Ken9#q=&3J%p*rPO!yj8x{@WG9 z8tkh}n0ykn6@G`4iL85(mB&DPM5-odCqyMP`(=oY@zOp?(GDv-*<$pVNXR-mh~Av-0ioHPyAZ;r@MQjDoa zJk0{u7A4M@8bo02w3p|fE^F&b1r+SR_YiZQxteG^nmy<+(*^AL)+QzCCLWIcY0` z8y7u}Pf|#Bsz~qn@9A9Fie`z46;G5Fy1XvxsfnUX*+`Izu_>3q{-xBg@p$sH&{24f z37ZI@`IYaxP7S2=?pJq@Ir6)Jq}Ke%+z3*jN7qw0sU+rv7phO-i>EaHuuq;(@# zjq4k|M1Sgox1@JD5;Ii@%uXYJ<~4}O28`QQkf++@#Jd3YbTC& zVpf-Te02VSFu!N!L^bkuPO^N&%TQ5}Nr$4t5vwLiWVGR|jV^^%c1 z5umD9I%397i%7}A%Y1XflqSz%?tZxO^;M!s7kZEr`HV62;;XzCt2G_jwL2W%R3KdF z`=dNICmo{?^jQjKq08op)rciGYZ<1;DBKa|chvGD0W{b7a-IkNxZ}z+`u&44SBo|Z z6HvMs4_hOnTv3VcmTHk^)C?loKYyUUJr=Sh(}Q218dB4yVdnKNQX-LT*1_Pc{NHg; zir_31xgA~Z9wW~C)NdmhY9;R8dU}|uj!8Zu^V5Jjr>ivgWyk=QNzwr9i}BIi>o~Qi zq3ctiT`yXtEt+D0r5}s~!cc5>pRZ`T-Y9r=b31F2g7q6&_dF zG24XcMf!JL#^G|g`{#MDwkR2cs@VglG5@|% z9%!h(dZP{k;8+j-2EXr32Bw`}8r%k}X#B?$tuEJaW)8NQ8w{l&hf)cU-qg2`?p?e?cdDI(bAFE&oj zH=O_aVyU_ld0_de#LYFg%7AX_t+%A2a@~)(&FQqH;zgaBuu^+abf5S70|(u=60&t3 zYkPpoQ8e3;$Zl-Jfo4oBk^!fy7Ks#eRD^!c(RB?I*SKu5OGy@MZeN`ya7cUMXuifr 
zz1Yj=4jUH3+)9pvLYHUHw;Qutf$bI$EV>v-doJ;Y@9ip1%`gx!!}|O)7}VgCr_m~s z!Txt1+8#*wTkRjsUtYZ52l`t2B*rKUaPkVYTBtktzuLjjSN_*$1;Q8mb$CE0RoZ2P z*CK4<4Wz+w9qD!M_L#+>nd~|bpkzX;5x@=SHz-AS!)@~5C2V$}iFfI2`}1Wp+uu#l zH7FDNM^n((@An(CI(RxrMlmL#(@%AEe~jls#L8Mc0WmFGi(7u z3mM-N^A*yJ!dP@`U!kus?|vDa0EqxWd(+$BDtnrlN>r8QK>i2#*dK1sXGE<6p#~rn zmUGI&vKVc(gyTEep?3V3peQv8nIMIC)HlHcurR2-cMSON95qmyoNP2WH zIRD)@nm|>hNov3wbl<(@>yNp5;QM`|V+YC!0DR@y_ktlFwM&GCFhZ)%76GPHoAgfX zZI1+aq8GP1bB=h7@gz`CGd3_OP?H$7wp0zBgjrjsKGZ{k20uR5%o(k5-EX9z!=V_4 zZ3?>V_`s4_KcC^X!~DCLFvNkd=&bX+#2^@vwPF2d;PVtQL|OwWBz`+K-kP|-d)&+V z+2dp(co(krs$?FTb(@b`KOa$&gQ)fR{qw!$tr-~b5#i?GJNR(PD}IkwXbHzZNw;aa zQMm&*RtmCDf~PUBQmWie)4L<2YIya81zG|0kUxLV24fHwG~s{BsqBXkl39{cf=nS3VWy-wJ-_C?{G0y2J4 zewDNF5^a18=5j>UT?VP<#n-?@r!A!Iecw_zYjc;Pk#J+W7mF;WgL$f4zc>S5$I?pI zizU5UAt$BGev4b90_}iIL`@`p4SV-ahUXC}T@*x0ynuX!qQMA4!!CX28YSKAO(N zpJzVqJ+s|xeLTC^nSISqc$#*i+)50MEF%h*q_JeiLE&~(hG1ee*R&OMSqgB#EPx%d zVLHoJn4ddB966iZ4k<};cR?SuQ?70H-;D>QMQ~d>GA?aFo=#a;9wNont{FhX^n`Zl zRTU@-;)li$Qj`I2w(kJ&C1%nR77KjDV>205gLu!XN84j$;zv1VH~}~Il6z41Mk4`- z3r`+bz@W|{(2G*%G*r}8sGpOA5E!{-JxL_Px?{ODT5z|M zMZUwYz5Zi?Xc}gF^Cmqo3!YhxAM1k>8@0e88F*mBLMl%}@i5YO9XT$Yxl@Qbv-IM| z(QKvzb5sz8It>BBo>H#;usC;+S-g}fHJD-$2?aO5(Ny2^?~jlc{we zqiZtD8To{(Xms9F(=q8F;}kVswed+&YUmA-mic4Fzp*f}p|aw5vu1^47ovR>M?M?M z>$E{~ej+}7ijCQd6Y`T>bZGKtP&LQF2aR4Td@GJ_t{Z*Ex@nFA5g#sI0W&!;wN(&} z^=25g=F>A)VsNVJF+KzU!l7430cWkP@$z<8d3F-6OJVp(%O{%i5~k95=#Tce5BVly zDz~e9NW_Sp9+nsMi5S)9&>K#|)`Yys&%trA4je?-x@x_`~JeU}Lul?9ex^P;c z6;JQ(wL))DJTb=WA0aaF1l+IKp|}cvhxeqQh!WX{_llg)s3fTOLScqTyd|Fs z@sOyJ90Z%bCBD9rS#*iuEXFe>wZf>HttC;s%lJ2t+s6@OV!sQevgAyA;fsJ18|T}F zm2C3bEEFq7fR?VUf6NZzYOMIVEeoEFj=~aqCZSp1R>iX3mPj8uJ!CF!O^ugep5nTE>qeK^bcZ-M))sT@g2mNIjT}an`?5WZv1oG|F6@uq z2W=xfr2H-1FMt>c?BVm$p#-@zu$tc{?-5Nc1|nbmRN!s9l?59&cxGKuja24gb6Aps zPd*N$!gt{*y+^Z#NOEn}D|(%SrNjbC2jz$Z;=fy1VJnpJ@ZoU6t(^)p{#1TP#^5%W zs+(5d1K`-%l3JqAg(0Rk+0CYsYqB)8SZOw20fl0mGcWa(R(l&XfhUnxIQPyuv0#AV zfs!4&G2{>~{FFGnI+j0i20}=|XW&+DiHxq2iKE=kkA2hA68*^rf?>>c{esAa22-mC*xItFI5!luN=9A3`?LgJdnba5}<7ZZWuO568lgU<4MTpLo zuSc0O9R!F`qoT>WU4GRPDVtftsfgo~;_;WbpDT7^S!^Y^hHNO0lg7iTL>QHy+v}U} zclf+i8^G>mzJb`Yh$c$z1UL)&$f5p`h5s?Hm0C=R!KLVzRMVO`Gk8&Yw3tz*30X_~ zfDP4+NGJIdb#9c_V4{PcfQvk;3o{f>eItV0$)_6yL!^=>mh;1!VnWON`r@?Fqj5r6(xe@QKC3B)5EI@XV4I8QELL3~{l){?MA<372H z4OG~q(ntv?xC)UCL?kD2uOLt?8NyYJo4sA-&yf`jNm!0fLI3RaQZBJM+jseW1HV*F zZ3IG1aV+oL5->$|P3t^B2Pw~mHOw3!u6jlCnn%vp;8#x>jO!wR^s1N0gk&3gddOSq zmZv&?T^)So;AFEAXq-30<@%;SnVMV{T9Z8qy8CY_mVd2!wP@M*Qe{W^PcEB6Ow!4m z`?kbGOScHB&^*(Y2ItMZVW13L;M*lQMAhXHVLH+F)!xhqq8uwh^ie~@9mHsc5D+CZ zIZ8eMP{yBTldHF%C7yLaFBa=MiX3~IsqN+AXT~2YV?6*-_9`)arzUCYhPT{7{z4@o zYVbAJRXl0~(e5tp&+bhtN%#Gow)3UJ zdFXPdO&z=~o1r%*dC~qx_7pg!!RZb_5@SP@RD{$kO@%ZnGtn>!1tWRh3@q+c?S-(j zg!kXh8dq=Wr{gbIq!}pl+zOH8_Z<#{Wpx@piuLL@G2e&z4=!B>)3-;a_1#>58^-i^ z!NId-xH?#9ru7~Tl8iGE$@}ZCazt{Ff*yDu@$gU4)@{nXl5;Ux-#C!CK%sTFt}*-G z;tsMp8Q%A93gr6%Fo@QVY&(X`E~fvzOc^Sp=o6Ke=)iBfJPvrMz?J90o;Bh5z}*AA zz_S@F<8SM5wOiic;wM3U@F?RLSYV1 zewLZeT=Uh+w~F9`9b`RNCyT`_Q$U3nf3$=YR*}4+L?uRmRTmoXKxl)ICnb9nM=^32 zaE(VZ;l_|x=!jt$x!Wx|G!9DgPCzeIE^2FV4P%m=V-rw5=aVuijy4tvlLeO={g=o97Y zz1V?ZTq{Hrkebvdan)L$4;Z$yr|rc687sAvHrU6vwl9!~zOyEbFJ;(s1@uufyawX_dD##gq}erUVSGChz`B^Mb_x;KISha zpFG9TsstI8APkQfXNSTEV}8FqM%XSc-kt~T{B~-X^?py?ZB!mo$WGM#+vDO0X`8(K z;+8_bDrRQs`U|f~lFsKWM`c1}|0Q#l(7@C?!qqQ!aV`?sX-pPTY2O$e{`imo4YV5R zHy9cR2+QIndtjMNPHysfzH_jG$-Vxh`HAG#AO;#AAdp9UbQTmb29$Y6@eT>UYPyw_m+l_xDOciDS(Sk#(to^kauC{;r|M|yrRmi7_ z^+6%au;l-La&W=e9i}I7)!1B?`8+z+Zmt~V!KVa z9+_&7U*NlTxi?j#Hh-+k<&Pc_0@XV%`N5Sp_pE@gDbSBA8|&DUteb^5WT+|&sKd$L 
zb?&B4pV(GTwb?c3vglkn)ha;@{4cMDR-Uw+l@f%}|)z*J}6>ZgcVx_hzMGib~_ft804%H`tBbxuoLyYB7UNJj_k zV;I;dbw7J~_9t@#+B-0Tu6Npe#QD-Fv8w_^@cRK1pA0(t07X8D&vlR5`x1PuIO<;0 z{-*4+2QsyABIZXtwlkn_V@}!=%Ek3s7PP#8-+$7r!$X#*wsr=AY`M&Luo-k<1O3%b zyd6)V-1%m@$}*N#p5JBX>Ab7PHBaH@dR00_`$RxgUjMA^>3QCPfB+qEAz(7_B9HcW zN`Gt6{lzMf`u|L>G5KfugHR5fzGp?RXFKwimygTPXzYleA`m}RUFr2o{{%fq^!7xJ+9WZbY1Bop5C47ioGfEi$diCvl1 zHX6cDU4(YtxJAc-jndlNvG>|*0PQnktwc=u(AU@+>^XQL%*@PHoB9n-AKy}RvY0@8 z*cK8YBz7hzKdJ+QUy_WBs&*^P9z^=lq<>K!^xbqIcXetIr6`gk<{KD4 z$GGB##a7xe@M5beO1mG8kL)b8hK7qz8cvY}m9_FMe?*1o$K6*mF!N0@c4!O1)mC1j z3C1SH+j@5Z2Q)1(y6^5M_S6WySc$gzHX5QBq0aRKeDcYqY8Nz;loKYFM(0OHWuA)a z&Uc!H{m)gbw;;<43)fEjA?@B1JX~?c5M>*?=N#&qYG;p$AM?MJ+EUr#bRA865%Bnd zay4e1J&+9o1$9&PuWnddRQ9}|;M7@4(eK!^XRH{cO3S0(ya586M6%T4m78CDZ|dGr z6S^8daRP_oQoV+fYXoo9mZ(HRZh4tWM8lWxE$P*gqFH_YX}+=jli*y94hgE`6WIh@ z&fB2b?Wdrd%Ci*Sy$`q{-FiS(xG&bJzq-O-S`4OLw3M~g($Eb#0uuB}idGUY@cIf2 zEAD^m`dTaXl!9NtEG~i|fE|!wT~9lh9_b{bf@IOf?TX6us;FRZ(_}p{L8Ny0tjFp4#iW!h zyY`IEE>$6_5`ovvB!*kpX0kH1>fc(T=Z3ld{aS-YmrV6p zi3!URQ~k{X{&#UH9(1V0E~N(_?5?3m_J;p=q2ZxAlBbhn%@tP8`$r_p5k7~DZh+P+ zeOh_?1+!HBSM*LtjZUq&bms@Ks|_P>rwh2Y;NW;9FGyzd>?M@Y=TP?&b}S4h569N` zwc<|}u#A)&0-WfVd#g0R(}vVkXF*LDwgkB;gyso*I_9V-GCGnAK3zpY&p|1tF2iAy z-6A>_^}nm7*9hgZvXyoo{9qYeQ)dN&iQ&V;VGV8VP|sLcoIQTdUCzSg0ej+B@ufoR zFanBp{BO=b!7`ejD%NBr%ee7SnqR{>=?9KdVg9bxPs#PA$qtT(@7Tv6!+tql);D#5 z56!o`__P*6k$BNz9ww8O=86epd9Y%@MU#o{Kf;ax3m3=nO>Dk zLE#+NT;-m+5U2-I;TTJKiv8FoXzH4;ZIpe_Vkxa8pe&0ug^vf99<;#Du5b7P1k$rP zxr>xzYlXP1F0(}qMLKe+M}NOFvWI4B`PNIf4AS z1J^RKiI|(4`{1+JcB}PGc=d3&t7f;)fID=P^)6mQ4Y5i=$;erKpX7OO7$`@mDn{_BU?8`pe*rL_eVTf&~qYy7%ripsUqTQ&4b zA1c=%1l9L9pWYoG=x@2tqJ5t&LE3P_BSXk%@!AO?)_+9^v~`HQq}~OV+y?XvkSJoqw#E zANyyZVaqxndFqv%_hvxwc*a`o+bG}4q#iI2{0SUI-tvFUw-)NGtE*dWSpxo5z)St> zSCPZ+mHpu=Wr&Owq;qO&3K%5;tG+*f+^vOPyckCS&%8`?I9whvoZoeNTFEMruL4(|+N$9rKRIeVuRAn8SoF*^RS7mw>ojL4 zC$*2o#j3)jumU~JKc}Y~r9nX$+`tn@>zg!fY@n#2o7<^g!(J!OQxM^TFQ2MiDk(_D zy}{{(-C4=p-h1}lXP>h_F$(PjJewXK9sqAGwTlBfdd&cU9Vtg= z@8VK`pr{rP!Vl>R3&T)rY-osKHt*JdgHUAdQzfXcr`L|CssP3Z*qMd9U0vb~v||#Z zI!RWA^t<>Wtg$q^tdkI+Pij7TFO!?X&CLzUKwW)`|DOw&Ch2EM0s+2Vjm4VzmOE)` zfW)xQZ&T<#?4^*!M2>iDEZlAD?FHSLL?j5~bl`5}w0L1sT6t&jalw;z13AIN5ys{0q#QfnbOxDOubm zIhot#<>mWxPR%+GCnpKmHoRvD{J#z*G1jfP7~IwR1Tn^V)G*6-(o_{$gcN=zX9hz# z4EMF;50IICooY=rBx@-R=AlaG&W!dGbLs$9k_W_Nmi^)}_<$wOUO=dTo8a(ijDEr`c zx)9D1OASs*N>cv1rD)-lNkP^C9UTgdV=ZK5c|%G@riNT#pl8{q*2N8jDB+MFND*|29VFfq_fO*~hw{ApR`nefPNiaUq%YWar2#_bKnZaiGz!0nZ(!;hg@0f%K zVD|?*ge1wEUcp#`<3N3fhh;@$i;HC{`3Fw9kQ)5P+}y5@#to&VpC{iUs}610Ij!yP znioPxZ)aE{&d$#I*h`aSa__ldG8`9fk9iSrX3t9;IAEM|AV&ErnL02YUf#mf(NU%C z(RooWeAHLPz+#Hl%_$4YkO7_jB`YgC)t<)y`<<7c|92=>J+#35L!#>bcDe$%xQgqK zl2V?8eE#^TB!5zEf)ve!Rg9GRO-*BwpOOmeLDUjP#B=TkQYK6acKtDzLQy2M)SG2v zXGhxO{TEI?T{<@#h`(~SHkXcv9r^6Zp69gmQ5?fN4etI3?AWaEdZJ zcS4JZzH28gp(eoUb+5tY*V)bd{fKduD{RsK!foMxWACZ_?;2JCq#T%B#L(g3)_68k z)>4IKcAOf!RDO7RmZ=xV=X7`JpPbl#9`9_e)n@tFGHX@q1G|0)e8>bGT!ej#Hp_|p zcS3*Vz9tKUaZaz>g$K{_(+j_fA6_EhAMgHYqnmFx41Fc^ta8R|t69G#|7)FCt3CY7 zWUj^*Y~cW)9KrUMbC=ZzqvQFD<%_Lud{uYnu)Pwlz5VEK2e4TA{Ah>ru$BjfN&+k5 zs+!rsRFJwF^=Xu9+THw?{M~ubWGcuBHe+T0*wN-gxED$+J-?l8%G~;sSV<{Z{RP}M zk}S%;GTZ}Rij8Tz;^-s&W*XD68oKojJ&g^w$xCJi>y+~*A_u9Te={C`yB&+Cd&$Ez zkS0dL58nUBHF3NfjkW3648D0=ob|SoUPuY2q~&hm2m|1s%no~Q@h|N}fud@gHkiXb z_6;Z=j|0#yeK3D4qkX&XP?r{Xx*7JmYFv5%ezm-8I*l!N;FV;`Pn+6KFbFF&&Fxy) zEw%P2uF}wPR9omFSv>Y%KaLFlvEKjf@f131F>=*%)eH|XL0_@Dh(vi#7VA3B+y0%5 zpL6JXZ#aHg=J#WbWU0q?J2OBCj7H|Vj+LEH^X}@|aPz3$*zXDQ=i!(V`D-1*XTgDE_B+>l#M&lYk8C zL5YUL_DxiLQ|HXu-O`gM^GWv^K&>aEbp!OuG#Eo-EHFFQ`sP^DQWj*2h*1R35{FBC 
z&7$YQs;q@L{!vxdY)bO+C3y`EAeF5YdwDNpdnYK*cE(R%IH&Rwat33yS$Wvf zJlT!a#L~SyN$m;eZH#&9On~6Np4KGjJ{8YcAq(gLO|kpl!e4f{-_0$hz5a0{`%PDre{YIrOJeaX z?JC1PE%Xnj=kZTG#E#{0r&9qZ%;+^g9Qguwg(1msDh0+?rKoJ6W9Z`C)k@{#HWvK& ztlMGzxM^+EK-#j3SN>*sN5P9vf$Y5yt>ItB)>M_+<^%9Ur5`yTP0ZkI!)2LsvqdsX zT3VZDDr%5G+}%`d%SF#idDfIIhBS}MS;^(8^kKn6Tg))7LPkb4d8MX?$QYw`?7Z{( zVUM9%+~DcvL&rYk5xOFTZl+>kkNY(W3R<_#M3d9hIVjBp; ze;#afC;YCam^GUfJ=z$0hH%~|sKCp`j~f*gmX(pUX>(R0k=nEY;!;NgH;?NU0THxI zcARpBmRCHfw%tUvbzRKr74H5XE?p_ZxykQi^NSoY{KjnRvduhUW(N6?*`h{A)r91e zao?057scN;bX$YzvHymuoJ~BxI<4dWkkT5vSp-yq;}Rel-k7)*1PV~7I73mf&V2g_ zpZdo@J;bL^7>vq4Mf4SSH>3=e z<~e0G@PP&PN~%0JO*|2$vaXtsJQ`m=WgrPkNloqpBgdK^X}1ElvQNg{6_iZ{;frbu z8Ri2~KV&vu zzE^N)rh)p3*j~u}LRm(66esyZw#w3*%$rsWLX9Zv-I??$o{JY0un>o`M`w4L30*9T zlI#R2{Gd`etKZc;9!(a|{4MZ^1C0#IePSC&0H3S={O}NeGnffs7LDl@H#3N3#nht% zPoOV@h+J_u0e|faoOQKObMvZWR`=O{iN|L_YvDwmEe+Q9x)0@{DdQObBYvC#cearc>l|BE5l$6Wq8MyaykaDf53n51iUSSx4h+cE zKy7aV*lc;8CQhNYp4ews3Re$v%?kH@x4BV=(2DVHaJIz3wMNS+9`mEbi@yur#LqLt z30Ohsv@=|r^Tc`AC%%L?{doqMuClcT!YPqC|%l1%5b@kS*s(^ff^Va2A|#(`VUxkVd|kxp~4k3s(Imfn;)q{-|4qn?`9sDwHsG4J3Z zKBAu+sP}eN(oe^=WG$3n3^u{_lI+nS|Ls9;OTCwjKT2QCnrH5cTTTV8*WI7?Yr$rw z;6mcYsO9bpCT!;47l=PP+d{){94@-XVz5hXv3Fvo=%eHkEn8l`wpqCBz*;4fec0U7_qQus5gwRjpA$~|6u z!Yh{YWzNR!1;ymc+Dj?A=qIm-X)i}sW^l>(`uNt{;?}4wn2@ZhA`D2tP3wI+b?qyu z=7(MT6ABL#3LBC^(uK$2t^)>R=L+jIkzf433>=2c8>EFJZL_hXMI&menGJ4~nr1J- z?CoQZTki6B!{+x7tepAt4F~Ss!W-V7{3`ig%Bh$1omJY~AcXka{VD;P&4iL?r!^)kvD!{)r~5OD%1Q1T%ZUKfS-xAN*RzXG2*%}8qOUwqv zi>xMh^cbr|1x<01d<*ZJ4F~s0RgG!#&m?>NeSsj&v58DRCk(J3?oAG#(aylPZ4?>UbbqB%Xu6pfing5Q$)49UWnPXO2T0pAs{$;J+C-a1XBXt!NYac1A;>tYv zO^ilE21s6{=zE2$x3hO!UXLd1Q10Rn&iH8K_-_+NzKlwoiv<6GM%l3##OSjaP?fuI zqL;mVF&Q9|f=s7Pn6Qo09|-D`wwfn+e}#R#Yv$+SM}kOJWbzVWGQ}}pBt0;Qt-bJ2 z{&U)TCBeLy{5x#d5uQzHbEJY#%wP4_xl4B>uuw34JUlDBdpG9LW<-S=>yd-;&`>^q^;}F zmmmJL{LB8a<#g5>cmv6E?{}5d(^5v*Hc7e~) z+4^ax0ef}g9Bb2Q_t?2Q^mW!xyvoXCX4X12I%=3ynKBAV#e|NZSc$wP5>01IiJ%c0 zLB8C&b{gW83O1(ekUkSr4Vf?rItB$_1VydsniL`^Q9=jGo)JNoMJW>z{k?9C#mvBn zF}%Xm01KQyhQqZT6%h)?2U`Z~9bljJ`Mh5AhmO{%qNrga<1ZKd7|emVo(OCu2_O76 z+>3Q8PkTpJieIW51E@kanwqYuqO(<{Smh_}?!p+0r}4n=8$bD5??TLA3B=g)sI>24 zUuxX@zT>;fO|E=&mk`<%_5<0)3ueoNMv|q)qMek(eq+2s8+reOF3tSn4X5z$eWF|z z;l{Pk?}^l~V(rCBOtU?)oE_i!D(XpEAwU{izQBP$1PzLOzn?6q=J|QwScnCd`)%&3 zQ}B6Zja-x@V5gGF!PL%eVF#Z4*gNH=mcff8l4%8d(+j!7pp-xw?`reuH-DTw9g2)T zPW_~~UoN#IS#TG0B3h&V)013dkE!1^ShX=i}q$5oQfqmIj1ewn})+IZm?8=ctp_Ak9*5#87TGOY<) z?ZtVrM>J_^MM3M(cuFCyS+>4eGaGZTz;E6nxDRhVr>N$^^7lVTTWW&DB>m=)xY2n7 zgisYQ#@=sy;y`Uc@{gghxviaxu8 zkZWV5r-(x7Nn%VIx{hzydnNaEEUzeEOgY>k`Z@NFsRqOnuU48%hdid#Rrrv-7-iF? zo@T1SP*f>z4GW>K9BwYeO7D52iP1-?S#h&o+l92|LTGQM^-5*c^Ggh!SA*!Ss@+3x z^G!3p)i+<=K-qD`sbh5y0{F?E(uAUu5+&$pjn?Ad=3{{;Xn22#+Acj_)(($YV1Or3 z+hV9r!qCmKBLrSGK0SpbHw7e1kf39!ysoXVqQ1qa(SVLtgNfe53Yyny_LW2sOnMOn zABSp^X^w|u+9c34STDv!AsB<3OfViD_yN{@4?0KS{+~^QC)~N6kog8Cbhyk%yLSz? 
z3-~9VH1Nb=Z-x3CO{sC!9D+s!O#+bHzWj-oy=aPvV)~(e;O?EO66;r#9f~9yTl@BR zPhf^Ab?{%yk&WCALm_+AZq=r1#^6XqT8S`6={$jAvk;K_?;?Q^cQ>A10saxb*uVOp z92N>$xz+aYt3%n;udVs$sR~oXzM9y(?}38B*I}`?MvjcG4ds#ot61{62giwQy@Lp0 z*rzhk0-*AcCy5tbl^h>yWnl$~8sDV) zpQ6;iUGcr(?ES&{$5C2s7GFTqAJlRHwmzc_$yP1AM%`6(PHD`UT`TnciHO~jbAOk> zd&)`Ea~k%VMI!$_brg?BxqSiuY29BzD?(xVj6tVgSF|d!0nhV!eK1HCyCD994AeC{ zn??Al7&2BR%)uaIPO6!LoBqoW^GwQ(L_=(jue;jvQcJ(S{mI_+c4kSzrF3!R*f1jlhP^2GF>QMi4^23so-_x>qQ;?veI^}yHGRVx_+`S~_fJe`10qe4V7Ocq1oODo3Z%x~Fr5Dp`!IwIhQEwj5`c&-YQ!#A_rS z6+Td1FjFgfB}hGt1Wp<1WxyUwhJnCMx4Xst^fy>q4Tgz*Iz{<@lf=e$14vQX}h)YQ` ziDv!xww_D6NLWga5mhNn9m;-c_*&{UfEOcmqZ5Tm-M+CBgX83o<9KoINm^OSyAh)@ zqs^8pR|6EoFg<8RsK=Nj4Mu>Apc@w_S>LTG@o8z4qMwCc`-WL|&rgx6sI@>)_fm7H zQ^;m3VjvwQ*Pjub>E*Af@9@e+xoTF%O;yasJAqgkKRJ30OL&1zfqxE6V!mCtquz>^ zXJ!VL8`p}X5(_P(LmV;%34>yD3^3&5o1jc?>pNi73ALRnnp#_5oJ?@Rh)!+kc9#joKp5;Gae{+8~MQV-DwlKaiBa} z)S4&ob_=dK-h8ToMyle@W*f(dohla9j}>xKEky)_R@00P z!>RP*6`B5^n(>M|sQq>!#-XjvE}lpfg%wbhI#>|JB~bZ;)Yq^RHwXHn4CtPR{xBCJ zA-sVz)E^<>iXk>7dNvZ891LP9BAzlz9C{w{`82LJPV;53P-;IZ9_SB3jOKwCToe7ktRw%<567tFfRP2LX5y2^qR@6;|Wg1!<(_}EgWsWKSf`@ExYRWy$y^K=JG4?i;Y&c3!oNxdaBWk84Qc?-TWp6cDy!qbvu3xv+kCE0}w$i;CyGYD^ld)@Yrji_eO=ZAVPA`WC zrV10JQxbJZ-{{;VE0)sijcOBF39j-*p`E~2LsJ7n!h%N=YWvSBGQJSld~$jRp13yG zj?00(+mdT6gR_jo6BUF2nsXUZksp8HKVAd-XT@~dW^CkAGC+VHL zno6SkGRhg=-4)tE0{OFJxTG*b7CNWo!oU6EcIp*iYr57LQ zbrAIw2XZTP3G;g}a#ysncb}8;rPd*VeX$thSF;Y8Dl*NE5GSdZgtGrMSNa9&`0{e5 zt4=4o$}}(Hb6IpTUXhGU!rK;0LhXWzK079(R?{^$#9W>`o>;%vf+gIuY?V5Yic3aR z+R#GTV$GEHVjejSk+6#O``z4Ji0-xaY8kz`cjAUaqIprVh2&T_Ow&*rCgs-u1K3ri zczCj`eTfkVHu#ck{aq-kBAkiXG$M*j>Fp=6P`xz-u&VW$5+>S!F|WJUbeujdSU=A= z|9X00DaPo*l~+%-;O33Ol;C<#bu}ZZSsH4%aeCspt18;O1OqjU;me_FLre7+{{*dN zO@|M|2t#KO5WlTK<(b)5cUh0D=gngJhI=87ta6-fDqn>SBD@%mCg}Sb;Lha{&M5cQBjhcJ(CHt zU!dF2DIpVRf_+tBL~kPNuUlSZEwd)QS8HdQ5UOX-2;{CUGo>nixJwa56)&c-9OYr~OJ6 z#y(;9z6`lh9+RQ{gTUG@7U*Gv`+_|Q`Q)`Nys(i0f+2g~=qKo}sJ zF&^dZk90kJJ+tQ#eRh5K87HCl$G)=QF2v&uxFN&i+fb#y27r6@S3N$kws0yaE*O;N zu^y{v@HoVKTa?nUi8P(+woDr`)}17&P3L3ejyzdC9?T`*Ki)kfALl@4-e7B*oz~6< z7gx&wXjcWaD90&memJ8^O+FZvprDa_v?RTDw`$CRbW6=n8RP!+ZyN80DCa|T{{3~y z8_Ilhi7Qh#xAh{F(8!b;$;)I;_B_mP^8TdB^Dd-p27?1LPwXx^M3&Sn8$Hdi^FIkx06rMQDvD8 zmAQo9wCk66A#)~ePOzgW=qUZ)O!^UZtwOc}vo&^-tXxQ%vMfn;7_Xt@k`)_Sj2VjI zA#mYiZI};L2aciMd1l$ptcq%AnkF7zGf>Cp349;GEj@o#ewC~n-Yha*(=ChxIWe;) z%p};Fj7#W0xTU{(#if73VBd28?IlFjWNg`<>h_kHudceuwo9{u#n z&0jA&A+TY|7TT8|B_^E!7zq~v7mkrcQ;0=@q<3}iYv6iQ-e|4FvItR3e4sUe&`(@f^SB;O9pBr7NAoA;m@srM- zk8$Avtq=Q&P^=#6{u9lwvnw`9ReAmX?@l;@cKpIWu-!= zlMEMUgZ~{^nK^7MGQF#%OZ&gi`DXXqnNwqfQ&Y=*E~|<3NVB|-&T}4e8T-rPxpWqu zd}h*dK?*ZH>d7)5fL{VOrl$=JEB_c1Ek#1Q;i^9weC2ER+e0WjNTZLD-9HZ8{zd6) zrD$yVKsElt7lWE)*l5?GO}{I;--1XCU34LkE4Cl?Lj-5wNLXAoQ&{ev7L36_0DO-< zW3EC!8xm_YDlIFHpuPD!l(Uy_MBpjDnSUg3KLnoV(dvnNhjEMq_icwCIi>J1rbRph z)UPy2dV!3{bhFHOB=XOG_{vP`L>!&`;FiH}|7MOWC){tX?uREQCu?9LSy?iS0Cp@Q zFgG0^f2#bwbO??rz(9WC%_rH6@Ryd7tJ^%i!%mrGvintHdVL6@hTD=sbT17c?!u}w zS3{sc_!`lE*fMBoM}Z8Ki~GX%@?taQiM)#~H|FV$BuV(+8*8Q8KI4#bYzn0o-9kv! 
z)ta|$-c9T(U${i}*J0*--u}D|Pp5)>px=Fd_fWtFKz{@f*T=oTOjgvCmzCiQP9mFU z14w5U`9$3xB?46&Yl+1W>o?SpDhRbnJ9~%eeaTuXVb*}65dcA{YizVg+T7nSnLF@& zdY>`9*HvVdM5lfYHGa1CHeUmPHR1HY$IzBTY_a*Ut4f#Qz2*VM>7CyAvNC z90&>s80x;w#f{%vUVhs;_?>MA1`9SP;#Mp#Ed^U!bAOH&c?P@)gG#qI+-iw!gM~xRu3ip~)10v@kI--Q7KHIoC8a5K~O- zIM{K)YMSfor5QJZ+i?Ln!bJD9GRfRG@L84XBNr(|$Oe$5Uud!l!f>xDXM6!G8WF+=uD=Za1}hIgamZ8>5)*m9BL71s zfA1y|tVh)J!g}3ZnNghtdw4$*&r6|HO+n#m;9O+6u%U>H5H*jpHHbJIuzmm-#p?&*s|^Xru}>t z0Fd9H(<%$Ja&Z;>=5ANKLV7xL&Je=trmYQ0{0Gp&ewdDiZtd-#A^ot^P8OxH2dmir77(q z>i+8lDCf2bX1gy<`OSz_l9pc5q2L9I<_vK9`1w&~Z4p+ujdY%8PS))MNOu(#m0<7y zpX+R;kv0Ha_74c~rR+dCaKwKyMaBHu>mDT9WV?o#7ZnLiL!mWTd2)#z*>Gp3#yAVT ze&5+lEnKO;bs$iYf>FM0+a3Y<>`S>$+9w*Ockd7}NPBt!{j&N{F{<^*^{DVn>m$7X zZ6N^Yo}Pl}h-v{IHoZha{Qz>@;6pyA^`ZbGZ`3|af;F0VXh6j5J-LSgz*C~z-?k@D zY-vb}2y=gr7BOydNM~7XO*?*g}KOE$MWMYJ6 zx!nK}T#5+rJ*DF1Wpgs`;>7I~!YUP{Z9MAq z>VyBRTor`;%8HfBH_G;BwVz2JXcMIpvZNNX-h!sMr>=uVU`A?bbu0OJQdH>JBS! zlOK&EF?3mvvK8LqmgWtrqR*wHq9IfxnO?b^rdkv801@j|2}e8IlawR-b;9 zkmsuQ7z6|{%U9ZD=dRUyn|-R)%*{RL8d(8vOmP$K=SPd`=~?yk+~?bzX>2N1AkkK1 z4f~!U24k|IB>QYg+5^5|+J`16q zAkKB~jb6HhQHE7>x>rDg!TUNYLTE4tI;9*`*I)656#p~8qJwH zCNA)ZU--;vPrRayY~jAPD}h;V^#x*LcFA_q}wMjD8(p^A&dm>A;A7t#8ym|Hy#K(#LB@=EE~n-u z=(Kn1sZkX@86q!UZ9!H(nY(w{lSXztu@DP+dXo(%dTq%z$ z4$jW1QTpeH60SJ#4bFy1R6|}BPMX&o&Jd2Nh7p053`uMi2D~JTt zK!DTU&2BUQ%5pp$-hipWJrq5j!JI@V!livC%tWmI>1*@bUl=x+amPHCgyw~X9$K#sJdf*yq79eZJ6+qw(%c{5gGA0ro(dZz1T34N_#hL zC5`$s(3kCZ4>rn!rW0k4lrjHYlj|8^T4G5BVppQChb%h?XL3Pqj@#;=TI)}rtjk*t zY7Y)s>X=6hg-MXV9&2^*RCY>hhDZBL<4_&G4pjXOIbM2+8#^=3yaGdc`rFprR~A0g zd1)}ffjddQuh|q(hC`2+n#cX-N^6~FA>a+;efnlVYV~@W&53=46@Crmg0nYIW}Gw3 zz~D7ga27*wmYm@PsILI5TKQK7x=c7u@j_9{ga~ z1g&g*Lp5-(q`QIMS;G3yheWWp5tv$<2;5Q<`C%Mc(x5SAeRe7I@YW(9ee)J`s-^Mj z^_DNv+*ewzp*NuG5%tAZpJI3$MAw(ZXKy6$F{7D8`w4Eg@^6l~3+;lMFy4BNo^9kv zxC!CtADbo5Fleu*8CLZL)Eu`DYLg(iClr6Sue2Raz!yZS3(6z_G`K+E*gqEKt&f0Z z&L;#Ml;^ls0xtH0ISHP@0EO4RUiyrw3Ne7!PIKEW{O0xGIKP{UCl02bsi#kKX;OVR zYm)CK-dBLyocTqMdEU?G5!+8tI^jj!v5jcw2UREp9HRf~?x&!|#jT+8()hxptVh{h*dh*xga<$Hmx*e?kCw zWF|pHU{&Dxrp|do&i7-cs2i|AkuCap+1jR{+sMsZB2J?rD+(_>hW+OA5cTSZIi7x4 z_aR#_!pR1Uuv5>%8c${UViE`p1K?D@Fl1{WQG1E8WG73e6Q;=il;?v{h$c zSNAkQ_xvWkrdz6R+hZG1Od_I&3HS6Q(Z!c!9tMvOi)&5D+H*-D>c zyj;U-o_2;_PzW-l!Ar?~lp>7sh+#cdP#FDat$YFa6;&7i^fy1epBHw;sq>r1(LAPM ziZ@;Od%uSUwZL1!{lbUdF5RMLGk-ALp8jU~oheClj&@mzc0Hh@lHPREO>$JXRs-;F zy#SROfD^KIBJ#hJT`-{fiXd7GC}CTWlrmPK(-6pDVxl97M6Y*BwEmLN1u*jb)B5E> zBRtAcdGXfm>lmg;NbfLcvUtJ(^1TPPpF3ls$oK)^ zT4OOl>}&))wQbi8&1ANS^_b;9-(k;oX~hvd_LMLuWf&?w9&@TfW;7S*&nA5{hVp2|`}BZ69pJ~^CVW$K z$Nrh2z>XB^#Vj4A*xX^eZAqLnn+!NM5b1&36^hSBj@}q4RR9Zf;#y>sPH#WBV zAvw5!<-*zInFJle70|{c=4eWf5s{(Gal%-iemew;B6lw61U9M)FhWw!T0ET8Qj1iF zcQvgYSq0sTstyIOW(2vI?@vA4O&0oKO|JG|c-(&dv4=#R2oRG!n-uLY)DEYgfR|dx zAS%%+pky$h&p9k`_PSgDEio{56mHv;a%a__lYyF_J)^7)!vlky$L4fh`i-zahk`b| ztlm3QOl~h{j?3@Wnh$^&40#8G9LWIDjXvHMF$IPDNp5_{pN^>#l&3@I*5t&*ovoay zO0j*m*Qp(UxP|K2Sy(U}UK^CIt(h)WZb*AeQ5?5X#J4G(*-JvnPjb5b2D4%$UIfsJ z3l7ul{*AtRdz1b+W{q;)m9_}byX$8PVe$?uWd{#TzDS?jcJkt#3!9p-XTz2%jnUl7rO6w(L&WR)Uk`zk~! 
z>*9aQTyf;4WnHm*8$jKxw4?Slo>3VSc_6|`Q8h16oW4OZpoVIIG5PeIn{!H>RoPCB zZ*yHZTTW1l<2!s`26NJ(RJEFtoixlO$3qviIr5qZsp)-d2ZXw^S+#kslGOYUuYo~E zMH1;x3}+3Lfv>KDG_Zu2M4#3|uiNo-d_25V9$QkcZ^Ofb8mcjCN8ijk`~U`^96(Vv z&CZ1QgOb?vR?pDaayBWx{9<1PK#UoKUpXhH1KihUP0}CQG1E5FB*REoh}tGby!EFZ zvp)Uib$CgUlAJ8+b!p0BD7MT9N5wKhlLdu_YT~WI=OHw$QBrtb`>37(YG`X9A)&?A zo&nx|Ue3eCs;zhvf852zmmmHNVp=1b+yfx;n<*`8@Zvoi{vTf<`s)6j;6(~ZP4(VX z^T{nqWMt&`NWd%|@VzKkg~K{((fi}y)TB*8X3vv))cmw4h=mnju6?4BsARP3yc84| zC?zX<#`#N_BoqHVVIo}{!98zuRq;B@qU%_F0g2GZboff*Aev5v6 zaP7P9TT@||({80isD`oUd%iv>jkV`X2{Q+{z)tLCBX3hGTFv#e3%){3Hq!V#|qFF|HZ!Rc%dc8Hy5Hwd%e{(!UxxD6h#;FMVG#`&wz*_Ar&auGS={zg9W!T>(V7_KEM= z&uUb^{l-8IHa%^+dgkdmIt%adNJ&X=$GRb=W|o$nuhV}-Azu^X5p5hLbdTR1HdEEJV-_w%=; zDfJ&L4V7F3ar>YxmF~M$m8{A3P6`Y!(c-qM%>Ghv(6OE_@tD}WwPpqT@nG@&H9^yy zU{)UpDLY7XVm^Ftc{TRuzNd1CzKZ1LQehR9>F<_5xXi0Xd(}b&FX>}7?kXQAJ(n*+ zW@>8cvd}lzZKi#*t}B<5Rq?$#wct0V2H)$kk>K?CMq(L0i@|dU*$CR66>M9?6zl5+ zvuPiGt;;mhpj|25R}q`?d;8PI1D}UDgIsXKnozTKE*}}1`_p`5g+!9W3^8EN#>b}w zjLSj&G6jQ^`2b!iwX8uYaF#tzF8D;3vKd34feJicM|3(Utbcitpb{~y5X=~ z!Y+R|F(7FbzB(U=5!gh{s2QOyBEOU8yf&pqq5Nuz97+As->QjvD2KtvzrnJSH~!d2 zC+^Wnf)4@6?X*eJW(HrWzUI%#nttEm@-cJY3zAaL7g>~I{Z%`}N5{v6*A5Mu`ji51 z?41_5*l>dJS=m(w=hMriyPc3UV$?#qi(eKR?R9`^^XUA1Xwt+8NJfRO`~S_QA+OhR z@RvP=-4z5cRDtOA4|;1WmEUtvWnVSF>cI0WUzBiEf|54@l=aiGgHIM3SwKbJ;XlwJ zp-UzALM#iCYt(|`iqcu8Q*J{py~3?$phE4utaDq1vpIRmp!YaWg5=EzC;t_c%R?+`Sl8}$zj5qW)nbPP3B-k*un7Q z!#OYSiaM{i;z%nvz%*}Mt?hDELxjGK|$&a|}hESqvahB3V-PZY6|d@F?s2Bz|ZT=77>6kf|x>y{h)xM~TpdH6zV?aTCo zoT+LtsbOT(SjSIcSL{600d^jsS)cabaq&Tg3jz{!7WlU!tMMdgi}YwLZ>WoV&qe43 zX!F_l{xtN%S?MXy^U|u|E}*^EiN4*m73OS^GJvn-Hnpavv21cwmPbdT=SEnfQG5GV zQ$&CvC*Q+dYEc07kj~xw(vZOHV`g+= z6vc!Sb_TsR>D3IIs9J;dPH*#3m_%&vXya-h_IIXc0#$+FAMqDqOJr zp^9q7J4j22j|2>S<9PT74htqBQIHXlFjAFxLcW^(z5M~gS=j-IQK;?sqlrgnW)NZ! 
zovWUx4{|Wjyv|4e`b|{qSS0r_pg;C3vxY(;ix@Cz^z*;4Ztz?!qy2Ve(yo$3B4%M5 zEpMUp)-}#kXGokq{($#BWFFs(lKdPyL2B{Ly|1f+F<&E?}A zxaOB6xd$#^XIvaU`A{L@C=L`$-aL7sMJ>0~&?ldj?M24QUU@4^4qWArhw5o=%fOz# zKC1PV-Db_R zo4%l1!Vz;A!+AQbp`~fVs+s)m_C(Tj|C7l-P*n6DYP^OT(54qOw zAvD80%-v4X?T*Bx(O4i)S;er?ZH$xd;{LsS?;-p6lu5^hq-;;+vzF_JKv;b8OYmkr zIz1CSaRjfIYTERfWuXJXYDI?54=wr%hst603qJi}r|_#@%P`Wxv*-xYPpZfT`OdGl zxEd%+^(296lJCr5R`;ASHpL{SCK5RRS4{*4D9tCTO(eOMhFc#pISX*fs@(9Ml;N{t zX@lDaf>N3z>0S(x9Ee4lzHkZV&Z_PSl2bpdNAdM|7_9p>z*gt|X5ll2{@V+MEKHVj zBz{(fDf*oRoVH&)(v}j#S*a6Q>%lDQRtJBL#U5Qr3tOX;7>k^g2{^!(8P(<=*sv3# z@;VxPO{wFe@ud5tJwk>u`97z^ljObtiZ}uX*(xl`!E8af^Jo?FtkE=LEGlq{W4-y_ zuG9VuR@tY*gdF*K+)|Bb64$Rt2=gS;kzFP{HBMZURFi=bVA; zqW|3-Gpy5b+*)PtNrCo_dqkIS>c4-?!#*NLgN(; zjds+h5Tb8@ja?D10U0KIQd2mZ0@B1=r}`8M3O>KAS~mXxDH2NT_awBw~?uhruwT~&ErinYNG;ktd$eETF?150eBaM%b_eVq;PJ?({uMC#( z|3tW`GR;PG%2B)bj`99;K%ByeR;7I3)%UfyCx~_gvHiOVt*cLa*D`JzNdRvW((a(* z=LEDE)Hm0m)8Fa2x}zAiN?0^Yp%KfWFA>m}->cb*4vTs=(z3(QjdC$BQF8eG9!EDa zF!pogb5}u0Z7+w_=xkH*D|MpL6BX$g?|wt>&qm%Uqpc^pNYH#yqx;|82RH6o>@#M zgw^0eu^Iw;hlGLM9J<8aba}h??O|8rL?Mk$M(l3Br>Efl{=b(Y9U~I|6n=Ugexo3B z-yT|RMH*N>%Q_MtbN1okuOJ-t9Y^KX^D$Y|t+q6u%NglGsO{EJ>iwJPrMW(+5*v8E z31a%xtKhl%&5pQ!vZF3EwD#bFdr`RdJzfzD5TOW_zqr5to?Wi*jytTW<-ihYGd!?W zZk%UGY2FB8-u8Ar@s4XFatW26-~ol9vMI7kh>N3IBa!#`p8)>FMEcDsDJhDn6Q9lp z-Jy_HM-XP1@c^REN~;c|&PGmLyZo;3EZJx`9DTJXSr#Pt_2cGj6T=t?)s%$Q^~LH( z?RSOgn#+5wp{z+Z^{*~}*ZnBSOs|GuN#lKmCqKQ`AP7;Kl3)PTOuGn_Kkts5f~bB| z8xD(oJ_cybD@>aXi9o0G6^ic!yBK}-9HnN+j5_Ie0|H-0N<5vNvbbZct-q1Bgvx(U zX~sCH45;G+>utr(C(UYV*rdObabUiXImMoMNzdh4to5KreT8&Hy~TAadcy;IfgD(o zRi#~}gFxV(SlhSxqte}=R&_16Q-u~WMc#~rtFN=$8)J6!wa7R5*Kxu~<&#NpRFi3l z+$lsVux^P}D;KV#;=PQapPg@%NV{Rh@ylpW(PBd~aO$6#s~5D)NJ4r&byIhfU%0>B z(O-1Uf~@BZQLBG(S8?xZx2a$%!yYd-H5f#9lk&rYVyR$(5wg0tJLJuA$3*&Mn>z=P zQ9vAOL_~|*F03b0b0$5tWxGFB(mpea=0YxGX%{Kff_QD8t&Y&q(G{MSk(O=_oyW&3 zj<*Q@S=m0v)<5&3X*RId4ULAG=px>}xGpRyiSb!s;w}B_k{iM%vsPA>4L?1Ae&ed8 zMRJ!k+Ac}rN(9YTmG!QPv``MfveVSoJRKdbUCQE{2%0Xt8KIYbTQPhvSG)2uh~Ovc zpTIT>Qokps!h~KI@8YXKH z2&%Dw)%EFH+Owuxk{gPzPmft}-7CwRD}3G^ugA=tlJXHJVCVSLc^6%LyGfRyGWnaO zXPY`kV8y|@*Xp~|O?Jwrf3NG}Sh@~_c~XibGg(5pXgT&zgQKIpxhnVC)6dbe9(z;y z%F4_!vAxL))UD6HG-CcKs@xH%%@A|I0e)@;YnRCvZv(Mho{^auG*K?YL}$zBzg&zpWb3r^2#jxPyXM%Hl^e^}`%{TLEsPRlzfVl)5f`b{1ydeXN)Li0 zl_e?R{O7zqb8&mzPqce?p8NEp20BWKE{};#RpkTMv$vP!a)}VMvJC@D_=iHnfF}O; zepu9ZjaD)sdJ;GAj)gh1$u~3LDK}r0?5bAcYHG<(g26Oi4P8r17Du2%sK|5M{*IkO z2Wx(~y78mpJjwWoi0;W(mh<;nGaTQ~8;E&YwA{A1D8>75P(+SrpaHn<(Mt|GIeO~2 zyUwxI2%`ZX`pYLizR815EFN^kL2m;BXnh3u_}ZgPBnKnZ-dpUS_CA|Jxg?5`!cs56 z|1>2=)|x(H;pjUi)(tWGPCkqpN(yik_c51E43BxxylBZ_>{^=KG0c%&W&2~r zW7UDO^c?myG!1qfRESw~K`B+ATi^DC_C5>VHsFmw4asUxfgYB z-!n1gQXIS9t(U9@@q@t+nZofB#RRnCUMS@mm~uPhkGN*$CKYPZF7M!(SulOL#H#T~M4gj>z0w_~z|G%}pSZE@ec!8%%*@LFaSV0>}Vyp&4wRDj;1rH&zpS0L?c zxoo%bqiRBEos{t8O4MLkuL0$E4pA?=jW?v@;pf<*%rjKC@26Qa&Zo-u=gT2)7~g|w zYU5SgKV^HF{f>zWhST9}lA>4?%F2N3VnWHpmSe2vOfj6AE@PNTaM6|B8Wz5v(fN8Q zqiD#c4C8LWBT~e2QK!rzHU=RfE-w5wTw%aDOU(P4fu7-G#{rN=rbO6X!vDWu7&;8k z!;{=`wwhk}=lOg722sS$9VXfb7TDX3Vbjr;iYVEVr0_AZRX=#H2I4LhEl;vb zuOSl?oxbr4DR-#6yy?f_s3@ZD5nn+;*>=}EG$9RVYS_9Bdpn# z4;2w)S<}Re`{6>zb?;FyMH9cPZZkW{SQV^ho8}`r-OhiXYPnouaZ$01*r(l-g&Zs! 
zxz^j_^Dtccd>(hgM;21hc6k3WE?kMsL7`Fgf=s2OjQV(w8@2g|#%8(g&}Hn~J>joW zLuN`v1@y*@kzRDf-*_WGWG2S7*gS!>y6-E$&oGvT>1=OfnJK(qEh#NE(l>l_w*0R3 z?ZPJ!YlRe#yAMel#TJQ#rd9N?m7t^E2S0=&s8xU@0qp^mg`^1} zBDQlfRnXIVKA>tBIzznP$*+m_xp9YYhPb|l|1M1uhHXr%Cbc1+B96}?T=M|_dRH@2 zJB50)iQbPI;d!sLt}{`#-Xyjie>m;rvoCq3_Y_)xOx6G$HpH9_#3 z`qvn4ts$P1_rX()xu zC|DW@E+>{^Zve(emqsc2ow?l7>05X1OniAW5GpWs8PhVOf=Y#L=k?*uq`)LA4D*z>U^yZ7~@pMJ;8U-b#xVMipyYf=ftw4IM>ZI%n^*wfN z+h@|!s@AuxjBn|fejvC7qjI7Woqjjx!z&}~bM;yEa&NbrY$2PT&Bz7>%xoztFmWK47<Xnz@#VSM$PzLM zDMs}SGF+cIP1NKy$9rugWU!U%$mBa88;Y>P(2IGBYwjqGx1OM>m6pw4?nftKAlV_l zS<6oKK9LM{fBH_gge{C(j$7@sWjs}D^PPJm1!-?!KS-!lkUm#tW%nXkDm>20b=xe8 zo^pv!&FMlS^ZAxck1Zng;&GAx#zR4lkQ9lk=A#Q`ATiN%5mS9L38@8pKB;N}9qrJ2Gh}kF$>6Lw`uyVQWcdqx;qJ@+ux(uUQsG#Dbjy_o<`3_2Ltp{3R#Le0zUI=x~sZZQJdB-Mq`Qn`H7*A$3yc~6jdP+Jo5a}p$x`uZKQZ4 z!97G07?K~Nc3@$-Kj#76KgCvr!T!3nz9Q&0{-avUfi@?7_A3*9K2g!7!D-kK)eAK9 zffzx@y9i2?Ir@?hMHk%D|Eo?E-70&Nl+Nca5NA1p}kj=T7+)ublRjzaB$d zxR-=Bt8iV58+A3hN;q*^<5oP@`Am#_-T;?Ph4p}8GwzOk2Uk`Q2VJ-7nR+|YyEOINh5>!>NL- zxJ7JkA9HaWDBSNkt7dFl)3}C_n;4^qEQy)(vv9o#KPA$IJb~<;lB>+0F4*Ipl^{wE z(dunHSGgNzlA=+VU17Qx(Z$2qkgH?!Yr>@|jAa&u4h&3udY*xH#l;DYVNn4D(tw89 zrL6@D>c;kfin9(9j6@wXPRLV`5oMGuyOc4UuL|vTBBw%BLwnYcEcBjL5p&573Z;19 zCtoU@%=-IU#rIV>)Jyz{2z-*Pjbrg(j}Un1UdalJ_!afVpZJFL*7DjBj`r8EAC#7A zsqK_ad_ai6kxP}a7N%g&@+@|I1I-iiq*ZDm36e>V?ffSAxb(FmO+S1-)qFIk2}Yn! zv0YyLA^(JeeA(s=bnN2$sU=xNy&VrZ6ZsDX&zAOCpSA?NMvmK%R^dYO{I9k}J5HN* z?Prhn&Jfb_EJ^KJWX2$8<|>cfKRoM$=Lo+#lTx}yNiZVMA)ley-fsk{VI;S%M@GKE zmt>N5I-Db&X;0h*-F1C^z3D1^5AX6tYj<~Wjd{tJQHS35(AM{hU$T>1?;=zp!D&Lv z^v8%|>%}sMa0H{GiWv6CpLqD@Y_y9pRi_sZN7ZR?2E5d@C`2DnU8n8d`)sDre@fxi zn3(txCgl+uMy5+9mITNcOUjbEX~K)ECx`U!TC==+v%Xr!c5776hsve!)p>fap2-N3y<)+Z=Ej4Fx#slAN-`d0XZfom?|)H}0uaZk$t zi#U@;$*~Bzywhu7ge*5PQFT^a0#_L*z;k?l9xYs_zYI!X`Ny2lHl%6hwn|D$er7Fn zL{cK009x!hz(j7# z2a7GCwyZ=z3?pHd;3t~18~INk*oD+& zOb>(1bIBfFY)jnt^hSI~J%WXR>9LyvG0>;$6H9nD<6U4@GF0q@?2Nh@@g8j@;!QyS z{pXjgCR&Rf?CWzE?FpZ1Ij8VK@N^|sAX2-Cm2YyHlAi~%{WQ`k{JXGdrFvlIcB+k z30b`6XGUl^zhR^6MI>5sjl2Klt?_%qWKy*GygD(oGbnU0Tj}d|rRB8XgqNi87N~(l zvf%(KaU?J-fg+VW&6OTzy9n3URuYhBbAC`b$(?t8hd?`O!d%9 zqMAL?H-mhLd2OiPEP`$U-3w_asTa>7ydxhq5UA(96-{C+Xh_gZZhw`y)s_U4piA?j zvc?s9Ul;7gFhpBJQ~a%n$a|4mG|KN-+9mi@a`(=CqPDPQRSKBC8+@8cabtR7`@3T(LLgEc3|MGMha`^MhtFEPzL^; zMo$T&t8yVi!_+{{m?^7R9c!yDfvvhqq|YqFUL3a8KS{9dTtnLI$|c=>A4NxwAO40f*gIQdRK=blJbTvxW7uI8+q5?oaong z+zjga?vV-nC_e~)ulWjMPm(m#DY*HCPy-)t%Jv7u*4cPfjn1_(ynX_jkA5Ae z3lL;-h0V3k*4HO`+QhzqEjHgJHBX5?^MO?@TA8)=)+GXl*vR!!iyQqWz;`|_Hfsxc zT#SgKLH#bvG}M=RUY1GpcDpbgO(@UfEY7w{=y)dQl*{}<1fv?o%eE-H);*;|PMn|& zz!>?D`vW!szPYzLUo|8Ii0N<9BTndYj7z+Zpi)qvy%j3!vCBK&D&BjKHMTH_`mr?D zPh2^EZ0@%EkJv+5^0r&P@Y1Sjd^G5x8G$-f!|Y-+yFlB;_()ZGxs3X++XEc^< zD;Zn1O)LHjC83Q-_HjSkp~=MTS(VSzm5C3`@~h!pO;CExP{Bd}ai1UM{+~SMNvIz^ z5KD(TS1vQ9<9TrW@KG6hf@WI|L&Ersw{^MSFNZsx>9PGoie+$23`S^{O7^k5Q-(Iq zO%LRHUcc?1e7H(S@pg*93A0ZB}cCDe~&=govV z+caE~4b#Ay4yYwr%p6wqyZt^wM*_3?v~9>sX6{Zi)z7d-f&daVn$q z>^uZW5@S9B%Iz|@J*~cA9Dcm|!TlhXa-8d>J&Fx52`oY)O zUZVG=JYP?K90JdiR$!vHvLKzB?98co)$Y*D~IikdVMr814oufA&yJfrS!rUKclc2US(Ck&Skt zYt@_XuZC=E*l(Szahvb+$9w#)weit`D?OSgbL>fD`9!_;+HztbsT0 zJ{U*}=;Bb4ML!;-;83s^enp9B)24g(Q9k{T5x71gu#@u@W;Vp7Ek+B|^nFf-6eHAU zD0ben4S>Szj;?asC5I7_2)C{3Blw-1yH7NW>xfh7**uN5g5?!`lK(-^S2E8j9{4$n zW`}WTL3MyQ{XkohZNZ>hlOi=L`pwVR-l1FL!WNjNr{s^$%dV-Ci`inwqGnvGhme$A zm7T|~wA)^UDBe?s4&GfFM%B0CIVMx_cvoL#Ao z{0};|P=0YI6BLK$tz8BKnV+RSXVs1}RYvc@AbXbA&ZxhndOaQ$%Lh+vID8#61q~`o zdm9epoHEY%zrOtcr{jBlYjl7&`PcLRrELCxyVzU`XnQTTuAK&gKtk1BhW#agz8Mhm z3yNOT`A<)QYULT8;Prb_>=Lu0XdJfm#u2+$WE@1nY|`xC7uYqEi5lXfMlm_sAGKB7 
[... GIT binary patch data (base85-encoded binary literal) omitted ...]
literal 0 HcmV?d00001 From 128bbb9d861c2de62cd047925b03f28cf2649e2f Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 26 Apr 2021 21:19:39 +0100 Subject: [PATCH 0990/1270] DEV: general Gitpod enhancements (#18822) --- .github/workflows/docker.yml | 59 ++++++++++++++ .github/workflows/gitpod.yml | 55 +++++++++++++ .gitpod.yml | 63 +++++++++++++-- .hadolint.yaml | 7 ++ environment.yml | 2 +- tools/gitpod/Dockerfile | 141 ++++++++++++++++++++------------- tools/gitpod/gitpod.Dockerfile | 45 +++++++++++ tools/gitpod/settings.json | 9 +++ tools/gitpod/workspace_config | 58 ++++++++++++++ 9 files changed, 376 insertions(+), 63 deletions(-) create mode 100644 .github/workflows/docker.yml create mode 100644 .github/workflows/gitpod.yml create mode 100644 .hadolint.yaml create mode 100644 tools/gitpod/gitpod.Dockerfile create mode 100644 tools/gitpod/settings.json create mode 100644 tools/gitpod/workspace_config diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 000000000000..52d78a38d1a8 --- /dev/null +++
b/.github/workflows/docker.yml @@ -0,0 +1,59 @@ +name: Build Base Docker Image + +on: + push: + branches: + - main + paths: + - './environment.yml' + tags: + - '*' + +jobs: + build: + name: Build base Docker image + runs-on: ubuntu-latest + environment: numpy-dev + if: "github.repository_owner == 'numpy' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" + steps: + - name: Clone repository + uses: actions/checkout@v2 + - name: Lint Docker + uses: brpaz/hadolint-action@v1.2.1 + with: + dockerfile: ./tools/gitpod/Dockerfile + - name: Get refs + shell: bash + run: | + export raw_branch=${GITHUB_REF#refs/heads/} + echo "::set-output name=branch::${raw_branch//\//-}" + echo "::set-output name=date::$(date +'%Y%m%d')" + echo "::set-output name=sha8::$(echo ${GITHUB_SHA} | cut -c1-8)" + id: getrefs + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: ${{ runner.os }}-buildx- + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push + id: docker_build + uses: docker/build-push-action@v2 + with: + context: "." + file: "./tools/gitpod/Dockerfile" + push: ${{ github.event_name != 'pull_request' }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + tags: | + numpy/numpy-dev:${{ steps.getrefs.outputs.date }}-${{ steps.getrefs.outputs.branch}}-${{ steps.getrefs.outputs.sha8 }}, numpy/numpy-dev:latest + - name: Image digest + # Return details of the image build: sha and shell + run: echo ${{ steps.docker_build.outputs.digest }} \ No newline at end of file diff --git a/.github/workflows/gitpod.yml b/.github/workflows/gitpod.yml new file mode 100644 index 000000000000..55683bcae78d --- /dev/null +++ b/.github/workflows/gitpod.yml @@ -0,0 +1,55 @@ +name: Build Gitpod Docker image + +on: + push: + branches: + - main + +jobs: + build: + name: Build Gitpod Docker image + runs-on: ubuntu-latest + environment: numpy-dev + if: "github.repository_owner == 'numpy' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" + steps: + - name: Clone repository + uses: actions/checkout@v2 + - name: Lint Docker + uses: brpaz/hadolint-action@v1.2.1 + with: + dockerfile: ./tools/gitpod/gitpod.Dockerfile + - name: Get refs + shell: bash + run: | + export raw_branch=${GITHUB_REF#refs/heads/} + echo "::set-output name=branch::${raw_branch//\//-}" + echo "::set-output name=date::$(date +'%Y%m%d')" + echo "::set-output name=sha8::$(echo ${GITHUB_SHA} | cut -c1-8)" + id: getrefs + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: ${{ runner.os }}-buildx- + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push + id: docker_build + uses: docker/build-push-action@v2 + with: + context: "." 
+ file: "./tools/gitpod/gitpod.Dockerfile" + push: ${{ github.event_name != 'pull_request' }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + tags: | + numpy/numpy-gitpod:${{ steps.getrefs.outputs.date }}-${{ steps.getrefs.outputs.branch}}-${{ steps.getrefs.outputs.sha8 }}, numpy/numpy-gitpod:latest + - name: Image digest + # Return details of the image build: sha and shell + run: echo ${{ steps.docker_build.outputs.digest }} \ No newline at end of file diff --git a/.gitpod.yml b/.gitpod.yml index c1755607b626..dfbee831a33b 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -1,10 +1,63 @@ -image: thomasjpfan/numpy-gitpod:latest +# Rebuilding NumPy on init - rather than on prebuild: this ensures +# that even forks do have a usable freshly built NumPy +# Might delegate this later to prebuild with Q2 improvements on gitpod +# https://www.gitpod.io/docs/config-start-tasks/#configuring-the-terminal +# ------------------------------------------------------------------------- + +image: numpy/numpy-gitpod:latest tasks: - # The base image complied numpy with ccache enabled. This second build - # should be faster since it is using the cache. - - init: python setup.py build_ext -i + - name: Prepare development + init: | + mkdir -p .vscode + cp tools/gitpod/settings.json .vscode/settings.json + conda activate numpy-dev + python setup.py build_ext --inplace + echo "🛠 Completed rebuilding NumPy!! 🛠 " + echo "📖 Building docs 📖 " + git submodule update --init + cd doc + make html + echo "✨ Pre-build complete! You can close this terminal ✨ " + + +# -------------------------------------------------------- +# exposing ports for liveserve +ports: + - port: 5500 + onOpen: notify + +# -------------------------------------------------------- +# some useful extensions to have +vscode: + extensions: + - eamodio.gitlens + - njpwerner.autodocstring + - lextudio.restructuredtext + - ritwickdey.liveserver + - ms-python.python + - yzhang.markdown-all-in-one + - bungcip.better-toml + - mhutchie.git-graph +# -------------------------------------------------------- +# using prebuilds for the container - note: atm this only +# works for the NumPy repo +# With this configuration the prebuild will happen on push to master github: prebuilds: + # enable for main/default branch master: true - branches: true + # enable for other branches (defaults to false) + branches: false + # enable for pull requests coming from this repo (defaults to true) + pullRequests: false + # enable for pull requests coming from forks (defaults to false) + pullRequestsFromForks: false + # add a check to pull requests (defaults to true) + addCheck: false + # add a "Review in Gitpod" button as a comment to pull requests (defaults to false) + addComment: false + # add a "Review in Gitpod" button to the pull request's description (defaults to false) + addBadge: false + # add a label once the prebuild is ready to pull requests (defaults to false) + addLabel: false \ No newline at end of file diff --git a/.hadolint.yaml b/.hadolint.yaml new file mode 100644 index 000000000000..0188ba2cf627 --- /dev/null +++ b/.hadolint.yaml @@ -0,0 +1,7 @@ +--- +ignored: + - DL3006 + - DL3008 + - SC2016 + - DL3004 + - DL3007 \ No newline at end of file diff --git a/environment.yml b/environment.yml index 3acc3dda5992..19fe69a7973b 100644 --- a/environment.yml +++ b/environment.yml @@ -19,7 +19,7 @@ dependencies: - mypy=0.812 - typing_extensions # For building docs - - sphinx=3.5.2 + - sphinx=3.5.4 - numpydoc=1.1.0 - ipython - scipy 
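As an illustrative aside (not part of this patch), the "Get refs" step shared by the two workflows above builds the Docker image tag from the run date, the branch name with ``/`` replaced by ``-``, and the first 8 characters of the commit SHA. A minimal Python sketch of that tag logic, using the hypothetical helper name ``image_tag``, would be::

    # Sketch only -- mirrors the shell in the "Get refs" step:
    #   branch = ${GITHUB_REF#refs/heads/} with "/" replaced by "-"
    #   tag    = <YYYYMMDD>-<branch>-<first 8 chars of the SHA>
    from datetime import date

    def image_tag(github_ref, sha, today):
        # strip the "refs/heads/" prefix, then sanitize the branch name
        branch = github_ref[len("refs/heads/"):] if github_ref.startswith("refs/heads/") else github_ref
        branch = branch.replace("/", "-")
        return "{:%Y%m%d}-{}-{}".format(today, branch, sha[:8])

    print(image_tag("refs/heads/main", "128bbb9d861c2de62cd047925b03f28cf2649e2f", date(2021, 4, 26)))
    # -> 20210426-main-128bbb9d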
diff --git a/tools/gitpod/Dockerfile b/tools/gitpod/Dockerfile index b9c0d4449cb7..e2e0e1bc9571 100644 --- a/tools/gitpod/Dockerfile +++ b/tools/gitpod/Dockerfile @@ -1,74 +1,101 @@ -# Builds a development environment for gitpod by building numpy with -# ccache enabled. When gitpod is prebuilding or starting up it clones -# a branch into `/workspace/numpy`. The gitpod clone will build numpy -# faster because it is using compliers with ccache enabled. -FROM gitpod/workspace-base as clone - -COPY --chown=gitpod . /tmp/numpy_repo - -# We use a multistage build to create a shallow clone of the repo to avoid -# having the complete git history in the build stage and reducing the image -# size. During the build stage, the shallow clone is used to install the -# dependencies and build numpy to populate the cache used by ccache. Building -# numpy with setup.py uses versioneer.py which requires a git history. -RUN git clone --depth 1 file:////tmp/numpy_repo /tmp/numpy - -FROM gitpod/workspace-base as build - -# gitpod/workspace-base needs at least one file here -RUN touch /home/gitpod/.bashrc.d/empty +# +# Dockerfile for NumPy development +# +# Usage: +# ------- +# +# To make a local build of the container, from the 'Docker-dev' directory: +# docker build --rm -f "Dockerfile" -t "." +# +# To use the container use the following command. It assumes that you are in +# the root folder of the NumPy git repository, making it available as +# /home/numpy in the container. Whatever changes you make to that directory +# are visible in the host and container. +# The docker image is retrieved from the NumPy dockerhub repository +# +# docker run --rm -it -v $(pwd):/home/numpy numpy/numpy-dev: +# +# By default the container will activate the conda environment numpy-dev +# which contains all the dependencies needed for NumPy development +# +# To build NumPy run: python setup.py build_ext --inplace +# +# To run the tests use: python runtests.py +# +# This image is based on: Ubuntu 20.04 (focal) +# https://hub.docker.com/_/ubuntu/?tab=tags&name=focal +# OS/ARCH: linux/amd64 +FROM gitpod/workspace-base:latest ARG MAMBAFORGE_VERSION="4.10.0-0" ARG CONDA_ENV=numpy-dev -ENV CONDA_DIR=/home/gitpod/mambaforge3 -ENV PATH=$CONDA_DIR/bin:$PATH +# ---- Configure environment ---- +ENV CONDA_DIR=/home/gitpod/mambaforge3 \ + SHELL=/bin/bash +ENV PATH=${CONDA_DIR}/bin:$PATH \ + WORKSPACE=/workspace/numpy + + +# ----------------------------------------------------------------------------- +# ---- Creating as root - note: make sure to change to gitpod in the end ---- USER root -RUN install-packages texlive-latex-extra dvisvgm -USER gitpod + +# hadolint ignore=DL3008 +RUN apt-get update && \ + apt-get install -yq --no-install-recommends \ + ca-certificates \ + dirmngr \ + dvisvgm \ + gnupg \ + gpg-agent \ + texlive-latex-extra \ + vim && \ + # this needs to be done after installing dirmngr + apt-key adv --keyserver keyserver.ubuntu.com --recv-key C99B11DEB97541F0 && \ + apt-add-repository https://cli.github.com/packages && \ + apt-get install -yq --no-install-recommends \ + gh && \ + locale-gen en_US.UTF-8 && \ + apt-get clean && \ + rm -rf /var/cache/apt/* &&\ + rm -rf /var/lib/apt/lists/* &&\ + rm -rf /tmp/* # Allows this Dockerfile to activate conda environments SHELL ["/bin/bash", "--login", "-o", "pipefail", "-c"] -# Install mambaforge3 +# ----------------------------------------------------------------------------- +# ---- Installing mamba ---- RUN wget -q -O mambaforge3.sh \ - 
https://github.com/conda-forge/miniforge/releases/download/$MAMBAFORGE_VERSION/Mambaforge-$MAMBAFORGE_VERSION-Linux-x86_64.sh && \ - bash mambaforge3.sh -p $CONDA_DIR -b && \ + "https://github.com/conda-forge/miniforge/releases/download/$MAMBAFORGE_VERSION/Mambaforge-$MAMBAFORGE_VERSION-Linux-x86_64.sh" && \ + bash mambaforge3.sh -p ${CONDA_DIR} -b && \ rm mambaforge3.sh -# makes conda activate command for this Dockerfile -RUN echo ". $CONDA_DIR/etc/profile.d/conda.sh" >> ~/.profile -# enables conda for interactive sessions -RUN conda init bash +# ----------------------------------------------------------------------------- +# ---- Copy needed files ---- +# basic workspace configurations +COPY ./tools/gitpod/workspace_config /usr/local/bin/workspace_config -# Install numpy dev dependencies -COPY --from=clone --chown=gitpod /tmp/numpy /workspace/numpy -RUN mamba env create -f /workspace/numpy/environment.yml -n $CONDA_ENV && \ - conda activate $CONDA_ENV && \ - mamba install ccache -y && \ - conda clean --all -f -y +RUN chmod a+rx /usr/local/bin/workspace_config && \ + workspace_config -# Set up ccache for compilers for this Dockerfile and interactino sessions -# Using `conda env config vars set` does not work with Docker -# REF: https://github.com/conda-forge/compilers-feedstock/issues/31 -RUN echo "conda activate $CONDA_ENV" >> ~/.startuprc && \ - echo "export CC=\"ccache \$CC\"" >> ~/.startuprc && \ - echo "export CXX=\"ccache \$CXX\"" >> ~/.startuprc && \ - echo "export F77=\"ccache \$F77\"" >> ~/.startuprc && \ - echo "export F90=\"ccache \$F90\"" >> ~/.startuprc && \ - echo "export GFORTRAN=\"ccache \$GFORTRAN\"" >> ~/.startuprc && \ - echo "export FC=\"ccache \$FC\"" >> ~/.startuprc && \ - echo "source ~/.startuprc" >> ~/.profile && \ - echo "source ~/.startuprc" >> ~/.bashrc +# Copy conda environment file into the container - this needs to exists inside +# the container to create a conda environment from it +COPY environment.yml /tmp/environment.yml -# Build numpy to populate the cache used by ccache -RUN python /workspace/numpy/setup.py build_ext -i && \ - ccache -s - -# .gitpod.yml is configured to install numpy from /workspace/numpy -RUN echo "export PYTHONPATH=/workspace/numpy" >> ~/.bashrc +# ----------------------------------------------------------------------------- +# ---- Create conda environment ---- +# Install NumPy dependencies +RUN mamba env create -f /tmp/environment.yml && \ + conda activate ${CONDA_ENV} && \ + mamba install ccache -y && \ + # needed for docs rendering later on + python -m pip install --no-cache-dir sphinx-autobuild && \ + conda clean --all -f -y && \ + rm -rf /tmp/* -# gitpod will load the repository into /workspace/numpy. We remove the -# directoy from the image to prevent conflicts -RUN sudo rm -rf /workspace/numpy +# ----------------------------------------------------------------------------- +# Always make sure we are not root +USER gitpod \ No newline at end of file diff --git a/tools/gitpod/gitpod.Dockerfile b/tools/gitpod/gitpod.Dockerfile new file mode 100644 index 000000000000..ad731fd63d01 --- /dev/null +++ b/tools/gitpod/gitpod.Dockerfile @@ -0,0 +1,45 @@ +# Doing a local shallow clone - keeps the container secure +# and much slimmer than using COPY directly or making a +# remote clone +ARG BASE_CONTAINER="numpy/numpy-dev:latest" +FROM gitpod/workspace-base:latest as clone + +COPY --chown=gitpod . 
/tmp/numpy_repo +RUN git clone --depth 1 file:////tmp/numpy_repo /tmp/numpy + +# ----------------------------------------------------------------------------- +# Using the numpy-dev Docker image as a base +# This way, we ensure we have all the needed compilers and dependencies +# while reducing the build time +FROM ${BASE_CONTAINER} as build + +# ----------------------------------------------------------------------------- +USER root + +# ----------------------------------------------------------------------------- +# ---- ENV variables ---- +# ---- Directories needed ---- +ENV WORKSPACE=/workspace/numpy/ \ + CONDA_ENV=numpy-dev + +# Allows this Dockerfile to activate conda environments +SHELL ["/bin/bash", "--login", "-o", "pipefail", "-c"] + +# Copy over the shallow clone +COPY --from=clone --chown=gitpod /tmp/numpy ${WORKSPACE} + +# Everything happens in the /workspace/numpy directory +WORKDIR ${WORKSPACE} + +# Build numpy to populate the cache used by ccache +RUN conda activate ${CONDA_ENV} && \ + python setup.py build_ext --inplace && \ + ccache -s + +# Gitpod will load the repository into /workspace/numpy. We remove the +# directoy from the image to prevent conflicts +RUN rm -rf ${WORKSPACE} + +# ----------------------------------------------------------------------------- +# Always return to non privileged user +USER gitpod diff --git a/tools/gitpod/settings.json b/tools/gitpod/settings.json new file mode 100644 index 000000000000..8f070c04c05a --- /dev/null +++ b/tools/gitpod/settings.json @@ -0,0 +1,9 @@ +{ + "restructuredtext.languageServer.disabled": true, + "restructuredtext.builtDocumentationPath": "${workspaceRoot}/doc/build/html", + "restructuredtext.confPath": "", + "restructuredtext.updateOnTextChanged": "true", + "restructuredtext.updateDelay": 300, + "restructuredtext.linter.disabled": true, + "python.pythonPath": "/home/gitpod/mambaforge3/envs/numpy-dev/bin/python" +} \ No newline at end of file diff --git a/tools/gitpod/workspace_config b/tools/gitpod/workspace_config new file mode 100644 index 000000000000..aa859c9be4d0 --- /dev/null +++ b/tools/gitpod/workspace_config @@ -0,0 +1,58 @@ +#!/bin/bash +# Basic configurations for the workspace + +set -e + +# gitpod/workspace-base needs at least one file here +touch /home/gitpod/.bashrc.d/empty + +# Add git aliases +git config --global alias.co checkout +git config --global alias.ci commit +git config --global alias.st status +git config --global alias.br branch +git config --global alias.hist "log --pretty=format:'%h %ad | %s%d [%an]' --graph --date=short" +git config --global alias.type 'cat-file -t' +git config --global alias.dump 'cat-file -p' + +# Enable basic vim defaults in ~/.vimrc +echo "filetype plugin indent on" >>~/.vimrc +echo "set colorcolumn=80" >>~/.vimrc +echo "set number" >>~/.vimrc +echo "syntax enable" >>~/.vimrc + +# Vanity custom bash prompt - makes it more legible +echo "PS1='\[\e]0;\u \w\a\]\[\033[01;36m\]\u\[\033[m\] > \[\033[38;5;141m\]\w\[\033[m\] \\$ '" >>~/.bashrc + +# Enable prompt color in the skeleton .bashrc +# hadolint ignore=SC2016 +sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc + +# .gitpod.yml is configured to install NumPy from /workspace/numpy +echo "export PYTHONPATH=${WORKSPACE}" >>~/.bashrc + +# make conda activate command available from /bin/bash (login and interactive) +if [[ ! -f "/etc/profile.d/conda.sh" ]]; then + ln -s ${CONDA_DIR}/etc/profile.d/conda.sh /etc/profile.d/conda.sh +fi +echo ". 
${CONDA_DIR}/etc/profile.d/conda.sh" >>~/.bashrc +echo "conda activate numpy-dev" >>~/.bashrc + +# Enable prompt color in the skeleton .bashrc +# hadolint ignore=SC2016 +sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc + +# .gitpod.yml is configured to install numpy from /workspace/numpy +echo "export PYTHONPATH=/workspace/numpy" >>~/.bashrc + +# Set up ccache for compilers for this Dockerfile +# REF: https://github.com/conda-forge/compilers-feedstock/issues/31 +echo "conda activate numpy-dev" >>~/.startuprc +echo "export CC=\"ccache \$CC\"" >>~/.startuprc +echo "export CXX=\"ccache \$CXX\"" >>~/.startuprc +echo "export F77=\"ccache \$F77\"" >>~/.startuprc +echo "export F90=\"ccache \$F90\"" >>~/.startuprc +echo "export GFORTRAN=\"ccache \$GFORTRAN\"" >>~/.startuprc +echo "export FC=\"ccache \$FC\"" >>~/.startuprc +echo "source ~/.startuprc" >>~/.profile +echo "source ~/.startuprc" >>~/.bashrc From d5fd9cc4927c19eaabf576e1c043b261799657a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nico=20Schl=C3=B6mer?= Date: Mon, 26 Apr 2021 22:23:25 +0200 Subject: [PATCH 0991/1270] change CRLF line terminators to Unix --- .github/PULL_REQUEST_TEMPLATE.md | 32 +- .../upcoming_changes/17727.performance.rst | 14 +- doc/source/_static/numpylogo.svg | 46 +- doc/source/user/how-to-how-to.rst | 236 +++---- doc/source/user/how-to-io.rst | 656 +++++++++--------- doc/source/user/install.rst | 12 +- .../user/who_covid_19_sit_rep_time_series.csv | 228 +++--- numpy/core/src/common/npy_cpuinfo_parser.h | 524 +++++++------- 8 files changed, 874 insertions(+), 874 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 508c8c034869..704d2d16fd9c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,16 +1,16 @@ - + diff --git a/doc/release/upcoming_changes/17727.performance.rst b/doc/release/upcoming_changes/17727.performance.rst index 7b447a3b203d..5dd23b28572e 100755 --- a/doc/release/upcoming_changes/17727.performance.rst +++ b/doc/release/upcoming_changes/17727.performance.rst @@ -1,7 +1,7 @@ -Improved performance in integer division of NumPy arrays --------------------------------------------------------- -Integer division of NumPy arrays now uses `libdivide ` -when the divisor is a constant. With the usage of libdivde and -other minor optimizations, there is a large speedup. -The ``//`` operator and ``np.floor_divide`` makes use -of the new changes. +Improved performance in integer division of NumPy arrays +-------------------------------------------------------- +Integer division of NumPy arrays now uses `libdivide ` +when the divisor is a constant. With the usage of libdivde and +other minor optimizations, there is a large speedup. +The ``//`` operator and ``np.floor_divide`` makes use +of the new changes. diff --git a/doc/source/_static/numpylogo.svg b/doc/source/_static/numpylogo.svg index 5f0dac7007a1..a566851b8699 100644 --- a/doc/source/_static/numpylogo.svg +++ b/doc/source/_static/numpylogo.svg @@ -1,23 +1,23 @@ - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/source/user/how-to-how-to.rst b/doc/source/user/how-to-how-to.rst index a3be0729be58..16a2fc7a4acf 100644 --- a/doc/source/user/how-to-how-to.rst +++ b/doc/source/user/how-to-how-to.rst @@ -1,118 +1,118 @@ -.. 
_how-to-how-to: - -############################################################################## -How to write a NumPy how-to -############################################################################## - -How-tos get straight to the point -- they - - - answer a focused question, or - - narrow a broad question into focused questions that the user can - choose among. - -****************************************************************************** -A stranger has asked for directions... -****************************************************************************** - -**"I need to refuel my car."** - -****************************************************************************** -Give a brief but explicit answer -****************************************************************************** - - - `"Three kilometers/miles, take a right at Hayseed Road, it's on your left."` - -Add helpful details for newcomers ("Hayseed Road", even though it's the only -turnoff at three km/mi). But not irrelevant ones: - - - Don't also give directions from Route 7. - - Don't explain why the town has only one filling station. - -If there's related background (tutorial, explanation, reference, alternative -approach), bring it to the user's attention with a link ("Directions from Route 7," -"Why so few filling stations?"). - - -****************************************************************************** -Delegate -****************************************************************************** - - - `"Three km/mi, take a right at Hayseed Road, follow the signs."` - -If the information is already documented and succinct enough for a how-to, -just link to it, possibly after an introduction ("Three km/mi, take a right"). - -****************************************************************************** -If the question is broad, narrow and redirect it -****************************************************************************** - - **"I want to see the sights."** - -The `See the sights` how-to should link to a set of narrower how-tos: - -- Find historic buildings -- Find scenic lookouts -- Find the town center - -and these might in turn link to still narrower how-tos -- so the town center -page might link to - - - Find the court house - - Find city hall - -By organizing how-tos this way, you not only display the options for people -who need to narrow their question, you also have provided answers for users -who start with narrower questions ("I want to see historic buildings," "Which -way to city hall?"). - -****************************************************************************** -If there are many steps, break them up -****************************************************************************** - -If a how-to has many steps: - - - Consider breaking a step out into an individual how-to and linking to it. - - Include subheadings. They help readers grasp what's coming and return - where they left off. - -****************************************************************************** -Why write how-tos when there's Stack Overflow, Reddit, Gitter...? -****************************************************************************** - - - We have authoritative answers. - - How-tos make the site less forbidding to non-experts. - - How-tos bring people into the site and help them discover other information - that's here . - - Creating how-tos helps us see NumPy usability through new eyes. 
- -****************************************************************************** -Aren't how-tos and tutorials the same thing? -****************************************************************************** - -People use the terms "how-to" and "tutorial" interchangeably, but we draw a -distinction, following Daniele Procida's `taxonomy of documentation`_. - - .. _`taxonomy of documentation`: https://documentation.divio.com/ - -Documentation needs to meet users where they are. `How-tos` offer get-it-done -information; the user wants steps to copy and doesn't necessarily want to -understand NumPy. `Tutorials` are warm-fuzzy information; the user wants a -feel for some aspect of NumPy (and again, may or may not care about deeper -knowledge). - -We distinguish both tutorials and how-tos from `Explanations`, which are -deep dives intended to give understanding rather than immediate assistance, -and `References`, which give complete, autoritative data on some concrete -part of NumPy (like its API) but aren't obligated to paint a broader picture. - -For more on tutorials, see the `tutorial how-to`_. - -.. _`tutorial how-to`: https://github.com/numpy/numpy-tutorials/blob/master/tutorial_style.ipynb - - -****************************************************************************** -Is this page an example of a how-to? -****************************************************************************** - -Yes -- until the sections with question-mark headings; they explain rather -than giving directions. In a how-to, those would be links. +.. _how-to-how-to: + +############################################################################## +How to write a NumPy how-to +############################################################################## + +How-tos get straight to the point -- they + + - answer a focused question, or + - narrow a broad question into focused questions that the user can + choose among. + +****************************************************************************** +A stranger has asked for directions... +****************************************************************************** + +**"I need to refuel my car."** + +****************************************************************************** +Give a brief but explicit answer +****************************************************************************** + + - `"Three kilometers/miles, take a right at Hayseed Road, it's on your left."` + +Add helpful details for newcomers ("Hayseed Road", even though it's the only +turnoff at three km/mi). But not irrelevant ones: + + - Don't also give directions from Route 7. + - Don't explain why the town has only one filling station. + +If there's related background (tutorial, explanation, reference, alternative +approach), bring it to the user's attention with a link ("Directions from Route 7," +"Why so few filling stations?"). + + +****************************************************************************** +Delegate +****************************************************************************** + + - `"Three km/mi, take a right at Hayseed Road, follow the signs."` + +If the information is already documented and succinct enough for a how-to, +just link to it, possibly after an introduction ("Three km/mi, take a right"). 
+ +****************************************************************************** +If the question is broad, narrow and redirect it +****************************************************************************** + + **"I want to see the sights."** + +The `See the sights` how-to should link to a set of narrower how-tos: + +- Find historic buildings +- Find scenic lookouts +- Find the town center + +and these might in turn link to still narrower how-tos -- so the town center +page might link to + + - Find the court house + - Find city hall + +By organizing how-tos this way, you not only display the options for people +who need to narrow their question, you also have provided answers for users +who start with narrower questions ("I want to see historic buildings," "Which +way to city hall?"). + +****************************************************************************** +If there are many steps, break them up +****************************************************************************** + +If a how-to has many steps: + + - Consider breaking a step out into an individual how-to and linking to it. + - Include subheadings. They help readers grasp what's coming and return + where they left off. + +****************************************************************************** +Why write how-tos when there's Stack Overflow, Reddit, Gitter...? +****************************************************************************** + + - We have authoritative answers. + - How-tos make the site less forbidding to non-experts. + - How-tos bring people into the site and help them discover other information + that's here . + - Creating how-tos helps us see NumPy usability through new eyes. + +****************************************************************************** +Aren't how-tos and tutorials the same thing? +****************************************************************************** + +People use the terms "how-to" and "tutorial" interchangeably, but we draw a +distinction, following Daniele Procida's `taxonomy of documentation`_. + + .. _`taxonomy of documentation`: https://documentation.divio.com/ + +Documentation needs to meet users where they are. `How-tos` offer get-it-done +information; the user wants steps to copy and doesn't necessarily want to +understand NumPy. `Tutorials` are warm-fuzzy information; the user wants a +feel for some aspect of NumPy (and again, may or may not care about deeper +knowledge). + +We distinguish both tutorials and how-tos from `Explanations`, which are +deep dives intended to give understanding rather than immediate assistance, +and `References`, which give complete, autoritative data on some concrete +part of NumPy (like its API) but aren't obligated to paint a broader picture. + +For more on tutorials, see the `tutorial how-to`_. + +.. _`tutorial how-to`: https://github.com/numpy/numpy-tutorials/blob/master/tutorial_style.ipynb + + +****************************************************************************** +Is this page an example of a how-to? +****************************************************************************** + +Yes -- until the sections with question-mark headings; they explain rather +than giving directions. In a how-to, those would be links. diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst index ca9fc41f07f8..d238ccbb605e 100644 --- a/doc/source/user/how-to-io.rst +++ b/doc/source/user/how-to-io.rst @@ -1,328 +1,328 @@ -.. 
_how-to-io: - -############################################################################## -Reading and writing files -############################################################################## - -This page tackles common applications; for the full collection of I/O -routines, see :ref:`routines.io`. - - -****************************************************************************** -Reading text and CSV_ files -****************************************************************************** - -.. _CSV: https://en.wikipedia.org/wiki/Comma-separated_values - -With no missing values -============================================================================== - -Use :func:`numpy.loadtxt`. - -With missing values -============================================================================== - -Use :func:`numpy.genfromtxt`. - -:func:`numpy.genfromtxt` will either - - - return a :ref:`masked array` - **masking out missing values** (if ``usemask=True``), or - - - **fill in the missing value** with the value specified in - ``filling_values`` (default is ``np.nan`` for float, -1 for int). - -With non-whitespace delimiters ------------------------------------------------------------------------------- -:: - - >>> print(open("csv.txt").read()) # doctest: +SKIP - 1, 2, 3 - 4,, 6 - 7, 8, 9 - - -Masked-array output -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:: - - >>> np.genfromtxt("csv.txt", delimiter=",", usemask=True) # doctest: +SKIP - masked_array( - data=[[1.0, 2.0, 3.0], - [4.0, --, 6.0], - [7.0, 8.0, 9.0]], - mask=[[False, False, False], - [False, True, False], - [False, False, False]], - fill_value=1e+20) - -Array output -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:: - - >>> np.genfromtxt("csv.txt", delimiter=",") # doctest: +SKIP - array([[ 1., 2., 3.], - [ 4., nan, 6.], - [ 7., 8., 9.]]) - -Array output, specified fill-in value -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:: - - >>> np.genfromtxt("csv.txt", delimiter=",", dtype=np.int8, filling_values=99) # doctest: +SKIP - array([[ 1, 2, 3], - [ 4, 99, 6], - [ 7, 8, 9]], dtype=int8) - -Whitespace-delimited -------------------------------------------------------------------------------- - -:func:`numpy.genfromtxt` can also parse whitespace-delimited data files -that have missing values if - -* **Each field has a fixed width**: Use the width as the `delimiter` argument. - :: - - # File with width=4. The data does not have to be justified (for example, - # the 2 in row 1), the last column can be less than width (for example, the 6 - # in row 2), and no delimiting character is required (for instance 8888 and 9 - # in row 3) - - >>> f = open("fixedwidth.txt").read() # doctest: +SKIP - >>> print(f) # doctest: +SKIP - 1 2 3 - 44 6 - 7 88889 - - # Showing spaces as ^ - >>> print(f.replace(" ","^")) # doctest: +SKIP - 1^^^2^^^^^^3 - 44^^^^^^6 - 7^^^88889 - - >>> np.genfromtxt("fixedwidth.txt", delimiter=4) # doctest: +SKIP - array([[1.000e+00, 2.000e+00, 3.000e+00], - [4.400e+01, nan, 6.000e+00], - [7.000e+00, 8.888e+03, 9.000e+00]]) - -* **A special value (e.g. "x") indicates a missing field**: Use it as the - `missing_values` argument. 
- :: - - >>> print(open("nan.txt").read()) # doctest: +SKIP - 1 2 3 - 44 x 6 - 7 8888 9 - - >>> np.genfromtxt("nan.txt", missing_values="x") # doctest: +SKIP - array([[1.000e+00, 2.000e+00, 3.000e+00], - [4.400e+01, nan, 6.000e+00], - [7.000e+00, 8.888e+03, 9.000e+00]]) - -* **You want to skip the rows with missing values**: Set - `invalid_raise=False`. - :: - - >>> print(open("skip.txt").read()) # doctest: +SKIP - 1 2 3 - 44 6 - 7 888 9 - - >>> np.genfromtxt("skip.txt", invalid_raise=False) # doctest: +SKIP - __main__:1: ConversionWarning: Some errors were detected ! - Line #2 (got 2 columns instead of 3) - array([[ 1., 2., 3.], - [ 7., 888., 9.]]) - - -* **The delimiter whitespace character is different from the whitespace that - indicates missing data**. For instance, if columns are delimited by ``\t``, - then missing data will be recognized if it consists of one - or more spaces. - :: - - >>> f = open("tabs.txt").read() # doctest: +SKIP - >>> print(f) # doctest: +SKIP - 1 2 3 - 44 6 - 7 888 9 - - # Tabs vs. spaces - >>> print(f.replace("\t","^")) # doctest: +SKIP - 1^2^3 - 44^ ^6 - 7^888^9 - - >>> np.genfromtxt("tabs.txt", delimiter="\t", missing_values=" +") # doctest: +SKIP - array([[ 1., 2., 3.], - [ 44., nan, 6.], - [ 7., 888., 9.]]) - -****************************************************************************** -Read a file in .npy or .npz format -****************************************************************************** - -Choices: - - - Use :func:`numpy.load`. It can read files generated by any of - :func:`numpy.save`, :func:`numpy.savez`, or :func:`numpy.savez_compressed`. - - - Use memory mapping. See `numpy.lib.format.open_memmap`. - -****************************************************************************** -Write to a file to be read back by NumPy -****************************************************************************** - -Binary -=============================================================================== - -Use -:func:`numpy.save`, or to store multiple arrays :func:`numpy.savez` -or :func:`numpy.savez_compressed`. - -For :ref:`security and portability `, set -``allow_pickle=False`` unless the dtype contains Python objects, which -requires pickling. - -Masked arrays :any:`can't currently be saved `, -nor can other arbitrary array subclasses. - -Human-readable -============================================================================== - -:func:`numpy.save` and :func:`numpy.savez` create binary files. To **write a -human-readable file**, use :func:`numpy.savetxt`. The array can only be 1- or -2-dimensional, and there's no ` savetxtz` for multiple files. - -Large arrays -============================================================================== - -See :ref:`how-to-io-large-arrays`. - -****************************************************************************** -Read an arbitrarily formatted binary file ("binary blob") -****************************************************************************** - -Use a :doc:`structured array `. 
- -**Example:** - -The ``.wav`` file header is a 44-byte block preceding ``data_size`` bytes of the -actual sound data:: - - chunk_id "RIFF" - chunk_size 4-byte unsigned little-endian integer - format "WAVE" - fmt_id "fmt " - fmt_size 4-byte unsigned little-endian integer - audio_fmt 2-byte unsigned little-endian integer - num_channels 2-byte unsigned little-endian integer - sample_rate 4-byte unsigned little-endian integer - byte_rate 4-byte unsigned little-endian integer - block_align 2-byte unsigned little-endian integer - bits_per_sample 2-byte unsigned little-endian integer - data_id "data" - data_size 4-byte unsigned little-endian integer - -The ``.wav`` file header as a NumPy structured dtype:: - - wav_header_dtype = np.dtype([ - ("chunk_id", (bytes, 4)), # flexible-sized scalar type, item size 4 - ("chunk_size", "`_.) - -.. _how-to-io-large-arrays: - -****************************************************************************** -Write or read large arrays -****************************************************************************** - -**Arrays too large to fit in memory** can be treated like ordinary in-memory -arrays using memory mapping. - -- Raw array data written with :func:`numpy.ndarray.tofile` or - :func:`numpy.ndarray.tobytes` can be read with :func:`numpy.memmap`:: - - array = numpy.memmap("mydata/myarray.arr", mode="r", dtype=np.int16, shape=(1024, 1024)) - -- Files output by :func:`numpy.save` (that is, using the numpy format) can be read - using :func:`numpy.load` with the ``mmap_mode`` keyword argument:: - - large_array[some_slice] = np.load("path/to/small_array", mmap_mode="r") - -Memory mapping lacks features like data chunking and compression; more -full-featured formats and libraries usable with NumPy include: - -* **HDF5**: `h5py `_ or `PyTables `_. -* **Zarr**: `here `_. -* **NetCDF**: :class:`scipy.io.netcdf_file`. - -For tradeoffs among memmap, Zarr, and HDF5, see -`pythonspeed.com `_. - -****************************************************************************** -Write files for reading by other (non-NumPy) tools -****************************************************************************** - -Formats for **exchanging data** with other tools include HDF5, Zarr, and -NetCDF (see :ref:`how-to-io-large-arrays`). - -****************************************************************************** -Write or read a JSON file -****************************************************************************** - -NumPy arrays are **not** directly -`JSON serializable `_. - - -.. _how-to-io-pickle-file: - -****************************************************************************** -Save/restore using a pickle file -****************************************************************************** - -Avoid when possible; :doc:`pickles ` are not secure -against erroneous or maliciously constructed data. - -Use :func:`numpy.save` and :func:`numpy.load`. Set ``allow_pickle=False``, -unless the array dtype includes Python objects, in which case pickling is -required. - -****************************************************************************** -Convert from a pandas DataFrame to a NumPy array -****************************************************************************** - -See :meth:`pandas.DataFrame.to_numpy`. 
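As an illustrative sketch only (assuming pandas is installed; nothing below is part of the diff), the link-only JSON, pickle-free ``.npy``, and pandas sections above boil down to::

    import json
    import numpy as np
    import pandas as pd

    a = np.arange(6.0).reshape(2, 3)

    # JSON: ndarrays are not JSON serializable, so go through nested lists.
    restored = np.array(json.loads(json.dumps(a.tolist())))

    # .npy without pickling -- fine for plain numeric dtypes.
    np.save("a.npy", a, allow_pickle=False)
    b = np.load("a.npy", allow_pickle=False)

    # pandas DataFrame -> NumPy array.
    c = pd.DataFrame(a, columns=["x", "y", "z"]).to_numpy()

    assert (restored == a).all() and (b == a).all() and (c == a).all()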
- -****************************************************************************** - Save/restore using `~numpy.ndarray.tofile` and `~numpy.fromfile` -****************************************************************************** - -In general, prefer :func:`numpy.save` and :func:`numpy.load`. - -:func:`numpy.ndarray.tofile` and :func:`numpy.fromfile` lose information on -endianness and precision and so are unsuitable for anything but scratch -storage. - +.. _how-to-io: + +############################################################################## +Reading and writing files +############################################################################## + +This page tackles common applications; for the full collection of I/O +routines, see :ref:`routines.io`. + + +****************************************************************************** +Reading text and CSV_ files +****************************************************************************** + +.. _CSV: https://en.wikipedia.org/wiki/Comma-separated_values + +With no missing values +============================================================================== + +Use :func:`numpy.loadtxt`. + +With missing values +============================================================================== + +Use :func:`numpy.genfromtxt`. + +:func:`numpy.genfromtxt` will either + + - return a :ref:`masked array` + **masking out missing values** (if ``usemask=True``), or + + - **fill in the missing value** with the value specified in + ``filling_values`` (default is ``np.nan`` for float, -1 for int). + +With non-whitespace delimiters +------------------------------------------------------------------------------ +:: + + >>> print(open("csv.txt").read()) # doctest: +SKIP + 1, 2, 3 + 4,, 6 + 7, 8, 9 + + +Masked-array output +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:: + + >>> np.genfromtxt("csv.txt", delimiter=",", usemask=True) # doctest: +SKIP + masked_array( + data=[[1.0, 2.0, 3.0], + [4.0, --, 6.0], + [7.0, 8.0, 9.0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1e+20) + +Array output +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:: + + >>> np.genfromtxt("csv.txt", delimiter=",") # doctest: +SKIP + array([[ 1., 2., 3.], + [ 4., nan, 6.], + [ 7., 8., 9.]]) + +Array output, specified fill-in value +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:: + + >>> np.genfromtxt("csv.txt", delimiter=",", dtype=np.int8, filling_values=99) # doctest: +SKIP + array([[ 1, 2, 3], + [ 4, 99, 6], + [ 7, 8, 9]], dtype=int8) + +Whitespace-delimited +------------------------------------------------------------------------------- + +:func:`numpy.genfromtxt` can also parse whitespace-delimited data files +that have missing values if + +* **Each field has a fixed width**: Use the width as the `delimiter` argument. + :: + + # File with width=4. 
The data does not have to be justified (for example, + # the 2 in row 1), the last column can be less than width (for example, the 6 + # in row 2), and no delimiting character is required (for instance 8888 and 9 + # in row 3) + + >>> f = open("fixedwidth.txt").read() # doctest: +SKIP + >>> print(f) # doctest: +SKIP + 1 2 3 + 44 6 + 7 88889 + + # Showing spaces as ^ + >>> print(f.replace(" ","^")) # doctest: +SKIP + 1^^^2^^^^^^3 + 44^^^^^^6 + 7^^^88889 + + >>> np.genfromtxt("fixedwidth.txt", delimiter=4) # doctest: +SKIP + array([[1.000e+00, 2.000e+00, 3.000e+00], + [4.400e+01, nan, 6.000e+00], + [7.000e+00, 8.888e+03, 9.000e+00]]) + +* **A special value (e.g. "x") indicates a missing field**: Use it as the + `missing_values` argument. + :: + + >>> print(open("nan.txt").read()) # doctest: +SKIP + 1 2 3 + 44 x 6 + 7 8888 9 + + >>> np.genfromtxt("nan.txt", missing_values="x") # doctest: +SKIP + array([[1.000e+00, 2.000e+00, 3.000e+00], + [4.400e+01, nan, 6.000e+00], + [7.000e+00, 8.888e+03, 9.000e+00]]) + +* **You want to skip the rows with missing values**: Set + `invalid_raise=False`. + :: + + >>> print(open("skip.txt").read()) # doctest: +SKIP + 1 2 3 + 44 6 + 7 888 9 + + >>> np.genfromtxt("skip.txt", invalid_raise=False) # doctest: +SKIP + __main__:1: ConversionWarning: Some errors were detected ! + Line #2 (got 2 columns instead of 3) + array([[ 1., 2., 3.], + [ 7., 888., 9.]]) + + +* **The delimiter whitespace character is different from the whitespace that + indicates missing data**. For instance, if columns are delimited by ``\t``, + then missing data will be recognized if it consists of one + or more spaces. + :: + + >>> f = open("tabs.txt").read() # doctest: +SKIP + >>> print(f) # doctest: +SKIP + 1 2 3 + 44 6 + 7 888 9 + + # Tabs vs. spaces + >>> print(f.replace("\t","^")) # doctest: +SKIP + 1^2^3 + 44^ ^6 + 7^888^9 + + >>> np.genfromtxt("tabs.txt", delimiter="\t", missing_values=" +") # doctest: +SKIP + array([[ 1., 2., 3.], + [ 44., nan, 6.], + [ 7., 888., 9.]]) + +****************************************************************************** +Read a file in .npy or .npz format +****************************************************************************** + +Choices: + + - Use :func:`numpy.load`. It can read files generated by any of + :func:`numpy.save`, :func:`numpy.savez`, or :func:`numpy.savez_compressed`. + + - Use memory mapping. See `numpy.lib.format.open_memmap`. + +****************************************************************************** +Write to a file to be read back by NumPy +****************************************************************************** + +Binary +=============================================================================== + +Use +:func:`numpy.save`, or to store multiple arrays :func:`numpy.savez` +or :func:`numpy.savez_compressed`. + +For :ref:`security and portability `, set +``allow_pickle=False`` unless the dtype contains Python objects, which +requires pickling. + +Masked arrays :any:`can't currently be saved `, +nor can other arbitrary array subclasses. + +Human-readable +============================================================================== + +:func:`numpy.save` and :func:`numpy.savez` create binary files. To **write a +human-readable file**, use :func:`numpy.savetxt`. The array can only be 1- or +2-dimensional, and there's no ` savetxtz` for multiple files. + +Large arrays +============================================================================== + +See :ref:`how-to-io-large-arrays`. 
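+
+As a quick, illustrative sketch of the binary option above (the file name
+``example.npy`` is arbitrary), a save/load round trip looks like::
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> np.save("example.npy", a)                   # doctest: +SKIP
+    >>> np.load("example.npy", allow_pickle=False)  # doctest: +SKIP
+    array([[0, 1, 2],
+           [3, 4, 5]])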
+ +****************************************************************************** +Read an arbitrarily formatted binary file ("binary blob") +****************************************************************************** + +Use a :doc:`structured array `. + +**Example:** + +The ``.wav`` file header is a 44-byte block preceding ``data_size`` bytes of the +actual sound data:: + + chunk_id "RIFF" + chunk_size 4-byte unsigned little-endian integer + format "WAVE" + fmt_id "fmt " + fmt_size 4-byte unsigned little-endian integer + audio_fmt 2-byte unsigned little-endian integer + num_channels 2-byte unsigned little-endian integer + sample_rate 4-byte unsigned little-endian integer + byte_rate 4-byte unsigned little-endian integer + block_align 2-byte unsigned little-endian integer + bits_per_sample 2-byte unsigned little-endian integer + data_id "data" + data_size 4-byte unsigned little-endian integer + +The ``.wav`` file header as a NumPy structured dtype:: + + wav_header_dtype = np.dtype([ + ("chunk_id", (bytes, 4)), # flexible-sized scalar type, item size 4 + ("chunk_size", "`_.) + +.. _how-to-io-large-arrays: + +****************************************************************************** +Write or read large arrays +****************************************************************************** + +**Arrays too large to fit in memory** can be treated like ordinary in-memory +arrays using memory mapping. + +- Raw array data written with :func:`numpy.ndarray.tofile` or + :func:`numpy.ndarray.tobytes` can be read with :func:`numpy.memmap`:: + + array = numpy.memmap("mydata/myarray.arr", mode="r", dtype=np.int16, shape=(1024, 1024)) + +- Files output by :func:`numpy.save` (that is, using the numpy format) can be read + using :func:`numpy.load` with the ``mmap_mode`` keyword argument:: + + large_array[some_slice] = np.load("path/to/small_array", mmap_mode="r") + +Memory mapping lacks features like data chunking and compression; more +full-featured formats and libraries usable with NumPy include: + +* **HDF5**: `h5py `_ or `PyTables `_. +* **Zarr**: `here `_. +* **NetCDF**: :class:`scipy.io.netcdf_file`. + +For tradeoffs among memmap, Zarr, and HDF5, see +`pythonspeed.com `_. + +****************************************************************************** +Write files for reading by other (non-NumPy) tools +****************************************************************************** + +Formats for **exchanging data** with other tools include HDF5, Zarr, and +NetCDF (see :ref:`how-to-io-large-arrays`). + +****************************************************************************** +Write or read a JSON file +****************************************************************************** + +NumPy arrays are **not** directly +`JSON serializable `_. + + +.. _how-to-io-pickle-file: + +****************************************************************************** +Save/restore using a pickle file +****************************************************************************** + +Avoid when possible; :doc:`pickles ` are not secure +against erroneous or maliciously constructed data. + +Use :func:`numpy.save` and :func:`numpy.load`. Set ``allow_pickle=False``, +unless the array dtype includes Python objects, in which case pickling is +required. + +****************************************************************************** +Convert from a pandas DataFrame to a NumPy array +****************************************************************************** + +See :meth:`pandas.DataFrame.to_numpy`. 
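+
+As an illustrative sketch (the frame below is made up and assumes pandas is
+installed)::
+
+    >>> import pandas as pd                              # doctest: +SKIP
+    >>> df = pd.DataFrame({"x": [1, 2], "y": [3.0, 4.0]})  # doctest: +SKIP
+    >>> df.to_numpy()                                    # doctest: +SKIP
+    array([[1., 3.],
+           [2., 4.]])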
+ +****************************************************************************** + Save/restore using `~numpy.ndarray.tofile` and `~numpy.fromfile` +****************************************************************************** + +In general, prefer :func:`numpy.save` and :func:`numpy.load`. + +:func:`numpy.ndarray.tofile` and :func:`numpy.fromfile` lose information on +endianness and precision and so are unsuitable for anything but scratch +storage. + diff --git a/doc/source/user/install.rst b/doc/source/user/install.rst index e05cee2f130d..b9425701f352 100644 --- a/doc/source/user/install.rst +++ b/doc/source/user/install.rst @@ -1,7 +1,7 @@ -:orphan: - -**************** -Installing NumPy -**************** - +:orphan: + +**************** +Installing NumPy +**************** + See `Installing NumPy `_. \ No newline at end of file diff --git a/doc/source/user/who_covid_19_sit_rep_time_series.csv b/doc/source/user/who_covid_19_sit_rep_time_series.csv index ebf670b8c0b0..8ad5c2c238cf 100644 --- a/doc/source/user/who_covid_19_sit_rep_time_series.csv +++ b/doc/source/user/who_covid_19_sit_rep_time_series.csv @@ -1,115 +1,115 @@ -Province/States,Country/Region,WHO region,1/21/20,1/22/20,1/23/20,1/24/20,1/25/20,1/26/20,1/27/20,1/28/20,1/29/20,1/30/20,1/31/20,2/1/20,2/2/20,2/3/20,2/4/20,2/5/20,2/6/20,2/7/20,2/8/20,2/9/20,2/10/20,2/11/20,2/12/20,2/13/20,2/14/20,2/15/20,2/16/20,2/17/20,2/18/20,2/19/20,2/20/20,2/21/20,2/22/20,2/23/20,2/24/20,2/25/20,2/26/20,2/27/20,2/28/20,2/29/20,3/1/20,3/2/20,3/3/20 -Confirmed,Globally,,282,314,581,846,1320,2014,2798,4593,6065,7818,9826,11953,14557,17391,20630,24554,28276,31481,34886,37558,40554,43103,45171,46997,49053,50580,51857,71429,73332,75204,75748,76769,77794,78811,79331,80239,81109,82294,83652,85403,87137,88948,90870 -Confirmed,Mainland China,Western Pacific Region,278,309,571,830,1297,1985,2741,4537,5997,7736,9720,11821,14411,17238,20471,24363,28060,31211,34598,37251,40235,42708,44730,46550,48548,50054,51174,70635,72528,74280,74675,75569,76392,77042,77262,77780,78191,78630,78961,79394,79968,80174,80304 -Confirmed,Outside of China,,4,5,10,16,23,29,57,56,68,82,106,132,146,153,159,191,216,270,288,307,319,395,441,447,505,526,683,794,804,924,1073,1200,1402,1769,2069,2459,2918,3664,4691,6009,7169,8774,10566 -Suspected,Mainland China,Western Pacific Region,,,,,,,5794,6973,9239,12167,15238,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -Severe,Mainland China,Western Pacific Region,,,,,,,461,976,1239,1370,1527,1795,2110,2296,2788,3219,3859,4821,6101,6188,6484,7333,8204,,,,,,,,,,,,,,,,,,,, -Deaths,Mainland China,Western Pacific Region,,,,,,,80,106,132,170,213,259,304,361,425,491,564,637,723,812,909,1017,1114,1260,1381,1524,1666,1772,1870,2006,2121,2239,2348,2445,2595,2666,2718,2747,2791,2838,2873,2915,2946 -Hubei ,China,Western Pacific Region,258,270,375,375,,,,,,,,7153,9074,11177,13522,16678,19665,22112,24953,27100,29631,31728,33366,34874,51968,54406,56249,58182,59989,61682,62031,62662,63454,64084,64287,64786,65187,65596,65914,66337,66907,67103,67217 -Guangdong,China,Western Pacific Region,14,17,26,32,,,,,,,,520,604,683,797,870,944,1018,1075,1120,1151,1177,1219,1241,1261,1295,1316,1322,1328,1331,1332,1333,1339,1342,1345,1347,1347,1347,1348,1349,1349,1350,1350 -Henan,China,Western Pacific Region,,1,1,1,,,,,,,,422,493,566,675,764,851,914,981,1033,1073,1105,1135,1169,1184,1212,1231,1246,1257,1262,1265,1267,1270,1271,1271,1271,1271,1272,1272,1272,1272,1272,1272 -Zhejiang,China,Western Pacific 
Region,,5,5,5,,,,,,,,599,661,724,829,895,954,1006,1048,1075,1104,1117,1131,1145,1155,1162,1167,1171,1172,1173,1175,1203,1205,1205,1205,1205,1205,1205,1205,1205,1205,1206,1213 -Hunan,China,Western Pacific Region,,1,1,1,,,,,,,,389,463,521,593,661,711,772,803,838,879,912,946,968,988,1001,1004,1006,1007,1008,1010,1011,1013,1016,1016,1016,1016,1017,1017,1018,1018,1018,1018 -Anhui,China,Western Pacific Region,,,,,,,,,,,,297,340,408,480,530,591,665,733,779,830,860,889,910,934,950,962,973,982,986,987,988,989,989,989,989,989,989,990,990,990,990,990 -Jiangxi,China,Western Pacific Region,,1,2,2,,,,,,,,286,333,391,476,548,600,661,698,740,771,804,844,872,900,913,925,930,933,934,934,934,934,934,934,934,934,934,935,935,935,935,935 -Shandong,China,Western Pacific Region,,1,1,1,,,,,,,,202,225,246,270,298,343,379,407,435,459,486,497,506,519,530,537,541,543,544,546,748,750,754,755,755,756,756,756,756,756,758,758 -Jiangsu,China,Western Pacific Region,,,,,,,,,,,,202,231,271,308,341,373,408,439,468,492,515,543,570,593,604,617,626,629,631,631,631,631,631,631,631,631,631,631,631,631,631,631 -Chongqing,China,Western Pacific Region,,1,5,5,,,,,,,,238,262,300,337,366,389,411,426,446,468,486,505,518,529,537,544,551,553,555,560,567,572,573,575,576,576,576,576,576,576,576,576 -Sichuan,China,Western Pacific Region,,1,2,2,,,,,,,,207,236,254,282,301,321,344,363,386,405,417,436,451,463,470,481,495,508,514,520,525,526,526,527,529,531,534,538,538,538,538,538 -Heilongjiang,China,Western Pacific Region,,,,,,,,,,,,80,95,118,155,190,227,277,282,307,331,360,378,395,418,425,445,457,464,470,476,479,479,480,480,480,480,480,480,480,480,480,480 -Beijing,China,Western Pacific Region,5,5,10,10,,,,,,,,156,183,212,228,253,274,297,315,326,337,342,352,366,372,375,380,381,387,393,395,396,399,399,399,400,400,410,410,411,413,414,414 -Shanghai,China,Western Pacific Region,1,2,9,9,,,,,,,,153,177,193,208,233,254,269,281,292,295,302,306,313,318,326,328,331,333,333,333,334,334,335,335,335,336,337,337,337,337,337,338 -Hebei,China,Western Pacific Region,,,,,,,,,,,,96,104,113,126,135,157,171,195,206,218,239,251,265,283,291,300,301,302,306,307,308,309,311,311,311,312,317,318,318,318,318,318 -Fujian,China,Western Pacific Region,,,,,,,,,,,,144,159,179,194,205,215,224,239,250,261,267,272,279,281,285,287,290,292,293,293,293,293,293,293,294,294,296,296,296,296,296,296 -Guangxi,China,Western Pacific Region,,,,,,,,,,,,100,111,127,139,150,168,172,183,195,210,215,222,222,226,235,237,238,242,244,245,246,249,249,251,252,252,252,252,252,252,252,252 -Shaanxi,China,Western Pacific Region,,,,,,,,,,,,101,116,128,142,165,173,184,195,208,213,219,225,229,230,232,236,240,240,242,245,245,245,245,245,245,245,245,245,245,245,245,245 -Yunnan,China,Western Pacific Region,,1,1,1,,,,,,,,91,99,109,117,122,128,135,138,140,141,149,154,155,162,168,169,171,172,172,172,174,174,174,174,174,174,174,174,174,174,174,174 -Hainan,China,Western Pacific Region,,,,,,,,,,,,57,63,70,79,89,100,111,123,128,136,142,145,157,157,162,162,162,163,163,168,168,168,168,168,168,168,168,168,168,168,168,168 -Guizhou,China,Western Pacific Region,,,,,,,,,,,,29,38,46,56,64,69,77,89,96,109,118,131,135,140,143,144,146,146,146,146,146,146,146,146,146,146,146,146,146,146,146,146 -Tianjin,China,Western Pacific Region,,2,2,2,,,,,,,,34,40,49,63,67,70,94,81,88,91,96,106,112,119,120,122,124,125,128,130,131,133,135,135,135,135,135,136,136,136,136,136 -Shanxi,China,Western Pacific Region,,,,,,,,,,,,47,56,66,74,81,90,96,104,115,119,122,124,126,126,127,128,129,130,131,131,132,132,132,132,133,133,133,133,133,133,133,133 
-Liaoning,China,Western Pacific Region,,,,,,,,,,,,60,64,70,74,81,89,94,99,105,107,108,111,116,117,119,120,121,121,121,121,121,121,121,121,121,121,121,121,121,122,122,125 -Hong Kong,China,Western Pacific Region,,,1,2,5,5,8,8,8,10,12,13,14,15,15,18,21,24,26,26,36,42,49,50,53,56,56,57,60,62,65,68,68,70,74,81,85,91,93,94,95,98,101 -Jilin,China,Western Pacific Region,,,,,,,,,,,,17,21,31,42,54,59,65,69,78,80,81,83,84,86,88,88,89,89,90,91,91,91,91,93,93,93,93,93,93,93,93,93 -Gansu,China,Western Pacific Region,,,,,,,,,,,,35,45,51,56,57,62,70,71,81,85,86,86,87,90,90,90,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91 -Xinjiang,China,Western Pacific Region,,,,,,,,,,,,18,23,24,29,32,36,39,42,45,49,55,59,63,65,70,71,73,76,76,76,76,76,75,76,76,76,76,76,76,76,76,76 -Inner Mongolia,China,Western Pacific Region,,,,,,,,,,,,23,26,33,37,42,46,49,50,54,58,58,60,61,63,68,70,72,73,75,75,75,75,75,75,75,75,75,75,75,75,75,75 -Ningxia,China,Western Pacific Region,,,,,,,,,,,,26,28,31,34,34,40,43,45,45,49,53,58,64,67,70,70,70,70,71,71,71,71,71,71,71,71,72,72,73,73,74,74 -Taiwan,China,Western Pacific Region,,1,1,1,3,3,4,7,8,8,9,10,10,10,10,11,11,16,16,17,18,18,18,18,18,18,18,20,22,23,24,26,26,23,28,31,32,32,34,39,39,40,42 -Qinghai,China,Western Pacific Region,,,,,,,,,,,,8,9,13,15,17,18,18,18,18,18,18,18,18,18,18,18,18,18,12,18,18,18,18,18,18,18,18,18,18,18,18,18 -Macau,China,Western Pacific Region,,,1,2,2,2,5,7,7,7,7,7,7,8,8,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10 -Xizang,China,Western Pacific Region,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 -Unspecified*,China,Western Pacific Region,,,131,384,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -,Japan,Western Pacific Region,1,1,1,1,3,3,4,6,7,11,14,17,20,20,20,33,25,25,25,26,26,26,28,29,33,41,53,59,65,73,85,93,105,132,144,157,164,186,210,230,239,254,268 -,Republic of Korea,Western Pacific Region,1,1,1,2,2,2,4,4,4,4,11,12,15,15,16,18,23,24,24,27,27,28,28,28,28,28,29,30,31,51,104,204,346,602,763,977,1261,1766,2337,3150,3736,4212,4812 -,Thailand,South-East Asia Region,2,2,2,4,4,5,5,14,14,14,14,19,19,19,19,25,25,25,32,32,32,33,33,33,33,34,34,35,35,35,35,35,35,35,35,37,40,40,40,42,42,42,43 -,United States of America,Region of the Americas,,,1,1,2,2,5,5,5,5,6,7,8,11,11,11,12,12,12,12,12,13,13,14,15,15,15,15,15,15,15,15,35,35,35,53,53,59,59,62,62,62,64 -,Vietnam,Western Pacific Region,,,,2,2,2,2,2,2,2,5,6,7,8,9,10,10,12,13,14,14,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16 -,Singapore,Western Pacific Region,,,,1,3,3,4,7,7,10,13,16,18,18,18,24,28,30,33,40,43,45,47,50,58,67,72,75,77,81,84,85,86,89,89,90,91,93,96,98,102,106,108 -,Italy,European Region,,,,,,,,,,,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,9,76,124,229,322,400,650,888,1128,1689,2036 -,Nepal,South-East Asia Region,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 -,Australia,Western Pacific Region,,,,,3,3,4,5,7,7,9,12,12,12,12,13,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,17,21,21,21,22,23,23,23,24,25,27,33 -,Malaysia,Western Pacific Region,,,,,,3,4,4,4,7,8,8,8,8,10,10,12,14,15,17,18,18,18,18,19,21,22,22,22,22,22,22,22,22,22,22,22,22,24,24,24,24,29 -,Canada,Region of the Americas,,,,,,,1,2,3,3,3,4,4,4,4,5,5,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,9,9,10,10,11,11,14,19,19,27 -,Cambodia,Western Pacific Region,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 -,France,European 
Region,,,,,3,3,3,3,4,5,6,6,6,6,6,6,6,6,6,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,18,38,57,100,100,191 -,Sri Lanka,South-East Asia Region,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 -,Iran,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,5,18,28,43,61,95,141,245,388,593,978,1501 -,India,South-East Asia Region,,,,,,,,,,1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,5 -,Germany,European Region,,,,,,,,1,4,4,5,7,8,10,12,12,12,13,14,14,14,14,16,16,16,16,16,16,16,16,16,16,16,16,16,16,18,21,26,57,57,129,157 -,Philippines,Western Pacific Region,,,,,,,,,,1,1,1,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 -,Spain,European Region,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,12,25,32,45,45,114 -,United Kingdom,European Region,,,,,,,,,,,,2,2,2,2,2,2,3,3,3,4,8,8,9,9,9,9,9,9,9,9,9,9,9,9,13,13,13,16,20,23,36,39 -,Sweden,European Region,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,7,12,13,14,15 -,Switzerland,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,6,10,18,26,30 -,Austria,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,2,4,5,10,10,18 -,Norway,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,4,6,15,19,25 -,Kuwait,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,8,12,43,43,45,45,56,56 -,Bahrain,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,8,26,33,33,38,40,47,49 -,United Arab Emirates,Eastern Mediterranean Region,,,,,,,,,4,4,4,4,5,5,5,5,5,5,7,7,7,8,8,8,8,8,8,9,9,9,9,9,11,13,13,13,13,13,19,19,19,21,21 -,Israel,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,2,2,2,3,5,7,7,10 -,Iraq,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,5,6,7,8,13,19,26 -,Oman,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,4,4,6,6,6,6,6 -,Lebanon,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,2,2,2,2,10,13 -,Pakistan,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,2,2,4,4,5 -,Egypt,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2 -,Croatia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,3,3,5,7,7,9 -,Greece,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,3,3,3,7,7 -,Finland,European Region,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,6,7 -,Algeria,African Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1,5 -,Brazil,Region of the Americas,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,2,2,2 -,Russian,European Region,,,,,,,,,,,,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3 -,Belgium,European Region,,,,,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,8 -,Denmark,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,2,3,4,5 -,Estonia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1 -,Georgia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,2,3,3,3 -,North Macedonia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1 -,Romania,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,3,3,3,3 -,Afghanistan,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1,1,1 -,New Zealand,Western Pacific Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,2 -,Belarus,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1 -,Lithuania,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1 -,Netherlands,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,2,7,13,18 
-,Nigeria,African Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1 -,Mexico,Region of the Americas,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,2,5,5 -,San Marino,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,8 -,Azerbaijan,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,3 -,Ireland,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1 -,Monaco,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1 -,Qatar,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,3,7 -,Ecuador,Region of the Americas,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,6 -,Czechia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3 -,Iceland,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,9 -,Armenia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1 -,Luxembourg,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1 -,Indonesia,South-East Asia Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,2 -,Dominican Republic,Region of the Americas,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1 -,Portugal,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2 -,Andorra,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 -,Latvia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 -,Jordan,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 -,Morocco,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 -,Saudi Arabia,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 -,Tunisia,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 -,Senegal,African Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 +Province/States,Country/Region,WHO region,1/21/20,1/22/20,1/23/20,1/24/20,1/25/20,1/26/20,1/27/20,1/28/20,1/29/20,1/30/20,1/31/20,2/1/20,2/2/20,2/3/20,2/4/20,2/5/20,2/6/20,2/7/20,2/8/20,2/9/20,2/10/20,2/11/20,2/12/20,2/13/20,2/14/20,2/15/20,2/16/20,2/17/20,2/18/20,2/19/20,2/20/20,2/21/20,2/22/20,2/23/20,2/24/20,2/25/20,2/26/20,2/27/20,2/28/20,2/29/20,3/1/20,3/2/20,3/3/20 +Confirmed,Globally,,282,314,581,846,1320,2014,2798,4593,6065,7818,9826,11953,14557,17391,20630,24554,28276,31481,34886,37558,40554,43103,45171,46997,49053,50580,51857,71429,73332,75204,75748,76769,77794,78811,79331,80239,81109,82294,83652,85403,87137,88948,90870 +Confirmed,Mainland China,Western Pacific Region,278,309,571,830,1297,1985,2741,4537,5997,7736,9720,11821,14411,17238,20471,24363,28060,31211,34598,37251,40235,42708,44730,46550,48548,50054,51174,70635,72528,74280,74675,75569,76392,77042,77262,77780,78191,78630,78961,79394,79968,80174,80304 +Confirmed,Outside of China,,4,5,10,16,23,29,57,56,68,82,106,132,146,153,159,191,216,270,288,307,319,395,441,447,505,526,683,794,804,924,1073,1200,1402,1769,2069,2459,2918,3664,4691,6009,7169,8774,10566 +Suspected,Mainland China,Western Pacific Region,,,,,,,5794,6973,9239,12167,15238,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Severe,Mainland China,Western Pacific Region,,,,,,,461,976,1239,1370,1527,1795,2110,2296,2788,3219,3859,4821,6101,6188,6484,7333,8204,,,,,,,,,,,,,,,,,,,, +Deaths,Mainland China,Western Pacific Region,,,,,,,80,106,132,170,213,259,304,361,425,491,564,637,723,812,909,1017,1114,1260,1381,1524,1666,1772,1870,2006,2121,2239,2348,2445,2595,2666,2718,2747,2791,2838,2873,2915,2946 +Hubei ,China,Western Pacific Region,258,270,375,375,,,,,,,,7153,9074,11177,13522,16678,19665,22112,24953,27100,29631,31728,33366,34874,51968,54406,56249,58182,59989,61682,62031,62662,63454,64084,64287,64786,65187,65596,65914,66337,66907,67103,67217 
+Guangdong,China,Western Pacific Region,14,17,26,32,,,,,,,,520,604,683,797,870,944,1018,1075,1120,1151,1177,1219,1241,1261,1295,1316,1322,1328,1331,1332,1333,1339,1342,1345,1347,1347,1347,1348,1349,1349,1350,1350 +Henan,China,Western Pacific Region,,1,1,1,,,,,,,,422,493,566,675,764,851,914,981,1033,1073,1105,1135,1169,1184,1212,1231,1246,1257,1262,1265,1267,1270,1271,1271,1271,1271,1272,1272,1272,1272,1272,1272 +Zhejiang,China,Western Pacific Region,,5,5,5,,,,,,,,599,661,724,829,895,954,1006,1048,1075,1104,1117,1131,1145,1155,1162,1167,1171,1172,1173,1175,1203,1205,1205,1205,1205,1205,1205,1205,1205,1205,1206,1213 +Hunan,China,Western Pacific Region,,1,1,1,,,,,,,,389,463,521,593,661,711,772,803,838,879,912,946,968,988,1001,1004,1006,1007,1008,1010,1011,1013,1016,1016,1016,1016,1017,1017,1018,1018,1018,1018 +Anhui,China,Western Pacific Region,,,,,,,,,,,,297,340,408,480,530,591,665,733,779,830,860,889,910,934,950,962,973,982,986,987,988,989,989,989,989,989,989,990,990,990,990,990 +Jiangxi,China,Western Pacific Region,,1,2,2,,,,,,,,286,333,391,476,548,600,661,698,740,771,804,844,872,900,913,925,930,933,934,934,934,934,934,934,934,934,934,935,935,935,935,935 +Shandong,China,Western Pacific Region,,1,1,1,,,,,,,,202,225,246,270,298,343,379,407,435,459,486,497,506,519,530,537,541,543,544,546,748,750,754,755,755,756,756,756,756,756,758,758 +Jiangsu,China,Western Pacific Region,,,,,,,,,,,,202,231,271,308,341,373,408,439,468,492,515,543,570,593,604,617,626,629,631,631,631,631,631,631,631,631,631,631,631,631,631,631 +Chongqing,China,Western Pacific Region,,1,5,5,,,,,,,,238,262,300,337,366,389,411,426,446,468,486,505,518,529,537,544,551,553,555,560,567,572,573,575,576,576,576,576,576,576,576,576 +Sichuan,China,Western Pacific Region,,1,2,2,,,,,,,,207,236,254,282,301,321,344,363,386,405,417,436,451,463,470,481,495,508,514,520,525,526,526,527,529,531,534,538,538,538,538,538 +Heilongjiang,China,Western Pacific Region,,,,,,,,,,,,80,95,118,155,190,227,277,282,307,331,360,378,395,418,425,445,457,464,470,476,479,479,480,480,480,480,480,480,480,480,480,480 +Beijing,China,Western Pacific Region,5,5,10,10,,,,,,,,156,183,212,228,253,274,297,315,326,337,342,352,366,372,375,380,381,387,393,395,396,399,399,399,400,400,410,410,411,413,414,414 +Shanghai,China,Western Pacific Region,1,2,9,9,,,,,,,,153,177,193,208,233,254,269,281,292,295,302,306,313,318,326,328,331,333,333,333,334,334,335,335,335,336,337,337,337,337,337,338 +Hebei,China,Western Pacific Region,,,,,,,,,,,,96,104,113,126,135,157,171,195,206,218,239,251,265,283,291,300,301,302,306,307,308,309,311,311,311,312,317,318,318,318,318,318 +Fujian,China,Western Pacific Region,,,,,,,,,,,,144,159,179,194,205,215,224,239,250,261,267,272,279,281,285,287,290,292,293,293,293,293,293,293,294,294,296,296,296,296,296,296 +Guangxi,China,Western Pacific Region,,,,,,,,,,,,100,111,127,139,150,168,172,183,195,210,215,222,222,226,235,237,238,242,244,245,246,249,249,251,252,252,252,252,252,252,252,252 +Shaanxi,China,Western Pacific Region,,,,,,,,,,,,101,116,128,142,165,173,184,195,208,213,219,225,229,230,232,236,240,240,242,245,245,245,245,245,245,245,245,245,245,245,245,245 +Yunnan,China,Western Pacific Region,,1,1,1,,,,,,,,91,99,109,117,122,128,135,138,140,141,149,154,155,162,168,169,171,172,172,172,174,174,174,174,174,174,174,174,174,174,174,174 +Hainan,China,Western Pacific Region,,,,,,,,,,,,57,63,70,79,89,100,111,123,128,136,142,145,157,157,162,162,162,163,163,168,168,168,168,168,168,168,168,168,168,168,168,168 +Guizhou,China,Western Pacific 
Region,,,,,,,,,,,,29,38,46,56,64,69,77,89,96,109,118,131,135,140,143,144,146,146,146,146,146,146,146,146,146,146,146,146,146,146,146,146 +Tianjin,China,Western Pacific Region,,2,2,2,,,,,,,,34,40,49,63,67,70,94,81,88,91,96,106,112,119,120,122,124,125,128,130,131,133,135,135,135,135,135,136,136,136,136,136 +Shanxi,China,Western Pacific Region,,,,,,,,,,,,47,56,66,74,81,90,96,104,115,119,122,124,126,126,127,128,129,130,131,131,132,132,132,132,133,133,133,133,133,133,133,133 +Liaoning,China,Western Pacific Region,,,,,,,,,,,,60,64,70,74,81,89,94,99,105,107,108,111,116,117,119,120,121,121,121,121,121,121,121,121,121,121,121,121,121,122,122,125 +Hong Kong,China,Western Pacific Region,,,1,2,5,5,8,8,8,10,12,13,14,15,15,18,21,24,26,26,36,42,49,50,53,56,56,57,60,62,65,68,68,70,74,81,85,91,93,94,95,98,101 +Jilin,China,Western Pacific Region,,,,,,,,,,,,17,21,31,42,54,59,65,69,78,80,81,83,84,86,88,88,89,89,90,91,91,91,91,93,93,93,93,93,93,93,93,93 +Gansu,China,Western Pacific Region,,,,,,,,,,,,35,45,51,56,57,62,70,71,81,85,86,86,87,90,90,90,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91 +Xinjiang,China,Western Pacific Region,,,,,,,,,,,,18,23,24,29,32,36,39,42,45,49,55,59,63,65,70,71,73,76,76,76,76,76,75,76,76,76,76,76,76,76,76,76 +Inner Mongolia,China,Western Pacific Region,,,,,,,,,,,,23,26,33,37,42,46,49,50,54,58,58,60,61,63,68,70,72,73,75,75,75,75,75,75,75,75,75,75,75,75,75,75 +Ningxia,China,Western Pacific Region,,,,,,,,,,,,26,28,31,34,34,40,43,45,45,49,53,58,64,67,70,70,70,70,71,71,71,71,71,71,71,71,72,72,73,73,74,74 +Taiwan,China,Western Pacific Region,,1,1,1,3,3,4,7,8,8,9,10,10,10,10,11,11,16,16,17,18,18,18,18,18,18,18,20,22,23,24,26,26,23,28,31,32,32,34,39,39,40,42 +Qinghai,China,Western Pacific Region,,,,,,,,,,,,8,9,13,15,17,18,18,18,18,18,18,18,18,18,18,18,18,18,12,18,18,18,18,18,18,18,18,18,18,18,18,18 +Macau,China,Western Pacific Region,,,1,2,2,2,5,7,7,7,7,7,7,8,8,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10 +Xizang,China,Western Pacific Region,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 +Unspecified*,China,Western Pacific Region,,,131,384,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +,Japan,Western Pacific Region,1,1,1,1,3,3,4,6,7,11,14,17,20,20,20,33,25,25,25,26,26,26,28,29,33,41,53,59,65,73,85,93,105,132,144,157,164,186,210,230,239,254,268 +,Republic of Korea,Western Pacific Region,1,1,1,2,2,2,4,4,4,4,11,12,15,15,16,18,23,24,24,27,27,28,28,28,28,28,29,30,31,51,104,204,346,602,763,977,1261,1766,2337,3150,3736,4212,4812 +,Thailand,South-East Asia Region,2,2,2,4,4,5,5,14,14,14,14,19,19,19,19,25,25,25,32,32,32,33,33,33,33,34,34,35,35,35,35,35,35,35,35,37,40,40,40,42,42,42,43 +,United States of America,Region of the Americas,,,1,1,2,2,5,5,5,5,6,7,8,11,11,11,12,12,12,12,12,13,13,14,15,15,15,15,15,15,15,15,35,35,35,53,53,59,59,62,62,62,64 +,Vietnam,Western Pacific Region,,,,2,2,2,2,2,2,2,5,6,7,8,9,10,10,12,13,14,14,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16 +,Singapore,Western Pacific Region,,,,1,3,3,4,7,7,10,13,16,18,18,18,24,28,30,33,40,43,45,47,50,58,67,72,75,77,81,84,85,86,89,89,90,91,93,96,98,102,106,108 +,Italy,European Region,,,,,,,,,,,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,9,76,124,229,322,400,650,888,1128,1689,2036 +,Nepal,South-East Asia Region,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 +,Australia,Western Pacific Region,,,,,3,3,4,5,7,7,9,12,12,12,12,13,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,17,21,21,21,22,23,23,23,24,25,27,33 +,Malaysia,Western 
Pacific Region,,,,,,3,4,4,4,7,8,8,8,8,10,10,12,14,15,17,18,18,18,18,19,21,22,22,22,22,22,22,22,22,22,22,22,22,24,24,24,24,29 +,Canada,Region of the Americas,,,,,,,1,2,3,3,3,4,4,4,4,5,5,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,9,9,10,10,11,11,14,19,19,27 +,Cambodia,Western Pacific Region,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 +,France,European Region,,,,,3,3,3,3,4,5,6,6,6,6,6,6,6,6,6,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,18,38,57,100,100,191 +,Sri Lanka,South-East Asia Region,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 +,Iran,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,5,18,28,43,61,95,141,245,388,593,978,1501 +,India,South-East Asia Region,,,,,,,,,,1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,5 +,Germany,European Region,,,,,,,,1,4,4,5,7,8,10,12,12,12,13,14,14,14,14,16,16,16,16,16,16,16,16,16,16,16,16,16,16,18,21,26,57,57,129,157 +,Philippines,Western Pacific Region,,,,,,,,,,1,1,1,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 +,Spain,European Region,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,12,25,32,45,45,114 +,United Kingdom,European Region,,,,,,,,,,,,2,2,2,2,2,2,3,3,3,4,8,8,9,9,9,9,9,9,9,9,9,9,9,9,13,13,13,16,20,23,36,39 +,Sweden,European Region,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,7,12,13,14,15 +,Switzerland,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,6,10,18,26,30 +,Austria,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,2,4,5,10,10,18 +,Norway,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,4,6,15,19,25 +,Kuwait,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,8,12,43,43,45,45,56,56 +,Bahrain,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,8,26,33,33,38,40,47,49 +,United Arab Emirates,Eastern Mediterranean Region,,,,,,,,,4,4,4,4,5,5,5,5,5,5,7,7,7,8,8,8,8,8,8,9,9,9,9,9,11,13,13,13,13,13,19,19,19,21,21 +,Israel,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,2,2,2,3,5,7,7,10 +,Iraq,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,5,6,7,8,13,19,26 +,Oman,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,4,4,6,6,6,6,6 +,Lebanon,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,2,2,2,2,10,13 +,Pakistan,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,2,2,4,4,5 +,Egypt,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2 +,Croatia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,3,3,5,7,7,9 +,Greece,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,3,3,3,7,7 +,Finland,European Region,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,6,7 +,Algeria,African Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1,5 +,Brazil,Region of the Americas,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,2,2,2 +,Russian,European Region,,,,,,,,,,,,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3 +,Belgium,European Region,,,,,,,,,,,,,,,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,8 +,Denmark,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,2,3,4,5 +,Estonia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1 +,Georgia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,2,3,3,3 +,North Macedonia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1 +,Romania,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,3,3,3,3 +,Afghanistan,Eastern Mediterranean 
Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1,1,1,1 +,New Zealand,Western Pacific Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,2 +,Belarus,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1 +,Lithuania,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1 +,Netherlands,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,2,7,13,18 +,Nigeria,African Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,1,1 +,Mexico,Region of the Americas,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,2,5,5 +,San Marino,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1,8 +,Azerbaijan,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,3 +,Ireland,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1 +,Monaco,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,1 +,Qatar,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,3,7 +,Ecuador,Region of the Americas,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1,6 +,Czechia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3 +,Iceland,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,9 +,Armenia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1 +,Luxembourg,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1 +,Indonesia,South-East Asia Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,2 +,Dominican Republic,Region of the Americas,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,1 +,Portugal,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2 +,Andorra,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 +,Latvia,European Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 +,Jordan,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 +,Morocco,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 +,Saudi Arabia,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 +,Tunisia,Eastern Mediterranean Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 +,Senegal,African Region,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1 Case on an international conveyance,Other,Other,,,,,,,,,,,,,,,,,20,61,64,64,70,135,175,174,218,218,355,454,454,542,621,634,634,634,695,691,691,705,705,705,706,706,706 \ No newline at end of file diff --git a/numpy/core/src/common/npy_cpuinfo_parser.h b/numpy/core/src/common/npy_cpuinfo_parser.h index 4c00c847bfbe..f4540f6ab170 100644 --- a/numpy/core/src/common/npy_cpuinfo_parser.h +++ b/numpy/core/src/common/npy_cpuinfo_parser.h @@ -1,262 +1,262 @@ -/* - * Copyright (C) 2010 The Android Open Source Project - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS - * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED - * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -#ifndef __NPY_CPUINFO_PARSER_H__ -#define __NPY_CPUINFO_PARSER_H__ -#include -#include -#include -#include -#include - -#define NPY__HWCAP 16 -#define NPY__HWCAP2 26 - -// arch/arm/include/uapi/asm/hwcap.h -#define NPY__HWCAP_HALF (1 << 1) -#define NPY__HWCAP_NEON (1 << 12) -#define NPY__HWCAP_VFPv3 (1 << 13) -#define NPY__HWCAP_VFPv4 (1 << 16) -#define NPY__HWCAP2_AES (1 << 0) -#define NPY__HWCAP2_PMULL (1 << 1) -#define NPY__HWCAP2_SHA1 (1 << 2) -#define NPY__HWCAP2_SHA2 (1 << 3) -#define NPY__HWCAP2_CRC32 (1 << 4) -// arch/arm64/include/uapi/asm/hwcap.h -#define NPY__HWCAP_FP (1 << 0) -#define NPY__HWCAP_ASIMD (1 << 1) -#define NPY__HWCAP_FPHP (1 << 9) -#define NPY__HWCAP_ASIMDHP (1 << 10) -#define NPY__HWCAP_ASIMDDP (1 << 20) -#define NPY__HWCAP_ASIMDFHM (1 << 23) -/* - * Get the size of a file by reading it until the end. This is needed - * because files under /proc do not always return a valid size when - * using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed. - */ -static int -get_file_size(const char* pathname) -{ - int fd, result = 0; - char buffer[256]; - - fd = open(pathname, O_RDONLY); - if (fd < 0) { - return -1; - } - - for (;;) { - int ret = read(fd, buffer, sizeof buffer); - if (ret < 0) { - if (errno == EINTR) { - continue; - } - break; - } - if (ret == 0) { - break; - } - result += ret; - } - close(fd); - return result; -} - -/* - * Read the content of /proc/cpuinfo into a user-provided buffer. - * Return the length of the data, or -1 on error. Does *not* - * zero-terminate the content. Will not read more - * than 'buffsize' bytes. - */ -static int -read_file(const char* pathname, char* buffer, size_t buffsize) -{ - int fd, count; - - fd = open(pathname, O_RDONLY); - if (fd < 0) { - return -1; - } - count = 0; - while (count < (int)buffsize) { - int ret = read(fd, buffer + count, buffsize - count); - if (ret < 0) { - if (errno == EINTR) { - continue; - } - if (count == 0) { - count = -1; - } - break; - } - if (ret == 0) { - break; - } - count += ret; - } - close(fd); - return count; -} - -/* - * Extract the content of a the first occurence of a given field in - * the content of /proc/cpuinfo and return it as a heap-allocated - * string that must be freed by the caller. - * - * Return NULL if not found - */ -static char* -extract_cpuinfo_field(const char* buffer, int buflen, const char* field) -{ - int fieldlen = strlen(field); - const char* bufend = buffer + buflen; - char* result = NULL; - int len; - const char *p, *q; - - /* Look for first field occurence, and ensures it starts the line. 
*/ - p = buffer; - for (;;) { - p = memmem(p, bufend-p, field, fieldlen); - if (p == NULL) { - goto EXIT; - } - - if (p == buffer || p[-1] == '\n') { - break; - } - - p += fieldlen; - } - - /* Skip to the first column followed by a space */ - p += fieldlen; - p = memchr(p, ':', bufend-p); - if (p == NULL || p[1] != ' ') { - goto EXIT; - } - - /* Find the end of the line */ - p += 2; - q = memchr(p, '\n', bufend-p); - if (q == NULL) { - q = bufend; - } - - /* Copy the line into a heap-allocated buffer */ - len = q - p; - result = malloc(len + 1); - if (result == NULL) { - goto EXIT; - } - - memcpy(result, p, len); - result[len] = '\0'; - -EXIT: - return result; -} - -/* - * Checks that a space-separated list of items contains one given 'item'. - * Returns 1 if found, 0 otherwise. - */ -static int -has_list_item(const char* list, const char* item) -{ - const char* p = list; - int itemlen = strlen(item); - - if (list == NULL) { - return 0; - } - - while (*p) { - const char* q; - - /* skip spaces */ - while (*p == ' ' || *p == '\t') { - p++; - } - - /* find end of current list item */ - q = p; - while (*q && *q != ' ' && *q != '\t') { - q++; - } - - if (itemlen == q-p && !memcmp(p, item, itemlen)) { - return 1; - } - - /* skip to next item */ - p = q; - } - return 0; -} - -static void setHwcap(char* cpuFeatures, unsigned long* hwcap) { - *hwcap |= has_list_item(cpuFeatures, "neon") ? NPY__HWCAP_NEON : 0; - *hwcap |= has_list_item(cpuFeatures, "half") ? NPY__HWCAP_HALF : 0; - *hwcap |= has_list_item(cpuFeatures, "vfpv3") ? NPY__HWCAP_VFPv3 : 0; - *hwcap |= has_list_item(cpuFeatures, "vfpv4") ? NPY__HWCAP_VFPv4 : 0; - - *hwcap |= has_list_item(cpuFeatures, "asimd") ? NPY__HWCAP_ASIMD : 0; - *hwcap |= has_list_item(cpuFeatures, "fp") ? NPY__HWCAP_FP : 0; - *hwcap |= has_list_item(cpuFeatures, "fphp") ? NPY__HWCAP_FPHP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimdhp") ? NPY__HWCAP_ASIMDHP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimddp") ? NPY__HWCAP_ASIMDDP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimdfhm") ? NPY__HWCAP_ASIMDFHM : 0; -} - -static int -get_feature_from_proc_cpuinfo(unsigned long *hwcap, unsigned long *hwcap2) { - char* cpuinfo = NULL; - int cpuinfo_len; - cpuinfo_len = get_file_size("/proc/cpuinfo"); - if (cpuinfo_len < 0) { - return 0; - } - cpuinfo = malloc(cpuinfo_len); - if (cpuinfo == NULL) { - return 0; - } - cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, cpuinfo_len); - char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features"); - if(cpuFeatures == NULL) { - return 0; - } - setHwcap(cpuFeatures, hwcap); - *hwcap2 |= *hwcap; - *hwcap2 |= has_list_item(cpuFeatures, "aes") ? NPY__HWCAP2_AES : 0; - *hwcap2 |= has_list_item(cpuFeatures, "pmull") ? NPY__HWCAP2_PMULL : 0; - *hwcap2 |= has_list_item(cpuFeatures, "sha1") ? NPY__HWCAP2_SHA1 : 0; - *hwcap2 |= has_list_item(cpuFeatures, "sha2") ? NPY__HWCAP2_SHA2 : 0; - *hwcap2 |= has_list_item(cpuFeatures, "crc32") ? NPY__HWCAP2_CRC32 : 0; - return 1; -} -#endif +/* + * Copyright (C) 2010 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#ifndef __NPY_CPUINFO_PARSER_H__ +#define __NPY_CPUINFO_PARSER_H__ +#include +#include +#include +#include +#include + +#define NPY__HWCAP 16 +#define NPY__HWCAP2 26 + +// arch/arm/include/uapi/asm/hwcap.h +#define NPY__HWCAP_HALF (1 << 1) +#define NPY__HWCAP_NEON (1 << 12) +#define NPY__HWCAP_VFPv3 (1 << 13) +#define NPY__HWCAP_VFPv4 (1 << 16) +#define NPY__HWCAP2_AES (1 << 0) +#define NPY__HWCAP2_PMULL (1 << 1) +#define NPY__HWCAP2_SHA1 (1 << 2) +#define NPY__HWCAP2_SHA2 (1 << 3) +#define NPY__HWCAP2_CRC32 (1 << 4) +// arch/arm64/include/uapi/asm/hwcap.h +#define NPY__HWCAP_FP (1 << 0) +#define NPY__HWCAP_ASIMD (1 << 1) +#define NPY__HWCAP_FPHP (1 << 9) +#define NPY__HWCAP_ASIMDHP (1 << 10) +#define NPY__HWCAP_ASIMDDP (1 << 20) +#define NPY__HWCAP_ASIMDFHM (1 << 23) +/* + * Get the size of a file by reading it until the end. This is needed + * because files under /proc do not always return a valid size when + * using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed. + */ +static int +get_file_size(const char* pathname) +{ + int fd, result = 0; + char buffer[256]; + + fd = open(pathname, O_RDONLY); + if (fd < 0) { + return -1; + } + + for (;;) { + int ret = read(fd, buffer, sizeof buffer); + if (ret < 0) { + if (errno == EINTR) { + continue; + } + break; + } + if (ret == 0) { + break; + } + result += ret; + } + close(fd); + return result; +} + +/* + * Read the content of /proc/cpuinfo into a user-provided buffer. + * Return the length of the data, or -1 on error. Does *not* + * zero-terminate the content. Will not read more + * than 'buffsize' bytes. + */ +static int +read_file(const char* pathname, char* buffer, size_t buffsize) +{ + int fd, count; + + fd = open(pathname, O_RDONLY); + if (fd < 0) { + return -1; + } + count = 0; + while (count < (int)buffsize) { + int ret = read(fd, buffer + count, buffsize - count); + if (ret < 0) { + if (errno == EINTR) { + continue; + } + if (count == 0) { + count = -1; + } + break; + } + if (ret == 0) { + break; + } + count += ret; + } + close(fd); + return count; +} + +/* + * Extract the content of a the first occurence of a given field in + * the content of /proc/cpuinfo and return it as a heap-allocated + * string that must be freed by the caller. 
+ * + * Return NULL if not found + */ +static char* +extract_cpuinfo_field(const char* buffer, int buflen, const char* field) +{ + int fieldlen = strlen(field); + const char* bufend = buffer + buflen; + char* result = NULL; + int len; + const char *p, *q; + + /* Look for first field occurence, and ensures it starts the line. */ + p = buffer; + for (;;) { + p = memmem(p, bufend-p, field, fieldlen); + if (p == NULL) { + goto EXIT; + } + + if (p == buffer || p[-1] == '\n') { + break; + } + + p += fieldlen; + } + + /* Skip to the first column followed by a space */ + p += fieldlen; + p = memchr(p, ':', bufend-p); + if (p == NULL || p[1] != ' ') { + goto EXIT; + } + + /* Find the end of the line */ + p += 2; + q = memchr(p, '\n', bufend-p); + if (q == NULL) { + q = bufend; + } + + /* Copy the line into a heap-allocated buffer */ + len = q - p; + result = malloc(len + 1); + if (result == NULL) { + goto EXIT; + } + + memcpy(result, p, len); + result[len] = '\0'; + +EXIT: + return result; +} + +/* + * Checks that a space-separated list of items contains one given 'item'. + * Returns 1 if found, 0 otherwise. + */ +static int +has_list_item(const char* list, const char* item) +{ + const char* p = list; + int itemlen = strlen(item); + + if (list == NULL) { + return 0; + } + + while (*p) { + const char* q; + + /* skip spaces */ + while (*p == ' ' || *p == '\t') { + p++; + } + + /* find end of current list item */ + q = p; + while (*q && *q != ' ' && *q != '\t') { + q++; + } + + if (itemlen == q-p && !memcmp(p, item, itemlen)) { + return 1; + } + + /* skip to next item */ + p = q; + } + return 0; +} + +static void setHwcap(char* cpuFeatures, unsigned long* hwcap) { + *hwcap |= has_list_item(cpuFeatures, "neon") ? NPY__HWCAP_NEON : 0; + *hwcap |= has_list_item(cpuFeatures, "half") ? NPY__HWCAP_HALF : 0; + *hwcap |= has_list_item(cpuFeatures, "vfpv3") ? NPY__HWCAP_VFPv3 : 0; + *hwcap |= has_list_item(cpuFeatures, "vfpv4") ? NPY__HWCAP_VFPv4 : 0; + + *hwcap |= has_list_item(cpuFeatures, "asimd") ? NPY__HWCAP_ASIMD : 0; + *hwcap |= has_list_item(cpuFeatures, "fp") ? NPY__HWCAP_FP : 0; + *hwcap |= has_list_item(cpuFeatures, "fphp") ? NPY__HWCAP_FPHP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimdhp") ? NPY__HWCAP_ASIMDHP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimddp") ? NPY__HWCAP_ASIMDDP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimdfhm") ? NPY__HWCAP_ASIMDFHM : 0; +} + +static int +get_feature_from_proc_cpuinfo(unsigned long *hwcap, unsigned long *hwcap2) { + char* cpuinfo = NULL; + int cpuinfo_len; + cpuinfo_len = get_file_size("/proc/cpuinfo"); + if (cpuinfo_len < 0) { + return 0; + } + cpuinfo = malloc(cpuinfo_len); + if (cpuinfo == NULL) { + return 0; + } + cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, cpuinfo_len); + char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features"); + if(cpuFeatures == NULL) { + return 0; + } + setHwcap(cpuFeatures, hwcap); + *hwcap2 |= *hwcap; + *hwcap2 |= has_list_item(cpuFeatures, "aes") ? NPY__HWCAP2_AES : 0; + *hwcap2 |= has_list_item(cpuFeatures, "pmull") ? NPY__HWCAP2_PMULL : 0; + *hwcap2 |= has_list_item(cpuFeatures, "sha1") ? NPY__HWCAP2_SHA1 : 0; + *hwcap2 |= has_list_item(cpuFeatures, "sha2") ? NPY__HWCAP2_SHA2 : 0; + *hwcap2 |= has_list_item(cpuFeatures, "crc32") ? 
NPY__HWCAP2_CRC32 : 0; + return 1; +} +#endif From 7d6d74a3ac913ea306d5b237112d3f034b996232 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 27 Apr 2021 13:45:46 +0200 Subject: [PATCH 0992/1270] DOC: Typo fix Co-Authored-By: h-vetinari --- numpy/lib/mixins.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index 4763a071026e..f137bb5bcf4b 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -4,7 +4,7 @@ from abc import ABCMeta, abstractmethod __all__: List[str] # NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, -# even though it's relient on subclasses implementing `__array_ufunc__` +# even though it's reliant on subclasses implementing `__array_ufunc__` class NDArrayOperatorsMixin(metaclass=ABCMeta): @abstractmethod From b32985638bd3d8b8829676131c98066647076e06 Mon Sep 17 00:00:00 2001 From: Olivier Grisel Date: Tue, 27 Apr 2021 19:13:18 +0200 Subject: [PATCH 0993/1270] TST: Skip f2py TestSharedMemory for LONGDOUBLE on macos/arm64 LONGDOUBLE is an alias for DOUBLE on this platform. --- numpy/f2py/tests/test_array_from_pyobj.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 77149e4e7a0b..649fd1c4863b 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -1,6 +1,7 @@ import os import sys import copy +import platform import pytest import numpy as np @@ -118,8 +119,11 @@ def is_intent_exact(self, *names): # 16 byte long double types this means the inout intent cannot be satisfied # and several tests fail as the alignment flag can be randomly true or fals # when numpy gains an aligned allocator the tests could be enabled again +# +# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. 
if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) and - sys.platform != 'win32'): + sys.platform != 'win32' and + (platform.system(), platform.processor()) != ('Darwin', 'arm')): _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] From d1767bca66eb0f3cce14d1ab586ec2be9f7fab9e Mon Sep 17 00:00:00 2001 From: HowJmay Date: Tue, 27 Apr 2021 16:35:54 +0800 Subject: [PATCH 0994/1270] fix: Fix the typo "implment" --- numpy/core/src/common/simd/avx2/arithmetic.h | 6 +++--- numpy/core/src/common/simd/avx512/arithmetic.h | 6 +++--- numpy/core/src/common/simd/sse/arithmetic.h | 6 +++--- numpy/core/src/multiarray/convert_datatype.c | 4 ++-- numpy/core/src/multiarray/convert_datatype.h | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index 7cd5a0ea67f1..b1e297988d51 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -26,7 +26,7 @@ #define npyv_adds_s8 _mm256_adds_epi8 #define npyv_adds_u16 _mm256_adds_epu16 #define npyv_adds_s16 _mm256_adds_epi16 -// TODO: rest, after implment Packs intrins +// TODO: rest, after implement Packs intrins /*************************** * Subtraction @@ -48,7 +48,7 @@ #define npyv_subs_s8 _mm256_subs_epi8 #define npyv_subs_u16 _mm256_subs_epu16 #define npyv_subs_s16 _mm256_subs_epi16 -// TODO: rest, after implment Packs intrins +// TODO: rest, after implement Packs intrins /*************************** * Multiplication @@ -64,7 +64,7 @@ #define npyv_mul_f64 _mm256_mul_pd // saturated -// TODO: after implment Packs intrins +// TODO: after implement Packs intrins /*************************** * Integer Division diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index 29e1af7e806d..8a2790e93b17 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -39,7 +39,7 @@ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_adds_u16, _mm256_adds_epu16) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_adds_s16, _mm256_adds_epi16) #endif -// TODO: rest, after implment Packs intrins +// TODO: rest, after implement Packs intrins /*************************** * Subtraction @@ -73,7 +73,7 @@ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_subs_u16, _mm256_subs_epu16) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_subs_s16, _mm256_subs_epi16) #endif -// TODO: rest, after implment Packs intrins +// TODO: rest, after implement Packs intrins /*************************** * Multiplication @@ -104,7 +104,7 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) #define npyv_mul_f64 _mm512_mul_pd // saturated -// TODO: after implment Packs intrins +// TODO: after implement Packs intrins /*************************** * Integer Division diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index a1ec88f56489..1b02a4107b45 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -25,7 +25,7 @@ #define npyv_adds_s8 _mm_adds_epi8 #define npyv_adds_u16 _mm_adds_epu16 #define npyv_adds_s16 _mm_adds_epi16 -// TODO: rest, after implment Packs intrins +// TODO: rest, after implement Packs intrins /*************************** * Subtraction @@ -47,7 +47,7 @@ #define npyv_subs_s8 _mm_subs_epi8 #define npyv_subs_u16 _mm_subs_epu16 #define 
npyv_subs_s16 _mm_subs_epi16 -// TODO: rest, after implment Packs intrins +// TODO: rest, after implement Packs intrins /*************************** * Multiplication @@ -83,7 +83,7 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b) #define npyv_mul_f64 _mm_mul_pd // saturated -// TODO: after implment Packs intrins +// TODO: after implement Packs intrins /*************************** * Integer Division diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 18179f253172..8dca184cb52e 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -1888,7 +1888,7 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn) * @return 0 on success -1 on failure. */ NPY_NO_EXPORT int -PyArray_AddCastingImplmentation(PyBoundArrayMethodObject *meth) +PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth) { if (meth->method->nin != 1 || meth->method->nout != 1) { PyErr_SetString(PyExc_TypeError, @@ -1955,7 +1955,7 @@ PyArray_AddCastingImplementation_FromSpec(PyArrayMethod_Spec *spec, int private) if (meth == NULL) { return -1; } - int res = PyArray_AddCastingImplmentation(meth); + int res = PyArray_AddCastingImplementation(meth); Py_DECREF(meth); if (res < 0) { return -1; diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h index 14218edee60b..33517b8ca220 100644 --- a/numpy/core/src/multiarray/convert_datatype.h +++ b/numpy/core/src/multiarray/convert_datatype.h @@ -57,7 +57,7 @@ PyArray_FindConcatenationDescriptor( npy_intp n, PyArrayObject **arrays, PyObject *requested_dtype); NPY_NO_EXPORT int -PyArray_AddCastingImplmentation(PyBoundArrayMethodObject *meth); +PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth); NPY_NO_EXPORT int PyArray_AddCastingImplementation_FromSpec(PyArrayMethod_Spec *spec, int private); From 6d871ca31abad2ce7065469ca8b530e4daa4996f Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 27 Apr 2021 16:02:45 -0500 Subject: [PATCH 0995/1270] MAINT: Remove dead codepath in generalized ufuncs If the iterator has size zero, then that means that the outer iteration has size. Since the outer iteration is unaffected by core dimensions, in that case the output always has size zero and the code does nothing but unnecessary checking the array sizes. --- numpy/core/src/umath/ufunc_object.c | 30 +---------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index f17dd1e61c1e..527e0d74d8ad 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -2457,7 +2457,7 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op, /* Fill in any allocated outputs */ { PyArrayObject **operands = NpyIter_GetOperandArray(iter); - for (i = 0; i < nop; ++i) { + for (i = nin; i < nop; ++i) { if (op[i] == NULL) { op[i] = operands[i]; Py_INCREF(op[i]); @@ -2592,34 +2592,6 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op, if (!needs_api && !NpyIter_IterationNeedsAPI(iter)) { NPY_END_THREADS; } - } else { - /** - * For each output operand, check if it has non-zero size, - * and assign the identity if it does. For example, a dot - * product of two zero-length arrays will be a scalar, - * which has size one. 
- */ - npy_bool reorderable; - PyObject *identity = _get_identity(ufunc, &reorderable); - if (identity == NULL) { - retval = -1; - goto fail; - } - - for (i = nin; i < nop; ++i) { - if (PyArray_SIZE(op[i]) != 0) { - if (identity == Py_None) { - PyErr_Format(PyExc_ValueError, - "ufunc %s ", - ufunc_name); - Py_DECREF(identity); - retval = -1; - goto fail; - } - PyArray_FillWithScalar(op[i], identity); - } - } - Py_DECREF(identity); } /* Check whether any errors occurred during the loop */ From 18ffb74487c0e121a369fd8b4e2824caa7a20b95 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 27 Apr 2021 16:17:19 -0500 Subject: [PATCH 0996/1270] TST: Add test for non-broadcastibility of the gufunc output This ensures that it is correct to not initize the output array if the outer iteration has size 0. The broadcast result would always have size 0 in that case. If the iterator rejects such a broadcast it cannot happen. --- numpy/core/tests/test_multiarray.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 073433bd1cad..f16b9d39a64e 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -6446,6 +6446,16 @@ def test_out_arg(self): c = c.astype(tgt.dtype) assert_array_equal(c, tgt) + def test_empty_out(self): + # Check that the output cannot be broadcast, so that it cannot be + # size zero when the outer dimensions (iterator size) has size zero. + arr = np.ones((0, 1, 1)) + out = np.ones((1, 1, 1)) + assert self.matmul(arr, arr).shape == (0, 1, 1) + + with pytest.raises(ValueError, match=r"non-broadcastable"): + self.matmul(arr, arr, out=out) + def test_out_contiguous(self): a = np.ones((5, 2), dtype=float) b = np.array([[1, 3], [5, 7]], dtype=float) From df727c26b63962b74d680b4c60c14653774a1687 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Wed, 28 Apr 2021 16:46:01 +0000 Subject: [PATCH 0997/1270] Upgrade to GitHub-native Dependabot --- .dependabot/config.yml | 9 --------- .github/dependabot.yml | 22 ++++++++++++++++++++++ 2 files changed, 22 insertions(+), 9 deletions(-) delete mode 100644 .dependabot/config.yml create mode 100644 .github/dependabot.yml diff --git a/.dependabot/config.yml b/.dependabot/config.yml deleted file mode 100644 index 160ec85cfa7f..000000000000 --- a/.dependabot/config.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 1 -update_configs: - - package_manager: "python" - directory: "/" - update_schedule: "weekly" - commit_message: - prefix: "MAINT" - default_labels: - - "03 - Maintenance" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000000..16ce0846cb59 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,22 @@ +version: 2 +updates: +- package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + open-pull-requests-limit: 10 + labels: + - 03 - Maintenance + ignore: + - dependency-name: gitpython + versions: + - "> 3.1.13, < 3.2" + - dependency-name: pydata-sphinx-theme + versions: + - 0.6.0 + - 0.6.1 + - dependency-name: hypothesis + versions: + - 6.3.0 + commit-message: + prefix: MAINT From 7bd275f263fe1493d59d88690b902f865777e8bc Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 28 Apr 2021 12:33:52 -0600 Subject: [PATCH 0998/1270] MAINT: Fix azure linter problems with pip 20.1 The default Python 3.8 pip version seems to have been upgraded, leading to a large number of harmless messages being printed 
during package installation. This PR fixes the linter script to ignore the messages. --- azure-pipelines.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 01749145008e..9b8373954e00 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -29,9 +29,10 @@ stages: addToPath: true architecture: 'x64' - script: >- - python -m pip --disable-pip-version-check install -r linter_requirements.txt + python -m pip install -r linter_requirements.txt displayName: 'Install tools' - failOnStderr: true + # pip 21.1 emits a pile of garbage messages to annoy users :) + # failOnStderr: true - script: | python tools/linter.py --branch origin/$(System.PullRequest.TargetBranch) displayName: 'Run Lint Checks' From 7f223e6ab73dfcb1bf5d2497df967eb7647f39c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Apr 2021 19:56:09 +0000 Subject: [PATCH 0999/1270] MAINT: Bump hypothesis from 6.10.0 to 6.10.1 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.10.0 to 6.10.1. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.10.0...hypothesis-python-6.10.1) Signed-off-by: dependabot[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 3a4a89969818..2cb57d841792 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.23 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.10.0 +hypothesis==6.10.1 pytest==6.2.3 pytz==2021.1 pytest-cov==2.11.1 From e28da7a3d50862fa99b8b704d60fc6543b5af631 Mon Sep 17 00:00:00 2001 From: Matthew Badin Date: Wed, 28 Apr 2021 16:56:01 -0700 Subject: [PATCH 1000/1270] BLD: Enable Accelerate Framework --- MANIFEST.in | 1 + numpy/_build_utils/README | 9 ++++ numpy/_build_utils/__init__.py | 0 numpy/_build_utils/apple_accelerate.py | 21 +++++++++ numpy/core/setup.py | 9 ++-- numpy/core/tests/test_multiarray.py | 65 ++++++++++++++++++++++++++ numpy/distutils/system_info.py | 54 ++++++++------------- numpy/linalg/setup.py | 5 -- 8 files changed, 119 insertions(+), 45 deletions(-) create mode 100644 numpy/_build_utils/README create mode 100644 numpy/_build_utils/__init__.py create mode 100644 numpy/_build_utils/apple_accelerate.py diff --git a/MANIFEST.in b/MANIFEST.in index 8ec62123b998..856c64d0c6b9 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -22,6 +22,7 @@ include numpy/*.pxd # Note that sub-directories that don't have __init__ are apparently not # included by 'recursive-include', so list those separately recursive-include numpy * +recursive-include numpy/_build_utils * recursive-include numpy/linalg/lapack_lite * recursive-include tools * # Add sdist files whose use depends on local configuration. diff --git a/numpy/_build_utils/README b/numpy/_build_utils/README new file mode 100644 index 000000000000..73d93593e6f9 --- /dev/null +++ b/numpy/_build_utils/README @@ -0,0 +1,9 @@ +======= +WARNING +======= + +This directory (numpy/_build_utils) is *not* part of the public numpy API, + - it is internal build support for numpy. 
+ - it is only present in source distributions or during an in place build + - it is *not* installed with the rest of numpy + diff --git a/numpy/_build_utils/__init__.py b/numpy/_build_utils/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/_build_utils/apple_accelerate.py b/numpy/_build_utils/apple_accelerate.py new file mode 100644 index 000000000000..8ce54619ee5c --- /dev/null +++ b/numpy/_build_utils/apple_accelerate.py @@ -0,0 +1,21 @@ +import os +import sys +import re + +__all__ = ['uses_accelerate_framework'] + +def uses_accelerate_framework(info): + """ Returns True if Accelerate framework is used for BLAS/LAPACK """ + # If we're not building on Darwin (macOS), don't use Accelerate + if sys.platform != "darwin": + return False + # If we're building on macOS, but targeting a different platform, + # don't use Accelerate. + if os.getenv('_PYTHON_HOST_PLATFORM', None): + return False + r_accelerate = re.compile("Accelerate") + extra_link_args = info.get('extra_link_args', '') + for arg in extra_link_args: + if r_accelerate.search(arg): + return True + return False diff --git a/numpy/core/setup.py b/numpy/core/setup.py index df405bcaf487..f59b67c88bc1 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -10,6 +10,10 @@ from numpy.distutils import log from distutils.dep_util import newer from sysconfig import get_config_var + +from numpy._build_utils.apple_accelerate import ( + uses_accelerate_framework + ) from numpy.compat import npy_load_module from setup_common import * # noqa: F403 @@ -405,11 +409,6 @@ def configuration(parent_package='',top_path=None): from numpy.distutils.system_info import (get_info, blas_opt_info, lapack_opt_info) - # Accelerate is buggy, disallow it. See also numpy/linalg/setup.py - for opt_order in (blas_opt_info.blas_order, lapack_opt_info.lapack_order): - if 'accelerate' in opt_order: - opt_order.remove('accelerate') - config = Configuration('core', parent_package, top_path) local_dir = config.local_path codegen_dir = join(local_dir, 'code_generators') diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 073433bd1cad..40b435f2e20a 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -6194,6 +6194,71 @@ def test_dot_array_order(self): assert_equal(np.dot(b, a), res) assert_equal(np.dot(b, b), res) + def test_accelerate_framework_sgemv_fix(self): + + def aligned_array(shape, align, dtype, order='C'): + d = dtype(0) + N = np.prod(shape) + tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) + address = tmp.__array_interface__["data"][0] + for offset in range(align): + if (address + offset) % align == 0: + break + tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) + return tmp.reshape(shape, order=order) + + def as_aligned(arr, align, dtype, order='C'): + aligned = aligned_array(arr.shape, align, dtype, order) + aligned[:] = arr[:] + return aligned + + def assert_dot_close(A, X, desired): + assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) + + m = aligned_array(100, 15, np.float32) + s = aligned_array((100, 100), 15, np.float32) + np.dot(s, m) # this will always segfault if the bug is present + + testdata = itertools.product((15,32), (10000,), (200,89), ('C','F')) + for align, m, n, a_order in testdata: + # Calculation in double precision + A_d = np.random.rand(m, n) + X_d = np.random.rand(n) + desired = np.dot(A_d, X_d) + # Calculation with aligned single precision + A_f = as_aligned(A_d, align, np.float32, order=a_order) + 
X_f = as_aligned(X_d, align, np.float32) + assert_dot_close(A_f, X_f, desired) + # Strided A rows + A_d_2 = A_d[::2] + desired = np.dot(A_d_2, X_d) + A_f_2 = A_f[::2] + assert_dot_close(A_f_2, X_f, desired) + # Strided A columns, strided X vector + A_d_22 = A_d_2[:, ::2] + X_d_2 = X_d[::2] + desired = np.dot(A_d_22, X_d_2) + A_f_22 = A_f_2[:, ::2] + X_f_2 = X_f[::2] + assert_dot_close(A_f_22, X_f_2, desired) + # Check the strides are as expected + if a_order == 'F': + assert_equal(A_f_22.strides, (8, 8 * m)) + else: + assert_equal(A_f_22.strides, (8 * n, 8)) + assert_equal(X_f_2.strides, (8,)) + # Strides in A rows + cols only + X_f_2c = as_aligned(X_f_2, align, np.float32) + assert_dot_close(A_f_22, X_f_2c, desired) + # Strides just in A cols + A_d_12 = A_d[:, ::2] + desired = np.dot(A_d_12, X_d_2) + A_f_12 = A_f[:, ::2] + assert_dot_close(A_f_12, X_f_2c, desired) + # Strides in A cols and X + assert_dot_close(A_f_12, X_f_2, desired) + + class MatmulCommon: """Common tests for '@' operator and numpy.matmul. diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 9e192329f635..082b029d7047 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -375,22 +375,6 @@ def add_system_root(library_root): so_ext = get_shared_lib_extension() -def is_symlink_to_accelerate(filename): - accelpath = '/System/Library/Frameworks/Accelerate.framework' - return (sys.platform == 'darwin' and os.path.islink(filename) and - os.path.realpath(filename).startswith(accelpath)) - - -_accel_msg = ( - 'Found {filename}, but that file is a symbolic link to the ' - 'MacOS Accelerate framework, which is not supported by NumPy. ' - 'You must configure the build to use a different optimized library, ' - 'or disable the use of optimized BLAS and LAPACK by setting the ' - 'environment variables NPY_BLAS_ORDER="" and NPY_LAPACK_ORDER="" ' - 'before building NumPy.' -) - - def get_standard_file(fname): """Returns a list of files named 'fname' from 1) System-wide directory (directory-location of this module) @@ -539,6 +523,7 @@ def get_info(name, notfound_action=0): 'blis': blis_info, # use blas_opt instead 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead 'blas_mkl': blas_mkl_info, # use blas_opt instead + 'accelerate': accelerate_info, # use blas_opt instead 'openblas64_': openblas64__info, 'openblas64__lapack': openblas64__lapack_info, 'openblas_ilp64': openblas_ilp64_info, @@ -1029,9 +1014,6 @@ def _find_lib(self, lib_dir, lib, exts): for prefix in lib_prefixes: p = self.combine_paths(lib_dir, prefix + lib + ext) if p: - # p[0] is the full path to the binary library file. 
- if is_symlink_to_accelerate(p[0]): - raise RuntimeError(_accel_msg.format(filename=p[0])) break if p: assert len(p) == 1 @@ -1766,10 +1748,18 @@ def get_atlas_version(**config): class lapack_opt_info(system_info): notfounderror = LapackNotFoundError + # List of all known LAPACK libraries, in the default order - lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'lapack'] + lapack_order = ['accelerate', 'mkl', 'openblas', 'flame', 'atlas', 'lapack'] order_env_var_name = 'NPY_LAPACK_ORDER' + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + def _calc_info_mkl(self): info = get_info('lapack_mkl') if info: @@ -1820,13 +1810,6 @@ def _calc_info_atlas(self): return True return False - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - def _get_info_blas(self): # Default to get the optimized BLAS implementation info = get_info('blas_opt') @@ -1942,9 +1925,17 @@ class lapack64__opt_info(lapack_ilp64_opt_info): class blas_opt_info(system_info): notfounderror = BlasNotFoundError # List of all known BLAS libraries, in the default order - blas_order = ['mkl', 'blis', 'openblas', 'atlas', 'blas'] + + blas_order = ['accelerate', 'mkl', 'blis', 'openblas', 'atlas', 'blas'] order_env_var_name = 'NPY_BLAS_ORDER' + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + def _calc_info_mkl(self): info = get_info('blas_mkl') if info: @@ -1979,13 +1970,6 @@ def _calc_info_atlas(self): return True return False - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - def _calc_info_blas(self): # Warn about a non-optimized BLAS library warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py index 5c9f2a4cb56c..e2944f38c33d 100644 --- a/numpy/linalg/setup.py +++ b/numpy/linalg/setup.py @@ -9,11 +9,6 @@ def configuration(parent_package='', top_path=None): config.add_subpackage('tests') - # Accelerate is buggy, disallow it. See also numpy/core/setup.py - for opt_order in (blas_opt_info.blas_order, lapack_opt_info.lapack_order): - if 'accelerate' in opt_order: - opt_order.remove('accelerate') - # Configure lapack_lite src_dir = 'lapack_lite' From 6c324961c1c8805e1a698ea6928940f44d3430dd Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 29 Apr 2021 10:44:38 -0600 Subject: [PATCH 1001/1270] MAINT: Update PyPy version used by CI Use PyPy 7.3.4 instead of 7.3.4rc1. --- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 7a6522f6a386..c7d463348f6d 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -212,7 +212,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v2 with: - python-version: pypy-3.7-v7.3.4rc1 + python-version: pypy-3.7-v7.3.4 - uses: ./.github/actions sdist: From 18cb9b43e846ff1b42ce0b19d8909bd1be03773e Mon Sep 17 00:00:00 2001 From: HowJmay Date: Wed, 28 Apr 2021 01:35:59 +0800 Subject: [PATCH 1002/1270] ENH: Add max values comparison for floating point SIMD intrinsics for comparing double/single precision has been added. Now only NEON and SSE ones are added. 
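
For reference, the `maxp` variants follow the IEC 60559 maxNum rule spelled out
in the new header comments: a NaN in one lane defers to the other operand's
lane, and NaN is produced only when both corresponding lanes are NaN. At the
Python level this is essentially the rule `np.fmax` already follows, so a small
illustrative sketch of the intended semantics (for orientation only, not part
of the patch itself) is:

```
>>> import numpy as np
>>> a = np.array([np.nan, np.nan, 1.0, -np.inf], dtype=np.float32)
>>> b = np.array([np.nan, 10.0, np.nan, 5.0], dtype=np.float32)
>>> np.fmax(a, b)        # NaN survives only where both inputs are NaN
array([nan, 10.,  1.,  5.], dtype=float32)
```

The plain `npyv_max_*` variants map to the native instruction and make no such
guarantee for NaN inputs.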
--- numpy/core/src/_simd/_simd.dispatch.c.src | 10 ++++++++ numpy/core/src/common/simd/avx2/math.h | 19 ++++++++++++++ numpy/core/src/common/simd/avx512/math.h | 17 +++++++++++++ numpy/core/src/common/simd/neon/math.h | 20 +++++++++++++++ numpy/core/src/common/simd/sse/math.h | 19 ++++++++++++++ numpy/core/src/common/simd/vsx/math.h | 9 +++++++ numpy/core/tests/test_simd.py | 30 +++++++++++++++++++++++ 7 files changed, 124 insertions(+) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index 4acd20a75cd1..d5984009e976 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -387,6 +387,11 @@ SIMD_IMPL_INTRIN_1(@intrin@_@sfx@, v@sfx@, v@sfx@) /**end repeat1**/ #endif +#if @fp_only@ +SIMD_IMPL_INTRIN_2(max_@sfx@, v@sfx@, v@sfx@, v@sfx@) +SIMD_IMPL_INTRIN_2(maxp_@sfx@, v@sfx@, v@sfx@, v@sfx@) +#endif + /*************************** * Mask operations ***************************/ @@ -607,6 +612,11 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ #endif +#if @fp_only@ +SIMD_INTRIN_DEF(max_@sfx@) +SIMD_INTRIN_DEF(maxp_@sfx@) +#endif + /*************************** * Mask operations ***************************/ diff --git a/numpy/core/src/common/simd/avx2/math.h b/numpy/core/src/common/simd/avx2/math.h index b3eba6f5ff79..84ab024d02b4 100644 --- a/numpy/core/src/common/simd/avx2/math.h +++ b/numpy/core/src/common/simd/avx2/math.h @@ -37,4 +37,23 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a) { return _mm256_mul_pd(a, a); } +// Maximum, natively mapping with no guarantees to handle NaN. +#define npyv_max_f32 _mm256_max_ps +#define npyv_max_f64 _mm256_max_pd +// Maximum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. +NPY_FINLINE npyv_f32 npyv_maxp_f32(npyv_f32 a, npyv_f32 b) +{ + __m256 nn = _mm256_cmp_ps(b, b, _CMP_ORD_Q); + __m256 max = _mm256_max_ps(a, b); + return _mm256_blendv_ps(a, max, nn); +} +NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) +{ + __m256d nn = _mm256_cmp_pd(b, b, _CMP_ORD_Q); + __m256d max = _mm256_max_pd(a, b); + return _mm256_blendv_pd(a, max, nn); +} + #endif diff --git a/numpy/core/src/common/simd/avx512/math.h b/numpy/core/src/common/simd/avx512/math.h index 1db710670208..66836229857d 100644 --- a/numpy/core/src/common/simd/avx512/math.h +++ b/numpy/core/src/common/simd/avx512/math.h @@ -46,4 +46,21 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a) { return _mm512_mul_pd(a, a); } +// Maximum, natively mapping with no guarantees to handle NaN. +#define npyv_max_f32 _mm512_max_ps +#define npyv_max_f64 _mm512_max_pd +// Maximum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. 
+NPY_FINLINE npyv_f32 npyv_maxp_f32(npyv_f32 a, npyv_f32 b) +{ + __mmask16 nn = _mm512_cmp_ps_mask(b, b, _CMP_ORD_Q); + return _mm512_mask_max_ps(a, nn, a, b); +} +NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) +{ + __mmask8 nn = _mm512_cmp_pd_mask(b, b, _CMP_ORD_Q); + return _mm512_mask_max_pd(a, nn, a, b); +} + #endif diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h index a2bbdf2a5d30..09b7bbc9efe1 100644 --- a/numpy/core/src/common/simd/neon/math.h +++ b/numpy/core/src/common/simd/neon/math.h @@ -83,4 +83,24 @@ NPY_FINLINE npyv_f32 npyv_recip_f32(npyv_f32 a) } #endif // NPY_SIMD_F64 +// Maximum, natively mapping with no guarantees to handle NaN. +#define npyv_max_f32 vmaxq_f32 +#define npyv_max_f64 vmaxq_f64 +// Maximum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. +#ifdef NPY_HAVE_ASIMD + #define npyv_maxp_f32 vmaxnmq_f32 +#else + NPY_FINLINE npyv_f32 npyv_maxp_f32(npyv_f32 a, npyv_f32 b) + { + npyv_u32 nn_a = vceqq_f32(a, a); + npyv_u32 nn_b = vceqq_f32(b, b); + return vmaxq_f32(vbslq_f32(nn_a, a, b), vbslq_f32(nn_b, b, a)); + } +#endif +#if NPY_SIMD_F64 + #define npyv_maxp_f64 vmaxnmq_f64 +#endif // NPY_SIMD_F64 + #endif // _NPY_SIMD_SSE_MATH_H diff --git a/numpy/core/src/common/simd/sse/math.h b/numpy/core/src/common/simd/sse/math.h index b7203cd8993d..15e9f7e44fb4 100644 --- a/numpy/core/src/common/simd/sse/math.h +++ b/numpy/core/src/common/simd/sse/math.h @@ -37,4 +37,23 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a) { return _mm_mul_pd(a, a); } +// Maximum, natively mapping with no guarantees to handle NaN. +#define npyv_max_f32 _mm_max_ps +#define npyv_max_f64 _mm_max_pd +// Maximum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. +NPY_FINLINE npyv_f32 npyv_maxp_f32(npyv_f32 a, npyv_f32 b) +{ + __m128 nn = _mm_cmpord_ps(b, b); + __m128 max = _mm_max_ps(a, b); + return npyv_select_f32(_mm_castps_si128(nn), max, a); +} +NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) +{ + __m128d nn = _mm_cmpord_pd(b, b); + __m128d max = _mm_max_pd(a, b); + return npyv_select_f64(_mm_castpd_si128(nn), max, a); +} + #endif diff --git a/numpy/core/src/common/simd/vsx/math.h b/numpy/core/src/common/simd/vsx/math.h index 7c8610b197eb..11bacd703f91 100644 --- a/numpy/core/src/common/simd/vsx/math.h +++ b/numpy/core/src/common/simd/vsx/math.h @@ -33,4 +33,13 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a) { return vec_mul(a, a); } +// Maximum, natively mapping with no guarantees to handle NaN. +#define npyv_max_f32 vec_max +#define npyv_max_f64 vec_max +// Maximum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. 
+#define npyv_maxp_f32 vec_max +#define npyv_maxp_f64 vec_max + #endif // _NPY_SIMD_VSX_MATH_H diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 8306a86d32fb..fb7dd88cf432 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -315,6 +315,36 @@ def test_square(self): data_square = [x*x for x in data] square = self.square(vdata) assert square == data_square + + def test_max(self): + """ + Test intrinics: + npyv_max_##SFX + npyv_maxp_##SFX + """ + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + data_max = [max(a, b) for a, b in zip(data_a, data_b)] + _max = self.max(vdata_a, vdata_b) + assert _max == data_max + maxp = self.maxp(vdata_a, vdata_b) + assert maxp == data_max + # test IEEE standards + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + max_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10), + (pinf, pinf, pinf), (pinf, 10, pinf), (10, pinf, pinf), + (ninf, ninf, ninf), (ninf, 10, 10), (10, ninf, 10)) + for case_operand1, case_operand2, desired in max_cases: + data_max = [desired]*self.nlanes + vdata_a = self.setall(case_operand1) + vdata_b = self.setall(case_operand2) + maxp = self.maxp(vdata_a, vdata_b) + assert maxp == pytest.approx(data_max, nan_ok=True) + if nan in (case_operand1, case_operand2, desired): + continue + _max = self.max(vdata_a, vdata_b) + assert _max == data_max def test_reciprocal(self): pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() From 3fd849356a2cc5a0b501454decc65090227b4d56 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 30 Apr 2021 13:40:43 -0500 Subject: [PATCH 1003/1270] API: Ensure that casting does not affect ufunc loop This ensures that casting does not affect ufunc loops, which means that the following will give the same result: ``` >>> np.not_equal(None, False, dtype=bool) True >>> np.not_equal(None, False, dtype=bool, casting="unsafe") False ``` This is absolutely necessary to make new promotion and type resolution sane. In some cases, it causes problems with existing weirder dtype usage, the workaround is that we allow the promoter (in current terms the type resolver) to resolve more strictly (potentially ignoring input types). I.e. a promoter (after not finding match), can use the fixed (signature) dtypes to infer the loop. Because this makes things more strict, most importantly `nextafter` causes problems. Code like: ``` np.nextafter(0., np.inf, dtype=np.float32) ``` looks harmless enough, but requires such preferential treatment. NumPy has such capabilities currently for the homogeneous type resolver, so changing to that fixes it (SciPy uses the above quite a lot). SciPy also has code like (in the tests): ``` np.nextafter(0, 1) ``` which will now FAIL. However, this is for the better since the above code is actually buggy: It will return different values on windows and 32bit linux because it will find the float32 instead of the float64 loop. That is almost certainly not the expected result. 
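
As a follow-up illustration for downstream code (the values shown assume the
usual IEEE-754 float64/float32 loops and are not part of the change itself),
the portable spellings under the stricter resolution fix the loop explicitly
instead of relying on integer inputs:

```
>>> np.nextafter(0., 1.)                     # float64 loop on every platform
5e-324
>>> np.nextafter(0., 1., dtype=np.float32)   # float32 loop, requested explicitly
1e-45
```

Either form selects the same loop on every platform, which is exactly what the
integer-input spelling above could not guarantee.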
--- numpy/core/code_generators/generate_umath.py | 2 +- numpy/core/src/umath/ufunc_type_resolution.c | 99 ++++++++++++++------ numpy/core/src/umath/ufunc_type_resolution.h | 1 + numpy/core/tests/test_scalar_methods.py | 3 +- numpy/core/tests/test_scalarmath.py | 3 +- numpy/core/tests/test_ufunc.py | 4 +- 6 files changed, 76 insertions(+), 36 deletions(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 6b6a0fe64ad5..ea74c107f83f 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -902,7 +902,7 @@ def english_upper(s): 'nextafter': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.nextafter'), - None, + "PyUFunc_SimpleUniformOperationTypeResolver", TD(flts), ), 'spacing': diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 465deb87b756..632cfe9e6ba5 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -288,7 +288,7 @@ PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, } else { /* Find the specified ufunc inner loop, and fill in the dtypes */ retval = type_tuple_type_resolver(ufunc, type_tup, - operands, casting, any_object, out_dtypes); + operands, input_casting, casting, any_object, out_dtypes); } return retval; @@ -553,6 +553,11 @@ PyUFunc_SimpleUniformOperationTypeResolver( * This is a fast-path, since all descriptors will be identical, mainly * when only a single descriptor was passed (which would set the out * one in the tuple), there is no need to check all loops. + * Note that this also allows (None, None, float64) to resolve to + * (float64, float64, float64), even when the inputs do not match, + * i.e. fixing a single part of the signature can fix all of them. + * This is necessary to support `nextafter(1., inf, dtype=float32)`, + * where it is "clear" we want to cast 1. and inf to float32. */ PyArray_Descr *descr = NULL; if (PyTuple_CheckExact(type_tup) && @@ -1196,8 +1201,8 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc, /* Use the default when datetime and timedelta are not involved */ if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) { - return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, - type_tup, out_dtypes); + return PyUFunc_SimpleUniformOperationTypeResolver(ufunc, + casting, operands, type_tup, out_dtypes); } if (type_num1 == NPY_TIMEDELTA) { @@ -1656,6 +1661,9 @@ ufunc_loop_matches(PyUFuncObject *self, if (types[i] == NPY_OBJECT && !any_object && self->ntypes > 1) { return 0; } + if (types[i] == NPY_NOTYPE) { + continue; /* Matched by being explicitly specified. */ + } /* * If type num is NPY_VOID and struct dtypes have been passed in, @@ -1705,6 +1713,9 @@ ufunc_loop_matches(PyUFuncObject *self, * outputs. */ for (i = nin; i < nop; ++i) { + if (types[i] == NPY_NOTYPE) { + continue; /* Matched by being explicitly specified. 
*/ + } if (op[i] != NULL) { PyArray_Descr *tmp = PyArray_DescrFromType(types[i]); if (tmp == NULL) { @@ -1723,7 +1734,6 @@ ufunc_loop_matches(PyUFuncObject *self, Py_DECREF(tmp); } } - return 1; } @@ -1864,12 +1874,15 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self, int n_specified, int *specified_types, PyArrayObject **op, + NPY_CASTING input_casting, NPY_CASTING casting, int any_object, int use_min_scalar, PyArray_Descr **out_dtype) { int i, j, nin = self->nin, nop = nin + self->nout; + assert(n_specified == nop); + int types[NPY_MAXARGS]; /* Use this to try to avoid repeating the same userdef loop search */ int last_userdef = -1; @@ -1902,28 +1915,28 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self, return -1; } for (; funcdata != NULL; funcdata = funcdata->next) { - int *types = funcdata->arg_types; - int matched = 1; - - if (n_specified == nop) { - for (j = 0; j < nop; ++j) { - if (types[j] != specified_types[j] && - specified_types[j] != NPY_NOTYPE) { - matched = 0; - break; - } + int *orig_types = funcdata->arg_types; + + /* Copy the types into an int array for matching */ + for (j = 0; j < nop; ++j) { + if (specified_types[j] == NPY_NOTYPE) { + types[j] = orig_types[j]; + continue; } - } else { - if (types[nin] != specified_types[0]) { - matched = 0; + if (orig_types[j] != specified_types[j]) { + break; } + /* indicate that we do not have to check this type anymore. */ + types[j] = NPY_NOTYPE; } - if (!matched) { + + if (j != nop) { + /* no match */ continue; } switch (ufunc_loop_matches(self, op, - casting, casting, + input_casting, casting, any_object, use_min_scalar, types, NULL, &no_castable_output, &err_src_typecode, @@ -1931,7 +1944,19 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self, /* It works */ case 1: set_ufunc_loop_data_types(self, op, - out_dtype, types, NULL); + out_dtype, orig_types, NULL); + /* + * In principle, we only need to validate the + * NPY_NOTYPE ones + */ + if (PyUFunc_ValidateCasting(self, + casting, op, out_dtype) < 0) { + for (j = 0; j < self->nargs; j++) { + Py_DECREF(out_dtype[j]); + out_dtype[j] = NULL; + } + return -1; + } return 1; /* Didn't match */ case 0: @@ -2074,6 +2099,7 @@ NPY_NO_EXPORT int type_tuple_type_resolver(PyUFuncObject *self, PyObject *type_tup, PyArrayObject **op, + NPY_CASTING input_casting, NPY_CASTING casting, int any_object, PyArray_Descr **out_dtype) @@ -2130,7 +2156,7 @@ type_tuple_type_resolver(PyUFuncObject *self, if (self->userloops) { switch (type_tuple_userloop_type_resolver(self, nop, specified_types, - op, casting, + op, input_casting, casting, any_object, use_min_scalar, out_dtype)) { /* Error */ @@ -2145,16 +2171,17 @@ type_tuple_type_resolver(PyUFuncObject *self, for (i = 0; i < self->ntypes; ++i) { char *orig_types = self->types + i*self->nargs; - /* Copy the types into an int array for matching */ + /* Check specified types and copy into an int array for matching */ for (j = 0; j < nop; ++j) { - types[j] = orig_types[j]; - } - - for (j = 0; j < nop; ++j) { - if (types[j] != specified_types[j] && - specified_types[j] != NPY_NOTYPE) { + if (specified_types[j] == NPY_NOTYPE) { + types[j] = orig_types[j]; + continue; + } + if (orig_types[j] != specified_types[j]) { break; } + /* indicate that we do not have to check this type anymore. 
*/ + types[j] = NPY_NOTYPE; } if (j < nop) { /* no match */ @@ -2162,7 +2189,7 @@ type_tuple_type_resolver(PyUFuncObject *self, } switch (ufunc_loop_matches(self, op, - casting, casting, + input_casting, casting, any_object, use_min_scalar, types, NULL, &no_castable_output, &err_src_typecode, @@ -2174,8 +2201,20 @@ type_tuple_type_resolver(PyUFuncObject *self, /* Cannot cast inputs */ continue; case 1: - /* Success */ + /* Success, fill also the NPY_NOTYPE (cast from char to int) */ + for (j = 0; j < nop; j++) { + types[j] = orig_types[j]; + } set_ufunc_loop_data_types(self, op, out_dtype, types, NULL); + /* In principle, we only need to validate the NPY_NOTYPE ones */ + if (PyUFunc_ValidateCasting(self, casting, op, out_dtype) < 0) { + for (j = 0; j < self->nargs; j++) { + Py_DECREF(out_dtype[j]); + out_dtype[j] = NULL; + } + return -1; + } + return 0; } } diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h index 1d6ad3358773..b11c69852889 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.h +++ b/numpy/core/src/umath/ufunc_type_resolution.h @@ -123,6 +123,7 @@ NPY_NO_EXPORT int type_tuple_type_resolver(PyUFuncObject *self, PyObject *type_tup, PyArrayObject **op, + NPY_CASTING input_casting, NPY_CASTING casting, int any_object, PyArray_Descr **out_dtype); diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py index 4f5fd298887d..3693bba59ce6 100644 --- a/numpy/core/tests/test_scalar_methods.py +++ b/numpy/core/tests/test_scalar_methods.py @@ -89,7 +89,8 @@ def test_against_known_values(self): ]) def test_roundtrip(self, ftype, frac_vals, exp_vals): for frac, exp in zip(frac_vals, exp_vals): - f = np.ldexp(frac, exp, dtype=ftype) + f = np.ldexp(ftype(frac), exp) + assert f.dtype == ftype n, d = f.as_integer_ratio() try: diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index d91b4a39146d..0718f365e7c8 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -307,8 +307,7 @@ def test_inplace_floordiv_handling(self): # promotes to float which does not fit a = np.array([1, 2], np.int64) b = np.array([1, 2], np.uint64) - pattern = 'could not be coerced to provided output parameter' - with assert_raises_regex(TypeError, pattern): + with pytest.raises(TypeError, match=r"Cannot cast ufunc"): a //= b diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index c13865ce4c28..47606876e5ee 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -2201,8 +2201,8 @@ def test_ufunc_casterrors(): # was aborted (this is not necessarily defined behaviour) assert out[-1] == 1 - with pytest.raises(ValueError): - # Input casting failure: + with pytest.raises(TypeError): + # Input "casting" failure (there is no intp out loop for object inputs) np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe") assert count == sys.getrefcount(value) From 427548cfc4881dba7172d244981e1d4c8215a731 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 30 Apr 2021 14:41:49 -0500 Subject: [PATCH 1004/1270] BUG: Hardcode that boolean division is promoted to int8 division --- numpy/core/src/umath/ufunc_type_resolution.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 632cfe9e6ba5..a747c0e02ddf 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ 
b/numpy/core/src/umath/ufunc_type_resolution.c @@ -1201,8 +1201,20 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc, /* Use the default when datetime and timedelta are not involved */ if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) { - return PyUFunc_SimpleUniformOperationTypeResolver(ufunc, + int res = PyUFunc_SimpleUniformOperationTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); + if (res < 0 || out_dtypes[0]->type_num != NPY_BOOL) { + return res; + } + /* + * Hardcode that boolean division is handled by casting to int8, + * we could consider deprecating this (this is safe so no need to + * "validate casting" again. + */ + Py_SETREF(out_dtypes[0], PyArray_DescrFromType(NPY_BYTE)); + Py_SETREF(out_dtypes[1], PyArray_DescrFromType(NPY_BYTE)); + Py_SETREF(out_dtypes[2], PyArray_DescrFromType(NPY_BYTE)); + return res; } if (type_num1 == NPY_TIMEDELTA) { From 9d023167249bd1cfc2c3161483b8253b313b0b46 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 21 Apr 2021 18:05:44 +0200 Subject: [PATCH 1005/1270] MAINT: Remove unsafe unions from `np.core.fromnumeric` --- numpy/core/fromnumeric.pyi | 342 ++++++------------ numpy/typing/tests/data/fail/fromnumeric.py | 108 +++--- numpy/typing/tests/data/reveal/fromnumeric.py | 258 +++++++------ 3 files changed, 286 insertions(+), 422 deletions(-) diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi index fc7f28a59e88..26a43c1a049e 100644 --- a/numpy/core/fromnumeric.pyi +++ b/numpy/core/fromnumeric.pyi @@ -23,7 +23,6 @@ from numpy.typing import ( ArrayLike, _ShapeLike, _Shape, - _IntLike_co, _NumberLike_co, ) @@ -42,11 +41,7 @@ _ScalarBuiltin = Union[str, bytes, dt.date, dt.timedelta, bool, int, float, comp _Scalar = Union[_ScalarBuiltin, _ScalarNumpy] # Integers and booleans can generally be used interchangeably -_ScalarIntOrBool = TypeVar("_ScalarIntOrBool", bound=Union[integer, bool_]) _ScalarGeneric = TypeVar("_ScalarGeneric", bound=generic) -_ScalarGenericDT = TypeVar( - "_ScalarGenericDT", bound=Union[dt.datetime, dt.timedelta, generic] -) _Number = TypeVar("_Number", bound=number) @@ -55,67 +50,51 @@ _Number = TypeVar("_Number", bound=number) # 2. A scalar comes in; a generic comes out # 3. An array-like object comes in; some keyword ensures that a generic comes out # 4. An array-like object comes in; an ndarray or generic comes out -@overload -def take( - a: _ScalarGenericDT, - indices: int, - axis: Optional[int] = ..., - out: Optional[ndarray] = ..., - mode: _ModeKind = ..., -) -> _ScalarGenericDT: ... -@overload -def take( - a: _Scalar, - indices: int, - axis: Optional[int] = ..., - out: Optional[ndarray] = ..., - mode: _ModeKind = ..., -) -> _ScalarNumpy: ... -@overload -def take( - a: ArrayLike, - indices: int, - axis: Optional[int] = ..., - out: Optional[ndarray] = ..., - mode: _ModeKind = ..., -) -> _ScalarNumpy: ... -@overload def take( a: ArrayLike, indices: _ArrayLikeIntOrBool, axis: Optional[int] = ..., out: Optional[ndarray] = ..., mode: _ModeKind = ..., -) -> Union[_ScalarNumpy, ndarray]: ... -def reshape(a: ArrayLike, newshape: _ShapeLike, order: _OrderACF = ...) -> ndarray: ... -@overload -def choose( - a: _ScalarIntOrBool, - choices: ArrayLike, - out: Optional[ndarray] = ..., - mode: _ModeKind = ..., -) -> _ScalarIntOrBool: ... -@overload -def choose( - a: _IntLike_co, choices: ArrayLike, out: Optional[ndarray] = ..., mode: _ModeKind = ... -) -> Union[integer, bool_]: ... -@overload +) -> Any: ... 
+ +def reshape( + a: ArrayLike, + newshape: _ShapeLike, + order: _OrderACF = ..., +) -> ndarray: ... + def choose( a: _ArrayLikeIntOrBool, choices: ArrayLike, out: Optional[ndarray] = ..., mode: _ModeKind = ..., -) -> ndarray: ... +) -> Any: ... + def repeat( - a: ArrayLike, repeats: _ArrayLikeIntOrBool, axis: Optional[int] = ... + a: ArrayLike, + repeats: _ArrayLikeIntOrBool, + axis: Optional[int] = ..., ) -> ndarray: ... + def put( - a: ndarray, ind: _ArrayLikeIntOrBool, v: ArrayLike, mode: _ModeKind = ... + a: ndarray, + ind: _ArrayLikeIntOrBool, + v: ArrayLike, + mode: _ModeKind = ..., ) -> None: ... -def swapaxes(a: ArrayLike, axis1: int, axis2: int) -> ndarray: ... + +def swapaxes( + a: ArrayLike, + axis1: int, + axis2: int, +) -> ndarray: ... + def transpose( - a: ArrayLike, axes: Union[None, Sequence[int], ndarray] = ... + a: ArrayLike, + axes: Union[None, Sequence[int], ndarray] = ... ) -> ndarray: ... + def partition( a: ArrayLike, kth: _ArrayLikeIntOrBool, @@ -123,54 +102,55 @@ def partition( kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., ) -> ndarray: ... -@overload -def argpartition( - a: generic, - kth: _ArrayLikeIntOrBool, - axis: Optional[int] = ..., - kind: _PartitionKind = ..., - order: Union[None, str, Sequence[str]] = ..., -) -> intp: ... -@overload -def argpartition( - a: _ScalarBuiltin, - kth: _ArrayLikeIntOrBool, - axis: Optional[int] = ..., - kind: _PartitionKind = ..., - order: Union[None, str, Sequence[str]] = ..., -) -> ndarray: ... -@overload + def argpartition( a: ArrayLike, kth: _ArrayLikeIntOrBool, axis: Optional[int] = ..., kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., -) -> ndarray: ... +) -> Any: ... + def sort( a: ArrayLike, axis: Optional[int] = ..., kind: Optional[_SortKind] = ..., order: Union[None, str, Sequence[str]] = ..., ) -> ndarray: ... + def argsort( a: ArrayLike, axis: Optional[int] = ..., kind: Optional[_SortKind] = ..., order: Union[None, str, Sequence[str]] = ..., ) -> ndarray: ... + @overload -def argmax(a: ArrayLike, axis: None = ..., out: Optional[ndarray] = ...) -> intp: ... +def argmax( + a: ArrayLike, + axis: None = ..., + out: Optional[ndarray] = ..., +) -> intp: ... @overload def argmax( - a: ArrayLike, axis: int = ..., out: Optional[ndarray] = ... -) -> Union[ndarray, intp]: ... + a: ArrayLike, + axis: Optional[int] = ..., + out: Optional[ndarray] = ..., +) -> Any: ... + @overload -def argmin(a: ArrayLike, axis: None = ..., out: Optional[ndarray] = ...) -> intp: ... +def argmin( + a: ArrayLike, + axis: None = ..., + out: Optional[ndarray] = ..., +) -> intp: ... @overload def argmin( - a: ArrayLike, axis: int = ..., out: Optional[ndarray] = ... -) -> Union[ndarray, intp]: ... + a: ArrayLike, + axis: Optional[int] = ..., + out: Optional[ndarray] = ..., +) -> Any: ... + @overload def searchsorted( a: ArrayLike, @@ -185,14 +165,30 @@ def searchsorted( side: _SortSide = ..., sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array ) -> ndarray: ... -def resize(a: ArrayLike, new_shape: _ShapeLike) -> ndarray: ... + +def resize( + a: ArrayLike, + new_shape: _ShapeLike, +) -> ndarray: ... + @overload -def squeeze(a: _ScalarGeneric, axis: Optional[_ShapeLike] = ...) -> _ScalarGeneric: ... +def squeeze( + a: _ScalarGeneric, + axis: Optional[_ShapeLike] = ..., +) -> _ScalarGeneric: ... @overload -def squeeze(a: ArrayLike, axis: Optional[_ShapeLike] = ...) -> ndarray: ... +def squeeze( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., +) -> ndarray: ... 
+ def diagonal( - a: ArrayLike, offset: int = ..., axis1: int = ..., axis2: int = ... # >= 2D array + a: ArrayLike, + offset: int = ..., + axis1: int = ..., + axis2: int = ..., # >= 2D array ) -> ndarray: ... + def trace( a: ArrayLike, # >= 2D array offset: int = ..., @@ -200,32 +196,21 @@ def trace( axis2: int = ..., dtype: DTypeLike = ..., out: Optional[ndarray] = ..., -) -> Union[number, ndarray]: ... +) -> Any: ... + def ravel(a: ArrayLike, order: _OrderKACF = ...) -> ndarray: ... + def nonzero(a: ArrayLike) -> Tuple[ndarray, ...]: ... + def shape(a: ArrayLike) -> _Shape: ... + def compress( condition: ArrayLike, # 1D bool array a: ArrayLike, axis: Optional[int] = ..., out: Optional[ndarray] = ..., ) -> ndarray: ... -@overload -def clip( - a: _Number, - a_min: ArrayLike, - a_max: Optional[ArrayLike], - out: Optional[ndarray] = ..., - **kwargs: Any, -) -> _Number: ... -@overload -def clip( - a: _Number, - a_min: None, - a_max: ArrayLike, - out: Optional[ndarray] = ..., - **kwargs: Any, -) -> _Number: ... + @overload def clip( a: ArrayLike, @@ -233,7 +218,7 @@ def clip( a_max: Optional[ArrayLike], out: Optional[ndarray] = ..., **kwargs: Any, -) -> Union[number, ndarray]: ... +) -> Any: ... @overload def clip( a: ArrayLike, @@ -241,18 +226,8 @@ def clip( a_max: ArrayLike, out: Optional[ndarray] = ..., **kwargs: Any, -) -> Union[number, ndarray]: ... -@overload -def sum( - a: _Number, - axis: Optional[_ShapeLike] = ..., - dtype: DTypeLike = ..., - out: Optional[ndarray] = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., -) -> _Number: ... -@overload +) -> Any: ... + def sum( a: ArrayLike, axis: _ShapeLike = ..., @@ -261,12 +236,13 @@ def sum( keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., -) -> Union[number, ndarray]: ... +) -> Any: ... + @overload def all( a: ArrayLike, axis: None = ..., - out: Optional[ndarray] = ..., + out: None = ..., keepdims: Literal[False] = ..., ) -> bool_: ... @overload @@ -275,12 +251,13 @@ def all( axis: Optional[_ShapeLike] = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., -) -> Union[bool_, ndarray]: ... +) -> Any: ... + @overload def any( a: ArrayLike, axis: None = ..., - out: Optional[ndarray] = ..., + out: None = ..., keepdims: Literal[False] = ..., ) -> bool_: ... @overload @@ -289,53 +266,22 @@ def any( axis: Optional[_ShapeLike] = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., -) -> Union[bool_, ndarray]: ... +) -> Any: ... + def cumsum( a: ArrayLike, axis: Optional[int] = ..., dtype: DTypeLike = ..., out: Optional[ndarray] = ..., ) -> ndarray: ... -@overload -def ptp( - a: _Number, - axis: Optional[_ShapeLike] = ..., - out: Optional[ndarray] = ..., - keepdims: bool = ..., -) -> _Number: ... -@overload -def ptp( - a: ArrayLike, - axis: None = ..., - out: Optional[ndarray] = ..., - keepdims: Literal[False] = ..., -) -> number: ... -@overload + def ptp( a: ArrayLike, axis: Optional[_ShapeLike] = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., -) -> Union[number, ndarray]: ... -@overload -def amax( - a: _Number, - axis: Optional[_ShapeLike] = ..., - out: Optional[ndarray] = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., -) -> _Number: ... -@overload -def amax( - a: ArrayLike, - axis: None = ..., - out: Optional[ndarray] = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., -) -> number: ... -@overload +) -> Any: ... 
+ def amax( a: ArrayLike, axis: Optional[_ShapeLike] = ..., @@ -343,26 +289,8 @@ def amax( keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., -) -> Union[number, ndarray]: ... -@overload -def amin( - a: _Number, - axis: Optional[_ShapeLike] = ..., - out: Optional[ndarray] = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., -) -> _Number: ... -@overload -def amin( - a: ArrayLike, - axis: None = ..., - out: Optional[ndarray] = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., -) -> number: ... -@overload +) -> Any: ... + def amin( a: ArrayLike, axis: Optional[_ShapeLike] = ..., @@ -370,7 +298,7 @@ def amin( keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., -) -> Union[number, ndarray]: ... +) -> Any: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. @@ -379,27 +307,6 @@ def amin( # Note that the same situation holds for all wrappers around # `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). -@overload -def prod( - a: _Number, - axis: Optional[_ShapeLike] = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., -) -> _Number: ... -@overload -def prod( - a: ArrayLike, - axis: None = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., -) -> number: ... -@overload def prod( a: ArrayLike, axis: Optional[_ShapeLike] = ..., @@ -408,53 +315,33 @@ def prod( keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., -) -> Union[number, ndarray]: ... +) -> Any: ... + def cumprod( a: ArrayLike, axis: Optional[int] = ..., dtype: DTypeLike = ..., out: Optional[ndarray] = ..., ) -> ndarray: ... + def ndim(a: ArrayLike) -> int: ... + def size(a: ArrayLike, axis: Optional[int] = ...) -> int: ... -@overload -def around( - a: _Number, decimals: int = ..., out: Optional[ndarray] = ... -) -> _Number: ... -@overload -def around( - a: _NumberLike_co, decimals: int = ..., out: Optional[ndarray] = ... -) -> number: ... -@overload + def around( - a: ArrayLike, decimals: int = ..., out: Optional[ndarray] = ... -) -> ndarray: ... -@overload -def mean( a: ArrayLike, - axis: None = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: Literal[False] = ..., -) -> number: ... -@overload + decimals: int = ..., + out: Optional[ndarray] = ..., +) -> Any: ... + def mean( a: ArrayLike, axis: Optional[_ShapeLike] = ..., dtype: DTypeLike = ..., out: Optional[ndarray] = ..., keepdims: bool = ..., -) -> Union[number, ndarray]: ... -@overload -def std( - a: ArrayLike, - axis: None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: int = ..., - keepdims: Literal[False] = ..., -) -> number: ... -@overload +) -> Any: ... + def std( a: ArrayLike, axis: Optional[_ShapeLike] = ..., @@ -462,17 +349,8 @@ def std( out: Optional[ndarray] = ..., ddof: int = ..., keepdims: bool = ..., -) -> Union[number, ndarray]: ... -@overload -def var( - a: ArrayLike, - axis: None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: int = ..., - keepdims: Literal[False] = ..., -) -> number: ... -@overload +) -> Any: ... + def var( a: ArrayLike, axis: Optional[_ShapeLike] = ..., @@ -480,4 +358,4 @@ def var( out: Optional[ndarray] = ..., ddof: int = ..., keepdims: bool = ..., -) -> Union[number, ndarray]: ... +) -> Any: ... 
diff --git a/numpy/typing/tests/data/fail/fromnumeric.py b/numpy/typing/tests/data/fail/fromnumeric.py index c9156895dd51..d8f7a5d69eba 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.py +++ b/numpy/typing/tests/data/fail/fromnumeric.py @@ -7,17 +7,17 @@ a = np.bool_(True) -np.take(a, None) # E: No overload variant of "take" matches argument type -np.take(a, axis=1.0) # E: No overload variant of "take" matches argument type -np.take(A, out=1) # E: No overload variant of "take" matches argument type -np.take(A, mode="bob") # E: No overload variant of "take" matches argument type +np.take(a, None) # E: incompatible type +np.take(a, axis=1.0) # E: incompatible type +np.take(A, out=1) # E: incompatible type +np.take(A, mode="bob") # E: incompatible type np.reshape(a, None) # E: Argument 2 to "reshape" has incompatible type np.reshape(A, 1, order="bob") # E: Argument "order" to "reshape" has incompatible type -np.choose(a, None) # E: No overload variant of "choose" matches argument type -np.choose(a, out=1.0) # E: No overload variant of "choose" matches argument type -np.choose(A, mode="bob") # E: No overload variant of "choose" matches argument type +np.choose(a, None) # E: incompatible type +np.choose(a, out=1.0) # E: incompatible type +np.choose(A, mode="bob") # E: incompatible type np.repeat(a, None) # E: Argument 2 to "repeat" has incompatible type np.repeat(A, 1, axis=1.0) # E: Argument "axis" to "repeat" has incompatible type @@ -38,14 +38,14 @@ A, 0, order=range(5) # E: Argument "order" to "partition" has incompatible type ) -np.argpartition( # E: No overload variant of "argpartition" matches argument type - a, None +np.argpartition( + a, None # E: incompatible type ) -np.argpartition( # E: No overload variant of "argpartition" matches argument type - a, 0, axis="bob" +np.argpartition( + a, 0, axis="bob" # E: incompatible type ) -np.argpartition( # E: No overload variant of "argpartition" matches argument type - A, 0, kind="bob" +np.argpartition( + A, 0, kind="bob" # E: incompatible type ) np.argpartition( A, 0, order=range(5) # E: Argument "order" to "argpartition" has incompatible type @@ -93,62 +93,62 @@ np.clip(a, 1, 2, out=1) # E: No overload variant of "clip" matches argument type np.clip(1, None, None) # E: No overload variant of "clip" matches argument type -np.sum(a, axis=1.0) # E: No overload variant of "sum" matches argument type -np.sum(a, keepdims=1.0) # E: No overload variant of "sum" matches argument type -np.sum(a, initial=[1]) # E: No overload variant of "sum" matches argument type +np.sum(a, axis=1.0) # E: incompatible type +np.sum(a, keepdims=1.0) # E: incompatible type +np.sum(a, initial=[1]) # E: incompatible type -np.all(a, axis=1.0) # E: No overload variant of "all" matches argument type -np.all(a, keepdims=1.0) # E: No overload variant of "all" matches argument type -np.all(a, out=1.0) # E: No overload variant of "all" matches argument type +np.all(a, axis=1.0) # E: No overload variant +np.all(a, keepdims=1.0) # E: No overload variant +np.all(a, out=1.0) # E: No overload variant -np.any(a, axis=1.0) # E: No overload variant of "any" matches argument type -np.any(a, keepdims=1.0) # E: No overload variant of "any" matches argument type -np.any(a, out=1.0) # E: No overload variant of "any" matches argument type +np.any(a, axis=1.0) # E: No overload variant +np.any(a, keepdims=1.0) # E: No overload variant +np.any(a, out=1.0) # E: No overload variant -np.cumsum(a, axis=1.0) # E: Argument "axis" to "cumsum" has incompatible type -np.cumsum(a, dtype=1.0) # E: 
Argument "dtype" to "cumsum" has incompatible type -np.cumsum(a, out=1.0) # E: Argument "out" to "cumsum" has incompatible type +np.cumsum(a, axis=1.0) # E: incompatible type +np.cumsum(a, dtype=1.0) # E: incompatible type +np.cumsum(a, out=1.0) # E: incompatible type -np.ptp(a, axis=1.0) # E: No overload variant of "ptp" matches argument type -np.ptp(a, keepdims=1.0) # E: No overload variant of "ptp" matches argument type -np.ptp(a, out=1.0) # E: No overload variant of "ptp" matches argument type +np.ptp(a, axis=1.0) # E: incompatible type +np.ptp(a, keepdims=1.0) # E: incompatible type +np.ptp(a, out=1.0) # E: incompatible type -np.amax(a, axis=1.0) # E: No overload variant of "amax" matches argument type -np.amax(a, keepdims=1.0) # E: No overload variant of "amax" matches argument type -np.amax(a, out=1.0) # E: No overload variant of "amax" matches argument type -np.amax(a, initial=[1.0]) # E: No overload variant of "amax" matches argument type +np.amax(a, axis=1.0) # E: incompatible type +np.amax(a, keepdims=1.0) # E: incompatible type +np.amax(a, out=1.0) # E: incompatible type +np.amax(a, initial=[1.0]) # E: incompatible type np.amax(a, where=[1.0]) # E: List item 0 has incompatible type -np.amin(a, axis=1.0) # E: No overload variant of "amin" matches argument type -np.amin(a, keepdims=1.0) # E: No overload variant of "amin" matches argument type -np.amin(a, out=1.0) # E: No overload variant of "amin" matches argument type -np.amin(a, initial=[1.0]) # E: No overload variant of "amin" matches argument type +np.amin(a, axis=1.0) # E: incompatible type +np.amin(a, keepdims=1.0) # E: incompatible type +np.amin(a, out=1.0) # E: incompatible type +np.amin(a, initial=[1.0]) # E: incompatible type np.amin(a, where=[1.0]) # E: List item 0 has incompatible type -np.prod(a, axis=1.0) # E: No overload variant of "prod" matches argument type -np.prod(a, out=False) # E: No overload variant of "prod" matches argument type -np.prod(a, keepdims=1.0) # E: No overload variant of "prod" matches argument type -np.prod(a, initial=int) # E: No overload variant of "prod" matches argument type -np.prod(a, where=1.0) # E: No overload variant of "prod" matches argument type +np.prod(a, axis=1.0) # E: incompatible type +np.prod(a, out=False) # E: incompatible type +np.prod(a, keepdims=1.0) # E: incompatible type +np.prod(a, initial=int) # E: incompatible type +np.prod(a, where=1.0) # E: incompatible type np.cumprod(a, axis=1.0) # E: Argument "axis" to "cumprod" has incompatible type np.cumprod(a, out=False) # E: Argument "out" to "cumprod" has incompatible type np.size(a, axis=1.0) # E: Argument "axis" to "size" has incompatible type -np.around(a, decimals=1.0) # E: No overload variant of "around" matches argument type -np.around(a, out=type) # E: No overload variant of "around" matches argument type +np.around(a, decimals=1.0) # E: incompatible type +np.around(a, out=type) # E: incompatible type -np.mean(a, axis=1.0) # E: No overload variant of "mean" matches argument type -np.mean(a, out=False) # E: No overload variant of "mean" matches argument type -np.mean(a, keepdims=1.0) # E: No overload variant of "mean" matches argument type +np.mean(a, axis=1.0) # E: incompatible type +np.mean(a, out=False) # E: incompatible type +np.mean(a, keepdims=1.0) # E: incompatible type -np.std(a, axis=1.0) # E: No overload variant of "std" matches argument type -np.std(a, out=False) # E: No overload variant of "std" matches argument type -np.std(a, ddof='test') # E: No overload variant of "std" matches argument type 
-np.std(a, keepdims=1.0) # E: No overload variant of "std" matches argument type +np.std(a, axis=1.0) # E: incompatible type +np.std(a, out=False) # E: incompatible type +np.std(a, ddof='test') # E: incompatible type +np.std(a, keepdims=1.0) # E: incompatible type -np.var(a, axis=1.0) # E: No overload variant of "var" matches argument type -np.var(a, out=False) # E: No overload variant of "var" matches argument type -np.var(a, ddof='test') # E: No overload variant of "var" matches argument type -np.var(a, keepdims=1.0) # E: No overload variant of "var" matches argument type +np.var(a, axis=1.0) # E: incompatible type +np.var(a, out=False) # E: incompatible type +np.var(a, ddof='test') # E: incompatible type +np.var(a, keepdims=1.0) # E: incompatible type diff --git a/numpy/typing/tests/data/reveal/fromnumeric.py b/numpy/typing/tests/data/reveal/fromnumeric.py index 2b58f019f2b3..bbcfbb85ad3d 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.py +++ b/numpy/typing/tests/data/reveal/fromnumeric.py @@ -12,27 +12,13 @@ c = 1.0 d = np.array(1.0, dtype=np.float32) # writeable -reveal_type(np.take(a, 0)) # E: numpy.bool_ -reveal_type(np.take(b, 0)) # E: {float32} -reveal_type( - np.take(c, 0) # E: Union[numpy.generic, datetime.datetime, datetime.timedelta] -) -reveal_type( - np.take(A, 0) # E: Union[numpy.generic, datetime.datetime, datetime.timedelta] -) -reveal_type( - np.take(B, 0) # E: Union[numpy.generic, datetime.datetime, datetime.timedelta] -) -reveal_type( - np.take( # E: Union[Union[numpy.generic, datetime.datetime, datetime.timedelta], numpy.ndarray[Any, Any]] - A, [0] - ) -) -reveal_type( - np.take( # E: Union[Union[numpy.generic, datetime.datetime, datetime.timedelta], numpy.ndarray[Any, Any]] - B, [0] - ) -) +reveal_type(np.take(a, 0)) # E: Any +reveal_type(np.take(b, 0)) # E: Any +reveal_type(np.take(c, 0)) # E: Any +reveal_type(np.take(A, 0)) # E: Any +reveal_type(np.take(B, 0)) # E: Any +reveal_type(np.take(A, [0])) # E: Any +reveal_type(np.take(B, [0])) # E: Any reveal_type(np.reshape(a, 1)) # E: numpy.ndarray[Any, Any] reveal_type(np.reshape(b, 1)) # E: numpy.ndarray[Any, Any] @@ -40,8 +26,8 @@ reveal_type(np.reshape(A, 1)) # E: numpy.ndarray[Any, Any] reveal_type(np.reshape(B, 1)) # E: numpy.ndarray[Any, Any] -reveal_type(np.choose(a, [True, True])) # E: numpy.bool_ -reveal_type(np.choose(A, [True, True])) # E: numpy.ndarray[Any, Any] +reveal_type(np.choose(a, [True, True])) # E: Any +reveal_type(np.choose(A, [True, True])) # E: Any reveal_type(np.repeat(a, 1)) # E: numpy.ndarray[Any, Any] reveal_type(np.repeat(b, 1)) # E: numpy.ndarray[Any, Any] @@ -66,11 +52,11 @@ reveal_type(np.partition(A, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.partition(B, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.argpartition(a, 0)) # E: {intp} -reveal_type(np.argpartition(b, 0)) # E: {intp} -reveal_type(np.argpartition(c, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.argpartition(A, 0)) # E: numpy.ndarray[Any, Any] -reveal_type(np.argpartition(B, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.argpartition(a, 0)) # E: Any +reveal_type(np.argpartition(b, 0)) # E: Any +reveal_type(np.argpartition(c, 0)) # E: Any +reveal_type(np.argpartition(A, 0)) # E: Any +reveal_type(np.argpartition(B, 0)) # E: Any reveal_type(np.sort(A, 0)) # E: numpy.ndarray[Any, Any] reveal_type(np.sort(B, 0)) # E: numpy.ndarray[Any, Any] @@ -80,13 +66,13 @@ reveal_type(np.argmax(A)) # E: {intp} reveal_type(np.argmax(B)) # E: {intp} -reveal_type(np.argmax(A, axis=0)) # E: Union[numpy.ndarray[Any, Any], 
{intp}] -reveal_type(np.argmax(B, axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] +reveal_type(np.argmax(A, axis=0)) # E: Any +reveal_type(np.argmax(B, axis=0)) # E: Any reveal_type(np.argmin(A)) # E: {intp} reveal_type(np.argmin(B)) # E: {intp} -reveal_type(np.argmin(A, axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] -reveal_type(np.argmin(B, axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] +reveal_type(np.argmin(A, axis=0)) # E: Any +reveal_type(np.argmin(B, axis=0)) # E: Any reveal_type(np.searchsorted(A[0], 0)) # E: {intp} reveal_type(np.searchsorted(B[0], 0)) # E: {intp} @@ -108,8 +94,8 @@ reveal_type(np.diagonal(A)) # E: numpy.ndarray[Any, Any] reveal_type(np.diagonal(B)) # E: numpy.ndarray[Any, Any] -reveal_type(np.trace(A)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.trace(B)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.trace(A)) # E: Any +reveal_type(np.trace(B)) # E: Any reveal_type(np.ravel(a)) # E: numpy.ndarray[Any, Any] reveal_type(np.ravel(b)) # E: numpy.ndarray[Any, Any] @@ -135,39 +121,39 @@ reveal_type(np.compress([True], A)) # E: numpy.ndarray[Any, Any] reveal_type(np.compress([True], B)) # E: numpy.ndarray[Any, Any] -reveal_type(np.clip(a, 0, 1.0)) # E: numpy.number[Any] -reveal_type(np.clip(b, -1, 1)) # E: {float32} -reveal_type(np.clip(c, 0, 1)) # E: numpy.number[Any] -reveal_type(np.clip(A, 0, 1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.clip(B, 0, 1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.clip(a, 0, 1.0)) # E: Any +reveal_type(np.clip(b, -1, 1)) # E: Any +reveal_type(np.clip(c, 0, 1)) # E: Any +reveal_type(np.clip(A, 0, 1)) # E: Any +reveal_type(np.clip(B, 0, 1)) # E: Any -reveal_type(np.sum(a)) # E: numpy.number[Any] -reveal_type(np.sum(b)) # E: {float32} -reveal_type(np.sum(c)) # E: numpy.number[Any] -reveal_type(np.sum(A)) # E: numpy.number[Any] -reveal_type(np.sum(B)) # E: numpy.number[Any] -reveal_type(np.sum(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.sum(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(np.sum(a)) # E: Any +reveal_type(np.sum(b)) # E: Any +reveal_type(np.sum(c)) # E: Any +reveal_type(np.sum(A)) # E: Any +reveal_type(np.sum(B)) # E: Any +reveal_type(np.sum(A, axis=0)) # E: Any +reveal_type(np.sum(B, axis=0)) # E: Any reveal_type(np.all(a)) # E: numpy.bool_ reveal_type(np.all(b)) # E: numpy.bool_ reveal_type(np.all(c)) # E: numpy.bool_ reveal_type(np.all(A)) # E: numpy.bool_ reveal_type(np.all(B)) # E: numpy.bool_ -reveal_type(np.all(A, axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(np.all(B, axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(np.all(A, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(np.all(B, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.all(A, axis=0)) # E: Any +reveal_type(np.all(B, axis=0)) # E: Any +reveal_type(np.all(A, keepdims=True)) # E: Any +reveal_type(np.all(B, keepdims=True)) # E: Any reveal_type(np.any(a)) # E: numpy.bool_ reveal_type(np.any(b)) # E: numpy.bool_ reveal_type(np.any(c)) # E: numpy.bool_ reveal_type(np.any(A)) # E: numpy.bool_ reveal_type(np.any(B)) # E: numpy.bool_ -reveal_type(np.any(A, axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(np.any(B, axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(np.any(A, keepdims=True)) # E: Union[numpy.bool_, 
numpy.ndarray[Any, Any]] -reveal_type(np.any(B, keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.any(A, axis=0)) # E: Any +reveal_type(np.any(B, axis=0)) # E: Any +reveal_type(np.any(A, keepdims=True)) # E: Any +reveal_type(np.any(B, keepdims=True)) # E: Any reveal_type(np.cumsum(a)) # E: numpy.ndarray[Any, Any] reveal_type(np.cumsum(b)) # E: numpy.ndarray[Any, Any] @@ -175,47 +161,47 @@ reveal_type(np.cumsum(A)) # E: numpy.ndarray[Any, Any] reveal_type(np.cumsum(B)) # E: numpy.ndarray[Any, Any] -reveal_type(np.ptp(a)) # E: numpy.number[Any] -reveal_type(np.ptp(b)) # E: {float32} -reveal_type(np.ptp(c)) # E: numpy.number[Any] -reveal_type(np.ptp(A)) # E: numpy.number[Any] -reveal_type(np.ptp(B)) # E: numpy.number[Any] -reveal_type(np.ptp(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.ptp(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.ptp(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.ptp(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] - -reveal_type(np.amax(a)) # E: numpy.number[Any] -reveal_type(np.amax(b)) # E: {float32} -reveal_type(np.amax(c)) # E: numpy.number[Any] -reveal_type(np.amax(A)) # E: numpy.number[Any] -reveal_type(np.amax(B)) # E: numpy.number[Any] -reveal_type(np.amax(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.amax(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.amax(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.amax(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] - -reveal_type(np.amin(a)) # E: numpy.number[Any] -reveal_type(np.amin(b)) # E: {float32} -reveal_type(np.amin(c)) # E: numpy.number[Any] -reveal_type(np.amin(A)) # E: numpy.number[Any] -reveal_type(np.amin(B)) # E: numpy.number[Any] -reveal_type(np.amin(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.amin(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.amin(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.amin(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] - -reveal_type(np.prod(a)) # E: numpy.number[Any] -reveal_type(np.prod(b)) # E: {float32} -reveal_type(np.prod(c)) # E: numpy.number[Any] -reveal_type(np.prod(A)) # E: numpy.number[Any] -reveal_type(np.prod(B)) # E: numpy.number[Any] -reveal_type(np.prod(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.prod(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.prod(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.prod(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.prod(b, out=d)) # E: numpy.ndarray[Any, Any] -reveal_type(np.prod(B, out=d)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ptp(a)) # E: Any +reveal_type(np.ptp(b)) # E: Any +reveal_type(np.ptp(c)) # E: Any +reveal_type(np.ptp(A)) # E: Any +reveal_type(np.ptp(B)) # E: Any +reveal_type(np.ptp(A, axis=0)) # E: Any +reveal_type(np.ptp(B, axis=0)) # E: Any +reveal_type(np.ptp(A, keepdims=True)) # E: Any +reveal_type(np.ptp(B, keepdims=True)) # E: Any + +reveal_type(np.amax(a)) # E: Any +reveal_type(np.amax(b)) # E: Any +reveal_type(np.amax(c)) # E: Any +reveal_type(np.amax(A)) # E: Any +reveal_type(np.amax(B)) # E: 
Any +reveal_type(np.amax(A, axis=0)) # E: Any +reveal_type(np.amax(B, axis=0)) # E: Any +reveal_type(np.amax(A, keepdims=True)) # E: Any +reveal_type(np.amax(B, keepdims=True)) # E: Any + +reveal_type(np.amin(a)) # E: Any +reveal_type(np.amin(b)) # E: Any +reveal_type(np.amin(c)) # E: Any +reveal_type(np.amin(A)) # E: Any +reveal_type(np.amin(B)) # E: Any +reveal_type(np.amin(A, axis=0)) # E: Any +reveal_type(np.amin(B, axis=0)) # E: Any +reveal_type(np.amin(A, keepdims=True)) # E: Any +reveal_type(np.amin(B, keepdims=True)) # E: Any + +reveal_type(np.prod(a)) # E: Any +reveal_type(np.prod(b)) # E: Any +reveal_type(np.prod(c)) # E: Any +reveal_type(np.prod(A)) # E: Any +reveal_type(np.prod(B)) # E: Any +reveal_type(np.prod(A, axis=0)) # E: Any +reveal_type(np.prod(B, axis=0)) # E: Any +reveal_type(np.prod(A, keepdims=True)) # E: Any +reveal_type(np.prod(B, keepdims=True)) # E: Any +reveal_type(np.prod(b, out=d)) # E: Any +reveal_type(np.prod(B, out=d)) # E: Any reveal_type(np.cumprod(a)) # E: numpy.ndarray[Any, Any] reveal_type(np.cumprod(b)) # E: numpy.ndarray[Any, Any] @@ -235,44 +221,44 @@ reveal_type(np.size(A)) # E: int reveal_type(np.size(B)) # E: int -reveal_type(np.around(a)) # E: numpy.number[Any] -reveal_type(np.around(b)) # E: {float32} -reveal_type(np.around(c)) # E: numpy.number[Any] -reveal_type(np.around(A)) # E: numpy.ndarray[Any, Any] -reveal_type(np.around(B)) # E: numpy.ndarray[Any, Any] - -reveal_type(np.mean(a)) # E: numpy.number[Any] -reveal_type(np.mean(b)) # E: numpy.number[Any] -reveal_type(np.mean(c)) # E: numpy.number[Any] -reveal_type(np.mean(A)) # E: numpy.number[Any] -reveal_type(np.mean(B)) # E: numpy.number[Any] -reveal_type(np.mean(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.mean(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.mean(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.mean(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.mean(b, out=d)) # E: numpy.ndarray[Any, Any] -reveal_type(np.mean(B, out=d)) # E: numpy.ndarray[Any, Any] - -reveal_type(np.std(a)) # E: numpy.number[Any] -reveal_type(np.std(b)) # E: numpy.number[Any] -reveal_type(np.std(c)) # E: numpy.number[Any] -reveal_type(np.std(A)) # E: numpy.number[Any] -reveal_type(np.std(B)) # E: numpy.number[Any] -reveal_type(np.std(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.std(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.std(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.std(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.std(b, out=d)) # E: numpy.ndarray[Any, Any] -reveal_type(np.std(B, out=d)) # E: numpy.ndarray[Any, Any] - -reveal_type(np.var(a)) # E: numpy.number[Any] -reveal_type(np.var(b)) # E: numpy.number[Any] -reveal_type(np.var(c)) # E: numpy.number[Any] -reveal_type(np.var(A)) # E: numpy.number[Any] -reveal_type(np.var(B)) # E: numpy.number[Any] -reveal_type(np.var(A, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.var(B, axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.var(A, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.var(B, keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(np.var(b, out=d)) # E: numpy.ndarray[Any, Any] 
-reveal_type(np.var(B, out=d)) # E: numpy.ndarray[Any, Any] +reveal_type(np.around(a)) # E: Any +reveal_type(np.around(b)) # E: Any +reveal_type(np.around(c)) # E: Any +reveal_type(np.around(A)) # E: Any +reveal_type(np.around(B)) # E: Any + +reveal_type(np.mean(a)) # E: Any +reveal_type(np.mean(b)) # E: Any +reveal_type(np.mean(c)) # E: Any +reveal_type(np.mean(A)) # E: Any +reveal_type(np.mean(B)) # E: Any +reveal_type(np.mean(A, axis=0)) # E: Any +reveal_type(np.mean(B, axis=0)) # E: Any +reveal_type(np.mean(A, keepdims=True)) # E: Any +reveal_type(np.mean(B, keepdims=True)) # E: Any +reveal_type(np.mean(b, out=d)) # E: Any +reveal_type(np.mean(B, out=d)) # E: Any + +reveal_type(np.std(a)) # E: Any +reveal_type(np.std(b)) # E: Any +reveal_type(np.std(c)) # E: Any +reveal_type(np.std(A)) # E: Any +reveal_type(np.std(B)) # E: Any +reveal_type(np.std(A, axis=0)) # E: Any +reveal_type(np.std(B, axis=0)) # E: Any +reveal_type(np.std(A, keepdims=True)) # E: Any +reveal_type(np.std(B, keepdims=True)) # E: Any +reveal_type(np.std(b, out=d)) # E: Any +reveal_type(np.std(B, out=d)) # E: Any + +reveal_type(np.var(a)) # E: Any +reveal_type(np.var(b)) # E: Any +reveal_type(np.var(c)) # E: Any +reveal_type(np.var(A)) # E: Any +reveal_type(np.var(B)) # E: Any +reveal_type(np.var(A, axis=0)) # E: Any +reveal_type(np.var(B, axis=0)) # E: Any +reveal_type(np.var(A, keepdims=True)) # E: Any +reveal_type(np.var(B, keepdims=True)) # E: Any +reveal_type(np.var(b, out=d)) # E: Any +reveal_type(np.var(B, out=d)) # E: Any From b1eaa4033d9a8a62d7789e112db6d74fe897d42b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 21 Apr 2021 18:06:36 +0200 Subject: [PATCH 1006/1270] MAINT: Remove unsafe unions from `np.core.function_base` --- numpy/core/function_base.pyi | 6 ++++-- numpy/typing/tests/data/reveal/array_constructors.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi index d4543f28141b..b5d6ca6abe88 100644 --- a/numpy/core/function_base.pyi +++ b/numpy/core/function_base.pyi @@ -1,7 +1,7 @@ import sys from typing import overload, Tuple, Union, Sequence, Any -from numpy import ndarray, inexact +from numpy import ndarray from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co if sys.version_info >= (3, 8): @@ -33,7 +33,8 @@ def linspace( retstep: Literal[True] = ..., dtype: DTypeLike = ..., axis: SupportsIndex = ..., -) -> Tuple[ndarray, inexact]: ... +) -> Tuple[ndarray, Any]: ... + def logspace( start: _ArrayLikeNumber, stop: _ArrayLikeNumber, @@ -43,6 +44,7 @@ def logspace( dtype: DTypeLike = ..., axis: SupportsIndex = ..., ) -> ndarray: ... + def geomspace( start: _ArrayLikeNumber, stop: _ArrayLikeNumber, diff --git a/numpy/typing/tests/data/reveal/array_constructors.py b/numpy/typing/tests/data/reveal/array_constructors.py index 04d5cd229e4b..2e803a365ce8 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.py +++ b/numpy/typing/tests/data/reveal/array_constructors.py @@ -41,7 +41,7 @@ def func(i: int, j: int, **kwargs: Any) -> SubClass: ... 
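# Usage illustration for the retstep annotation above (illustrative only):
# with retstep=True, linspace returns a (samples, step) pair, and the exact
# scalar type of the step is what the stub now leaves as Any instead of
# pinning it to inexact.
import numpy as np

samples, step = np.linspace(0, 10, num=5, retstep=True)
print(samples)  # [ 0.   2.5  5.   7.5 10. ]
print(step)     # 2.5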
reveal_type(np.require(C)) # E: numpy.ndarray[Any, Any] reveal_type(np.linspace(0, 10)) # E: numpy.ndarray[Any, Any] -reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray[Any, Any], numpy.inexact[Any]] +reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray[Any, Any], Any] reveal_type(np.logspace(0, 10)) # E: numpy.ndarray[Any, Any] reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray[Any, Any] From 669826524248d15ea4f9383cf5a230685351f06c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 21 Apr 2021 18:19:12 +0200 Subject: [PATCH 1007/1270] MAINT: Remove unsafe unions from `np.core.numeric` --- numpy/core/numeric.pyi | 72 ++++++++++++++++++++--- numpy/typing/tests/data/reveal/numeric.py | 10 ++-- 2 files changed, 68 insertions(+), 14 deletions(-) diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi index d91cb31c2031..6b9ef4b20b98 100644 --- a/numpy/core/numeric.pyi +++ b/numpy/core/numeric.pyi @@ -41,6 +41,7 @@ def zeros_like( subok: bool = ..., shape: Optional[_ShapeLike] = ..., ) -> ndarray: ... + def ones( shape: _ShapeLike, dtype: DTypeLike = ..., @@ -48,6 +49,7 @@ def ones( *, like: ArrayLike = ..., ) -> ndarray: ... + @overload def ones_like( a: _ArrayType, @@ -64,6 +66,7 @@ def ones_like( subok: bool = ..., shape: Optional[_ShapeLike] = ..., ) -> ndarray: ... + @overload def empty_like( a: _ArrayType, @@ -80,6 +83,7 @@ def empty_like( subok: bool = ..., shape: Optional[_ShapeLike] = ..., ) -> ndarray: ... + def full( shape: _ShapeLike, fill_value: Any, @@ -88,6 +92,7 @@ def full( *, like: ArrayLike = ..., ) -> ndarray: ... + @overload def full_like( a: _ArrayType, @@ -106,39 +111,73 @@ def full_like( subok: bool = ..., shape: Optional[_ShapeLike] = ..., ) -> ndarray: ... + @overload def count_nonzero( - a: ArrayLike, axis: None = ..., *, keepdims: Literal[False] = ... + a: ArrayLike, + axis: None = ..., + *, + keepdims: Literal[False] = ..., ) -> int: ... @overload def count_nonzero( - a: ArrayLike, axis: _ShapeLike = ..., *, keepdims: bool = ... -) -> Union[signedinteger[Any], ndarray]: ... # TODO: np.intp + a: ArrayLike, + axis: _ShapeLike = ..., + *, + keepdims: bool = ..., +) -> Any: ... # TODO: np.intp or ndarray[np.intp] + def isfortran(a: Union[ndarray, generic]) -> bool: ... + def argwhere(a: ArrayLike) -> ndarray: ... + def flatnonzero(a: ArrayLike) -> ndarray: ... -def correlate(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ... -def convolve(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ... + +def correlate( + a: ArrayLike, + v: ArrayLike, + mode: _CorrelateMode = ..., +) -> ndarray: ... + +def convolve( + a: ArrayLike, + v: ArrayLike, + mode: _CorrelateMode = ..., +) -> ndarray: ... + @overload -def outer(a: ArrayLike, b: ArrayLike, out: None = ...) -> ndarray: ... +def outer( + a: ArrayLike, + b: ArrayLike, + out: None = ..., +) -> ndarray: ... @overload -def outer(a: ArrayLike, b: ArrayLike, out: _ArrayType = ...) -> _ArrayType: ... +def outer( + a: ArrayLike, + b: ArrayLike, + out: _ArrayType = ..., +) -> _ArrayType: ... + def tensordot( a: ArrayLike, b: ArrayLike, axes: Union[int, Tuple[_ShapeLike, _ShapeLike]] = ..., ) -> ndarray: ... + def roll( a: ArrayLike, shift: _ShapeLike, axis: Optional[_ShapeLike] = ..., ) -> ndarray: ... + def rollaxis(a: ndarray, axis: int, start: int = ...) -> ndarray: ... + def moveaxis( a: ndarray, source: _ShapeLike, destination: _ShapeLike, ) -> ndarray: ... 
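# Runtime behaviour behind the Any fallbacks in this patch (count_nonzero
# above, isclose further down), shown for reference: with axis=None the
# result collapses to a scalar, with an axis or array operands it stays an
# array, which is the ambiguity the stubs stop encoding as a Union.
import numpy as np

A = np.eye(3)
print(np.count_nonzero(A))          # 3
print(np.count_nonzero(A, axis=0))  # [1 1 1]
print(np.isclose(1.0, 1.0 + 1e-9))  # True
print(np.isclose(A, 1.0).shape)     # (3, 3)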
+ def cross( a: ArrayLike, b: ArrayLike, @@ -147,6 +186,7 @@ def cross( axisc: int = ..., axis: Optional[int] = ..., ) -> ndarray: ... + @overload def indices( dimensions: Sequence[int], @@ -159,6 +199,7 @@ def indices( dtype: DTypeLike = ..., sparse: Literal[True] = ..., ) -> Tuple[ndarray, ...]: ... + def fromfunction( function: Callable[..., _T], shape: Sequence[int], @@ -167,10 +208,20 @@ def fromfunction( like: ArrayLike = ..., **kwargs: Any, ) -> _T: ... + def isscalar(element: Any) -> bool: ... + def binary_repr(num: int, width: Optional[int] = ...) -> str: ... + def base_repr(number: int, base: int = ..., padding: int = ...) -> str: ... -def identity(n: int, dtype: DTypeLike = ..., *, like: ArrayLike = ...) -> ndarray: ... + +def identity( + n: int, + dtype: DTypeLike = ..., + *, + like: ArrayLike = ..., +) -> ndarray: ... + def allclose( a: ArrayLike, b: ArrayLike, @@ -178,12 +229,15 @@ def allclose( atol: float = ..., equal_nan: bool = ..., ) -> bool: ... + def isclose( a: ArrayLike, b: ArrayLike, rtol: float = ..., atol: float = ..., equal_nan: bool = ..., -) -> Union[bool_, ndarray]: ... +) -> Any: ... + def array_equal(a1: ArrayLike, a2: ArrayLike) -> bool: ... + def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... diff --git a/numpy/typing/tests/data/reveal/numeric.py b/numpy/typing/tests/data/reveal/numeric.py index 78e5c1d6120c..ec6e47ca05d0 100644 --- a/numpy/typing/tests/data/reveal/numeric.py +++ b/numpy/typing/tests/data/reveal/numeric.py @@ -20,8 +20,8 @@ class SubClass(np.ndarray): reveal_type(np.count_nonzero(i8)) # E: int reveal_type(np.count_nonzero(A)) # E: int reveal_type(np.count_nonzero(B)) # E: int -reveal_type(np.count_nonzero(A, keepdims=True)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]] -reveal_type(np.count_nonzero(A, axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]] +reveal_type(np.count_nonzero(A, keepdims=True)) # E: Any +reveal_type(np.count_nonzero(A, axis=0)) # E: Any reveal_type(np.isfortran(i8)) # E: bool reveal_type(np.isfortran(A)) # E: bool @@ -76,9 +76,9 @@ class SubClass(np.ndarray): reveal_type(np.allclose(B, A)) # E: bool reveal_type(np.allclose(A, A)) # E: bool -reveal_type(np.isclose(i8, A)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(np.isclose(B, A)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(np.isclose(A, A)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(np.isclose(i8, A)) # E: Any +reveal_type(np.isclose(B, A)) # E: Any +reveal_type(np.isclose(A, A)) # E: Any reveal_type(np.array_equal(i8, A)) # E: bool reveal_type(np.array_equal(B, A)) # E: bool From 44c3e1f2ffbccaa5e0877deb282a0ce49ed04c35 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 21 Apr 2021 18:25:39 +0200 Subject: [PATCH 1008/1270] MAINT: Remove unsafe unions from `np.lib.index_tricks` --- numpy/lib/index_tricks.pyi | 23 +++++++++++++++---- numpy/typing/tests/data/fail/index_tricks.py | 2 +- .../typing/tests/data/reveal/index_tricks.py | 19 +++++++-------- 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi index 3e5bc1adb47b..e486fe8f2af8 100644 --- a/numpy/lib/index_tricks.pyi +++ b/numpy/lib/index_tricks.pyi @@ -17,6 +17,7 @@ from numpy import ( ndindex as ndindex, ndarray, dtype, + integer, str_, bytes_, bool_, @@ -33,7 +34,6 @@ from numpy.typing import ( _NestedSequence, _RecursiveSequence, _ArrayND, - _ArrayOrScalar, _ArrayLikeInt, # DTypes @@ -57,18 +57,33 @@ _ArrayType = TypeVar("_ArrayType", 
bound=ndarray[Any, Any]) __all__: List[str] +@overload +def unravel_index( # type: ignore[misc] + indices: Union[int, integer[Any]], + shape: _ShapeLike, + order: _OrderCF = ... +) -> Tuple[intp, ...]: ... +@overload def unravel_index( indices: _ArrayLikeInt, shape: _ShapeLike, order: _OrderCF = ... -) -> Tuple[_ArrayOrScalar[intp], ...]: ... +) -> Tuple[_ArrayND[intp], ...]: ... +@overload +def ravel_multi_index( # type: ignore[misc] + multi_index: Sequence[Union[int, integer[Any]]], + dims: _ShapeLike, + mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., + order: _OrderCF = ... +) -> intp: ... +@overload def ravel_multi_index( - multi_index: ArrayLike, + multi_index: Sequence[_ArrayLikeInt], dims: _ShapeLike, mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., order: _OrderCF = ... -) -> _ArrayOrScalar[intp]: ... +) -> _ArrayND[intp]: ... @overload def ix_(*args: _NestedSequence[_SupportsDType[_DType]]) -> Tuple[ndarray[Any, _DType], ...]: ... diff --git a/numpy/typing/tests/data/fail/index_tricks.py b/numpy/typing/tests/data/fail/index_tricks.py index cbc43fd54b3a..c508bf3aeae6 100644 --- a/numpy/typing/tests/data/fail/index_tricks.py +++ b/numpy/typing/tests/data/fail/index_tricks.py @@ -5,7 +5,7 @@ AR_LIKE_f: List[float] np.unravel_index(AR_LIKE_f, (1, 2, 3)) # E: incompatible type -np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # E: incompatible type +np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # E: No overload variant np.mgrid[1] # E: Invalid index type np.mgrid[...] # E: Invalid index type np.ogrid[1] # E: Invalid index type diff --git a/numpy/typing/tests/data/reveal/index_tricks.py b/numpy/typing/tests/data/reveal/index_tricks.py index ec20130251e2..863d60220937 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.py +++ b/numpy/typing/tests/data/reveal/index_tricks.py @@ -27,15 +27,16 @@ reveal_type(iter(np.ndindex(1, 2, 3))) # E: Iterator[builtins.tuple[builtins.int]] reveal_type(next(np.ndindex(1, 2, 3))) # E: builtins.tuple[builtins.int] -reveal_type(np.unravel_index([22, 41, 37], (7, 6))) # E: tuple[Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]]] -reveal_type(np.unravel_index([31, 41, 13], (7, 6), order="F")) # E: tuple[Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]]] -reveal_type(np.unravel_index(1621, (6, 7, 8, 9))) # E: tuple[Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]]] - -reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6))) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] -reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F")) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] -reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip")) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] -reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap"))) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] -reveal_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))) # E: Union[{intp}, numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unravel_index([22, 41, 37], (7, 6))) # E: tuple[numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unravel_index([31, 41, 13], (7, 6), order="F")) # E: tuple[numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unravel_index(1621, (6, 7, 8, 9))) # E: tuple[{intp}] + +reveal_type(np.ravel_multi_index([[1]], (7, 6))) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] +reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6))) # E: {intp} +reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F")) # E: {intp} 
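# Runtime counterpart of the reveal expectations above, for reference:
# scalar-style multi-indices collapse to a single intp, array-likes give
# back index arrays, which is the distinction the new overload pairs for
# ravel_multi_index / unravel_index encode.
import numpy as np

print(np.ravel_multi_index((3, 1), (7, 6)))      # 19
print(np.ravel_multi_index([[3], [1]], (7, 6)))  # [19]
print(np.unravel_index(1621, (6, 7, 8, 9)))      # (3, 1, 4, 1)
print(np.unravel_index([22, 41], (7, 6)))        # (array([3, 6]), array([4, 5]))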
+reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip")) # E: {intp} +reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap"))) # E: {intp} +reveal_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))) # E: {intp} reveal_type(np.mgrid[1:1:2]) # E: numpy.ndarray[Any, numpy.dtype[Any]] reveal_type(np.mgrid[1:1:2, None:10]) # E: numpy.ndarray[Any, numpy.dtype[Any]] From 5fad83954e41d33e2a6e27fd77f98975cc81caef Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 21 Apr 2021 18:29:24 +0200 Subject: [PATCH 1009/1270] MAINT: Remove unsafe unions from `np.lib.ufunclike` --- numpy/lib/ufunclike.pyi | 28 ++++++++++++++++----- numpy/typing/tests/data/reveal/ufunclike.py | 24 +++++++++--------- 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/numpy/lib/ufunclike.pyi b/numpy/lib/ufunclike.pyi index 3443fa7ae53d..e97383af8a7e 100644 --- a/numpy/lib/ufunclike.pyi +++ b/numpy/lib/ufunclike.pyi @@ -1,48 +1,64 @@ from typing import Any, overload, TypeVar, List, Union -from numpy import floating, bool_, ndarray +from numpy import floating, bool_, object_, ndarray from numpy.typing import ( + _ArrayND, + _FloatLike_co, _ArrayLikeFloat_co, _ArrayLikeObject_co, - _ArrayOrScalar, ) _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) __all__: List[str] +@overload +def fix( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> floating[Any]: ... @overload def fix( x: _ArrayLikeFloat_co, out: None = ..., -) -> _ArrayOrScalar[floating[Any]]: ... +) -> _ArrayND[floating[Any]]: ... @overload def fix( x: _ArrayLikeObject_co, out: None = ..., -) -> Any: ... +) -> _ArrayND[object_]: ... @overload def fix( x: Union[_ArrayLikeFloat_co, _ArrayLikeObject_co], out: _ArrayType, ) -> _ArrayType: ... +@overload +def isposinf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> bool_: ... @overload def isposinf( x: _ArrayLikeFloat_co, out: None = ..., -) -> _ArrayOrScalar[bool_]: ... +) -> _ArrayND[bool_]: ... @overload def isposinf( x: _ArrayLikeFloat_co, out: _ArrayType, ) -> _ArrayType: ... +@overload +def isneginf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> bool_: ... @overload def isneginf( x: _ArrayLikeFloat_co, out: None = ..., -) -> _ArrayOrScalar[bool_]: ... +) -> _ArrayND[bool_]: ... 
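# Runtime counterpart of the float-like / array-like overload split above
# (illustrative only): a plain scalar gives back a numpy scalar, an
# array-like gives back an ndarray.
import numpy as np

print(type(np.isposinf(np.inf)))            # <class 'numpy.bool_'>
print(np.isposinf([-np.inf, 0.0, np.inf]))  # [False False  True]
print(np.fix(3.7), np.fix([-1.5, 2.5]))     # 3.0 [-1.  2.]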
@overload def isneginf( x: _ArrayLikeFloat_co, diff --git a/numpy/typing/tests/data/reveal/ufunclike.py b/numpy/typing/tests/data/reveal/ufunclike.py index 35e45a824698..8b3aea7ceeb9 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.py +++ b/numpy/typing/tests/data/reveal/ufunclike.py @@ -9,21 +9,21 @@ AR_U: np.ndarray[Any, np.dtype[np.str_]] -reveal_type(np.fix(AR_LIKE_b)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(np.fix(AR_LIKE_u)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(np.fix(AR_LIKE_i)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(np.fix(AR_LIKE_f)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(np.fix(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.fix(AR_LIKE_u)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.fix(AR_LIKE_i)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.fix(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] reveal_type(np.fix(AR_LIKE_O)) # E: Any reveal_type(np.fix(AR_LIKE_f, out=AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] -reveal_type(np.isposinf(AR_LIKE_b)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(np.isposinf(AR_LIKE_u)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(np.isposinf(AR_LIKE_i)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(np.isposinf(AR_LIKE_f)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(np.isposinf(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isposinf(AR_LIKE_u)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isposinf(AR_LIKE_i)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isposinf(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(np.isposinf(AR_LIKE_f, out=AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] -reveal_type(np.isneginf(AR_LIKE_b)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(np.isneginf(AR_LIKE_u)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(np.isneginf(AR_LIKE_i)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(np.isneginf(AR_LIKE_f)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(np.isneginf(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isneginf(AR_LIKE_u)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isneginf(AR_LIKE_i)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isneginf(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(np.isneginf(AR_LIKE_f, out=AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] From a63315aec7c51cc5e3bb0bc0ccfe8fad7d508659 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 21 Apr 2021 19:00:56 +0200 Subject: [PATCH 1010/1270] MAINT: Remove unsafe unions from `np.typing._callable` --- numpy/typing/_callable.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 23cabd47a166..30c210b62c33 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -47,7 +47,7 @@ 
_NumberLike_co, ) from . import NBitBase -from ._array_like import ArrayLike, _ArrayOrScalar +from ._array_like import ArrayLike, _ArrayND if sys.version_info >= (3, 8): from typing import Protocol @@ -168,7 +168,7 @@ def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ... @overload def __call__( self, __other: Union[int, signedinteger[Any]] - ) -> Union[signedinteger[Any], float64]: ... + ) -> Any: ... @overload def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... @overload @@ -198,7 +198,7 @@ def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ... @overload def __call__( self, __other: Union[int, signedinteger[Any]] - ) -> Union[signedinteger[Any], float64]: ... + ) -> Any: ... @overload def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... @overload @@ -212,7 +212,7 @@ def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ... @overload def __call__( self, __other: Union[int, signedinteger[Any]] - ) -> Union[_2Tuple[signedinteger[Any]], _2Tuple[float64]]: ... + ) -> _2Tuple[Any]: ... @overload def __call__(self, __other: float) -> _2Tuple[floating[Union[_NBit1, _NBitDouble]]]: ... @overload @@ -336,7 +336,7 @@ class _ComparisonOp(Protocol[_T1, _T2]): @overload def __call__(self, __other: _T1) -> bool_: ... @overload - def __call__(self, __other: _T2) -> _ArrayOrScalar[bool_]: ... + def __call__(self, __other: _T2) -> _ArrayND[bool_]: ... else: _BoolOp = Any From 235e4f32b87f8a3b93281c19bee03ac67ec32e5c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 21 Apr 2021 19:00:30 +0200 Subject: [PATCH 1011/1270] MAINT: Remove unsafe unions from `np` --- numpy/__init__.pyi | 551 +++++++++--------- numpy/typing/tests/data/reveal/arithmetic.py | 288 ++++----- numpy/typing/tests/data/reveal/comparisons.py | 80 +-- numpy/typing/tests/data/reveal/mod.py | 28 +- .../typing/tests/data/reveal/ndarray_misc.py | 90 +-- 5 files changed, 519 insertions(+), 518 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 5c7a3c7e4dd1..8824497f08ee 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -11,7 +11,6 @@ from numpy.typing import ( # Arrays ArrayLike, _ArrayND, - _ArrayOrScalar, _SupportsArray, _NestedSequence, _RecursiveSequence, @@ -1289,14 +1288,21 @@ class _ArrayOrScalarCommon: def __array_wrap__(array, context=...): ... def __setstate__(self, __state): ... # a `bool_` is returned when `keepdims=True` and `self` is a 0d array + @overload def all( - self, axis: None = ..., out: None = ..., keepdims: Literal[False] = ... + self, + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., ) -> bool_: ... @overload def all( - self, axis: Optional[_ShapeLike] = ..., out: None = ..., keepdims: bool = ... - ) -> Union[bool_, ndarray]: ... + self, + axis: Optional[_ShapeLike] = ..., + out: None = ..., + keepdims: bool = ..., + ) -> Any: ... @overload def all( self, @@ -1304,14 +1310,21 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., keepdims: bool = ..., ) -> _NdArraySubClass: ... + @overload def any( - self, axis: None = ..., out: None = ..., keepdims: Literal[False] = ... + self, + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., ) -> bool_: ... @overload def any( - self, axis: Optional[_ShapeLike] = ..., out: None = ..., keepdims: bool = ... - ) -> Union[bool_, ndarray]: ... + self, + axis: Optional[_ShapeLike] = ..., + out: None = ..., + keepdims: bool = ..., + ) -> Any: ... 
@overload def any( self, @@ -1319,40 +1332,67 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., keepdims: bool = ..., ) -> _NdArraySubClass: ... + @overload - def argmax(self, axis: None = ..., out: None = ...) -> intp: ... + def argmax( + self, + axis: None = ..., + out: None = ..., + ) -> intp: ... @overload def argmax( - self, axis: _ShapeLike = ..., out: None = ... - ) -> Union[ndarray, intp]: ... + self, + axis: _ShapeLike = ..., + out: None = ..., + ) -> Any: ... @overload def argmax( - self, axis: Optional[_ShapeLike] = ..., out: _NdArraySubClass = ... + self, + axis: Optional[_ShapeLike] = ..., + out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... + @overload - def argmin(self, axis: None = ..., out: None = ...) -> intp: ... + def argmin( + self, + axis: None = ..., + out: None = ..., + ) -> intp: ... @overload def argmin( - self, axis: _ShapeLike = ..., out: None = ... - ) -> Union[ndarray, intp]: ... + self, + axis: _ShapeLike = ..., + out: None = ..., + ) -> Any: ... @overload def argmin( - self, axis: Optional[_ShapeLike] = ..., out: _NdArraySubClass = ... + self, + axis: Optional[_ShapeLike] = ..., + out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... + def argsort( self, axis: Optional[SupportsIndex] = ..., kind: Optional[_SortKind] = ..., order: Union[None, str, Sequence[str]] = ..., ) -> ndarray: ... + @overload def choose( - self, choices: ArrayLike, out: None = ..., mode: _ModeKind = ..., + self, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., ) -> ndarray: ... @overload def choose( - self, choices: ArrayLike, out: _NdArraySubClass = ..., mode: _ModeKind = ..., + self, + choices: ArrayLike, + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., ) -> _NdArraySubClass: ... + @overload def clip( self, @@ -1360,7 +1400,7 @@ class _ArrayOrScalarCommon: max: Optional[ArrayLike] = ..., out: None = ..., **kwargs: Any, - ) -> Union[number, ndarray]: ... + ) -> ndarray: ... @overload def clip( self, @@ -1368,7 +1408,7 @@ class _ArrayOrScalarCommon: max: ArrayLike = ..., out: None = ..., **kwargs: Any, - ) -> Union[number, ndarray]: ... + ) -> ndarray: ... @overload def clip( self, @@ -1385,6 +1425,7 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., **kwargs: Any, ) -> _NdArraySubClass: ... + @overload def compress( self, @@ -1399,8 +1440,11 @@ class _ArrayOrScalarCommon: axis: Optional[SupportsIndex] = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... + def conj(self: _ArraySelf) -> _ArraySelf: ... + def conjugate(self: _ArraySelf) -> _ArraySelf: ... + @overload def cumprod( self, @@ -1415,6 +1459,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... + @overload def cumsum( self, @@ -1429,15 +1474,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... - @overload - def max( - self, - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., - ) -> number: ... + @overload def max( self, @@ -1446,7 +1483,7 @@ class _ArrayOrScalarCommon: keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., - ) -> Union[number, ndarray]: ... + ) -> Any: ... @overload def max( self, @@ -1456,14 +1493,7 @@ class _ArrayOrScalarCommon: initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _NdArraySubClass: ... 
- @overload - def mean( - self, - axis: None = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: Literal[False] = ..., - ) -> number: ... + @overload def mean( self, @@ -1471,7 +1501,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., - ) -> Union[number, ndarray]: ... + ) -> Any: ... @overload def mean( self, @@ -1480,15 +1510,7 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., keepdims: bool = ..., ) -> _NdArraySubClass: ... - @overload - def min( - self, - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., - ) -> number: ... + @overload def min( self, @@ -1497,7 +1519,7 @@ class _ArrayOrScalarCommon: keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., - ) -> Union[number, ndarray]: ... + ) -> Any: ... @overload def min( self, @@ -1507,17 +1529,12 @@ class _ArrayOrScalarCommon: initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _NdArraySubClass: ... - def newbyteorder(self: _ArraySelf, __new_order: _ByteOrder = ...) -> _ArraySelf: ... - @overload - def prod( - self, - axis: None = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., - ) -> number: ... + + def newbyteorder( + self: _ArraySelf, + __new_order: _ByteOrder = ..., + ) -> _ArraySelf: ... + @overload def prod( self, @@ -1527,7 +1544,7 @@ class _ArrayOrScalarCommon: keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., - ) -> Union[number, ndarray]: ... + ) -> Any: ... @overload def prod( self, @@ -1538,14 +1555,14 @@ class _ArrayOrScalarCommon: initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _NdArraySubClass: ... + @overload def ptp( - self, axis: None = ..., out: None = ..., keepdims: Literal[False] = ..., - ) -> number: ... - @overload - def ptp( - self, axis: Optional[_ShapeLike] = ..., out: None = ..., keepdims: bool = ..., - ) -> Union[number, ndarray]: ... + self, + axis: Optional[_ShapeLike] = ..., + out: None = ..., + keepdims: bool = ..., + ) -> Any: ... @overload def ptp( self, @@ -1553,26 +1570,26 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., keepdims: bool = ..., ) -> _NdArraySubClass: ... + def repeat( - self, repeats: _ArrayLikeIntOrBool, axis: Optional[SupportsIndex] = ... + self, + repeats: _ArrayLikeIntOrBool, + axis: Optional[SupportsIndex] = ..., ) -> ndarray: ... + @overload def round( - self: _ArraySelf, decimals: SupportsIndex = ..., out: None = ... + self: _ArraySelf, + decimals: SupportsIndex = ..., + out: None = ..., ) -> _ArraySelf: ... @overload def round( - self, decimals: SupportsIndex = ..., out: _NdArraySubClass = ... - ) -> _NdArraySubClass: ... - @overload - def std( self, - axis: None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: int = ..., - keepdims: Literal[False] = ..., - ) -> number: ... + decimals: SupportsIndex = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + @overload def std( self, @@ -1581,7 +1598,7 @@ class _ArrayOrScalarCommon: out: None = ..., ddof: int = ..., keepdims: bool = ..., - ) -> Union[number, ndarray]: ... + ) -> Any: ... @overload def std( self, @@ -1591,16 +1608,7 @@ class _ArrayOrScalarCommon: ddof: int = ..., keepdims: bool = ..., ) -> _NdArraySubClass: ... 
- @overload - def sum( - self, - axis: None = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., - ) -> number: ... + @overload def sum( self, @@ -1610,7 +1618,7 @@ class _ArrayOrScalarCommon: keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., - ) -> Union[number, ndarray]: ... + ) -> Any: ... @overload def sum( self, @@ -1621,6 +1629,7 @@ class _ArrayOrScalarCommon: initial: _NumberLike_co = ..., where: _ArrayLikeBool = ..., ) -> _NdArraySubClass: ... + @overload def take( self, @@ -1628,7 +1637,7 @@ class _ArrayOrScalarCommon: axis: Optional[SupportsIndex] = ..., out: None = ..., mode: _ModeKind = ..., - ) -> generic: ... + ) -> Any: ... @overload def take( self, @@ -1645,15 +1654,7 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., mode: _ModeKind = ..., ) -> _NdArraySubClass: ... - @overload - def var( - self, - axis: None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: int = ..., - keepdims: Literal[False] = ..., - ) -> number: ... + @overload def var( self, @@ -1662,7 +1663,7 @@ class _ArrayOrScalarCommon: out: None = ..., ddof: int = ..., keepdims: bool = ..., - ) -> Union[number, ndarray]: ... + ) -> Any: ... @overload def var( self, @@ -1772,7 +1773,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): axis2: SupportsIndex = ..., ) -> _ArraySelf: ... @overload - def dot(self, b: ArrayLike, out: None = ...) -> Union[number, ndarray]: ... + def dot(self, b: ArrayLike, out: None = ...) -> ndarray: ... @overload def dot(self, b: ArrayLike, out: _NdArraySubClass = ...) -> _NdArraySubClass: ... # `nonzero()` is deprecated for 0d arrays/generics @@ -1812,7 +1813,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): axis2: SupportsIndex = ..., dtype: DTypeLike = ..., out: None = ..., - ) -> Union[number, ndarray]: ... + ) -> Any: ... @overload def trace( self, # >= 2D array @@ -1843,105 +1844,105 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __lt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayND[bool_]: ... @overload - def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[bool_]: ... @overload - def __lt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + def __lt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[bool_]: ... @overload - def __lt__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + def __lt__(self: _ArrayND[object_], other: Any) -> _ArrayND[bool_]: ... @overload - def __lt__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayOrScalar[bool_]: ... + def __lt__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayND[bool_]: ... @overload def __lt__( self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, - ) -> _ArrayOrScalar[bool_]: ... + ) -> _ArrayND[bool_]: ... @overload def __le__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... 
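# Runtime flavour of the comparison annotations above, for reference:
# elementwise comparisons give boolean results -- an ndarray of bool_ for
# anything with dimensions, a bool_ scalar for 0-d operands -- and the
# stubs now advertise the ndarray case instead of a Union of both.
import numpy as np

a = np.arange(4)
print(a < 2)                    # [ True  True False False]
print((a < 2).dtype)            # bool
print(type(np.float64(1) < 2))  # <class 'numpy.bool_'>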
+ def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayND[bool_]: ... @overload - def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[bool_]: ... @overload - def __le__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + def __le__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[bool_]: ... @overload - def __le__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + def __le__(self: _ArrayND[object_], other: Any) -> _ArrayND[bool_]: ... @overload - def __le__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayOrScalar[bool_]: ... + def __le__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayND[bool_]: ... @overload def __le__( self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, - ) -> _ArrayOrScalar[bool_]: ... + ) -> _ArrayND[bool_]: ... @overload def __gt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayND[bool_]: ... @overload - def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[bool_]: ... @overload - def __gt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + def __gt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[bool_]: ... @overload - def __gt__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + def __gt__(self: _ArrayND[object_], other: Any) -> _ArrayND[bool_]: ... @overload - def __gt__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayOrScalar[bool_]: ... + def __gt__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayND[bool_]: ... @overload def __gt__( self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, - ) -> _ArrayOrScalar[bool_]: ... + ) -> _ArrayND[bool_]: ... @overload def __ge__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayND[bool_]: ... @overload - def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[bool_]: ... @overload - def __ge__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + def __ge__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[bool_]: ... @overload - def __ge__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + def __ge__(self: _ArrayND[object_], other: Any) -> _ArrayND[bool_]: ... @overload - def __ge__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayOrScalar[bool_]: ... + def __ge__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayND[bool_]: ... @overload def __ge__( self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, - ) -> _ArrayOrScalar[bool_]: ... + ) -> _ArrayND[bool_]: ... # Unary ops @overload - def __abs__(self: _ArrayND[bool_]) -> _ArrayOrScalar[bool_]: ... 
+ def __abs__(self: _ArrayND[bool_]) -> _ArrayND[bool_]: ... @overload - def __abs__(self: _ArrayND[complexfloating[_NBit1, _NBit1]]) -> _ArrayOrScalar[floating[_NBit1]]: ... + def __abs__(self: _ArrayND[complexfloating[_NBit1, _NBit1]]) -> _ArrayND[floating[_NBit1]]: ... @overload - def __abs__(self: _ArrayND[_NumberType]) -> _ArrayOrScalar[_NumberType]: ... + def __abs__(self: _ArrayND[_NumberType]) -> _ArrayND[_NumberType]: ... @overload - def __abs__(self: _ArrayND[timedelta64]) -> _ArrayOrScalar[timedelta64]: ... + def __abs__(self: _ArrayND[timedelta64]) -> _ArrayND[timedelta64]: ... @overload def __abs__(self: _ArrayND[object_]) -> Any: ... @overload - def __invert__(self: _ArrayND[bool_]) -> _ArrayOrScalar[bool_]: ... + def __invert__(self: _ArrayND[bool_]) -> _ArrayND[bool_]: ... @overload - def __invert__(self: _ArrayND[_IntType]) -> _ArrayOrScalar[_IntType]: ... + def __invert__(self: _ArrayND[_IntType]) -> _ArrayND[_IntType]: ... @overload def __invert__(self: _ArrayND[object_]) -> Any: ... @overload - def __pos__(self: _ArrayND[_NumberType]) -> _ArrayOrScalar[_NumberType]: ... + def __pos__(self: _ArrayND[_NumberType]) -> _ArrayND[_NumberType]: ... @overload - def __pos__(self: _ArrayND[timedelta64]) -> _ArrayOrScalar[timedelta64]: ... + def __pos__(self: _ArrayND[timedelta64]) -> _ArrayND[timedelta64]: ... @overload def __pos__(self: _ArrayND[object_]) -> Any: ... @overload - def __neg__(self: _ArrayND[_NumberType]) -> _ArrayOrScalar[_NumberType]: ... + def __neg__(self: _ArrayND[_NumberType]) -> _ArrayND[_NumberType]: ... @overload - def __neg__(self: _ArrayND[timedelta64]) -> _ArrayOrScalar[timedelta64]: ... + def __neg__(self: _ArrayND[timedelta64]) -> _ArrayND[timedelta64]: ... @overload def __neg__(self: _ArrayND[object_]) -> Any: ... @@ -1950,15 +1951,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __matmul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __matmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __matmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... @overload def __matmul__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -1972,15 +1973,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rmatmul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... 
@overload - def __rmatmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... @overload def __rmatmul__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -1994,15 +1995,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __mod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __mod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __mod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[timedelta64]: ... + def __mod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[timedelta64]: ... @overload def __mod__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2016,15 +2017,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rmod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __rmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... 
# type: ignore[misc] @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[timedelta64]: ... + def __rmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[timedelta64]: ... @overload def __rmod__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2038,15 +2039,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __divmod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __divmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _2Tuple[_ArrayOrScalar[int8]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _2Tuple[_ArrayND[int8]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[_ArrayOrScalar[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[_ArrayND[unsignedinteger[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[_ArrayOrScalar[signedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[_ArrayND[signedinteger[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayOrScalar[floating[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayND[floating[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Union[Tuple[int64, timedelta64], Tuple[_ArrayND[int64], _ArrayND[timedelta64]]]: ... + def __divmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[_ArrayND[int64], _ArrayND[timedelta64]]: ... @overload def __divmod__( self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], @@ -2056,15 +2057,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rdivmod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rdivmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _2Tuple[_ArrayOrScalar[int8]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _2Tuple[_ArrayND[int8]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[_ArrayOrScalar[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[_ArrayND[unsignedinteger[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[_ArrayOrScalar[signedinteger[Any]]]: ... 
# type: ignore[misc] + def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[_ArrayND[signedinteger[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayOrScalar[floating[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayND[floating[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Union[Tuple[int64, timedelta64], Tuple[_ArrayND[int64], _ArrayND[timedelta64]]]: ... + def __rdivmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[_ArrayND[int64], _ArrayND[timedelta64]]: ... @overload def __rdivmod__( self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], @@ -2074,21 +2075,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __add__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __add__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __add__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... # type: ignore[misc] + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayOrScalar[datetime64]: ... + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayND[datetime64]: ... @overload - def __add__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[datetime64]: ... + def __add__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... @overload def __add__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2102,21 +2103,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __radd__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __radd__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __radd__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... 
# type: ignore[misc] @overload - def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... # type: ignore[misc] + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayOrScalar[datetime64]: ... + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayND[datetime64]: ... @overload - def __radd__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[datetime64]: ... + def __radd__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... @overload def __radd__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2132,19 +2133,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __sub__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... # type: ignore[misc] + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[datetime64]: ... + def __sub__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... 
@overload - def __sub__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[timedelta64]: ... + def __sub__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[timedelta64]: ... @overload def __sub__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2160,19 +2161,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rsub__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayOrScalar[datetime64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayND[datetime64]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[timedelta64]: ... + def __rsub__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[timedelta64]: ... @overload def __rsub__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2186,19 +2187,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __mul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __mul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __mul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... 
# type: ignore[misc] @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[timedelta64]: ... + def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... @overload def __mul__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2212,19 +2213,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rmul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __rmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[timedelta64]: ... + def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... @overload def __rmul__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2238,21 +2239,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __floordiv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __floordiv__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... 
# type: ignore[misc] @overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[int64]: ... + def __floordiv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[int64]: ... @overload def __floordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __floordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayOrScalar[timedelta64]: ... + def __floordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... @overload def __floordiv__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2266,21 +2267,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rfloordiv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rfloordiv__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[int64]: ... + def __rfloordiv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[int64]: ... @overload def __rfloordiv__(self: _ArrayND[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... 
+ def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... @overload def __rfloordiv__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2294,15 +2295,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __pow__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __pow__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __pow__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... @overload def __pow__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2316,15 +2317,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rpow__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rpow__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __rpow__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... # type: ignore[misc] + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... @overload def __rpow__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2338,17 +2339,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __truediv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> _ArrayOrScalar[float64]: ... 
# type: ignore[misc] + def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> _ArrayND[float64]: ... # type: ignore[misc] @overload - def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __truediv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[float64]: ... + def __truediv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[float64]: ... @overload def __truediv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __truediv__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayOrScalar[timedelta64]: ... + def __truediv__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... @overload def __truediv__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2362,17 +2363,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rtruediv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> _ArrayOrScalar[float64]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> _ArrayND[float64]: ... # type: ignore[misc] @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayOrScalar[floating[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] @overload - def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rtruediv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayOrScalar[float64]: ... + def __rtruediv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[float64]: ... @overload def __rtruediv__(self: _ArrayND[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayOrScalar[timedelta64]: ... + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... @overload def __rtruediv__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2386,11 +2387,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __lshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __lshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __lshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... 
# type: ignore[misc] @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... @overload def __lshift__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2404,11 +2405,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rlshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rlshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __rlshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] @overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... @overload def __rlshift__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2422,11 +2423,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __rshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] @overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... @overload def __rshift__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2440,11 +2441,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rrshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rrshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[int8]: ... # type: ignore[misc] + def __rrshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... 
@overload def __rrshift__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2458,11 +2459,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __and__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __and__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __and__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... @overload def __and__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2476,11 +2477,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rand__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rand__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __rand__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... @overload def __rand__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2494,11 +2495,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __xor__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __xor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __xor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... @overload def __xor__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2512,11 +2513,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rxor__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rxor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __rxor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... 
# type: ignore[misc] @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... @overload def __rxor__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2530,11 +2531,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __or__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __or__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __or__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... @overload def __or__(self: _ArrayND[object_], other: Any) -> Any: ... @overload @@ -2548,11 +2549,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __ror__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ror__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayOrScalar[bool_]: ... # type: ignore[misc] + def __ror__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayOrScalar[unsignedinteger[Any]]: ... # type: ignore[misc] + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayOrScalar[signedinteger[Any]]: ... + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... @overload def __ror__(self: _ArrayND[object_], other: Any) -> Any: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.py index 53a3a846dbab..f5d185206c05 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.py +++ b/numpy/typing/tests/data/reveal/arithmetic.py @@ -45,104 +45,104 @@ # Array subtraction -reveal_type(AR_b - AR_LIKE_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_b - AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_b - AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_b - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_b - AR_LIKE_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_b - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_b - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_b - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_b - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_b - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] reveal_type(AR_b - AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_u - AR_b) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_LIKE_i - AR_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_f - AR_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_c - AR_b) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_m - AR_b) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_LIKE_M - AR_b) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_LIKE_u - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_i - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_M - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] reveal_type(AR_LIKE_O - AR_b) # E: Any -reveal_type(AR_u - AR_LIKE_b) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_u - AR_LIKE_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_u - AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_u - AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_u - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_u - AR_LIKE_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_u - 
AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_u - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_u - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_u - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_u - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_u - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] reveal_type(AR_u - AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b - AR_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_LIKE_u - AR_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_LIKE_i - AR_u) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_f - AR_u) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_c - AR_u) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_m - AR_u) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_LIKE_M - AR_u) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_LIKE_b - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_u - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_i - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_M - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] reveal_type(AR_LIKE_O - AR_u) # E: Any -reveal_type(AR_i - AR_LIKE_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_i - AR_LIKE_u) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_i - AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_i - AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_i - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_i - AR_LIKE_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_i - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_i - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_i - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] reveal_type(AR_i - AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b - AR_i) # E: 
Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_u - AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_i - AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_f - AR_i) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_c - AR_i) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_m - AR_i) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_LIKE_M - AR_i) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_LIKE_b - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_u - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_i - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_M - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] reveal_type(AR_LIKE_O - AR_i) # E: Any -reveal_type(AR_f - AR_LIKE_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_f - AR_LIKE_u) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_f - AR_LIKE_i) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_f - AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_f - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_f - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_f - AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b - AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_u - AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_i - AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_f - AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_c - AR_f) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_b - AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_u - AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_i - AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_f - AR_f) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c - AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_LIKE_O - AR_f) # E: Any -reveal_type(AR_c - AR_LIKE_b) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_c - AR_LIKE_u) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_c - AR_LIKE_i) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_c - AR_LIKE_f) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_c - AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_c - AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_u - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_i - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_f - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_c - AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_b - AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_u - AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_i - AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_f - AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_c - AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_LIKE_O - AR_c) # E: Any -reveal_type(AR_m - AR_LIKE_b) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_m - AR_LIKE_u) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_m - AR_LIKE_i) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_m - AR_LIKE_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_m - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] reveal_type(AR_m - AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b - AR_m) # E: 
Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_LIKE_u - AR_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_LIKE_i - AR_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_LIKE_m - AR_m) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_LIKE_M - AR_m) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] +reveal_type(AR_LIKE_b - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_u - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_i - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_m - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_M - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] reveal_type(AR_LIKE_O - AR_m) # E: Any -reveal_type(AR_M - AR_LIKE_b) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] -reveal_type(AR_M - AR_LIKE_u) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] -reveal_type(AR_M - AR_LIKE_i) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] -reveal_type(AR_M - AR_LIKE_m) # E: Union[numpy.datetime64, numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]] -reveal_type(AR_M - AR_LIKE_M) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_M - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_M - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_M - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_M - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_M - AR_LIKE_M) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] reveal_type(AR_M - AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_M - AR_M) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_M - AR_M) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] reveal_type(AR_LIKE_O - AR_M) # E: Any reveal_type(AR_O - AR_LIKE_b) # E: Any @@ -165,86 +165,86 @@ # Array floor division -reveal_type(AR_b // AR_LIKE_b) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]] -reveal_type(AR_b // AR_LIKE_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_b // AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_b // AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_b // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_b // AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[{int8}]] +reveal_type(AR_b // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_b // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_b // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_b // AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_b // AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b // AR_b) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]] 
-reveal_type(AR_LIKE_u // AR_b) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_LIKE_i // AR_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_f // AR_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_c // AR_b) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_b // AR_b) # E: numpy.ndarray[Any, numpy.dtype[{int8}]] +reveal_type(AR_LIKE_u // AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_i // AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f // AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c // AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_LIKE_O // AR_b) # E: Any -reveal_type(AR_u // AR_LIKE_b) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_u // AR_LIKE_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_u // AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_u // AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_u // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_u // AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_u // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_u // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_u // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_u // AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_u // AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b // AR_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_LIKE_u // AR_u) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]] -reveal_type(AR_LIKE_i // AR_u) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_f // AR_u) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_c // AR_u) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_m // AR_u) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_b // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_u // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_i // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] reveal_type(AR_LIKE_O // 
AR_u) # E: Any -reveal_type(AR_i // AR_LIKE_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_i // AR_LIKE_u) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_i // AR_LIKE_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_i // AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_i // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_i // AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_i // AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_i // AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b // AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_u // AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_i // AR_i) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] -reveal_type(AR_LIKE_f // AR_i) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_c // AR_i) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_m // AR_i) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_b // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_u // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_i // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] reveal_type(AR_LIKE_O // AR_i) # E: Any -reveal_type(AR_f // AR_LIKE_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_f // AR_LIKE_u) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_f // AR_LIKE_i) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_f // AR_LIKE_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_f // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_f // AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f // AR_LIKE_c) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_f // AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b // AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_u // AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_i // AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_f // AR_f) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] -reveal_type(AR_LIKE_c // AR_f) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_m // AR_f) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(AR_LIKE_b // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_u // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_i // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_f // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] reveal_type(AR_LIKE_O // AR_f) # E: Any -reveal_type(AR_c // AR_LIKE_b) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_c // AR_LIKE_u) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_c // AR_LIKE_i) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_c // AR_LIKE_f) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_c // AR_LIKE_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_c // AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c // AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_c // AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_b // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_u // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_i // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_f // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] -reveal_type(AR_LIKE_c // AR_c) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]] +reveal_type(AR_LIKE_b // AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_u // AR_c) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_i // AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_f // AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_c // AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] reveal_type(AR_LIKE_O // AR_c) # E: Any -reveal_type(AR_m // AR_LIKE_u) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_m // AR_LIKE_i) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_m // AR_LIKE_f) # E: Union[numpy.timedelta64, numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] -reveal_type(AR_m // AR_LIKE_m) # E: Union[{int64}, numpy.ndarray[Any, numpy.dtype[{int64}]]] +reveal_type(AR_m // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m // AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] reveal_type(AR_m // AR_LIKE_O) # E: Any -reveal_type(AR_LIKE_m // AR_m) # E: Union[{int64}, numpy.ndarray[Any, numpy.dtype[{int64}]]] +reveal_type(AR_LIKE_m // AR_m) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] reveal_type(AR_LIKE_O // AR_m) # E: Any reveal_type(AR_O // AR_LIKE_b) # E: Any @@ -472,9 +472,9 @@ # Int reveal_type(i8 + i8) # E: {int64} -reveal_type(i8 + u8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i8 + u8) # E: Any reveal_type(i8 + i4) # E: {int64} -reveal_type(i8 + u4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i8 + u4) # E: Any reveal_type(i8 + b_) # E: {int64} reveal_type(i8 + b) # E: {int64} reveal_type(i8 + c) # E: {complex128} @@ -483,19 +483,19 @@ reveal_type(i8 + AR_f) # E: Any reveal_type(u8 + u8) # E: {uint64} -reveal_type(u8 + i4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u8 + i4) # E: Any reveal_type(u8 + u4) # E: {uint64} reveal_type(u8 + b_) # E: {uint64} reveal_type(u8 + b) # E: {uint64} reveal_type(u8 + c) # E: {complex128} reveal_type(u8 + f) # E: {float64} -reveal_type(u8 + i) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u8 + i) # E: Any reveal_type(u8 + AR_f) # E: Any reveal_type(i8 + i8) # E: {int64} -reveal_type(u8 + i8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u8 + i8) # E: Any reveal_type(i4 + i8) # E: {int64} -reveal_type(u4 + i8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u4 + i8) # E: Any reveal_type(b_ + i8) # E: {int64} reveal_type(b + i8) # E: {int64} reveal_type(c + i8) # E: {complex128} @@ -504,13 +504,13 @@ reveal_type(AR_f + i8) # E: Any reveal_type(u8 + u8) # E: {uint64} -reveal_type(i4 + u8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i4 + u8) # E: Any reveal_type(u4 + u8) # E: {uint64} reveal_type(b_ + u8) # E: {uint64} reveal_type(b + u8) # E: {uint64} reveal_type(c + u8) # E: {complex128} reveal_type(f + u8) # E: {float64} -reveal_type(i + u8) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i + u8) # E: Any reveal_type(AR_f + u8) # E: Any reveal_type(i4 + i8) # E: {int64} @@ -520,11 +520,11 @@ reveal_type(i4 + b) # E: {int32} reveal_type(i4 + AR_f) # E: Any -reveal_type(u4 + i8) # E: Union[numpy.signedinteger[Any], {float64}] -reveal_type(u4 + i4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u4 + i8) # E: Any +reveal_type(u4 + i4) # E: 
Any reveal_type(u4 + u8) # E: {uint64} reveal_type(u4 + u4) # E: {uint32} -reveal_type(u4 + i) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(u4 + i) # E: Any reveal_type(u4 + b_) # E: {uint32} reveal_type(u4 + b) # E: {uint32} reveal_type(u4 + AR_f) # E: Any @@ -536,11 +536,11 @@ reveal_type(b + i4) # E: {int32} reveal_type(AR_f + i4) # E: Any -reveal_type(i8 + u4) # E: Union[numpy.signedinteger[Any], {float64}] -reveal_type(i4 + u4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i8 + u4) # E: Any +reveal_type(i4 + u4) # E: Any reveal_type(u8 + u4) # E: {uint64} reveal_type(u4 + u4) # E: {uint32} reveal_type(b_ + u4) # E: {uint32} reveal_type(b + u4) # E: {uint32} -reveal_type(i + u4) # E: Union[numpy.signedinteger[Any], {float64}] +reveal_type(i + u4) # E: Any reveal_type(AR_f + u4) # E: Any diff --git a/numpy/typing/tests/data/reveal/comparisons.py b/numpy/typing/tests/data/reveal/comparisons.py index 5053a9e8215e..16f21cc39349 100644 --- a/numpy/typing/tests/data/reveal/comparisons.py +++ b/numpy/typing/tests/data/reveal/comparisons.py @@ -34,12 +34,12 @@ reveal_type(td > i4) # E: numpy.bool_ reveal_type(td > i8) # E: numpy.bool_ -reveal_type(td > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(td > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(AR > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(AR > td) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > td) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(td > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(td > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR > td) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > td) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] # boolean @@ -56,8 +56,8 @@ reveal_type(b_ > c) # E: numpy.bool_ reveal_type(b_ > c16) # E: numpy.bool_ reveal_type(b_ > c8) # E: numpy.bool_ -reveal_type(b_ > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(b_ > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(b_ > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(b_ > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] # Complex @@ -72,8 +72,8 @@ reveal_type(c16 > c) # E: numpy.bool_ reveal_type(c16 > f) # E: numpy.bool_ reveal_type(c16 > i) # E: numpy.bool_ -reveal_type(c16 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(c16 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(c16 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(c16 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(c16 > c16) # E: numpy.bool_ reveal_type(f8 > c16) # E: numpy.bool_ @@ -86,8 +86,8 @@ reveal_type(c > c16) # E: numpy.bool_ reveal_type(f > c16) # E: numpy.bool_ reveal_type(i > c16) # E: numpy.bool_ -reveal_type(AR > c16) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > c16) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > c16) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > c16) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(c8 > c16) # E: numpy.bool_ reveal_type(c8 > f8) # E: numpy.bool_ @@ -100,8 +100,8 @@ reveal_type(c8 > c) # E: numpy.bool_ reveal_type(c8 > f) # E: numpy.bool_ reveal_type(c8 > i) # E: numpy.bool_ -reveal_type(c8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(c8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(c8 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(c8 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(c16 > c8) # E: numpy.bool_ reveal_type(f8 > c8) # E: numpy.bool_ @@ -114,8 +114,8 @@ reveal_type(c > c8) # E: numpy.bool_ reveal_type(f > c8) # E: numpy.bool_ reveal_type(i > c8) # E: numpy.bool_ -reveal_type(AR > c8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > c8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > c8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > c8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] # Float @@ -128,8 +128,8 @@ reveal_type(f8 > c) # E: numpy.bool_ reveal_type(f8 > f) # E: numpy.bool_ reveal_type(f8 > i) # E: numpy.bool_ -reveal_type(f8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(f8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(f8 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(f8 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(f8 > f8) # E: numpy.bool_ reveal_type(i8 > f8) # E: numpy.bool_ @@ -140,8 +140,8 @@ reveal_type(c > f8) # E: numpy.bool_ reveal_type(f > f8) # E: numpy.bool_ reveal_type(i > f8) # E: numpy.bool_ -reveal_type(AR > f8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > f8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > f8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > f8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(f4 > f8) # E: numpy.bool_ reveal_type(f4 > i8) # E: numpy.bool_ @@ -152,8 +152,8 @@ reveal_type(f4 > c) # E: numpy.bool_ reveal_type(f4 > f) # E: numpy.bool_ reveal_type(f4 > i) # E: numpy.bool_ -reveal_type(f4 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(f4 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(f4 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(f4 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(f8 > f4) # E: numpy.bool_ reveal_type(i8 > f4) # E: numpy.bool_ @@ -164,8 +164,8 @@ reveal_type(c > f4) # E: numpy.bool_ reveal_type(f > f4) # E: numpy.bool_ reveal_type(i > f4) # E: numpy.bool_ -reveal_type(AR > f4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > f4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > f4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > f4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] # Int @@ -178,8 +178,8 @@ reveal_type(i8 > c) # E: numpy.bool_ reveal_type(i8 > f) # E: numpy.bool_ reveal_type(i8 > i) # E: numpy.bool_ -reveal_type(i8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(i8 > SEQ) # E: Union[numpy.bool_, 
numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(i8 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i8 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(u8 > u8) # E: numpy.bool_ reveal_type(u8 > i4) # E: numpy.bool_ @@ -189,8 +189,8 @@ reveal_type(u8 > c) # E: numpy.bool_ reveal_type(u8 > f) # E: numpy.bool_ reveal_type(u8 > i) # E: numpy.bool_ -reveal_type(u8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(u8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(u8 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(u8 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(i8 > i8) # E: numpy.bool_ reveal_type(u8 > i8) # E: numpy.bool_ @@ -201,8 +201,8 @@ reveal_type(c > i8) # E: numpy.bool_ reveal_type(f > i8) # E: numpy.bool_ reveal_type(i > i8) # E: numpy.bool_ -reveal_type(AR > i8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > i8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > i8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > i8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(u8 > u8) # E: numpy.bool_ reveal_type(i4 > u8) # E: numpy.bool_ @@ -212,16 +212,16 @@ reveal_type(c > u8) # E: numpy.bool_ reveal_type(f > u8) # E: numpy.bool_ reveal_type(i > u8) # E: numpy.bool_ -reveal_type(AR > u8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > u8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > u8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > u8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(i4 > i8) # E: numpy.bool_ reveal_type(i4 > i4) # E: numpy.bool_ reveal_type(i4 > i) # E: numpy.bool_ reveal_type(i4 > b_) # E: numpy.bool_ reveal_type(i4 > b) # E: numpy.bool_ -reveal_type(i4 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(i4 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(i4 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i4 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(u4 > i8) # E: numpy.bool_ reveal_type(u4 > i4) # E: numpy.bool_ @@ -230,16 +230,16 @@ reveal_type(u4 > i) # E: numpy.bool_ reveal_type(u4 > b_) # E: numpy.bool_ reveal_type(u4 > b) # E: numpy.bool_ -reveal_type(u4 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(u4 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(u4 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(u4 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(i8 > i4) # E: numpy.bool_ reveal_type(i4 > i4) # E: numpy.bool_ reveal_type(i > i4) # E: numpy.bool_ reveal_type(b_ > i4) # E: numpy.bool_ reveal_type(b > i4) # E: numpy.bool_ -reveal_type(AR > i4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > i4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > i4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > i4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] reveal_type(i8 > u4) # E: numpy.bool_ reveal_type(i4 > u4) # E: numpy.bool_ @@ -248,5 +248,5 @@ reveal_type(b_ > u4) # E: numpy.bool_ reveal_type(b > u4) # E: numpy.bool_ 
reveal_type(i > u4) # E: numpy.bool_ -reveal_type(AR > u4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] -reveal_type(SEQ > u4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > u4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > u4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] diff --git a/numpy/typing/tests/data/reveal/mod.py b/numpy/typing/tests/data/reveal/mod.py index 4a913f11a931..bf45b8c589b5 100644 --- a/numpy/typing/tests/data/reveal/mod.py +++ b/numpy/typing/tests/data/reveal/mod.py @@ -26,8 +26,8 @@ reveal_type(td % AR_m) # E: Any reveal_type(divmod(td, td)) # E: Tuple[{int64}, numpy.timedelta64] -reveal_type(divmod(AR_m, td)) # E: Union[Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.timedelta64], Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]]] -reveal_type(divmod(td, AR_m)) # E: Union[Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.timedelta64], Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]]] +reveal_type(divmod(AR_m, td)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(divmod(td, AR_m)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] # Bool @@ -38,7 +38,7 @@ reveal_type(b_ % i8) # E: {int64} reveal_type(b_ % u8) # E: {uint64} reveal_type(b_ % f8) # E: {float64} -reveal_type(b_ % AR_b) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]] +reveal_type(b_ % AR_b) # E: numpy.ndarray[Any, numpy.dtype[{int8}]] reveal_type(divmod(b_, b)) # E: Tuple[{int8}, {int8}] reveal_type(divmod(b_, i)) # E: Tuple[{int_}, {int_}] @@ -47,7 +47,7 @@ reveal_type(divmod(b_, i8)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(b_, u8)) # E: Tuple[{uint64}, {uint64}] reveal_type(divmod(b_, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(b_, AR_b)) # E: Tuple[Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]], Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]]] +reveal_type(divmod(b_, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[{int8}]], numpy.ndarray[Any, numpy.dtype[{int8}]]] reveal_type(b % b_) # E: {int8} reveal_type(i % b_) # E: {int_} @@ -56,7 +56,7 @@ reveal_type(i8 % b_) # E: {int64} reveal_type(u8 % b_) # E: {uint64} reveal_type(f8 % b_) # E: {float64} -reveal_type(AR_b % b_) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]] +reveal_type(AR_b % b_) # E: numpy.ndarray[Any, numpy.dtype[{int8}]] reveal_type(divmod(b, b_)) # E: Tuple[{int8}, {int8}] reveal_type(divmod(i, b_)) # E: Tuple[{int_}, {int_}] @@ -65,7 +65,7 @@ reveal_type(divmod(i8, b_)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(u8, b_)) # E: Tuple[{uint64}, {uint64}] reveal_type(divmod(f8, b_)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(AR_b, b_)) # E: Tuple[Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]], Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]]] +reveal_type(divmod(AR_b, b_)) # E: numpy.ndarray[Any, numpy.dtype[{int8}]], numpy.ndarray[Any, numpy.dtype[{int8}]]] # int @@ -78,7 +78,7 @@ reveal_type(i4 % f8) # E: {float64} reveal_type(i4 % i4) # E: {int32} reveal_type(i4 % f4) # E: {float32} -reveal_type(i8 % AR_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] 
+reveal_type(i8 % AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] reveal_type(divmod(i8, b)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(i8, i)) # E: Tuple[{int64}, {int64}] @@ -89,7 +89,7 @@ reveal_type(divmod(i8, f4)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] reveal_type(divmod(i4, f4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(i8, AR_b)) # E: Tuple[Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]], Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]]] +reveal_type(divmod(i8, AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] reveal_type(b % i8) # E: {int64} reveal_type(i % i8) # E: {int64} @@ -100,7 +100,7 @@ reveal_type(f8 % i4) # E: {float64} reveal_type(i4 % i4) # E: {int32} reveal_type(f4 % i4) # E: {float32} -reveal_type(AR_b % i8) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] +reveal_type(AR_b % i8) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] reveal_type(divmod(b, i8)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(i, i8)) # E: Tuple[{int64}, {int64}] @@ -111,7 +111,7 @@ reveal_type(divmod(f4, i8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] reveal_type(divmod(f4, i4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(AR_b, i8)) # E: Tuple[Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]], Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]]] +reveal_type(divmod(AR_b, i8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] # float @@ -120,7 +120,7 @@ reveal_type(f8 % f) # E: {float64} reveal_type(i8 % f4) # E: {float64} reveal_type(f4 % f4) # E: {float32} -reveal_type(f8 % AR_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(f8 % AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] reveal_type(divmod(f8, b)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f8, i)) # E: Tuple[{float64}, {float64}] @@ -128,7 +128,7 @@ reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f8, f4)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(f8, AR_b)) # E: Tuple[Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]], Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]] +reveal_type(divmod(f8, AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] reveal_type(b % f8) # E: {float64} reveal_type(i % f8) # E: {float64} @@ -136,7 +136,7 @@ reveal_type(f8 % f8) # E: {float64} reveal_type(f8 % f8) # E: {float64} reveal_type(f4 % f4) # E: {float32} -reveal_type(AR_b % f8) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] +reveal_type(AR_b % f8) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] reveal_type(divmod(b, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(i, f8)) # E: Tuple[{float64}, {float64}] @@ -144,4 +144,4 @@ reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f4, f8)) # E: Tuple[{float64}, {float64}] 
reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(AR_b, f8)) # E: Tuple[Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]], Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]] +reveal_type(divmod(AR_b, f8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py index 0f7345358db6..e0f44bcbc40f 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.py +++ b/numpy/typing/tests/data/reveal/ndarray_misc.py @@ -16,24 +16,24 @@ class SubClass(np.ndarray): ... reveal_type(f8.all()) # E: numpy.bool_ reveal_type(A.all()) # E: numpy.bool_ -reveal_type(A.all(axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(A.all(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(A.all(axis=0)) # E: Any +reveal_type(A.all(keepdims=True)) # E: Any reveal_type(A.all(out=B)) # E: SubClass reveal_type(f8.any()) # E: numpy.bool_ reveal_type(A.any()) # E: numpy.bool_ -reveal_type(A.any(axis=0)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] -reveal_type(A.any(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]] +reveal_type(A.any(axis=0)) # E: Any +reveal_type(A.any(keepdims=True)) # E: Any reveal_type(A.any(out=B)) # E: SubClass reveal_type(f8.argmax()) # E: {intp} reveal_type(A.argmax()) # E: {intp} -reveal_type(A.argmax(axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] +reveal_type(A.argmax(axis=0)) # E: Any reveal_type(A.argmax(out=B)) # E: SubClass reveal_type(f8.argmin()) # E: {intp} reveal_type(A.argmin()) # E: {intp} -reveal_type(A.argmin(axis=0)) # E: Union[numpy.ndarray[Any, Any], {intp}] +reveal_type(A.argmin(axis=0)) # E: Any reveal_type(A.argmin(out=B)) # E: SubClass reveal_type(f8.argsort()) # E: numpy.ndarray[Any, Any] @@ -43,9 +43,9 @@ class SubClass(np.ndarray): ... reveal_type(A.choose([0])) # E: numpy.ndarray[Any, Any] reveal_type(A.choose([0], out=B)) # E: SubClass -reveal_type(f8.clip(1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.clip(1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.clip(None, 1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.clip(1)) # E: Any +reveal_type(A.clip(1)) # E: Any +reveal_type(A.clip(None, 1)) # E: Any reveal_type(A.clip(1, out=B)) # E: SubClass reveal_type(A.clip(None, 1, out=B)) # E: SubClass @@ -69,38 +69,38 @@ class SubClass(np.ndarray): ... 
reveal_type(A.cumsum()) # E: numpy.ndarray[Any, Any] reveal_type(A.cumsum(out=B)) # E: SubClass -reveal_type(f8.max()) # E: numpy.number[Any] -reveal_type(A.max()) # E: numpy.number[Any] -reveal_type(A.max(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.max(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.max()) # E: Any +reveal_type(A.max()) # E: Any +reveal_type(A.max(axis=0)) # E: Any +reveal_type(A.max(keepdims=True)) # E: Any reveal_type(A.max(out=B)) # E: SubClass -reveal_type(f8.mean()) # E: numpy.number[Any] -reveal_type(A.mean()) # E: numpy.number[Any] -reveal_type(A.mean(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.mean(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.mean()) # E: Any +reveal_type(A.mean()) # E: Any +reveal_type(A.mean(axis=0)) # E: Any +reveal_type(A.mean(keepdims=True)) # E: Any reveal_type(A.mean(out=B)) # E: SubClass -reveal_type(f8.min()) # E: numpy.number[Any] -reveal_type(A.min()) # E: numpy.number[Any] -reveal_type(A.min(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.min(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.min()) # E: Any +reveal_type(A.min()) # E: Any +reveal_type(A.min(axis=0)) # E: Any +reveal_type(A.min(keepdims=True)) # E: Any reveal_type(A.min(out=B)) # E: SubClass reveal_type(f8.newbyteorder()) # E: {float64} reveal_type(A.newbyteorder()) # E: numpy.ndarray[Any, Any] reveal_type(B.newbyteorder('|')) # E: SubClass -reveal_type(f8.prod()) # E: numpy.number[Any] -reveal_type(A.prod()) # E: numpy.number[Any] -reveal_type(A.prod(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.prod(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.prod()) # E: Any +reveal_type(A.prod()) # E: Any +reveal_type(A.prod(axis=0)) # E: Any +reveal_type(A.prod(keepdims=True)) # E: Any reveal_type(A.prod(out=B)) # E: SubClass -reveal_type(f8.ptp()) # E: numpy.number[Any] -reveal_type(A.ptp()) # E: numpy.number[Any] -reveal_type(A.ptp(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.ptp(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.ptp()) # E: Any +reveal_type(A.ptp()) # E: Any +reveal_type(A.ptp(axis=0)) # E: Any +reveal_type(A.ptp(keepdims=True)) # E: Any reveal_type(A.ptp(out=B)) # E: SubClass reveal_type(f8.round()) # E: {float64} @@ -111,40 +111,40 @@ class SubClass(np.ndarray): ... 
reveal_type(A.repeat(1)) # E: numpy.ndarray[Any, Any] reveal_type(B.repeat(1)) # E: numpy.ndarray[Any, Any] -reveal_type(f8.std()) # E: numpy.number[Any] -reveal_type(A.std()) # E: numpy.number[Any] -reveal_type(A.std(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.std(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.std()) # E: Any +reveal_type(A.std()) # E: Any +reveal_type(A.std(axis=0)) # E: Any +reveal_type(A.std(keepdims=True)) # E: Any reveal_type(A.std(out=B)) # E: SubClass -reveal_type(f8.sum()) # E: numpy.number[Any] -reveal_type(A.sum()) # E: numpy.number[Any] -reveal_type(A.sum(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.sum(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.sum()) # E: Any +reveal_type(A.sum()) # E: Any +reveal_type(A.sum(axis=0)) # E: Any +reveal_type(A.sum(keepdims=True)) # E: Any reveal_type(A.sum(out=B)) # E: SubClass -reveal_type(f8.take(0)) # E: numpy.generic -reveal_type(A.take(0)) # E: numpy.generic +reveal_type(f8.take(0)) # E: Any +reveal_type(A.take(0)) # E: Any reveal_type(A.take([0])) # E: numpy.ndarray[Any, Any] reveal_type(A.take(0, out=B)) # E: SubClass reveal_type(A.take([0], out=B)) # E: SubClass -reveal_type(f8.var()) # E: numpy.number[Any] -reveal_type(A.var()) # E: numpy.number[Any] -reveal_type(A.var(axis=0)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] -reveal_type(A.var(keepdims=True)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(f8.var()) # E: Any +reveal_type(A.var()) # E: Any +reveal_type(A.var(axis=0)) # E: Any +reveal_type(A.var(keepdims=True)) # E: Any reveal_type(A.var(out=B)) # E: SubClass reveal_type(A.argpartition([0])) # E: numpy.ndarray[Any, Any] reveal_type(A.diagonal()) # E: numpy.ndarray[Any, Any] -reveal_type(A.dot(1)) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.dot(1)) # E: Any reveal_type(A.dot(1, out=B)) # E: SubClass reveal_type(A.nonzero()) # E: tuple[numpy.ndarray[Any, Any]] reveal_type(A.searchsorted([1])) # E: numpy.ndarray[Any, Any] -reveal_type(A.trace()) # E: Union[numpy.number[Any], numpy.ndarray[Any, Any]] +reveal_type(A.trace()) # E: Any reveal_type(A.trace(out=B)) # E: SubClass From 3888fa81497b59a8ae33204b53da6f281d290ea0 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 28 Apr 2021 19:46:58 +0200 Subject: [PATCH 1012/1270] MAINT: Remove unsafe unions from `np.core.einsumfunc` --- numpy/core/einsumfunc.pyi | 18 +++++++++++------- numpy/typing/tests/data/reveal/einsumfunc.py | 16 ++++++++-------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi index b33aff29ff4d..2457e8719df4 100644 --- a/numpy/core/einsumfunc.pyi +++ b/numpy/core/einsumfunc.pyi @@ -13,7 +13,6 @@ from numpy import ( _OrderKACF, ) from numpy.typing import ( - _ArrayOrScalar, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt_co, @@ -46,6 +45,11 @@ _CastingUnsafe = Literal["unsafe"] __all__: List[str] # TODO: Properly handle the `casting`-based combinatorics +# TODO: We need to evaluate the content `__subscripts` in order +# to identify whether or an array or scalar is returned. At a cursory +# glance this seems like something that can quite easilly be done with +# a mypy plugin. 
+# Something like `is_scalar = bool(__subscripts.partition("->")[-1])` @overload def einsum( __subscripts: str, @@ -55,7 +59,7 @@ def einsum( order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., -) -> _ArrayOrScalar[bool_]: ... +) -> Any: ... @overload def einsum( __subscripts: str, @@ -65,7 +69,7 @@ def einsum( order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., -) -> _ArrayOrScalar[unsignedinteger[Any]]: ... +) -> Any: ... @overload def einsum( __subscripts: str, @@ -75,7 +79,7 @@ def einsum( order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., -) -> _ArrayOrScalar[signedinteger[Any]]: ... +) -> Any: ... @overload def einsum( __subscripts: str, @@ -85,7 +89,7 @@ def einsum( order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., -) -> _ArrayOrScalar[floating[Any]]: ... +) -> Any: ... @overload def einsum( __subscripts: str, @@ -95,7 +99,7 @@ def einsum( order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., -) -> _ArrayOrScalar[complexfloating[Any, Any]]: ... +) -> Any: ... @overload def einsum( __subscripts: str, @@ -105,7 +109,7 @@ def einsum( out: None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = ..., -) -> _ArrayOrScalar[Any]: ... +) -> Any: ... @overload def einsum( __subscripts: str, diff --git a/numpy/typing/tests/data/reveal/einsumfunc.py b/numpy/typing/tests/data/reveal/einsumfunc.py index 18c192b0b0bc..f1a90428de34 100644 --- a/numpy/typing/tests/data/reveal/einsumfunc.py +++ b/numpy/typing/tests/data/reveal/einsumfunc.py @@ -10,17 +10,17 @@ OUT_f: np.ndarray[Any, np.dtype[np.float64]] -reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]] -reveal_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] -reveal_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] -reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] -reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] -reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] -reveal_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Any +reveal_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Any reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f)) # E: numpy.ndarray[Any, numpy.dtype[{float64}] reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f)) # E: numpy.ndarray[Any, numpy.dtype[{float64}] -reveal_type(np.einsum("i,i->i", AR_LIKE_f, 
AR_LIKE_f, dtype="c16")) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16")) # E: Any reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe")) # E: Any reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Tuple[builtins.list[Any], builtins.str] From a90cbc7476902811f94044cbfedaeb6bbcd17413 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 21 Apr 2021 19:02:49 +0200 Subject: [PATCH 1013/1270] MAINT: Remove the `np.typing._ArrayOrScalar` type-alias --- numpy/typing/__init__.py | 1 - numpy/typing/_array_like.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index d71ec0719112..88a568537497 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -326,7 +326,6 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _RecursiveSequence, _SupportsArray, _ArrayND, - _ArrayOrScalar, _ArrayLikeInt, _ArrayLikeBool_co, _ArrayLikeUInt_co, diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index eb8d3f4e1abd..582d3db9afcc 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -132,7 +132,5 @@ def __array__(self) -> ndarray[Any, _DType_co]: ... if TYPE_CHECKING: _ArrayND = ndarray[Any, dtype[_ScalarType]] - _ArrayOrScalar = Union[_ScalarType, _ArrayND[_ScalarType]] else: _ArrayND = Any - _ArrayOrScalar = Any From 0a045bb31eb0503969352be4071cb0c848176427 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 30 Apr 2021 22:09:41 +0200 Subject: [PATCH 1014/1270] DOC: Add a segment to the `numpy.typing` docs about 0D arrays --- numpy/typing/__init__.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 88a568537497..a2d4c60fb274 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -140,6 +140,20 @@ The `~numpy.timedelta64` class is not considered a subclass of `~numpy.signedinteger`, the former only inheriting from `~numpy.generic` while static type checking. +0D arrays +~~~~~~~~~ + +During runtime numpy aggressively casts any passed 0D arrays into their +corresponding `~numpy.generic` instance. Until the introduction of shape +typing (see :pep:`646`) it is unfortunately not possible to make the +necessary distinction between 0D and >0D arrays. While thus not strictly +correct, all operations are that can potentially perform a 0D-array -> scalar +cast are currently annotated as exclusively returning an `ndarray`. + +If it is known in advance that an operation _will_ perform a +0D-array -> scalar cast, then one can consider manually remedying the +situation with either `typing.cast` or a ``# type: ignore`` comment. + API --- From ee56322f91788c97d4a41fdf4ae66aa45310553c Mon Sep 17 00:00:00 2001 From: Matthew Badin Date: Fri, 30 Apr 2021 14:03:55 -0700 Subject: [PATCH 1015/1270] BLD: Address lint issues and reviewer comments. 
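Besides the lint fixes, this removes the `apple_accelerate` build helper and
moves Accelerate after MKL and OpenBLAS in the default BLAS/LAPACK search
order. The order can still be overridden through the `NPY_BLAS_ORDER` and
`NPY_LAPACK_ORDER` environment variables read by `system_info`, and the
backend a finished build actually linked against can be checked from Python
(a minimal sketch; the printed sections vary by platform and build options):

    import numpy as np

    # Prints the blas_opt_info / lapack_opt_info sections recorded by
    # numpy.distutils.system_info at build time.
    np.show_config()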
--- MANIFEST.in | 1 - numpy/_build_utils/README | 9 --------- numpy/_build_utils/__init__.py | 0 numpy/_build_utils/apple_accelerate.py | 21 --------------------- numpy/core/setup.py | 4 ---- numpy/core/tests/test_multiarray.py | 2 +- numpy/distutils/system_info.py | 6 ++++-- 7 files changed, 5 insertions(+), 38 deletions(-) delete mode 100644 numpy/_build_utils/README delete mode 100644 numpy/_build_utils/__init__.py delete mode 100644 numpy/_build_utils/apple_accelerate.py diff --git a/MANIFEST.in b/MANIFEST.in index 856c64d0c6b9..8ec62123b998 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -22,7 +22,6 @@ include numpy/*.pxd # Note that sub-directories that don't have __init__ are apparently not # included by 'recursive-include', so list those separately recursive-include numpy * -recursive-include numpy/_build_utils * recursive-include numpy/linalg/lapack_lite * recursive-include tools * # Add sdist files whose use depends on local configuration. diff --git a/numpy/_build_utils/README b/numpy/_build_utils/README deleted file mode 100644 index 73d93593e6f9..000000000000 --- a/numpy/_build_utils/README +++ /dev/null @@ -1,9 +0,0 @@ -======= -WARNING -======= - -This directory (numpy/_build_utils) is *not* part of the public numpy API, - - it is internal build support for numpy. - - it is only present in source distributions or during an in place build - - it is *not* installed with the rest of numpy - diff --git a/numpy/_build_utils/__init__.py b/numpy/_build_utils/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/numpy/_build_utils/apple_accelerate.py b/numpy/_build_utils/apple_accelerate.py deleted file mode 100644 index 8ce54619ee5c..000000000000 --- a/numpy/_build_utils/apple_accelerate.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import sys -import re - -__all__ = ['uses_accelerate_framework'] - -def uses_accelerate_framework(info): - """ Returns True if Accelerate framework is used for BLAS/LAPACK """ - # If we're not building on Darwin (macOS), don't use Accelerate - if sys.platform != "darwin": - return False - # If we're building on macOS, but targeting a different platform, - # don't use Accelerate. 
- if os.getenv('_PYTHON_HOST_PLATFORM', None): - return False - r_accelerate = re.compile("Accelerate") - extra_link_args = info.get('extra_link_args', '') - for arg in extra_link_args: - if r_accelerate.search(arg): - return True - return False diff --git a/numpy/core/setup.py b/numpy/core/setup.py index f59b67c88bc1..d1229ee8f143 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -10,10 +10,6 @@ from numpy.distutils import log from distutils.dep_util import newer from sysconfig import get_config_var - -from numpy._build_utils.apple_accelerate import ( - uses_accelerate_framework - ) from numpy.compat import npy_load_module from setup_common import * # noqa: F403 diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 40b435f2e20a..012dcbc6c8d5 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -6219,7 +6219,7 @@ def assert_dot_close(A, X, desired): s = aligned_array((100, 100), 15, np.float32) np.dot(s, m) # this will always segfault if the bug is present - testdata = itertools.product((15,32), (10000,), (200,89), ('C','F')) + testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F')) for align, m, n, a_order in testdata: # Calculation in double precision A_d = np.random.rand(m, n) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 082b029d7047..7a031bf9e519 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -1750,7 +1750,8 @@ class lapack_opt_info(system_info): notfounderror = LapackNotFoundError # List of all known LAPACK libraries, in the default order - lapack_order = ['accelerate', 'mkl', 'openblas', 'flame', 'atlas', 'lapack'] + lapack_order = ['mkl', 'openblas', 'flame', + 'accelerate', 'atlas', 'lapack'] order_env_var_name = 'NPY_LAPACK_ORDER' def _calc_info_accelerate(self): @@ -1926,7 +1927,8 @@ class blas_opt_info(system_info): notfounderror = BlasNotFoundError # List of all known BLAS libraries, in the default order - blas_order = ['accelerate', 'mkl', 'blis', 'openblas', 'atlas', 'blas'] + blas_order = ['mkl', 'blis', 'openblas', + 'accelerate', 'atlas', 'blas'] order_env_var_name = 'NPY_BLAS_ORDER' def _calc_info_accelerate(self): From 006c199d2ac924ab1d35074d63b47aea1420cd90 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 30 Apr 2021 19:06:15 -0500 Subject: [PATCH 1016/1270] DOC: Fix `nextafter` docs since example used integers --- numpy/core/code_generators/ufunc_docstrings.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index f19946be408a..e33d71ab3ccb 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -3628,9 +3628,9 @@ def add_newdoc(place, name, doc): Examples -------- >>> eps = np.finfo(np.float64).eps - >>> np.nextafter(1, 2) == eps + 1 + >>> np.nextafter(1., 2.) 
== eps + 1 True - >>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps] + >>> np.nextafter([1., 2.], [2., 1.]) == [eps + 1, 2 - eps] array([ True, True]) """) From ec479e0edceb5759d24d4521e56ad8f63cc26354 Mon Sep 17 00:00:00 2001 From: Matthew Badin Date: Sat, 1 May 2021 11:30:24 -0700 Subject: [PATCH 1017/1270] BLD: Minimize diff --- numpy/distutils/system_info.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 7a031bf9e519..234b4afc9d94 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -1754,13 +1754,6 @@ class lapack_opt_info(system_info): 'accelerate', 'atlas', 'lapack'] order_env_var_name = 'NPY_LAPACK_ORDER' - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - def _calc_info_mkl(self): info = get_info('lapack_mkl') if info: @@ -1811,6 +1804,13 @@ def _calc_info_atlas(self): return True return False + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + def _get_info_blas(self): # Default to get the optimized BLAS implementation info = get_info('blas_opt') @@ -1931,13 +1931,6 @@ class blas_opt_info(system_info): 'accelerate', 'atlas', 'blas'] order_env_var_name = 'NPY_BLAS_ORDER' - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - def _calc_info_mkl(self): info = get_info('blas_mkl') if info: @@ -1972,6 +1965,13 @@ def _calc_info_atlas(self): return True return False + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + def _calc_info_blas(self): # Warn about a non-optimized BLAS library warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) From 238f4e5984a4e668dac70a05b6b4f49dabf239ba Mon Sep 17 00:00:00 2001 From: HowJmay Date: Sat, 1 May 2021 11:20:20 +0800 Subject: [PATCH 1018/1270] ENH: Add min values comparison for floating point Add the similar implementation for minimum value comparison. 
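The NaN handling of the new `npyv_minp_##SFX` intrinsics mirrors the existing
`npyv_maxp_##SFX` ones: when only one operand is NaN the other operand is
returned, and NaN comes out only when both operands are NaN, while the plain
`npyv_min_##SFX` mapping gives no guarantees for NaN. As a scalar reference
model of the intended `minp` behaviour (a sketch for reasoning about the test
cases, not a public API):

    import math

    def minp(a, b):
        # NaN-aware minimum: a single NaN operand is ignored;
        # NaN is returned only when both operands are NaN.
        if math.isnan(a):
            return b
        if math.isnan(b):
            return a
        return min(a, b)

    assert minp(float("nan"), 10.0) == 10.0
    assert minp(10.0, float("nan")) == 10.0
    assert math.isnan(minp(float("nan"), float("nan")))

This matches the (nan, 10) -> 10 and (nan, nan) -> nan cases exercised in
test_simd.py below.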
--- numpy/core/src/_simd/_simd.dispatch.c.src | 14 ++++++++--- numpy/core/src/common/simd/avx2/math.h | 19 ++++++++++++++ numpy/core/src/common/simd/avx512/math.h | 17 +++++++++++++ numpy/core/src/common/simd/neon/math.h | 20 +++++++++++++++ numpy/core/src/common/simd/sse/math.h | 19 ++++++++++++++ numpy/core/src/common/simd/vsx/math.h | 9 +++++++ numpy/core/tests/test_simd.py | 30 +++++++++++++++++++---- 7 files changed, 119 insertions(+), 9 deletions(-) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index d5984009e976..3e82c28a40d7 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -388,8 +388,11 @@ SIMD_IMPL_INTRIN_1(@intrin@_@sfx@, v@sfx@, v@sfx@) #endif #if @fp_only@ -SIMD_IMPL_INTRIN_2(max_@sfx@, v@sfx@, v@sfx@, v@sfx@) -SIMD_IMPL_INTRIN_2(maxp_@sfx@, v@sfx@, v@sfx@, v@sfx@) +/**begin repeat1 + * #intrin = max, maxp, min, minp# + */ +SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@) +/**end repeat1**/ #endif /*************************** @@ -613,8 +616,11 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@) #endif #if @fp_only@ -SIMD_INTRIN_DEF(max_@sfx@) -SIMD_INTRIN_DEF(maxp_@sfx@) +/**begin repeat1 + * #intrin = max, maxp, min, minp# + */ +SIMD_INTRIN_DEF(@intrin@_@sfx@) +/**end repeat1**/ #endif /*************************** diff --git a/numpy/core/src/common/simd/avx2/math.h b/numpy/core/src/common/simd/avx2/math.h index 84ab024d02b4..19e770ebf8aa 100644 --- a/numpy/core/src/common/simd/avx2/math.h +++ b/numpy/core/src/common/simd/avx2/math.h @@ -56,4 +56,23 @@ NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) return _mm256_blendv_pd(a, max, nn); } +// Minimum, natively mapping with no guarantees to handle NaN. +#define npyv_min_f32 _mm256_min_ps +#define npyv_min_f64 _mm256_min_pd +// Minimum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. +NPY_FINLINE npyv_f32 npyv_minp_f32(npyv_f32 a, npyv_f32 b) +{ + __m256 nn = _mm256_cmp_ps(b, b, _CMP_ORD_Q); + __m256 min = _mm256_min_ps(a, b); + return _mm256_blendv_ps(a, min, nn); +} +NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b) +{ + __m256d nn = _mm256_cmp_pd(b, b, _CMP_ORD_Q); + __m256d min = _mm256_min_pd(a, b); + return _mm256_blendv_pd(a, min, nn); +} + #endif diff --git a/numpy/core/src/common/simd/avx512/math.h b/numpy/core/src/common/simd/avx512/math.h index 66836229857d..da94faaeb1ad 100644 --- a/numpy/core/src/common/simd/avx512/math.h +++ b/numpy/core/src/common/simd/avx512/math.h @@ -63,4 +63,21 @@ NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) return _mm512_mask_max_pd(a, nn, a, b); } +// Minimum, natively mapping with no guarantees to handle NaN. +#define npyv_min_f32 _mm512_min_ps +#define npyv_min_f64 _mm512_min_pd +// Minimum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. 
+NPY_FINLINE npyv_f32 npyv_minp_f32(npyv_f32 a, npyv_f32 b) +{ + __mmask16 nn = _mm512_cmp_ps_mask(b, b, _CMP_ORD_Q); + return _mm512_mask_min_ps(a, nn, a, b); +} +NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b) +{ + __mmask8 nn = _mm512_cmp_pd_mask(b, b, _CMP_ORD_Q); + return _mm512_mask_min_pd(a, nn, a, b); +} + #endif diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h index 09b7bbc9efe1..c99ef3299cbc 100644 --- a/numpy/core/src/common/simd/neon/math.h +++ b/numpy/core/src/common/simd/neon/math.h @@ -103,4 +103,24 @@ NPY_FINLINE npyv_f32 npyv_recip_f32(npyv_f32 a) #define npyv_maxp_f64 vmaxnmq_f64 #endif // NPY_SIMD_F64 +// Minimum, natively mapping with no guarantees to handle NaN. +#define npyv_min_f32 vminq_f32 +#define npyv_min_f64 vminq_f64 +// Minimum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. +#ifdef NPY_HAVE_ASIMD + #define npyv_minp_f32 vminnmq_f32 +#else + NPY_FINLINE npyv_f32 npyv_minp_f32(npyv_f32 a, npyv_f32 b) + { + npyv_u32 nn_a = vceqq_f32(a, a); + npyv_u32 nn_b = vceqq_f32(b, b); + return vminq_f32(vbslq_f32(nn_a, a, b), vbslq_f32(nn_b, b, a)); + } +#endif +#if NPY_SIMD_F64 + #define npyv_minp_f64 vminnmq_f64 +#endif // NPY_SIMD_F64 + #endif // _NPY_SIMD_SSE_MATH_H diff --git a/numpy/core/src/common/simd/sse/math.h b/numpy/core/src/common/simd/sse/math.h index 15e9f7e44fb4..e43c4116770d 100644 --- a/numpy/core/src/common/simd/sse/math.h +++ b/numpy/core/src/common/simd/sse/math.h @@ -56,4 +56,23 @@ NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) return npyv_select_f64(_mm_castpd_si128(nn), max, a); } +// Minimum, natively mapping with no guarantees to handle NaN. +#define npyv_min_f32 _mm_min_ps +#define npyv_min_f64 _mm_min_pd +// Minimum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. +NPY_FINLINE npyv_f32 npyv_minp_f32(npyv_f32 a, npyv_f32 b) +{ + __m128 nn = _mm_cmpord_ps(b, b); + __m128 min = _mm_min_ps(a, b); + return npyv_select_f32(_mm_castps_si128(nn), min, a); +} +NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b) +{ + __m128d nn = _mm_cmpord_pd(b, b); + __m128d min = _mm_min_pd(a, b); + return npyv_select_f64(_mm_castpd_si128(nn), min, a); +} + #endif diff --git a/numpy/core/src/common/simd/vsx/math.h b/numpy/core/src/common/simd/vsx/math.h index 11bacd703f91..7c5301032af0 100644 --- a/numpy/core/src/common/simd/vsx/math.h +++ b/numpy/core/src/common/simd/vsx/math.h @@ -42,4 +42,13 @@ NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a) #define npyv_maxp_f32 vec_max #define npyv_maxp_f64 vec_max +// Minimum, natively mapping with no guarantees to handle NaN. +#define npyv_min_f32 vec_min +#define npyv_min_f64 vec_min +// Minimum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. 
+#define npyv_minp_f32 vec_min +#define npyv_minp_f64 vec_min + #endif // _NPY_SIMD_VSX_MATH_H diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index fb7dd88cf432..6c15759718e2 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -317,11 +317,6 @@ def test_square(self): assert square == data_square def test_max(self): - """ - Test intrinics: - npyv_max_##SFX - npyv_maxp_##SFX - """ data_a = self._data() data_b = self._data(self.nlanes) vdata_a, vdata_b = self.load(data_a), self.load(data_b) @@ -346,6 +341,31 @@ def test_max(self): _max = self.max(vdata_a, vdata_b) assert _max == data_max + def test_min(self): + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + data_min = [min(a, b) for a, b in zip(data_a, data_b)] + _min = self.min(vdata_a, vdata_b) + assert _min == data_min + minp = self.minp(vdata_a, vdata_b) + assert minp == data_min + # test IEEE standards + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + min_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10), + (pinf, pinf, pinf), (pinf, 10, 10), (10, pinf, 10), + (ninf, ninf, ninf), (ninf, 10, ninf), (10, ninf, ninf)) + for case_operand1, case_operand2, desired in min_cases: + data_min = [desired]*self.nlanes + vdata_a = self.setall(case_operand1) + vdata_b = self.setall(case_operand2) + minp = self.minp(vdata_a, vdata_b) + assert minp == pytest.approx(data_min, nan_ok=True) + if nan in (case_operand1, case_operand2, desired): + continue + _min = self.min(vdata_a, vdata_b) + assert _min == data_min + def test_reciprocal(self): pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() data = self._data() From 15420c82f7ef2f91bc8cd18dd6eba25204873c16 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 2 May 2021 21:10:25 +0200 Subject: [PATCH 1019/1270] TST: Ignore lint-checking in the `numpy/typing/tests/data` directory With the current tests system we cannot reasonably enforce E501 (maximum line length) --- tools/lint_diff.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini index ba091468eda7..7944ce22484e 100644 --- a/tools/lint_diff.ini +++ b/tools/lint_diff.ini @@ -2,4 +2,4 @@ max_line_length = 79 statistics = True ignore = E121,E122,E123,E125,E126,E127,E128,E226,E251,E265,E266,E302,E402,E712,E721,E731,E741,W291,W293,W391,W503,W504 -exclude = numpy/__config__.py +exclude = numpy/__config__.py,numpy/typing/tests/data From 6fa34d432722faaaf54f27fb9cb3b1b9e197e9fe Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 2 May 2021 21:13:43 +0200 Subject: [PATCH 1020/1270] TST: Ignore the E704 pycodestyle error code E704 (multiple statements on one line (def)) is a style rule not prescribed by PEP8. Furthermore, because it demands a function body it is needlessly inconvenient for static type checking, i.e. situation where this is no function body. 
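For illustration only (this snippet is not part of the patch and the names are invented): E704 fires on stub-style code where a ``def`` carries its ``...`` body on the same line, which is exactly the pattern the annotation test data relies on, and the same files also hold long annotation lines that E501 cannot reasonably police:

    # Hypothetical annotation-only stubs of the kind kept under
    # numpy/typing/tests/data; the function names are made up for the example.
    def as_float(x: object) -> float: ...   # pycodestyle E704: statement on same line as def
    def as_int(x: object) -> int: ...       # harmless in a stub, hence the ignore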
--- tools/lint_diff.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini index 7944ce22484e..710138b4a705 100644 --- a/tools/lint_diff.ini +++ b/tools/lint_diff.ini @@ -1,5 +1,5 @@ [pycodestyle] max_line_length = 79 statistics = True -ignore = E121,E122,E123,E125,E126,E127,E128,E226,E251,E265,E266,E302,E402,E712,E721,E731,E741,W291,W293,W391,W503,W504 +ignore = E121,E122,E123,E125,E126,E127,E128,E226,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504 exclude = numpy/__config__.py,numpy/typing/tests/data From aaf5f696de13dea81a032bcda1ac879815cb2ac9 Mon Sep 17 00:00:00 2001 From: HowJmay Date: Mon, 3 May 2021 14:45:50 +0800 Subject: [PATCH 1021/1270] DOC: Add missing test comment and fix the typo --- numpy/core/tests/test_simd.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 6c15759718e2..c48f20b7d5e5 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -215,7 +215,7 @@ class _SIMD_FP32(_Test_Utility): def test_conversions(self): """ Round to nearest even integer, assume CPU control register is set to rounding. - Test intrinics: + Test intrinsics: npyv_round_s32_##SFX """ features = self._cpu_features() @@ -238,7 +238,7 @@ class _SIMD_FP64(_Test_Utility): def test_conversions(self): """ Round to nearest even integer, assume CPU control register is set to rounding. - Test intrinics: + Test intrinsics: npyv_round_s32_##SFX """ vdata_a = self.load(self._data()) @@ -317,6 +317,11 @@ def test_square(self): assert square == data_square def test_max(self): + """ + Test intrinsics: + npyv_max_##SFX + npyv_maxp_##SFX + """ data_a = self._data() data_b = self._data(self.nlanes) vdata_a, vdata_b = self.load(data_a), self.load(data_b) @@ -342,6 +347,11 @@ def test_max(self): assert _max == data_max def test_min(self): + """ + Test intrinsics: + npyv_min_##SFX + npyv_minp_##SFX + """ data_a = self._data() data_b = self._data(self.nlanes) vdata_a, vdata_b = self.load(data_a), self.load(data_b) @@ -383,7 +393,7 @@ def test_reciprocal(self): def test_special_cases(self): """ - Compare Not NaN. Test intrinics: + Compare Not NaN. Test intrinsics: npyv_notnan_##SFX """ nnan = self.notnan(self.setall(self._nan())) @@ -722,7 +732,7 @@ def test_conversion_boolean(self): def test_conversion_expand(self): """ - Test expand intrinics: + Test expand intrinsics: npyv_expand_u16_u8 npyv_expand_u32_u16 """ @@ -785,7 +795,7 @@ def test_arithmetic_div(self): def test_arithmetic_intdiv(self): """ - Test integer division intrinics: + Test integer division intrinsics: npyv_divisor_##sfx npyv_divc_##sfx """ @@ -855,7 +865,7 @@ def trunc_div(a, d): def test_arithmetic_reduce_sum(self): """ - Test reduce sum intrinics: + Test reduce sum intrinsics: npyv_sum_##sfx """ if self.sfx not in ("u32", "u64", "f32", "f64"): @@ -870,7 +880,7 @@ def test_arithmetic_reduce_sum(self): def test_arithmetic_reduce_sumup(self): """ - Test extend reduce sum intrinics: + Test extend reduce sum intrinsics: npyv_sumup_##sfx """ if self.sfx not in ("u8", "u16"): @@ -886,7 +896,7 @@ def test_arithmetic_reduce_sumup(self): def test_mask_conditional(self): """ Conditional addition and subtraction for all supported data types. 
- Test intrinics: + Test intrinsics: npyv_ifadd_##SFX, npyv_ifsub_##SFX """ vdata_a = self.load(self._data()) From 01714447d72ef48c1ea33fd964f441c3112b0c83 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 May 2021 08:39:29 +0000 Subject: [PATCH 1022/1270] MAINT: Bump typing-extensions from 3.7.4.3 to 3.10.0.0 Bumps [typing-extensions](https://github.com/python/typing) from 3.7.4.3 to 3.10.0.0. - [Release notes](https://github.com/python/typing/releases) - [Commits](https://github.com/python/typing/compare/3.7.4.3...3.10.0.0) Signed-off-by: dependabot[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 2cb57d841792..4421c20b58ae 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -12,4 +12,4 @@ cffi # - Mypy relies on C API features not present in PyPy # - There is no point in installing typing_extensions without mypy mypy==0.812; platform_python_implementation != "PyPy" -typing_extensions==3.7.4.3; platform_python_implementation != "PyPy" +typing_extensions==3.10.0.0; platform_python_implementation != "PyPy" From 839ee68ab02c334f2ea2e2f05ca0142ff85cb254 Mon Sep 17 00:00:00 2001 From: Chiara Marmo Date: Mon, 3 May 2021 15:10:25 +0200 Subject: [PATCH 1023/1270] Add a set of standard replies. --- doc/source/dev/reviewer_guidelines.rst | 78 +++++++++++++++++++++++++- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/doc/source/dev/reviewer_guidelines.rst b/doc/source/dev/reviewer_guidelines.rst index 1d93bc1361f5..bdc69fe452ae 100644 --- a/doc/source/dev/reviewer_guidelines.rst +++ b/doc/source/dev/reviewer_guidelines.rst @@ -37,9 +37,8 @@ Communication Guidelines style or grammar, consider merging the current PR when all important concerns are addressed. Then, either push a commit directly (if you are a maintainer) or open a follow-up PR yourself. -- If you need help writing replies in reviews, check out some `Standard replies - for reviewing - `_. +- If you need help writing replies in reviews, check out some + :ref:`standard replies for reviewing`. Reviewer Checklist ================== @@ -116,4 +115,77 @@ the PR page. Assuming you have your :ref:`development environment` set up, you can now build the code and test it. +.. _saved-replies: + +Standard replies for reviewing +============================== + +It may be helpful to store some of these in GitHub's `saved +replies `_ for reviewing: + +**Usage question** + :: + + You are asking a usage question. The issue tracker is for bugs and new + features. I'm going to close this issue, feel free to ask for help via + our [help channels](https://numpy.org/gethelp/). + +**You’re welcome to update the docs** + :: + + Please feel free to offer a pull request updating the documentation if + you feel it could be improved. + +**Self-contained example for bug** + :: + + Please provide a [self-contained example code] + (https://stackoverflow.com/help/mcve), including imports and data + (if possible), so that other contributors can just run it and reproduce + your issue. Ideally your example code should be minimal. + +**Software versions** + :: + + To help diagnose your issue, please paste the output of: + ```py + python -c 'import numpy; print(numpy.version.version)' + ``` + Thanks. 
+ +**Code blocks** + :: + + Readability can be greatly improved if you + [format](https://help.github.com/articles/creating-and-highlighting-code-blocks/) + your code snippets and complete error messages appropriately. + You can edit your issue descriptions and comments at any time to + improve readability. This helps maintainers a lot. Thanks! + +**Linking to code** + :: + + For clarity's sake, you can link to code like + [this](https://help.github.com/articles/creating-a-permanent-link-to-a-code-snippet/). + +**Better description and title** + :: + + Please make the title of the PR more descriptive. The title will become + the commit message when this is merged. You should state what issue + (or PR) it fixes/resolves in the description using the syntax described + [here](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword). + +**Regression test needed** + :: + + Please add a [non-regression test](https://en.wikipedia.org/wiki/Non-regression_testing) + that would fail at main but pass in this PR. + +**Don’t change unrelated** + :: + + Please do not change unrelated lines. It makes your contribution harder + to review and may introduce merge conflicts to other pull requests. + .. include:: gitwash/git_links.inc From 2416ff43c4feb199890c13046f5f3e666a29f7e5 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 3 May 2021 18:59:23 +0300 Subject: [PATCH 1024/1270] NEP: propose new nep for allocator policies (gh-18805) NEP related to the PR gh-17582 * NEP: propose new nep for allocator policies * NEP: fixes from review * NEP: adopt suggestions from review * changes from review * fixes from review * fixes from review --- doc/neps/nep-0049.rst | 334 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 334 insertions(+) create mode 100644 doc/neps/nep-0049.rst diff --git a/doc/neps/nep-0049.rst b/doc/neps/nep-0049.rst new file mode 100644 index 000000000000..9adf0ff26290 --- /dev/null +++ b/doc/neps/nep-0049.rst @@ -0,0 +1,334 @@ +=================================== +NEP 49 — Data allocation strategies +=================================== + +:Author: Matti Picus +:Status: Draft +:Type: Standards Track +:Created: 2021-04-18 + + +Abstract +-------- + +The ``numpy.ndarray`` requires additional memory allocations +to hold ``numpy.ndarray.strides``, ``numpy.ndarray.shape`` and +``numpy.ndarray.data`` attributes. These attributes are specially allocated +after creating the python object in ``__new__`` method. + +This NEP proposes a mechanism to override the memory management strategy used +for ``ndarray->data`` with user-provided alternatives. This allocation holds +the data and can be very large. As accessing this data often becomes +a performance bottleneck, custom allocation strategies to guarantee data +alignment or pinning allocations to specialized memory hardware can enable +hardware-specific optimizations. The other allocations remain unchanged. + +Motivation and Scope +-------------------- + +Users may wish to override the internal data memory routines with ones of their +own. Two such use-cases are to ensure data alignment and to pin certain +allocations to certain NUMA cores. This desire for alignment was discussed +multiple times on the mailing list `in 2005`_, and in `issue 5312`_ in 2014, +which led to `PR 5457`_ and more mailing list discussions here_ `and here`_. In +a comment on the issue `from 2017`_, a user described how 64-byte alignment +improved performance by 40x. 
+ +Also related is `issue 14177`_ around the use of ``madvise`` and huge pages on +Linux. + +Various tracing and profiling libraries like filprofiler_ or `electric fence`_ +override ``malloc``. + +The long CPython discussion of `BPO 18835`_ began with discussing the need for +``PyMem_Alloc32`` and ``PyMem_Alloc64``. The early conclusion was that the +cost (of wasted padding) vs. the benifit of aligned memory is best left to the +user, but then evolves into a discussion of various proposals to deal with +memory allocations, including `PEP 445`_ `memory interfaces`_ to +``PyTraceMalloc_Track`` which apparently was explictly added for NumPy. + +Allowing users to implement different strategies via the NumPy C-API will +enable exploration of this rich area of possible optimizations. The intention +is to create a flexible enough interface without burdening normative users. + +.. _`issue 5312`: https://github.com/numpy/numpy/issues/5312 +.. _`from 2017`: https://github.com/numpy/numpy/issues/5312#issuecomment-315234656 +.. _`in 2005`: https://numpy-discussion.scipy.narkive.com/MvmMkJcK/numpy-arrays-data-allocation-and-simd-alignement +.. _`here`: http://numpy-discussion.10968.n7.nabble.com/Aligned-configurable-memory-allocation-td39712.html +.. _`and here`: http://numpy-discussion.10968.n7.nabble.com/Numpy-s-policy-for-releasing-memory-td1533.html +.. _`issue 14177`: https://github.com/numpy/numpy/issues/14177 +.. _`filprofiler`: https://github.com/pythonspeed/filprofiler/blob/master/design/allocator-overrides.md +.. _`electric fence`: https://github.com/boundarydevices/efence +.. _`memory interfaces`: https://docs.python.org/3/c-api/memory.html#customize-memory-allocators +.. _`BPO 18835`: https://bugs.python.org/issue18835 +.. _`PEP 445`: https://www.python.org/dev/peps/pep-0445/ + +Usage and Impact +---------------- + +The new functions can only be accessed via the NumPy C-API. An example is +included later in this NEP. The added ``struct`` will increase the size of the +``ndarray`` object. It is a necessary price to pay for this approach. We +can be reasonably sure that the change in size will have a minimal impact on +end-user code because NumPy version 1.20 already changed the object size. + +The implementation preserves the use of ``PyTraceMalloc_Track`` to track +allocations already present in NumPy. + +Backward compatibility +---------------------- + +The design will not break backward compatibility. Projects that were assigning +to the ``ndarray->data`` pointer were already breaking the current memory +management strategy and should restore +``ndarray->data`` before calling ``Py_DECREF``. As mentioned above, the change +in size should not impact end-users. + +Detailed description +-------------------- + +High level design +================= + +Users who wish to change the NumPy data memory management routines will use +:c:func:`PyDataMem_SetHandler`, which uses a :c:type:`PyDataMem_Handler` +structure to hold pointers to functions used to manage the data memory. + +Since a call to ``PyDataMem_SetHandler`` will change the default functions, but +that function may be called during the lifetime of an ``ndarray`` object, each +``ndarray`` will carry with it the ``PyDataMem_Handler`` struct used at the +time of its instantiation, and these will be used to reallocate or free the +data memory of the instance. Internally NumPy may use ``memcpy`` or ``memset`` +on the pointer to the data memory. + +NumPy C-API functions +===================== + +.. 
c:type:: PyDataMem_Handler + + A struct to hold function pointers used to manipulate memory + + .. code-block:: c + + typedef struct { + char name[128]; /* multiple of 64 to keep the struct aligned */ + PyDataMem_AllocFunc *alloc; + PyDataMem_ZeroedAllocFunc *zeroed_alloc; + PyDataMem_FreeFunc *free; + PyDataMem_ReallocFunc *realloc; + } PyDataMem_Handler; + + where the function's signatures are + + .. code-block:: c + + typedef void *(PyDataMem_AllocFunc)(size_t size); + typedef void *(PyDataMem_ZeroedAllocFunc)(size_t nelems, size_t elsize); + typedef void (PyDataMem_FreeFunc)(void *ptr, size_t size); + typedef void *(PyDataMem_ReallocFunc)(void *ptr, size_t size); + +.. c:function:: const PyDataMem_Handler * PyDataMem_SetHandler(PyDataMem_Handler *handler) + + Sets a new allocation policy. If the input value is ``NULL``, will reset + the policy to the default. Returns the previous policy, ``NULL`` if the + previous policy was the default. We wrap the user-provided functions + so they will still call the Python and NumPy memory management callback + hooks. All the function pointers must be filled in, ``NULL`` is not + accepted. + +.. c:function:: const char * PyDataMem_GetHandlerName(PyArrayObject *obj) + + Return the const char name of the ``PyDataMem_Handler`` used by the + ``PyArrayObject``. If ``NULL``, return the name of the current global policy + that will be used to allocate data for the next ``PyArrayObject``. + + +Sample code +=========== + +This code adds a 64-byte header to each ``data`` pointer and stores information +about the allocation in the header. Before calling ``free``, a check ensures +the ``sz`` argument is correct. + +.. code-block:: c + + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + NPY_NO_EXPORT void * + + shift_alloc(size_t sz) { + char *real = (char *)malloc(sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld", (unsigned long)sz); + return (void *)(real + 64); + } + + NPY_NO_EXPORT void * + shift_zero(size_t sz, size_t cnt) { + char *real = (char *)calloc(sz + 64, cnt); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld via zero", + (unsigned long)sz); + return (void *)(real + 64); + } + + NPY_NO_EXPORT void + shift_free(void * p, npy_uintp sz) { + if (p == NULL) { + return ; + } + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_free, " + "no appropriate prefix\\n"); + /* Make gcc crash by calling free on the wrong address */ + free((char *)p + 10); + /* free(real); */ + } + else { + int i = atoi(real +20); + if (i != sz) { + fprintf(stderr, "uh-oh, unmatched " + "shift_free(ptr, %d) but allocated %d\\n", sz, i); + /* Make gcc crash by calling free on the wrong address */ + /* free((char *)p + 10); */ + free(real); + } + else { + free(real); + } + } + } + + NPY_NO_EXPORT void * + shift_realloc(void * p, npy_uintp sz) { + if (p != NULL) { + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_realloc\\n"); + return realloc(p, sz); + } + return (void *)((char *)realloc(real, sz + 64) + 64); + } + else { + char *real = (char *)realloc(p, sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated " + "%ld via realloc", (unsigned long)sz); + return (void *)(real + 64); + } + } + + static PyDataMem_Handler new_handler = { + "secret_data_allocator", + shift_alloc, /* alloc */ + 
shift_zero, /* zeroed_alloc */ + shift_free, /* free */ + shift_realloc /* realloc */ + }; + + static PyObject* mem_policy_test_prefix(PyObject *self, PyObject *args) + { + + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_ValueError, + "must be called with a numpy scalar or ndarray"); + } + return PyUnicode_FromString( + PyDataMem_GetHandlerName((PyArrayObject*)args)); + }; + + static PyObject* mem_policy_set_new_policy(PyObject *self, PyObject *args) + { + + const PyDataMem_Handler *old = PyDataMem_SetHandler(&new_handler); + return PyUnicode_FromString(old->name); + + }; + + static PyObject* mem_policy_set_old_policy(PyObject *self, PyObject *args) + { + + const PyDataMem_Handler *old = PyDataMem_SetHandler(NULL); + return PyUnicode_FromString(old->name); + + }; + + static PyMethodDef methods[] = { + {"test_prefix", (PyCFunction)mem_policy_test_prefix, METH_O}, + {"set_new_policy", (PyCFunction)mem_policy_set_new_policy, METH_NOARGS}, + {"set_old_policy", (PyCFunction)mem_policy_set_old_policy, METH_NOARGS}, + { NULL } + }; + + static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "mem_policy", /* m_name */ + NULL, /* m_doc */ + -1, /* m_size */ + methods, /* m_methods */ + }; + + PyMODINIT_FUNC + PyInit_mem_policy(void) { + PyObject *mod = PyModule_Create(&moduledef); + import_array(); + return mod; + } + + +Related Work +------------ + +This NEP is being tracked by the pnumpy_ project and a `comment in the PR`_ +mentions use in orchestrating FPGA DMAs. + +Implementation +-------------- + +This NEP has been implemented in `PR 17582`_. + +Alternatives +------------ + +These were discussed in `issue 17467`_. `PR 5457`_ and `PR 5470`_ proposed a +global interface for specifying aligned allocations. + +``PyArray_malloc_aligned`` and friends were added to NumPy with the +`numpy.random` module API refactor. and are used there for performance. + +`PR 390`_ had two parts: expose ``PyDataMem_*`` via the NumPy C-API, and a hook +mechanism. The PR was merged with no example code for using these features. + +Discussion +---------- + +Not yet discussed on the mailing list. + + +References and Footnotes +------------------------ + +.. [1] Each NEP must either be explicitly labeled as placed in the public domain (see + this NEP as an example) or licensed under the `Open Publication License`_. + +.. _Open Publication License: https://www.opencontent.org/openpub/ + +.. _`PR 17582`: https://github.com/numpy/numpy/pull/17582 +.. _`PR 5457`: https://github.com/numpy/numpy/pull/5457 +.. _`PR 5470`: https://github.com/numpy/numpy/pull/5470 +.. _`PR 390`: https://github.com/numpy/numpy/pull/390 +.. _`issue 17467`: https://github.com/numpy/numpy/issues/17467 +.. _`comment in the PR`: https://github.com/numpy/numpy/pull/17582#issuecomment-809145547 +.. _pnumpy: https://quansight.github.io/pnumpy/stable/index.html + +Copyright +--------- + +This document has been placed in the public domain. [1]_ From 8d4828f8da96187a9263fd1573ee74ced6a3eedb Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 3 May 2021 13:07:39 -0500 Subject: [PATCH 1025/1270] Undo resolver changes and instead fall back to "old logic" If only output dtypes are provided, we add the additional step in the ufunc type resolver of converting a signature of the format:: (None,) * nin + (DType,) * nout into:: (DType,) * (nin + nout) And try searching for a matching loop again. The advantage of this is that IF the ufunc has mixed-type inputs, we have a fighting chance of finding a good loop for `dtype=DType`. 
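Roughly, using behaviour that the tests added later in this series exercise (exact results may differ on other NumPy versions), the two-step search plays out like this:

    import numpy as np

    # dtype="d" is first tried as signature=(None, None, "d"): only the output
    # is pinned, so ldexp's mixed float/long loop can still be found and the
    # result comes back as double.
    res = np.ldexp(np.float32(1.0), np.int_(2), dtype="d")
    print(res.dtype)                                             # float64

    # Only when that search fails is the homogeneous (DType,)*(nin+nout) form
    # tried; for power with dtype=np.intp that picks intp,intp->intp, the
    # inputs are cast first, and 1.5 ** 2.8 collapses to 1 ** 2 == 1.
    print(np.power(1.5, 2.8, dtype=np.intp, casting="unsafe"))   # 1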
Further, future `promoters` could refine such logic, while at the same time, we do not need to use completely different code paths for `signature=...` and `dtype=...`. The rare downside is that we might pick a `dd->d` loop when a `dl->d` loop would do (SciPy has such ufuncs). The up-side is, that before, using `dtype=...` was not even possible for functions with non-homogenous loops like `np.ldexp`. The future will hopefully see more precise promotion logic, and some of the more surprising paths simply deprecated... --- numpy/core/code_generators/generate_umath.py | 2 +- .../core/code_generators/ufunc_docstrings.py | 4 +- numpy/core/src/umath/ufunc_type_resolution.c | 205 +++++++++++------- numpy/core/tests/test_scalarmath.py | 3 +- numpy/core/tests/test_ufunc.py | 4 +- 5 files changed, 135 insertions(+), 83 deletions(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index ea74c107f83f..6b6a0fe64ad5 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -902,7 +902,7 @@ def english_upper(s): 'nextafter': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.nextafter'), - "PyUFunc_SimpleUniformOperationTypeResolver", + None, TD(flts), ), 'spacing': diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index e33d71ab3ccb..f19946be408a 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -3628,9 +3628,9 @@ def add_newdoc(place, name, doc): Examples -------- >>> eps = np.finfo(np.float64).eps - >>> np.nextafter(1., 2.) == eps + 1 + >>> np.nextafter(1, 2) == eps + 1 True - >>> np.nextafter([1., 2.], [2., 1.]) == [eps + 1, 2 - eps] + >>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps] array([ True, True]) """) diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index a747c0e02ddf..ac92b62df869 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -1201,20 +1201,8 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc, /* Use the default when datetime and timedelta are not involved */ if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) { - int res = PyUFunc_SimpleUniformOperationTypeResolver(ufunc, - casting, operands, type_tup, out_dtypes); - if (res < 0 || out_dtypes[0]->type_num != NPY_BOOL) { - return res; - } - /* - * Hardcode that boolean division is handled by casting to int8, - * we could consider deprecating this (this is safe so no need to - * "validate casting" again. 
- */ - Py_SETREF(out_dtypes[0], PyArray_DescrFromType(NPY_BYTE)); - Py_SETREF(out_dtypes[1], PyArray_DescrFromType(NPY_BYTE)); - Py_SETREF(out_dtypes[2], PyArray_DescrFromType(NPY_BYTE)); - return res; + return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, + type_tup, out_dtypes); } if (type_num1 == NPY_TIMEDELTA) { @@ -2101,6 +2089,91 @@ linear_search_type_resolver(PyUFuncObject *self, return -1; } + +static int +type_tuple_type_resolver_core(PyUFuncObject *self, + PyArrayObject **op, + NPY_CASTING input_casting, NPY_CASTING casting, + int specified_types[], + int any_object, + int no_castable_output, int use_min_scalar, + PyArray_Descr **out_dtype) +{ + int i, j; + int nop = self->nargs; + int types[NPY_MAXARGS]; + + /* For making a better error message on coercion error */ + char err_dst_typecode = '-', err_src_typecode = '-'; + + /* If the ufunc has userloops, search for them. */ + if (self->userloops) { + switch (type_tuple_userloop_type_resolver(self, + nop, specified_types, + op, input_casting, casting, + any_object, use_min_scalar, + out_dtype)) { + /* Error */ + case -1: + return -1; + /* Found matching loop */ + case 1: + return 0; + } + } + + for (i = 0; i < self->ntypes; ++i) { + char *orig_types = self->types + i*self->nargs; + + /* Check specified types and copy into an int array for matching */ + for (j = 0; j < nop; ++j) { + if (specified_types[j] == NPY_NOTYPE) { + types[j] = orig_types[j]; + continue; + } + if (orig_types[j] != specified_types[j]) { + break; + } + /* indicate that we do not have to check this type anymore. */ + types[j] = NPY_NOTYPE; + } + if (j < nop) { + /* no match */ + continue; + } + + switch (ufunc_loop_matches(self, op, + input_casting, casting, + any_object, use_min_scalar, + types, NULL, + &no_castable_output, &err_src_typecode, + &err_dst_typecode)) { + case -1: + /* Error */ + return -1; + case 0: + /* Cannot cast inputs */ + continue; + case 1: + /* Success, fill also the NPY_NOTYPE (cast from char to int) */ + for (j = 0; j < nop; j++) { + types[j] = orig_types[j]; + } + set_ufunc_loop_data_types(self, op, out_dtype, types, NULL); + /* In principle, we only need to validate the NPY_NOTYPE ones */ + if (PyUFunc_ValidateCasting(self, casting, op, out_dtype) < 0) { + for (j = 0; j < self->nargs; j++) { + Py_DECREF(out_dtype[j]); + out_dtype[j] = NULL; + } + return -1; + } + return 0; + } + } + return -2; +} + /* * Does a linear search for the inner loop of the ufunc specified by type_tup. * @@ -2116,14 +2189,11 @@ type_tuple_type_resolver(PyUFuncObject *self, int any_object, PyArray_Descr **out_dtype) { - int i, j, nin = self->nin, nop = nin + self->nout; - int specified_types[NPY_MAXARGS], types[NPY_MAXARGS]; + int nin = self->nin, nop = nin + self->nout; + int specified_types[NPY_MAXARGS]; const char *ufunc_name; int no_castable_output = 0, use_min_scalar; - /* For making a better error message on coercion error */ - char err_dst_typecode = '-', err_src_typecode = '-'; - ufunc_name = ufunc_get_name_cstr(self); use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); @@ -2145,7 +2215,7 @@ type_tuple_type_resolver(PyUFuncObject *self, PyErr_SetString(PyExc_RuntimeError, bad_type_tup_msg); return -1; } - for (i = 0; i < nop; ++i) { + for (int i = 0; i < nop; ++i) { PyObject *item = PyTuple_GET_ITEM(type_tup, i); if (item == Py_None) { specified_types[i] = NPY_NOTYPE; @@ -2164,70 +2234,51 @@ type_tuple_type_resolver(PyUFuncObject *self, return -1; } - /* If the ufunc has userloops, search for them. 
*/ - if (self->userloops) { - switch (type_tuple_userloop_type_resolver(self, - nop, specified_types, - op, input_casting, casting, - any_object, use_min_scalar, - out_dtype)) { - /* Error */ - case -1: - return -1; - /* Found matching loop */ - case 1: - return 0; - } - } + int res = type_tuple_type_resolver_core(self, + op, input_casting, casting, specified_types, any_object, + no_castable_output, use_min_scalar, out_dtype); - for (i = 0; i < self->ntypes; ++i) { - char *orig_types = self->types + i*self->nargs; + if (res != -2) { + return res; + } - /* Check specified types and copy into an int array for matching */ - for (j = 0; j < nop; ++j) { - if (specified_types[j] == NPY_NOTYPE) { - types[j] = orig_types[j]; - continue; - } - if (orig_types[j] != specified_types[j]) { + /* + * When the user passed `dtype=dtype`, it gets translated to + * `signature=(None,)*nin + (dtype,)*nout`. If the signature matches that + * exactly (could be relaxed but that is not necessary for backcompat), + * we also try `signature=(dtype,)*(nin+nout)`. + * This used to be the main meaning for `dtype=dtype`, but some calls broke + * the expectation, and changing it, allows for `dtype=dtype` to be useful + * for ufuncs like `np.ldexp` in the future while also normalizing it to + * a `signature` early on. + */ + int homogeneous_type = NPY_NOTYPE; + if (self->nout > 0) { + homogeneous_type = specified_types[nin]; + for (int i = nin+1; i < nop; i++) { + if (specified_types[i] != homogeneous_type) { + homogeneous_type = NPY_NOTYPE; break; } - /* indicate that we do not have to check this type anymore. */ - types[j] = NPY_NOTYPE; } - if (j < nop) { - /* no match */ - continue; + } + if (homogeneous_type != NPY_NOTYPE) { + for (int i = 0; i < nin; i++) { + if (specified_types[i] != NPY_NOTYPE) { + homogeneous_type = NPY_NOTYPE; + break; + } + specified_types[i] = homogeneous_type; } + } + if (homogeneous_type != NPY_NOTYPE) { + /* Try again with the homogeneous specified types. 
*/ + res = type_tuple_type_resolver_core(self, + op, input_casting, casting, specified_types, any_object, + no_castable_output, use_min_scalar, out_dtype); - switch (ufunc_loop_matches(self, op, - input_casting, casting, - any_object, use_min_scalar, - types, NULL, - &no_castable_output, &err_src_typecode, - &err_dst_typecode)) { - case -1: - /* Error */ - return -1; - case 0: - /* Cannot cast inputs */ - continue; - case 1: - /* Success, fill also the NPY_NOTYPE (cast from char to int) */ - for (j = 0; j < nop; j++) { - types[j] = orig_types[j]; - } - set_ufunc_loop_data_types(self, op, out_dtype, types, NULL); - /* In principle, we only need to validate the NPY_NOTYPE ones */ - if (PyUFunc_ValidateCasting(self, casting, op, out_dtype) < 0) { - for (j = 0; j < self->nargs; j++) { - Py_DECREF(out_dtype[j]); - out_dtype[j] = NULL; - } - return -1; - } - - return 0; + if (res != -2) { + return res; } } diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 0718f365e7c8..d91b4a39146d 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -307,7 +307,8 @@ def test_inplace_floordiv_handling(self): # promotes to float which does not fit a = np.array([1, 2], np.int64) b = np.array([1, 2], np.uint64) - with pytest.raises(TypeError, match=r"Cannot cast ufunc"): + pattern = 'could not be coerced to provided output parameter' + with assert_raises_regex(TypeError, pattern): a //= b diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 47606876e5ee..c13865ce4c28 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -2201,8 +2201,8 @@ def test_ufunc_casterrors(): # was aborted (this is not necessarily defined behaviour) assert out[-1] == 1 - with pytest.raises(TypeError): - # Input "casting" failure (there is no intp out loop for object inputs) + with pytest.raises(ValueError): + # Input casting failure: np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe") assert count == sys.getrefcount(value) From 127b1a7cd9a139ad244f6ee62aeb1ba3cec3a4a1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 3 May 2021 16:00:58 -0500 Subject: [PATCH 1026/1270] DOC,API: Fix the documentation. Also, for now enforce *all* outputs. That is more conservative, and obviously won't work for certain functions in scipy that have multiple outputs of different types, but it seems a bit safer to me right now. Maybe at some point we have an idea of how to "modify" what the `dtype` means. I am not sure how! By allowing the ufunc itself to interpret how to translte it to a `signature`? By giving it a distinct meaning? Yes, this might narrow us down a bit... but... 
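A small sketch of the equivalence this establishes (see also the release note below):

    import numpy as np

    a = np.arange(3, dtype=np.float32)
    b = np.arange(3, dtype=np.float32)

    # dtype="float64" now strictly pins the output DType only, i.e. it is the
    # same request as signature=(None, None, "float64"); the float32 inputs
    # are cast as needed instead of being forced to match exactly.
    r1 = np.add(a, b, dtype="float64")
    r2 = np.add(a, b, signature=(None, None, "float64"))
    assert r1.dtype == r2.dtype == np.float64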
--- .../upcoming_changes/18880.compatibility.rst | 32 ++++++++++++++ doc/source/reference/ufuncs.rst | 44 ++++++++++++------- numpy/core/src/umath/ufunc_object.c | 9 +++- numpy/core/tests/test_ufunc.py | 28 ++++++++++++ 4 files changed, 95 insertions(+), 18 deletions(-) create mode 100644 doc/release/upcoming_changes/18880.compatibility.rst diff --git a/doc/release/upcoming_changes/18880.compatibility.rst b/doc/release/upcoming_changes/18880.compatibility.rst new file mode 100644 index 000000000000..4ea98e646dc9 --- /dev/null +++ b/doc/release/upcoming_changes/18880.compatibility.rst @@ -0,0 +1,32 @@ +Ufunc ``signature=...`` and ``dtype=`` generalization and ``casting`` +--------------------------------------------------------------------- +The behaviour for ``signature=...`` and ``dtype=...`` can differ in +some cases to the previous behaviour. +When ``signature`` was previously used, the casting check on inputs +was relaxed, which could lead to downcasting inputs unsafely especially +if combined with ``casting="unsafe"``. + +Casting is now guranteed to be safe. If a signature is only +partially provided, for example using ``signature=("float64", None, None)``, +this could lead to no loop being found (an error). +In that case, it is necessary to provide the complete signature +to enforce casting the inputs. +Since NumPy special cases if only outputs (or ``dtype``) is provided, +this should affect very few users. + +Further, the meaning of ``dtype="float64"`` has been slightly modified and +now strictly enforces only the correct output (and not input) DTypes. +This means it is now always equivalent to:: + + signature=(None, None, "float64") + +(If the ufunc has two inputs and one output). Since, this could lead +to no loop being found in some cases, NumPy will normally also search +for the loop:: + + signature("float64", "float64, "float64") + +if the first search failed. +In the future, this behaviour may be customized to achieve the expected +results for more complex ufuncs. (For some universal functions such as +``np.ldexp`` inputs can have different DTypes.) diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index c919ec9b8e65..000c7da89a2e 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -430,8 +430,10 @@ advanced usage and will not typically be used. .. versionadded:: 1.6 - Overrides the dtype of the calculation and output arrays. Similar to - *signature*. + Overrides the DType of the output arrays the same way as the *signature*. + This should ensure a matching precision of the calculation. The exact + calculation DTypes chosen may depend on the ufunc and the inputs may be + cast to this DType to perform the calculation. *subok* @@ -442,20 +444,30 @@ advanced usage and will not typically be used. *signature* - Either a data-type, a tuple of data-types, or a special signature - string indicating the input and output types of a ufunc. This argument - allows you to provide a specific signature for the 1-d loop to use - in the underlying calculation. If the loop specified does not exist - for the ufunc, then a TypeError is raised. Normally, a suitable loop is - found automatically by comparing the input types with what is - available and searching for a loop with data-types to which all inputs - can be cast safely. This keyword argument lets you bypass that - search and choose a particular loop. A list of available signatures is - provided by the **types** attribute of the ufunc object. 
For backwards - compatibility this argument can also be provided as *sig*, although - the long form is preferred. Note that this should not be confused with - the generalized ufunc :ref:`signature ` that is - stored in the **signature** attribute of the of the ufunc object. + Either a Dtype, a tuple of DTypes, or a special signature string + indicating the input and output types of a ufunc. + + This arguments allows to specify exact DTypes to be used for the + calculation. Casting will be used as necessary and the input DType + is not used unless ``signature`` is ``None`` for that input. + + When all DTypes are fixed, a specific loop is chosen or an error raised + if no matching loop does not exist. + If some DTypes are not specified and left ``None``, the behaviour may + depend on the ufunc. + At this time, a list of available signatures is provided by the **types** + attribute of the ufunc. (This list may be missing DTypes not defined + by NumPy.) + + The ``signature`` only specifies the DType class/type. For example, it + can specifiy that the operation should be ``datetime64`` or ``float64`` + operation. It does not specify the ``datetime64`` time-unit or the + ``float64`` byte-order. + + For backwards compatibility this argument can also be provided as *sig*, + although the long form is preferred. Note that this should not be + confused with the generalized ufunc :ref:`signature ` + that is stored in the **signature** attribute of the of the ufunc object. *extobj* diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index f17dd1e61c1e..9b550f6d0e32 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -4587,10 +4587,15 @@ _get_normalized_typetup(PyUFuncObject *ufunc, "Cannot provide `dtype` when a ufunc has no outputs"); return -1; } - signature[nin] = _get_dtype(dtype_obj); - if (signature[nin] == NULL) { + PyArray_DTypeMeta *dtype = _get_dtype(dtype_obj); + if (dtype == NULL) { return -1; } + for (int i = nin; i < nop; i++) { + Py_INCREF(dtype); + signature[i] = dtype; + } + Py_DECREF(dtype); res = _make_new_typetup(nop, signature, out_typetup); goto finish; } diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index c13865ce4c28..b44b10915ee3 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -457,6 +457,34 @@ def test_signature_dtype_type(self): float_dtype = type(np.dtype(np.float64)) np.add(3, 4, signature=(float_dtype, float_dtype, None)) + @pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"]) + def test_partial_signature_mismatch(self, casting): + # If the second argument matches already, no need to specify it: + res = np.ldexp(np.float32(1.), np.int_(2), dtype="d") + assert res.dtype == "d" + res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d")) + assert res.dtype == "d" + + # ldexp only has a loop for long input as second argument, overriding + # the output cannot help with that (no matter the casting) + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), dtype="d") + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), signature=(None, None, "d")) + + def test_use_output_signature_for_all_arguments(self): + # Test that providing only `dtype=` or `signature=(None, None, dtype)` + # is sufficient if falling back to a homogeneous signature works. + # In this case, the `intp, intp -> intp` loop is chosen. 
+ res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe") + assert res == 1 # the cast happens first. + res = np.power(1.5, 2.8, signature=(None, None, np.intp), + casting="unsafe") + assert res == 1 + with pytest.raises(TypeError): + # the unsafe casting would normally cause errors though: + np.power(1.5, 2.8, dtype=np.intp) + def test_signature_errors(self): with pytest.raises(TypeError, match="the signature object to ufunc must be a string or"): From 5ccd7e8d2be3a75ba1a0672b49e7e28886bad51d Mon Sep 17 00:00:00 2001 From: Chiara Marmo Date: Mon, 3 May 2021 23:56:48 +0200 Subject: [PATCH 1027/1270] Fix markdown formatting. Remove unuseful 'py'. --- doc/source/dev/reviewer_guidelines.rst | 40 ++++++++++---------------- 1 file changed, 15 insertions(+), 25 deletions(-) diff --git a/doc/source/dev/reviewer_guidelines.rst b/doc/source/dev/reviewer_guidelines.rst index bdc69fe452ae..f05014e986ec 100644 --- a/doc/source/dev/reviewer_guidelines.rst +++ b/doc/source/dev/reviewer_guidelines.rst @@ -126,29 +126,25 @@ replies `_ for reviewing: **Usage question** :: - You are asking a usage question. The issue tracker is for bugs and new - features. I'm going to close this issue, feel free to ask for help via - our [help channels](https://numpy.org/gethelp/). + You are asking a usage question. The issue tracker is for bugs and new features. + I'm going to close this issue, feel free to ask for help via our [help channels](https://numpy.org/gethelp/). **You’re welcome to update the docs** :: - Please feel free to offer a pull request updating the documentation if - you feel it could be improved. + Please feel free to offer a pull request updating the documentation if you feel it could be improved. **Self-contained example for bug** :: - Please provide a [self-contained example code] - (https://stackoverflow.com/help/mcve), including imports and data - (if possible), so that other contributors can just run it and reproduce - your issue. Ideally your example code should be minimal. + Please provide a [self-contained example code] (https://stackoverflow.com/help/mcve), including imports and data (if possible), so that other contributors can just run it and reproduce your issue. + Ideally your example code should be minimal. **Software versions** :: To help diagnose your issue, please paste the output of: - ```py + ``` python -c 'import numpy; print(numpy.version.version)' ``` Thanks. @@ -156,36 +152,30 @@ replies `_ for reviewing: **Code blocks** :: - Readability can be greatly improved if you - [format](https://help.github.com/articles/creating-and-highlighting-code-blocks/) - your code snippets and complete error messages appropriately. - You can edit your issue descriptions and comments at any time to - improve readability. This helps maintainers a lot. Thanks! + Readability can be greatly improved if you [format](https://help.github.com/articles/creating-and-highlighting-code-blocks/) your code snippets and complete error messages appropriately. + You can edit your issue descriptions and comments at any time to improve readability. + This helps maintainers a lot. Thanks! **Linking to code** :: - For clarity's sake, you can link to code like - [this](https://help.github.com/articles/creating-a-permanent-link-to-a-code-snippet/). + For clarity's sake, you can link to code like [this](https://help.github.com/articles/creating-a-permanent-link-to-a-code-snippet/). **Better description and title** :: - Please make the title of the PR more descriptive. 
The title will become - the commit message when this is merged. You should state what issue - (or PR) it fixes/resolves in the description using the syntax described - [here](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword). + Please make the title of the PR more descriptive. + The title will become the commit message when this is merged. + You should state what issue (or PR) it fixes/resolves in the description using the syntax described [here](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword). **Regression test needed** :: - Please add a [non-regression test](https://en.wikipedia.org/wiki/Non-regression_testing) - that would fail at main but pass in this PR. + Please add a [non-regression test](https://en.wikipedia.org/wiki/Non-regression_testing) that would fail at main but pass in this PR. **Don’t change unrelated** :: - Please do not change unrelated lines. It makes your contribution harder - to review and may introduce merge conflicts to other pull requests. + Please do not change unrelated lines. It makes your contribution harder to review and may introduce merge conflicts to other pull requests. .. include:: gitwash/git_links.inc From 8145b0549d6744d854d94b66006ac7aeb1ad0864 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 3 May 2021 13:44:18 -0600 Subject: [PATCH 1028/1270] MAINT: Add ".csv" to some data file names. It is useful to encode the data file type in the name. --- ...ion-set-README => umath-validation-set-README.txt} | 4 ++-- ...alidation-set-cos => umath-validation-set-cos.csv} | 0 ...alidation-set-exp => umath-validation-set-exp.csv} | 0 ...alidation-set-log => umath-validation-set-log.csv} | 0 ...alidation-set-sin => umath-validation-set-sin.csv} | 0 numpy/core/tests/test_umath_accuracy.py | 11 ++++++----- 6 files changed, 8 insertions(+), 7 deletions(-) rename numpy/core/tests/data/{umath-validation-set-README => umath-validation-set-README.txt} (82%) rename numpy/core/tests/data/{umath-validation-set-cos => umath-validation-set-cos.csv} (100%) rename numpy/core/tests/data/{umath-validation-set-exp => umath-validation-set-exp.csv} (100%) rename numpy/core/tests/data/{umath-validation-set-log => umath-validation-set-log.csv} (100%) rename numpy/core/tests/data/{umath-validation-set-sin => umath-validation-set-sin.csv} (100%) diff --git a/numpy/core/tests/data/umath-validation-set-README b/numpy/core/tests/data/umath-validation-set-README.txt similarity index 82% rename from numpy/core/tests/data/umath-validation-set-README rename to numpy/core/tests/data/umath-validation-set-README.txt index 6561ca3b5191..cfc9e4145d10 100644 --- a/numpy/core/tests/data/umath-validation-set-README +++ b/numpy/core/tests/data/umath-validation-set-README.txt @@ -1,5 +1,5 @@ Steps to validate transcendental functions: -1) Add a file 'umath-validation-set-', where ufuncname is name of +1) Add a file 'umath-validation-set-.txt', where ufuncname is name of the function in NumPy you want to validate 2) The file should contain 4 columns: dtype,input,expected output,ulperror a. dtype: one of np.float16, np.float32, np.float64 @@ -11,5 +11,5 @@ Steps to validate transcendental functions: d. ulperror: expected maximum ulp error of the function. This should be same across all rows of the same dtype. Otherwise, the function is tested for the maximum ulp error among all entries of that dtype. 
-3) Add file umath-validation-set- to the test file test_umath_accuracy.py +3) Add file umath-validation-set-.txt to the test file test_umath_accuracy.py which will then validate your ufunc. diff --git a/numpy/core/tests/data/umath-validation-set-cos b/numpy/core/tests/data/umath-validation-set-cos.csv similarity index 100% rename from numpy/core/tests/data/umath-validation-set-cos rename to numpy/core/tests/data/umath-validation-set-cos.csv diff --git a/numpy/core/tests/data/umath-validation-set-exp b/numpy/core/tests/data/umath-validation-set-exp.csv similarity index 100% rename from numpy/core/tests/data/umath-validation-set-exp rename to numpy/core/tests/data/umath-validation-set-exp.csv diff --git a/numpy/core/tests/data/umath-validation-set-log b/numpy/core/tests/data/umath-validation-set-log.csv similarity index 100% rename from numpy/core/tests/data/umath-validation-set-log rename to numpy/core/tests/data/umath-validation-set-log.csv diff --git a/numpy/core/tests/data/umath-validation-set-sin b/numpy/core/tests/data/umath-validation-set-sin.csv similarity index 100% rename from numpy/core/tests/data/umath-validation-set-sin rename to numpy/core/tests/data/umath-validation-set-sin.csv diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py index 33080edbb4c3..8e04d2875bad 100644 --- a/numpy/core/tests/test_umath_accuracy.py +++ b/numpy/core/tests/test_umath_accuracy.py @@ -28,10 +28,10 @@ def convert(s, datatype="np.float32"): return fp.contents.value # dereference the pointer, get the float str_to_float = np.vectorize(convert) -files = ['umath-validation-set-exp', - 'umath-validation-set-log', - 'umath-validation-set-sin', - 'umath-validation-set-cos'] +files = ['umath-validation-set-exp.csv', + 'umath-validation-set-log.csv', + 'umath-validation-set-sin.csv', + 'umath-validation-set-cos.csv'] class TestAccuracy: @platform_skip @@ -47,7 +47,8 @@ def test_validate_transcendentals(self): names=('type','input','output','ulperr'), delimiter=',', skip_header=1) - npfunc = getattr(np, filename.split('-')[3]) + npname = path.splitext(filename)[0].split('-')[3] + npfunc = getattr(np, npname) for datatype in np.unique(data['type']): data_subset = data[data['type'] == datatype] inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) From 62e1c86bbb6ae322aaed3942170d11c13df36e18 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Mon, 3 May 2021 16:35:47 -0700 Subject: [PATCH 1029/1270] Grammatical fixups for release note. --- doc/release/upcoming_changes/18880.compatibility.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/release/upcoming_changes/18880.compatibility.rst b/doc/release/upcoming_changes/18880.compatibility.rst index 4ea98e646dc9..3efac000200d 100644 --- a/doc/release/upcoming_changes/18880.compatibility.rst +++ b/doc/release/upcoming_changes/18880.compatibility.rst @@ -6,12 +6,12 @@ When ``signature`` was previously used, the casting check on inputs was relaxed, which could lead to downcasting inputs unsafely especially if combined with ``casting="unsafe"``. -Casting is now guranteed to be safe. If a signature is only +Casting is now guaranteed to be safe. If a signature is only partially provided, for example using ``signature=("float64", None, None)``, this could lead to no loop being found (an error). In that case, it is necessary to provide the complete signature to enforce casting the inputs. 
-Since NumPy special cases if only outputs (or ``dtype``) is provided, +Since NumPy special-cases if only outputs (or ``dtype``) is provided, this should affect very few users. Further, the meaning of ``dtype="float64"`` has been slightly modified and @@ -20,7 +20,7 @@ This means it is now always equivalent to:: signature=(None, None, "float64") -(If the ufunc has two inputs and one output). Since, this could lead +(If the ufunc has two inputs and one output). Since this could lead to no loop being found in some cases, NumPy will normally also search for the loop:: From 2e38b8fc0b1d11e188ed7acc2b2c244bc9f858be Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Mon, 3 May 2021 16:54:41 -0700 Subject: [PATCH 1030/1270] Grammatical fixups for ufuncs refguide. --- doc/source/reference/ufuncs.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 000c7da89a2e..333e445d40af 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -447,12 +447,12 @@ advanced usage and will not typically be used. Either a Dtype, a tuple of DTypes, or a special signature string indicating the input and output types of a ufunc. - This arguments allows to specify exact DTypes to be used for the + This argument allows the user to specify exact DTypes to be used for the calculation. Casting will be used as necessary and the input DType is not used unless ``signature`` is ``None`` for that input. When all DTypes are fixed, a specific loop is chosen or an error raised - if no matching loop does not exist. + if no matching loop exists. If some DTypes are not specified and left ``None``, the behaviour may depend on the ufunc. At this time, a list of available signatures is provided by the **types** From d476630b975d2620cd758b35e47e3fec0c9da229 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Mon, 3 May 2021 16:59:20 -0700 Subject: [PATCH 1031/1270] Comment fixups --- numpy/core/src/umath/ufunc_type_resolution.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index ac92b62df869..8f974fca0907 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -2243,12 +2243,12 @@ type_tuple_type_resolver(PyUFuncObject *self, } /* - * When the user passed `dtype=dtype`, it gets translated to + * When the user passes `dtype=dtype`, it gets translated to * `signature=(None,)*nin + (dtype,)*nout`. If the signature matches that * exactly (could be relaxed but that is not necessary for backcompat), * we also try `signature=(dtype,)*(nin+nout)`. * This used to be the main meaning for `dtype=dtype`, but some calls broke - * the expectation, and changing it, allows for `dtype=dtype` to be useful + * the expectation, and changing it allows for `dtype=dtype` to be useful * for ufuncs like `np.ldexp` in the future while also normalizing it to * a `signature` early on. */ From ccfd68d88fa356d7f097e216fb2c16b14da4052d Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 4 May 2021 03:27:40 +0300 Subject: [PATCH 1032/1270] MAINT: ssize_t -> Py_ssize_t and other fixes for Python v3.10.0 (gh-18890) The 3.10 alpha 7 fails to build on windows, see below for the warnings and errors. It seems we should not be using ssize_t rather Py_ssize_t. 
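A rough sketch of the failure mode the regression test further down guards against (the scratch file name is made up; the real test lives in test_multiarray.py):

    import os
    import numpy as np

    np.arange(4, dtype=np.float64).tofile("tmp_fromfile.bin")  # hypothetical scratch file

    old_dup = os.dup
    os.dup = lambda fd: 2**68        # a value that cannot fit in a C int
    try:
        with open("tmp_fromfile.bin", "rb") as f:
            np.fromfile(f)           # expected to raise OSError under this change
    except OSError:
        pass
    finally:
        os.dup = old_dup
        os.remove("tmp_fromfile.bin")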
On windows, ssize_t is not defined anymore in CPython3.10 alpha7 after python/cpython#24479 in response to issue 17717 from 2011, which was merged March 1. Fixes #18888 --- numpy/core/include/numpy/npy_3kcompat.h | 12 +++++++++++- numpy/core/src/common/npy_argparse.c | 2 +- numpy/core/src/multiarray/array_method.c | 4 ++-- numpy/core/src/multiarray/common.h | 2 +- numpy/core/src/multiarray/dtype_transfer.c | 17 ++++++++++------- numpy/core/src/multiarray/dtypemeta.c | 2 +- numpy/core/tests/test_multiarray.py | 18 ++++++++++++++++++ 7 files changed, 44 insertions(+), 13 deletions(-) diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h index 191cd244f875..551ec6be8c2d 100644 --- a/numpy/core/include/numpy/npy_3kcompat.h +++ b/numpy/core/include/numpy/npy_3kcompat.h @@ -216,6 +216,7 @@ static NPY_INLINE FILE* npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) { int fd, fd2, unbuf; + Py_ssize_t fd2_tmp; PyObject *ret, *os, *io, *io_raw; npy_off_t pos; FILE *handle; @@ -251,8 +252,17 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) if (ret == NULL) { return NULL; } - fd2 = PyNumber_AsSsize_t(ret, NULL); + fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError); Py_DECREF(ret); + if (fd2_tmp == -1 && PyErr_Occurred()) { + return NULL; + } + if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) { + PyErr_SetString(PyExc_IOError, + "Getting an 'int' from os.dup() failed"); + return NULL; + } + fd2 = (int)fd2_tmp; /* Convert to FILE* handle */ #ifdef _WIN32 diff --git a/numpy/core/src/common/npy_argparse.c b/numpy/core/src/common/npy_argparse.c index 3df780990422..8460a38e6461 100644 --- a/numpy/core/src/common/npy_argparse.c +++ b/numpy/core/src/common/npy_argparse.c @@ -338,7 +338,7 @@ _npy_parse_arguments(const char *funcname, } } - ssize_t param_pos = ( + Py_ssize_t param_pos = ( (name - cache->kw_strings) + cache->npositional_only); /* There could be an identical positional argument */ diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c index fc315da24aff..2cc075141c1c 100644 --- a/numpy/core/src/multiarray/array_method.c +++ b/numpy/core/src/multiarray/array_method.c @@ -422,7 +422,7 @@ PyArrayMethod_FromSpec_int(PyArrayMethod_Spec *spec, int private) return NULL; } - ssize_t length = strlen(spec->name); + Py_ssize_t length = strlen(spec->name); res->method->name = PyMem_Malloc(length + 1); if (res->method->name == NULL) { Py_DECREF(res); @@ -625,7 +625,7 @@ boundarraymethod__simple_strided_call( PyArrayObject *arrays[NPY_MAXARGS]; PyArray_Descr *descrs[NPY_MAXARGS]; PyArray_Descr *out_descrs[NPY_MAXARGS]; - ssize_t length = -1; + Py_ssize_t length = -1; int aligned = 1; char *args[NPY_MAXARGS]; npy_intp strides[NPY_MAXARGS]; diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index 2f2e7e25bea2..83209cd38b94 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -209,7 +209,7 @@ npy_is_aligned(const void * p, const npy_uintp alignment) } /* Get equivalent "uint" alignment given an itemsize, for use in copy code */ -static NPY_INLINE int +static NPY_INLINE npy_uintp npy_uint_alignment(int itemsize) { npy_uintp alignment = 0; /* return value of 0 means unaligned */ diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index e76532ebeb17..aa8cc84ffa6f 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -2281,7 +2281,9 @@ 
get_fields_transfer_function(int NPY_UNUSED(aligned), { PyObject *key, *tup, *title; PyArray_Descr *src_fld_dtype, *dst_fld_dtype; - npy_int i, field_count, structsize; + npy_int i; + size_t structsize; + Py_ssize_t field_count; int src_offset, dst_offset; _field_transfer_data *data; @@ -2468,7 +2470,8 @@ get_decref_fields_transfer_function(int NPY_UNUSED(aligned), { PyObject *names, *key, *tup, *title; PyArray_Descr *src_fld_dtype; - npy_int i, field_count, structsize; + npy_int i, structsize; + Py_ssize_t field_count; int src_offset; names = src_dtype->names; @@ -2831,17 +2834,17 @@ static NpyAuxData * _multistep_cast_auxdata_clone_int(_multistep_castdata *castdata, int move_info) { /* Round up the structure size to 16-byte boundary for the buffers */ - ssize_t datasize = (sizeof(_multistep_castdata) + 15) & ~0xf; + Py_ssize_t datasize = (sizeof(_multistep_castdata) + 15) & ~0xf; - ssize_t from_buffer_offset = datasize; + Py_ssize_t from_buffer_offset = datasize; if (castdata->from.func != NULL) { - ssize_t src_itemsize = castdata->main.context.descriptors[0]->elsize; + Py_ssize_t src_itemsize = castdata->main.context.descriptors[0]->elsize; datasize += NPY_LOWLEVEL_BUFFER_BLOCKSIZE * src_itemsize; datasize = (datasize + 15) & ~0xf; } - ssize_t to_buffer_offset = datasize; + Py_ssize_t to_buffer_offset = datasize; if (castdata->to.func != NULL) { - ssize_t dst_itemsize = castdata->main.context.descriptors[1]->elsize; + Py_ssize_t dst_itemsize = castdata->main.context.descriptors[1]->elsize; datasize += NPY_LOWLEVEL_BUFFER_BLOCKSIZE * dst_itemsize; } diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index ddaf11042c62..26b16b15b1b5 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -539,7 +539,7 @@ dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr) if (dot) { scalar_name = dot + 1; } - ssize_t name_length = strlen(scalar_name) + 14; + Py_ssize_t name_length = strlen(scalar_name) + 14; char *tp_name = malloc(name_length); if (tp_name == NULL) { diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 073433bd1cad..2dea5518c40d 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -5047,6 +5047,24 @@ def test_fromfile_offset(self): np.fromfile, self.filename, dtype=self.dtype, sep=",", offset=1) + @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") + def test_fromfile_bad_dup(self): + def dup_str(fd): + return 'abc' + + def dup_bigint(fd): + return 2**68 + + old_dup = os.dup + try: + with open(self.filename, 'wb') as f: + self.x.tofile(f) + for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): + os.dup = dup + assert_raises(exc, np.fromfile, f) + finally: + os.dup = old_dup + def _check_from(self, s, value, **kw): if 'sep' not in kw: y = np.frombuffer(s, **kw) From ca49502905d7265ba4620bf5e2013abeabe61eec Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 3 May 2021 12:53:33 -0600 Subject: [PATCH 1033/1270] MAINT: Explicitly mark text files in .gitattributes. Marking files as text will ensure the line endings are normalized to lf on checkin. The line endings in the working tree may be controlled by setting the eol variable in the .gitconfig file. 
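The normalization described above only changes what git stores at checkin; the working tree can still carry CRLF endings depending on ``core.eol``. A minimal, stand-alone way to spot stray CRLF endings in files now covered by the ``text`` patterns is sketched below. This check is purely illustrative and not part of the patch; the suffix list is an assumed subset of the patterns added to ``.gitattributes`` (``*.bat``/``*.cmd`` are left out on purpose, since the patch keeps them ``eol=crlf``)::

    import pathlib

    # Illustrative subset of the suffixes .gitattributes now marks as text.
    # *.bat and *.cmd are excluded deliberately: the patch keeps them eol=crlf.
    TEXT_SUFFIXES = {".py", ".pyx", ".pxd", ".c", ".h", ".rst", ".txt", ".cfg", ".ini"}

    def find_crlf(root="."):
        """Yield text-suffixed files under *root* that still contain CRLF."""
        for path in pathlib.Path(root).rglob("*"):
            if path.suffix in TEXT_SUFFIXES and path.is_file():
                if b"\r\n" in path.read_bytes():
                    yield path

    if __name__ == "__main__":
        for bad in find_crlf():
            print("CRLF line endings found in", bad)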
--- .gitattributes | 106 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 96 insertions(+), 10 deletions(-) diff --git a/.gitattributes b/.gitattributes index 8cdd176f0680..8723dd9dc95a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,14 +1,10 @@ -# Numerical data files -numpy/lib/tests/data/*.npy binary - -# Release notes, reduce number of conflicts. -doc/release/*.rst merge=union - # Highlight our custom templating language as C, since it's hopefully better # than nothing. This also affects repo statistics. -*.c.src linguist-language=C -*.inc.src linguist-language=C -*.h.src linguist-language=C +*.c.src text linguist-language=C +*.inc.src text linguist-language=C +*.h.src text linguist-language=C +*.pyx.in text linguist-language=Python +*.pxd.in text linguist-language=Python # Mark some files as vendored numpy/linalg/lapack_lite/f2c.c linguist-vendored @@ -19,5 +15,95 @@ numpy/core/include/numpy/libdivide/* linguist-vendored # Mark some files as generated numpy/linalg/lapack_lite/f2c_*.c linguist-generated numpy/linalg/lapack_lite/lapack_lite_names.h linguist-generated - numpy/_version.py export-subst + +# Configuration files +*.ini text +*.cfg text +./MANIFEST.in text +./numpy/core/npymath.ini.in text +./numpy/core/mlib.ini.in text +./site.cfg.example text + +# Python sources +*.py text diff=python +*.pxd text diff=python +*.pyx text diff=python +*.pyi text diff=python + +# C/C++ sources +*.c text diff=c +*.h text diff=c +*.cc text diff=cpp +*.cxx text diff=cpp +*.cpp text diff=cpp +*.hpp text diff=cpp +*.hh text diff=cpp + +# Fortran sources +*.f text diff=fortran +*.for text diff=fortran +*.f90 text diff=fortran +*.f95 text diff=fortran +*.f03 text diff=fortran + +# JavaScript +*.js text + +# F2py +./doc/source/f2py/*.pyf text +./doc/source/f2py/*.dat text +./numpy/f2py/tests/src/module_data/mod.mod binary + +# Documents +*.md text diff=markdown +*.txt text +*.rst text +*.pdf binary +*.css text diff=css +*.html text diff=html + +# Graphics +*.png binary +*.ico binary +*.dia binary +*.gif binary +*.odg binary +*.fig text +*.svg text +# SVG is treated as an asset (binary) by default. If you want +# to treat it as binary, use the following line instead. +# *.svg binary + +# Scripts +*.sh text eol=lf +*.sed text +# These are explicitly windows files and should use crlf +*.bat text eol=crlf +*.cmd text eol=crlf + +# Serialisation +*.json text +*.toml text +*.xml text +*.yaml text +*.yml text + +# Data files +*.csv text +*.pkl binary +*.fits binary +*.npy binary +*.npz binary + +# Misc. +*.swg text +*.patch text +./doc/neps/index.rst.tmpl text +./benchmarks/asv_compare.conf.json.tpl text +./tools/swig/test/*.i text +./tools/gitpod/gitpod.Dockerfile text +./doc/source/dev/gitwash/git_links.inc text +./doc/source/reference/simd/*.inc text +./numpy/core/src/_simd/*.inc text diff=c + From 1cb1f67429997e894c32c3e7484c0bb29324390b Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 01:16:04 -0400 Subject: [PATCH 1034/1270] ENH: Add PCG64DXSM implementation. 
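Before walking through the diff, a short sketch of how the new bit generator is meant to be driven. It mirrors the examples in the docstrings added below, so it assumes a NumPy build with this patch applied; the seed values and draw counts are arbitrary::

    from numpy.random import Generator, PCG64DXSM, SeedSequence

    # Single stream: wrap the bit generator in a Generator, exactly as with PCG64.
    rg = Generator(PCG64DXSM(1234))
    print(rg.standard_normal(3))

    # Parallel streams: spawn child seeds and hand each one its own PCG64DXSM.
    sg = SeedSequence(1234)
    streams = [Generator(PCG64DXSM(s)) for s in sg.spawn(10)]

    # jumped() and advance() behave as for PCG64: each jump moves the state
    # as-if (phi - 1) * 2**128 draws occurred, advance(n) as-if n draws occurred.
    bg = PCG64DXSM(1234)
    far_away = Generator(bg.jumped())
    bg.advance(1_000_000)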
--- numpy/random/__init__.py | 6 +- numpy/random/__init__.pyi | 5 +- numpy/random/_pcg64.pyi | 14 + numpy/random/_pcg64.pyx | 245 ++++ numpy/random/_pickle.py | 3 +- numpy/random/src/pcg64/pcg64.c | 18 + numpy/random/src/pcg64/pcg64.h | 56 + .../random/tests/data/pcg64dxsm-testset-1.csv | 1001 +++++++++++++++++ .../random/tests/data/pcg64dxsm-testset-2.csv | 1001 +++++++++++++++++ numpy/random/tests/test_direct.py | 32 +- numpy/random/tests/test_smoke.py | 14 +- 11 files changed, 2388 insertions(+), 7 deletions(-) create mode 100644 numpy/random/tests/data/pcg64dxsm-testset-1.csv create mode 100644 numpy/random/tests/data/pcg64dxsm-testset-2.csv diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py index 7efa5c07fcfe..2e8f99fe3045 100644 --- a/numpy/random/__init__.py +++ b/numpy/random/__init__.py @@ -17,6 +17,7 @@ --------------------------------------------- --- MT19937 PCG64 +PCG64DXSM Philox SFC64 ============================================= === @@ -183,13 +184,14 @@ from ._generator import Generator, default_rng from .bit_generator import SeedSequence, BitGenerator from ._mt19937 import MT19937 -from ._pcg64 import PCG64 +from ._pcg64 import PCG64, PCG64DXSM from ._philox import Philox from ._sfc64 import SFC64 from .mtrand import * __all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', - 'Philox', 'PCG64', 'SFC64', 'default_rng', 'BitGenerator'] + 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng', + 'BitGenerator'] def __RandomState_ctor(): diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index 1187b7d8ecb0..48b620c48b92 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -3,7 +3,10 @@ from typing import List from numpy.random._generator import Generator as Generator from numpy.random._generator import default_rng as default_rng from numpy.random._mt19937 import MT19937 as MT19937 -from numpy.random._pcg64 import PCG64 as PCG64 +from numpy.random._pcg64 import ( + PCG64 as PCG64, + PCG64DXSM as PCG64DXSM, +) from numpy.random._philox import Philox as Philox from numpy.random._sfc64 import SFC64 as SFC64 from numpy.random.bit_generator import BitGenerator as BitGenerator diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index a4f6e0760bc6..25e2fdde602d 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -32,3 +32,17 @@ class PCG64(BitGenerator): value: _PCG64State, ) -> None: ... def advance(self, delta: int) -> PCG64: ... + +class PCG64DXSM(BitGenerator): + def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64DXSM: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64DXSM: ... 
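The stub above only pins down signatures; the Cython implementation further below exposes the generator state as a plain dict (the ``bit_generator`` name, the 128-bit ``state``/``inc`` pair, and the buffered uint32), which makes checkpointing a stream straightforward. A hedged sketch, again assuming a build with this patch; the seed and draw sizes are arbitrary::

    from numpy.random import Generator, PCG64DXSM

    bg = PCG64DXSM(42)
    saved = bg.state                       # plain dict snapshot of the stream
    a = Generator(bg).integers(0, 2**32, size=5)

    bg.state = saved                       # rewind to the saved point
    b = Generator(bg).integers(0, 2**32, size=5)
    assert (a == b).all()                  # identical draws after restoring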
diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index 605aae4bcb21..aaa79b3045b2 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -26,6 +26,10 @@ cdef extern from "src/pcg64/pcg64.h": void pcg64_get_state(pcg64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger) void pcg64_set_state(pcg64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger) + uint64_t pcg64_cm_next64(pcg64_state *state) nogil + uint32_t pcg64_cm_next32(pcg64_state *state) nogil + void pcg64_cm_advance(pcg64_state *state, uint64_t *step) + cdef uint64_t pcg64_uint64(void* st) nogil: return pcg64_next64(st) @@ -35,6 +39,14 @@ cdef uint32_t pcg64_uint32(void *st) nogil: cdef double pcg64_double(void* st) nogil: return uint64_to_double(pcg64_next64(st)) +cdef uint64_t pcg64_cm_uint64(void* st) nogil: + return pcg64_cm_next64(st) + +cdef uint32_t pcg64_cm_uint32(void *st) nogil: + return pcg64_cm_next32( st) + +cdef double pcg64_cm_double(void* st) nogil: + return uint64_to_double(pcg64_cm_next64(st)) cdef class PCG64(BitGenerator): """ @@ -268,3 +280,236 @@ cdef class PCG64(BitGenerator): pcg64_advance(&self.rng_state, np.PyArray_DATA(d)) self._reset_state_variables() return self + + +cdef class PCG64DXSM(BitGenerator): + """ + PCG64DXSM(seed=None) + + BitGenerator for the PCG-64 DXSM pseudo-random number generator. + + Parameters + ---------- + seed : {None, int, array_like[ints], SeedSequence}, optional + A seed to initialize the `BitGenerator`. If None, then fresh, + unpredictable entropy will be pulled from the OS. If an ``int`` or + ``array_like[ints]`` is passed, then it will be passed to + `SeedSequence` to derive the initial `BitGenerator` state. One may also + pass in a `SeedSequence` instance. + + Notes + ----- + PCG-64 DXSM is a 128-bit implementation of O'Neill's permutation congruential + generator ([1]_, [2]_). PCG-64 DXSM has a period of :math:`2^{128}` and supports + advancing an arbitrary number of steps as well as :math:`2^{127}` streams. + The specific member of the PCG family that we use is PCG DXSM 128/64. + + ``PCG64DXSM`` provides a capsule containing function pointers that produce + doubles, and unsigned 32 and 64- bit integers. These are not + directly consumable in Python and must be consumed by a ``Generator`` + or similar object that supports low-level access. + + Supports the method :meth:`advance` to advance the RNG an arbitrary number of + steps. The state of the PCG-64 DXSM RNG is represented by 2 128-bit unsigned + integers. + + **State and Seeding** + + The ``PCG64DXSM`` state vector consists of 2 unsigned 128-bit values, + which are represented externally as Python ints. One is the state of the + PRNG, which is advanced by a linear congruential generator (LCG). The + second is a fixed odd increment used in the LCG. + + The input seed is processed by `SeedSequence` to generate both values. The + increment is not independently settable. + + **Parallel Features** + + The preferred way to use a BitGenerator in parallel applications is to use + the `SeedSequence.spawn` method to obtain entropy values, and to use these + to generate new BitGenerators: + + >>> from numpy.random import Generator, PCG64DXSM, SeedSequence + >>> sg = SeedSequence(1234) + >>> rg = [Generator(PCG64DXSM(s)) for s in sg.spawn(10)] + + **Compatibility Guarantee** + + ``PCG64DXSM`` makes a guarantee that a fixed seed and will always produce + the same random integer stream. + + References + ---------- + .. 
[1] `"PCG, A Family of Better Random Number Generators" + `_ + .. [2] O'Neill, Melissa E. `"PCG: A Family of Simple Fast Space-Efficient + Statistically Good Algorithms for Random Number Generation" + `_ + """ + cdef pcg64_state rng_state + cdef pcg64_random_t pcg64_random_state + + def __init__(self, seed=None): + BitGenerator.__init__(self, seed) + self.rng_state.pcg_state = &self.pcg64_random_state + + self._bitgen.state = &self.rng_state + self._bitgen.next_uint64 = &pcg64_cm_uint64 + self._bitgen.next_uint32 = &pcg64_cm_uint32 + self._bitgen.next_double = &pcg64_cm_double + self._bitgen.next_raw = &pcg64_cm_uint64 + # Seed the _bitgen + val = self._seed_seq.generate_state(4, np.uint64) + pcg64_set_seed(&self.rng_state, + np.PyArray_DATA(val), + (np.PyArray_DATA(val) + 2)) + self._reset_state_variables() + + cdef _reset_state_variables(self): + self.rng_state.has_uint32 = 0 + self.rng_state.uinteger = 0 + + cdef jump_inplace(self, jumps): + """ + Jump state in-place + Not part of public API + + Parameters + ---------- + jumps : integer, positive + Number of times to jump the state of the rng. + + Notes + ----- + The step size is phi-1 when multiplied by 2**128 where phi is the + golden ratio. + """ + step = 0x9e3779b97f4a7c15f39cc0605cedc835 + self.advance(step * int(jumps)) + + def jumped(self, jumps=1): + """ + jumped(jumps=1) + + Returns a new bit generator with the state jumped. + + Jumps the state as-if jumps * 210306068529402873165736369884012333109 + random numbers have been generated. + + Parameters + ---------- + jumps : integer, positive + Number of times to jump the state of the bit generator returned + + Returns + ------- + bit_generator : PCG64DXSM + New instance of generator jumped iter times + + Notes + ----- + The step size is phi-1 when multiplied by 2**128 where phi is the + golden ratio. 
+ """ + cdef PCG64DXSM bit_generator + + bit_generator = self.__class__() + bit_generator.state = self.state + bit_generator.jump_inplace(jumps) + + return bit_generator + + @property + def state(self): + """ + Get or set the PRNG state + + Returns + ------- + state : dict + Dictionary containing the information required to describe the + state of the PRNG + """ + cdef np.ndarray state_vec + cdef int has_uint32 + cdef uint32_t uinteger + + # state_vec is state.high, state.low, inc.high, inc.low + state_vec = np.empty(4, dtype=np.uint64) + pcg64_get_state(&self.rng_state, + np.PyArray_DATA(state_vec), + &has_uint32, &uinteger) + state = int(state_vec[0]) * 2**64 + int(state_vec[1]) + inc = int(state_vec[2]) * 2**64 + int(state_vec[3]) + return {'bit_generator': self.__class__.__name__, + 'state': {'state': state, 'inc': inc}, + 'has_uint32': has_uint32, + 'uinteger': uinteger} + + @state.setter + def state(self, value): + cdef np.ndarray state_vec + cdef int has_uint32 + cdef uint32_t uinteger + if not isinstance(value, dict): + raise TypeError('state must be a dict') + bitgen = value.get('bit_generator', '') + if bitgen != self.__class__.__name__: + raise ValueError('state must be for a {0} ' + 'RNG'.format(self.__class__.__name__)) + state_vec = np.empty(4, dtype=np.uint64) + state_vec[0] = value['state']['state'] // 2 ** 64 + state_vec[1] = value['state']['state'] % 2 ** 64 + state_vec[2] = value['state']['inc'] // 2 ** 64 + state_vec[3] = value['state']['inc'] % 2 ** 64 + has_uint32 = value['has_uint32'] + uinteger = value['uinteger'] + pcg64_set_state(&self.rng_state, + np.PyArray_DATA(state_vec), + has_uint32, uinteger) + + def advance(self, delta): + """ + advance(delta) + + Advance the underlying RNG as-if delta draws have occurred. + + Parameters + ---------- + delta : integer, positive + Number of draws to advance the RNG. Must be less than the + size state variable in the underlying RNG. + + Returns + ------- + self : PCG64 + RNG advanced delta steps + + Notes + ----- + Advancing a RNG updates the underlying RNG state as-if a given + number of calls to the underlying RNG have been made. In general + there is not a one-to-one relationship between the number output + random values from a particular distribution and the number of + draws from the core RNG. This occurs for two reasons: + + * The random values are simulated using a rejection-based method + and so, on average, more than one value from the underlying + RNG is required to generate an single draw. + * The number of bits required to generate a simulated value + differs from the number of bits generated by the underlying + RNG. For example, two 16-bit integer values can be simulated + from a single draw of a 32-bit RNG. + + Advancing the RNG state resets any pre-computed random numbers. + This is required to ensure exact reproducibility. 
+ """ + delta = wrap_int(delta, 128) + + cdef np.ndarray d = np.empty(2, dtype=np.uint64) + d[0] = delta // 2**64 + d[1] = delta % 2**64 + pcg64_cm_advance(&self.rng_state, np.PyArray_DATA(d)) + self._reset_state_variables() + return self + diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py index 71b01d6cd1e5..a32f64f4a3d3 100644 --- a/numpy/random/_pickle.py +++ b/numpy/random/_pickle.py @@ -1,6 +1,6 @@ from .mtrand import RandomState from ._philox import Philox -from ._pcg64 import PCG64 +from ._pcg64 import PCG64, PCG64DXSM from ._sfc64 import SFC64 from ._generator import Generator @@ -8,6 +8,7 @@ BitGenerators = {'MT19937': MT19937, 'PCG64': PCG64, + 'PCG64DXSM': PCG64DXSM, 'Philox': Philox, 'SFC64': SFC64, } diff --git a/numpy/random/src/pcg64/pcg64.c b/numpy/random/src/pcg64/pcg64.c index b15973aefafc..c623c809b02e 100644 --- a/numpy/random/src/pcg64/pcg64.c +++ b/numpy/random/src/pcg64/pcg64.c @@ -61,6 +61,10 @@ pcg_setseq_128_xsl_rr_64_boundedrand_r(pcg_state_setseq_128 *rng, uint64_t bound); extern inline void pcg_setseq_128_advance_r(pcg_state_setseq_128 *rng, pcg128_t delta); +extern inline uint64_t pcg_cm_random_r(pcg_state_setseq_128 *rng); +extern inline void pcg_cm_step_r(pcg_state_setseq_128 *rng); +extern inline uint64_t pcg_output_cm_128_64(pcg128_t state); +extern inline void pcg_cm_srandom_r(pcg_state_setseq_128 *rng, pcg128_t initstate, pcg128_t initseq); /* Multi-step advance functions (jump-ahead, jump-back) * @@ -117,6 +121,9 @@ pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta, pcg128_t cur_mult, extern inline uint64_t pcg64_next64(pcg64_state *state); extern inline uint32_t pcg64_next32(pcg64_state *state); +extern inline uint64_t pcg64_cm_next64(pcg64_state *state); +extern inline uint32_t pcg64_cm_next32(pcg64_state *state); + extern void pcg64_advance(pcg64_state *state, uint64_t *step) { pcg128_t delta; #ifndef PCG_EMULATED_128BIT_MATH @@ -128,6 +135,17 @@ extern void pcg64_advance(pcg64_state *state, uint64_t *step) { pcg64_advance_r(state->pcg_state, delta); } +extern void pcg64_cm_advance(pcg64_state *state, uint64_t *step) { + pcg128_t delta; +#ifndef PCG_EMULATED_128BIT_MATH + delta = (((pcg128_t)step[0]) << 64) | step[1]; +#else + delta.high = step[0]; + delta.low = step[1]; +#endif + pcg_cm_advance_r(state->pcg_state, delta); +} + extern void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc) { pcg128_t s, i; #ifndef PCG_EMULATED_128BIT_MATH diff --git a/numpy/random/src/pcg64/pcg64.h b/numpy/random/src/pcg64/pcg64.h index 31899cbc1469..ea8264b012df 100644 --- a/numpy/random/src/pcg64/pcg64.h +++ b/numpy/random/src/pcg64/pcg64.h @@ -104,6 +104,9 @@ typedef struct { , PCG_128BIT_CONSTANT(0x0000000000000001ULL, 0xda3e39cb94b95bdbULL) \ } +#define PCG_CHEAP_MULTIPLIER_128 (0xda942042e4dd58b5ULL) + + static inline uint64_t pcg_rotr_64(uint64_t value, unsigned int rot) { #ifdef _WIN32 return _rotr64(value, rot); @@ -209,6 +212,37 @@ static inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state) { state >> 122u); } +static inline void pcg_cm_step_r(pcg_state_setseq_128 *rng) { + rng-> state = rng->state * PCG_CHEAP_MULTIPLIER_128 + rng->inc; +} + +static inline uint64_t pcg_output_cm_128_64(pcg128_t state) { + uint64_t hi = state >> 64; + uint64_t lo = state; + + lo |= 1; + hi ^= hi >> 32; + hi *= 0xda942042e4dd58b5ULL; + hi ^= hi >> 48; + hi *= lo; + return hi; +} + +static inline void pcg_cm_srandom_r(pcg_state_setseq_128 *rng, pcg128_t initstate, pcg128_t initseq) { + rng->state = 0U; + rng->inc = (initseq << 1u) | 
1u; + pcg_cm_step_r(rng); + rng->state += initstate; + pcg_cm_step_r(rng); +} + +static inline uint64_t pcg_cm_random_r(pcg_state_setseq_128* rng) +{ + uint64_t ret = pcg_output_cm_128_64(rng->state); + pcg_cm_step_r(rng); + return ret; +} + static inline uint64_t pcg_setseq_128_xsl_rr_64_random_r(pcg_state_setseq_128* rng) { @@ -248,6 +282,11 @@ static inline void pcg_setseq_128_advance_r(pcg_state_setseq_128 *rng, PCG_DEFAULT_MULTIPLIER_128, rng->inc); } +static inline void pcg_cm_advance_r(pcg_state_setseq_128 *rng, pcg128_t delta) { + rng->state = pcg_advance_lcg_128(rng->state, delta, + PCG_CHEAP_MULTIPLIER_128, rng->inc); +} + typedef pcg_state_setseq_128 pcg64_random_t; #define pcg64_random_r pcg_setseq_128_xsl_rr_64_random_r #define pcg64_boundedrand_r pcg_setseq_128_xsl_rr_64_boundedrand_r @@ -281,7 +320,24 @@ static inline uint32_t pcg64_next32(pcg64_state *state) { return (uint32_t)(next & 0xffffffff); } +static inline uint64_t pcg64_cm_next64(pcg64_state *state) { + return pcg_cm_random_r(state->pcg_state); +} + +static inline uint32_t pcg64_cm_next32(pcg64_state *state) { + uint64_t next; + if (state->has_uint32) { + state->has_uint32 = 0; + return state->uinteger; + } + next = pcg_cm_random_r(state->pcg_state); + state->has_uint32 = 1; + state->uinteger = (uint32_t)(next >> 32); + return (uint32_t)(next & 0xffffffff); +} + void pcg64_advance(pcg64_state *state, uint64_t *step); +void pcg64_cm_advance(pcg64_state *state, uint64_t *step); void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc); diff --git a/numpy/random/tests/data/pcg64dxsm-testset-1.csv b/numpy/random/tests/data/pcg64dxsm-testset-1.csv new file mode 100644 index 000000000000..39cef057f449 --- /dev/null +++ b/numpy/random/tests/data/pcg64dxsm-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xdf1ddcf1e22521fe +1, 0xc71b2f9c706cf151 +2, 0x6922a8cc24ad96b2 +3, 0x82738c549beccc30 +4, 0x5e8415cdb1f17580 +5, 0x64c54ad0c09cb43 +6, 0x361a17a607dce278 +7, 0x4346f6afb7acad68 +8, 0x6e9f14d4f6398d6b +9, 0xf818d4343f8ed822 +10, 0x6327647daf508ed6 +11, 0xe1d1dbe5496a262a +12, 0xfc081e619076b2e0 +13, 0x37126563a956ab1 +14, 0x8bb46e155db16b9 +15, 0x56449f006c9f3fb4 +16, 0x34a9273550941803 +17, 0x5b4df62660f99462 +18, 0xb8665cad532e3018 +19, 0x72fc3e5f7f84216a +20, 0x71d3c47f6fd59939 +21, 0xfd4218afa1de463b +22, 0xc84054c78e0a9a71 +23, 0xae59034726be61a8 +24, 0xa6a5f21de983654d +25, 0x3b633acf572009da +26, 0x6a0884f347ab54c8 +27, 0x7a907ebe9adcab50 +28, 0xbe779be53d7b8d4a +29, 0xf5976e8c69b9dcd1 +30, 0x1d8302f114699e11 +31, 0x7d37e43042c038a0 +32, 0x2cc1d4edc2a40f35 +33, 0x83e3347bb2d581f1 +34, 0x253f8698651a844d +35, 0x4312dea0dd4e32f6 +36, 0x10f106439964ea3a +37, 0x810eb374844868cc +38, 0x366342a54b1978cc +39, 0x9fb39b13aaddfb5e +40, 0xdb91fd0d9482bed7 +41, 0x89f6ea4ca9c68204 +42, 0x146b31ccca461792 +43, 0x203fd9724deb2486 +44, 0x58a84f23748e25cb +45, 0x2f20eb6aeb94e88 +46, 0x14d3581460e473c +47, 0xad5bd0d25f37d047 +48, 0x1cf88fa16de258b2 +49, 0x3bcab6485b7a341 +50, 0xb2433b37f227d90c +51, 0x2cffd7e0a8360cc8 +52, 0x5d2eeff7c9ebc847 +53, 0x6fd7c7ae23f9f64b +54, 0x381650b2d00f175d +55, 0x9d93edcedc873cae +56, 0x56e369a033d4cb49 +57, 0x7547997116a3bac +58, 0x11debaa897fd4665 +59, 0xdf799d2b73bd6fb8 +60, 0x3747d299c66624d +61, 0xac9346701afd0cfa +62, 0xac90e150fa13c7bf +63, 0x85c56ad2248c2871 +64, 0xdea66bf35c45f195 +65, 0x59cf910ea079fb74 +66, 0x2f841bb782274586 +67, 0x9814df4384d92bd9 +68, 0x15bc70824be09925 +69, 0x16d4d0524c0503a3 +70, 0xf04ea249135c0cc7 +71, 0xa707ab509b7e3032 +72, 0x465459efa869e372 
+73, 0x64cbf70a783fab67 +74, 0x36b3541a14ca8ed7 +75, 0x9a4dfae8f4c596bf +76, 0x11d9a04224281be3 +77, 0xe09bbe6d5e98ec32 +78, 0xa6c60d908973aa0d +79, 0x7c524c57dd5915c8 +80, 0xa810c170b27f1fdc +81, 0xce5d409819621583 +82, 0xfe2ee3d5332a3525 +83, 0x162fb7c8b32045eb +84, 0x4a3327156b0b2d83 +85, 0x808d0282f971064 +86, 0x2e6f04cf5ed27e60 +87, 0xaf6800699cca67a9 +88, 0xc7590aae7244c3bf +89, 0x7824345f4713f5f9 +90, 0x8f713505f8fd059b +91, 0x3d5b5b9bb6b1e80e +92, 0x8674f45e5dc40d79 +93, 0xcb1e36846aa14773 +94, 0xe0ae45b2b9b778c1 +95, 0xd7254ce931eefcfb +96, 0xef34e15e4f55ac0a +97, 0xf17cc0ba15a99bc4 +98, 0x77bb0f7ffe7b31f1 +99, 0x6ee86438d2e71d38 +100, 0x584890f86829a455 +101, 0x7baf0d8d30ba70fe +102, 0xb1ac8f326b8403ae +103, 0xcc1963435c874ba7 +104, 0x9c483b953d1334ce +105, 0xc0924bcbf3e10941 +106, 0x21bcc581558717b1 +107, 0x2c5ad1623f8d292b +108, 0xa8ea110f6124557e +109, 0x15f24a6c5c4c591 +110, 0x40fe0d9cd7629126 +111, 0xcfe8f2b3b081484d +112, 0x891383f4b4cac284 +113, 0x76f2fcdef7fa845 +114, 0x4edd12133aed0584 +115, 0xd53c06d12308873d +116, 0xf7f22882c17f86bf +117, 0xfbaa4aad72f35e10 +118, 0x627610da2e3c0cc3 +119, 0x582b16a143634d9a +120, 0x9b4a7f69ed38f4a0 +121, 0x2df694974d1e1cbe +122, 0xe5be6eaafed5d4b +123, 0xc48e2a288ad6605e +124, 0xbcb088149ce27c2b +125, 0x3cb6a7fb06ceecbe +126, 0x516735fff3b9e3ac +127, 0x5cbafc551ee5008d +128, 0xee27d1ab855c5fd5 +129, 0xc99fb341f6baf846 +130, 0x7ad8891b92058e6d +131, 0xf50310d03c1ac6c7 +132, 0x947e281d998cbd3e +133, 0x1d4d94a93824fe80 +134, 0x5568b77289e7ee73 +135, 0x7d82d1b2b41e3c8b +136, 0x1af462c7abc787b +137, 0xcfd8dfe80bfae1ef +138, 0xd314caeb723a63ea +139, 0x1c63ddcfc1145429 +140, 0x3801b7cc6cbf2437 +141, 0xc327d5b9fdafddd3 +142, 0xe140278430ca3c78 +143, 0x4d0345a685cb6ef8 +144, 0x47640dc86e261ff9 +145, 0xab817f158523ebf4 +146, 0x37c51e35fbe65a6b +147, 0xab090f475d30a178 +148, 0x4d3ec225bf599fc1 +149, 0xefd517b0041679b1 +150, 0x20ad50bca4da32c5 +151, 0x75e1f7cd07fad86d +152, 0x348cf781ee655f4b +153, 0x9375f0e5ffc2d2ec +154, 0x7689082fd5f7279c +155, 0x633e56f763561e77 +156, 0x9d1752d70861f9fd +157, 0xa3c994b4e70b0b0f +158, 0xabf7276a58701b88 +159, 0xbfa18d1a0540d000 +160, 0xc6a28a2475646d26 +161, 0x7cdf108583f65085 +162, 0x82dcefb9f32104be +163, 0xc6baadd0adc6b446 +164, 0x7a63cff01075b1b4 +165, 0x67ac62e575c89919 +166, 0x96fa4320a0942035 +167, 0xc4658859385b325f +168, 0xde22c17ff47808f6 +169, 0xbb952c4d89e2f2ec +170, 0x638251fbc55bdc37 +171, 0x38918b307a03b3ea +172, 0xccb60f2cedbb570b +173, 0x3c06f4086a28f012 +174, 0x4e8d238388986e33 +175, 0x1760b7793514a143 +176, 0xa3f924efe49ee7d6 +177, 0xaf6be2dbaebc0bdf +178, 0x6782682090dffe09 +179, 0xb63a4d90d848e8ef +180, 0x5f649c7eaf4c54c5 +181, 0xbe57582426a085ba +182, 0xb5dd825aa52fb76d +183, 0x74cb4e6ca4039617 +184, 0x382e578bf0a49588 +185, 0xc043e8ea6e1dcdae +186, 0xf902addd5c04fa7c +187, 0xf3337994612528db +188, 0x4e8fd48d6d15b4e6 +189, 0x7190a509927c07ab +190, 0x864c2dee5b7108ae +191, 0xbb9972ddc196f467 +192, 0x1ea02ab3ca10a448 +193, 0xe50a8ffde35ddef9 +194, 0x7bd2f59a67183541 +195, 0x5a940b30d8fcd27a +196, 0x82b4cea62623d4d3 +197, 0x6fbda76d4afef445 +198, 0x8b1f6880f418328e +199, 0x8b69a025c72c54b7 +200, 0xb71e0f3986a3835f +201, 0xa4a7ddb8b9816825 +202, 0x945dcda28228b1d8 +203, 0xb471abf2f8044d72 +204, 0xf07d4af64742b1ba +205, 0xfca5190bc4dd6a2a +206, 0xd681497262e11bc5 +207, 0xbe95d5f00c577028 +208, 0x56313439fd8bde19 +209, 0x3f3d9ac9b5ee6522 +210, 0x7b8d457dd2b49bbe +211, 0xe76b5747885d214b +212, 0xa8a695b3deb493ea +213, 0x5292446548c95d71 +214, 0xbf5cdf0d436412df +215, 0x7936abaed779d28d +216, 
0x659c6e8073b3a06d +217, 0x86c9ff28f5543b71 +218, 0x6faa748445a99146 +219, 0xdcc1e6ab57904fd7 +220, 0x770bd61233addc5f +221, 0x16963e041e46d94f +222, 0x158e6cb2934157ac +223, 0xb65088a8fd246441 +224, 0x2b12ced6ce8a68c3 +225, 0x59a18d02cd6082b3 +226, 0x4ddbc318cb5488ee +227, 0x3d4cf520b3ed20a1 +228, 0x7028b3a92e2b292d +229, 0xf141da264a250e4d +230, 0x9788d53e86041c37 +231, 0x1bb91238a7c97dbf +232, 0x81953d0ddb634309 +233, 0xfa39ccfe14d2d46 +234, 0xf7c7861c9b7e8399 +235, 0x18d27ca50d9dc249 +236, 0x258dfdf38510d0d9 +237, 0x9e72d8af910ea76f +238, 0x4f8ef24b96de50ad +239, 0xb9d9c12297e03dc9 +240, 0x91994e41b4a1929c +241, 0x8defa79b2ccc83b9 +242, 0x948566748706dac5 +243, 0x7b0454946e70e4cf +244, 0x340b7cb298c70ed7 +245, 0x6602005330cebd95 +246, 0xf71cb803aa61f722 +247, 0x4683fb07fc70ae8a +248, 0xc6db9f0c4de3ed88 +249, 0x3e8dfae2a593cef9 +250, 0x615f7c38e3862b33 +251, 0x676c7996550d857 +252, 0xc6d520d54a5c266a +253, 0x202b1e8eef14aa2e +254, 0xa3a84891a27a582 +255, 0x84dbee451658d47f +256, 0x254c7cd97e777e3a +257, 0xf50b6e977f0eba50 +258, 0x2898b1d3062a4798 +259, 0x4096f7cbbb019773 +260, 0x9fb8e75548062c50 +261, 0x4647071e5ca318ec +262, 0x2b4750bdb3b3b01 +263, 0x88ac41cc69a39786 +264, 0x705e25476ef46fa3 +265, 0xc0c1db19884a48a6 +266, 0x1364c0afdbb465e5 +267, 0x58e98534701272a6 +268, 0x746a5ea9701517c0 +269, 0x523a70bc6b300b67 +270, 0x9b1c098eda8564ad +271, 0xfbaeb28d3637067f +272, 0xddd9a13551fdba65 +273, 0x56461a670559e832 +274, 0xab4fd79be85570ad +275, 0xd4b691ecaff8ca55 +276, 0x11a4495939e7f004 +277, 0x40d069d19477eb47 +278, 0xe790783d285cd81e +279, 0xde8218b16d935bc7 +280, 0x2635e8c65cd4182d +281, 0xeae402623e3454 +282, 0x9f99c833184e0279 +283, 0x3d0f79a0d52d84e7 +284, 0xc1f8edb10c625b90 +285, 0x9b4546363d1f0489 +286, 0x98d86d0b1212a282 +287, 0x386b53863161200d +288, 0xbe1165c7fe48a135 +289, 0xb9658b04dbbfdc8c +290, 0xcea14eddfe84d71a +291, 0x55d03298be74abe7 +292, 0x5be3b50d961ffd7e +293, 0xc76b1045dc4b78e1 +294, 0x7830e3ff3f6c3d4c +295, 0xb617adb36ca3729 +296, 0x4a51bdb194f14aa9 +297, 0x246024e54e6b682a +298, 0x33d42fc9c6d33083 +299, 0xadccba149f31e1d +300, 0x5183e66b9002f8b +301, 0x70eb2416404d51b7 +302, 0x26c25eb225535351 +303, 0xbc2d5b0d23076561 +304, 0x5823019ddead1da +305, 0x85cfa109fca69f62 +306, 0x26017933e7e1efd9 +307, 0x3ec7be9a32212753 +308, 0x697e8a0697cd6f60 +309, 0x44735f6cca03920f +310, 0x8cc655eb94ee212e +311, 0x8b8b74eba84929a0 +312, 0x7708ccedd0c98c80 +313, 0x1b6f21f19777cbe1 +314, 0x363e564bd5fadedb +315, 0x5921543a641591fe +316, 0xc390786d68ea8a1b +317, 0x9b293138dc033fca +318, 0x45447ca8dc843345 +319, 0xee6ef6755bc49c5e +320, 0x70a3a1f5163c3be5 +321, 0xf05e25448b6343b0 +322, 0x4739f4f8717b7e69 +323, 0xb006141975bf957 +324, 0x31874a91b707f452 +325, 0x3a07f2c90bae2869 +326, 0xb73dae5499a55c5e +327, 0x489070893bb51575 +328, 0x7129acf423940575 +329, 0x38c41f4b90130972 +330, 0xc5260ca65f5a84a1 +331, 0x6e76194f39563932 +332, 0x62ca1f9ca3de3ca6 +333, 0xb4a97874e640853f +334, 0x38ed0f71e311cc02 +335, 0xde183b81099e8f47 +336, 0x9bb8bf8e6694346 +337, 0xd15497b6bf81e0f2 +338, 0xaaae52536c00111 +339, 0x4e4e60d1435aaafd +340, 0x5a15512e5d6ea721 +341, 0xff0f1ffabfc6664f +342, 0xba3ffcedc5f97fec +343, 0xef87f391c0c6bfb6 +344, 0x4a888c5d31eb0f98 +345, 0x559a3fbfd7946e95 +346, 0xe45b44a0db5a9bad +347, 0x9457898964190af1 +348, 0xd9357dfaab76cd9e +349, 0xa60e907178d965a1 +350, 0x76b2dc3032dc2f4a +351, 0x13549b9c2802120 +352, 0x8656b965a66a1800 +353, 0x16802e6e22456a23 +354, 0x23b62edc60efaa9 +355, 0x6832a366e1e4ea3b +356, 0x46b1b41093ff2b1e +357, 0x55c857128143f219 +358, 0x7fc35ddf5e138200 
+359, 0x790abe78be67467e +360, 0xa4446fc08babd466 +361, 0xc23d70327999b855 +362, 0x2e019d1597148196 +363, 0xfefd98e560403ab8 +364, 0xbe5f0a33da330d58 +365, 0x3078a4e9d43ca395 +366, 0x511bfedd6f12f2b3 +367, 0x8bc138e335be987c +368, 0x24640f803465716d +369, 0xf6530b04d0bd618f +370, 0x9b7833e5aa782716 +371, 0x778cd35aea5841b1 +372, 0xecea3c458cefbc60 +373, 0x5107ae83fc527f46 +374, 0x278ad83d44bd2d1a +375, 0x7014a382295aeb16 +376, 0xf326dd762048743f +377, 0x858633d56279e553 +378, 0x76408154085f01bc +379, 0x3e77d3364d02e746 +380, 0x2f26cea26cadd50b +381, 0x6d6846a4ecb84273 +382, 0x4847e96f2df5f76 +383, 0x5a8610f46e13ff61 +384, 0x4e7a7cac403e10dd +385, 0x754bdf2e20c7bc90 +386, 0x8bdd80e6c51bd0be +387, 0x61c655fae2b4bc52 +388, 0x60873ef48e3d2f03 +389, 0x9d7d8d3698a0b4a4 +390, 0xdf48e9c355cd5d4b +391, 0x69ecf03e20be99ac +392, 0xc1a0c5a339bd1815 +393, 0x2e3263a6a3adccb +394, 0x23557459719adbdc +395, 0xd1b709a3b330e5a +396, 0xade5ab00a5d88b9d +397, 0x69a6bd644120cfad +398, 0x40187ecceee92342 +399, 0x1c41964ba1ac78da +400, 0x9ac5c51cbecabe67 +401, 0xbdc075781cf36d55 +402, 0xeaf5a32246ded56 +403, 0xcda0b67e39c0fb71 +404, 0x4839ee456ef7cc95 +405, 0xf17092fdd41d5658 +406, 0x2b5d422e60ae3253 +407, 0x3effe71102008551 +408, 0x20a47108e83934b7 +409, 0xd02da65fe768a88f +410, 0xeb046bd56afa4026 +411, 0x70c0509c08e0fbe0 +412, 0x1d35c38d4f8bac6c +413, 0x9aa8eb6466f392e0 +414, 0x587bd4a430740f30 +415, 0x82978fe4bad4195 +416, 0xdc4ebc4c0feb50ab +417, 0xd3b7164d0240c06f +418, 0x6e2ad6e5a5003a63 +419, 0xa24b430e2ee6b59c +420, 0x2905f49fd5073094 +421, 0x5f209e4de03aa941 +422, 0x57b7da3e0bedb1dc +423, 0x5e054018875b01f5 +424, 0xb2f2da6145658db3 +425, 0xbd9c94a69a8eb651 +426, 0x9c5f9a07cd6ac749 +427, 0x2296c4af4d529c38 +428, 0x522ed800fafdefab +429, 0xe2a447ced0c66791 +430, 0x937f10d45e455fef +431, 0xc882987d9e29a24 +432, 0x4610bfd6a247ee1a +433, 0x562ba3e50870059 +434, 0x59d8d58793602189 +435, 0xfe9a606e3e34abe +436, 0x6825f7932a5e9282 +437, 0xe77f7061bab476ad +438, 0xbf42001da340ace3 +439, 0x9c3e9230f5e47960 +440, 0x2c0f700d96d5ad58 +441, 0x330048b7cd18f1f9 +442, 0xffc08785eca5cca9 +443, 0xb5879046915f07a5 +444, 0xef51fe26f83c988e +445, 0xfa4c2968e7881a9a +446, 0xc0a9744455a4aad +447, 0xbd2ad686d6313928 +448, 0x6b9f0984c127682a +449, 0xc9aaa00a5da59ed8 +450, 0x762a0c4b98980dbf +451, 0x52d1a2393d3ca2d1 +452, 0x1e9308f2861db15c +453, 0xe7b3c74fe4b4a844 +454, 0x485e15704a7fc594 +455, 0x9e7f67ea44c221f6 +456, 0xbab9ad47fde916e0 +457, 0x50e383912b7fc1f4 +458, 0xaad63db8abcef62d +459, 0xc2f0c5699f47f013 +460, 0xee15b36ada826812 +461, 0x2a1b1cf1e1777142 +462, 0x8adb03ede79e937d +463, 0xf14105ef65643bf3 +464, 0x752bbaefc374a3c7 +465, 0xa4980a08a5a21d23 +466, 0x418a1c05194b2db7 +467, 0xdd6ff32efe1c3cd6 +468, 0x272473ed1f0d3aa2 +469, 0x1e7fdebadabe6c06 +470, 0xd1baa90c17b3842f +471, 0xd3d3a778e9c8404a +472, 0x781ae7fda49fa1a0 +473, 0x61c44fdbdacc672d +474, 0x6d447d0a1404f257 +475, 0x9303e8bdfbfb894d +476, 0x3b3482cdec016244 +477, 0xb149bf245d062e7b +478, 0x96f8d54b14cf992d +479, 0x4741549a01f8c3d0 +480, 0x48270811b2992af +481, 0x7b58f175cd25d147 +482, 0x8f19a840b56f4be9 +483, 0x84a77f43c0951a93 +484, 0x34e1a69381f0c374 +485, 0xb158383c9b4040f +486, 0x372f1abc7cf3a9fa +487, 0x5439819a84571763 +488, 0xabf8515e9084e2fa +489, 0xb02312b9387ff99 +490, 0x238a85bb47a68b12 +491, 0x2068cb83857c49bb +492, 0xc6170e743083664c +493, 0x745cf8470bcb8467 +494, 0xe3a759a301670300 +495, 0x292c7686ad3e67da +496, 0x359efedaff192a45 +497, 0x511f2c31a2d8c475 +498, 0x97fd041bf21c20b3 +499, 0x25ef1fe841b7b3f6 +500, 0xbb71739e656f262d +501, 
0x2729b0e989b6b7b8 +502, 0xd2142702ec7dbabf +503, 0x7008decd2488ee3f +504, 0x69daa95e303298d7 +505, 0xc35eca4efb8baa5a +506, 0xf3f16d261cec3b6c +507, 0x22371c1d75396bd3 +508, 0x7aefa08eccae857e +509, 0x255b493c5e3c2a2f +510, 0x779474a077d34241 +511, 0x5199c42686bea241 +512, 0x16c83931e293b8d3 +513, 0xa57fe8db8c0302c7 +514, 0xd7ace619e5312eb1 +515, 0x8740f013306d217c +516, 0xb6a1ad5e29f4d453 +517, 0x31abf7c964688597 +518, 0xbc3d791daed71e7 +519, 0x31ee4ca67b7056ed +520, 0x1ab5416bfe290ea3 +521, 0x93db416f6d3b843a +522, 0xed83bbe5b1dd2fed +523, 0xece38271470d9b6d +524, 0x3a620f42663cd8ae +525, 0x50c87e02acafee5d +526, 0xcabeb8bedbc6dab5 +527, 0x2880a6d09970c729 +528, 0x4aba5dd3bfc81bc +529, 0xaba54edf41080cec +530, 0xb86bb916fc85a169 +531, 0x4c41de87bc79d8ca +532, 0xcce2a202622945fe +533, 0x513f086fad94c107 +534, 0x18b3960c11f8cc96 +535, 0x2f0d1cfd1896e236 +536, 0x1702ae3880d79b15 +537, 0x88923749029ae81 +538, 0x84810d4bdec668eb +539, 0xf85b0a123f4fc68d +540, 0x93efd68974b6e4d1 +541, 0x5d16d6d993a071c9 +542, 0x94436858f94ca43b +543, 0xb3dbb9ed0cb180b6 +544, 0x6447030a010b8c99 +545, 0xd7224897c62925d8 +546, 0xb0c13c1d50605d3a +547, 0xdff02c7cb9d45f30 +548, 0xe8103179f983570d +549, 0xbc552037d6d0a24e +550, 0x775e500b01486b0d +551, 0x2050ac632c694dd6 +552, 0x218910387c4d7ae7 +553, 0xf83e8b68ff885d5d +554, 0xe3374ec25fca51a3 +555, 0xfa750ffa3a60f3af +556, 0x29ee40ba6df5592e +557, 0x70e21a68f48260d2 +558, 0x3805ca72cd40886e +559, 0x2f23e73f8eabf062 +560, 0x2296f80cdf6531ae +561, 0x903099ed968db43a +562, 0xf044445cf9f2929f +563, 0xcd47fdc2de1b7a1 +564, 0xaab1cbd4f849da99 +565, 0x5fc990688da01acb +566, 0xa9cee52ea7dab392 +567, 0xecefc3a4349283a8 +568, 0xdd6b572972e3fafc +569, 0xc1f0b1a2ffb155da +570, 0xc30d53fc17bd25c8 +571, 0x8afa89c77834db28 +572, 0x5569a596fb32896c +573, 0x36f207fc8df3e3d4 +574, 0x57c2bd58517d81db +575, 0xb524693e73d0061c +576, 0xb69f6eb233f5c48b +577, 0x4f0fb23cab8dc695 +578, 0x492c1ad0a48df8df +579, 0xf6dcc348ec8dec1f +580, 0xa4d8708d6eb2e262 +581, 0x4c2072c2c9766ff1 +582, 0xa9bf27c4304875f0 +583, 0xfc8fb8066d4f9ae2 +584, 0x188095f6235fec3c +585, 0x1d8227a2938c2864 +586, 0x89ea50c599010378 +587, 0xcac86df0a7c6d56d +588, 0x47a8c5df84c7d78 +589, 0xe607ae24ea228bfa +590, 0x36624a7996efe104 +591, 0x5d72881c1227d810 +592, 0x78694a6750374c8 +593, 0x7b9a217d4ab5ff45 +594, 0xd53e5d6f7504becc +595, 0x197a72d3f4889a0e +596, 0xfdc70c4755a8df36 +597, 0xd0fda83748c77f74 +598, 0x7ddc919ac9d6dcc9 +599, 0x785c810a6a2dc08b +600, 0xba4be83e7e36896c +601, 0x379d6fe80cf2bffe +602, 0x74cae2dabc429206 +603, 0x1efac32d5d34c917 +604, 0x3cb64e2f98d36e70 +605, 0xc0a7c3cdc3c60aa7 +606, 0x699dfadd38790ebe +607, 0x4861e61b3ecfbeac +608, 0x531744826c345baa +609, 0x5ec26427ad450cba +610, 0xf2c1741479abdcae +611, 0xe9328a78b2595458 +612, 0x30cd1bdf087acd7f +613, 0x7491ced4e009adbe +614, 0xdcd942df1e2e7023 +615, 0xfe63f01689fee35 +616, 0x80282dfe5eaedc42 +617, 0x6ecdea86495f8427 +618, 0xe0adfdd5e9ed31c3 +619, 0xf32bd2a7418127e +620, 0x8aabba078db6ee2 +621, 0xa8a8e60499145aca +622, 0xf76b086ac4e8a0f2 +623, 0x6e55b3c452ff27f8 +624, 0xe18fa7cd025a71bf +625, 0xeed7b685fde0fa25 +626, 0xba9b6c95867fa721 +627, 0x4c2603bc69de2df2 +628, 0xaac87eee1b58cd66 +629, 0x3c9af6656e01282c +630, 0x2dfa05ce8ff476b6 +631, 0xeae9143fcf92f23d +632, 0x3f0699f631be3bc8 +633, 0xa0f5f79f2492bd67 +634, 0x59c47722388131ed +635, 0x5f6e9d2941cef1de +636, 0xe9ad915c09788b7b +637, 0x92c6d37e4f9482f5 +638, 0x57d301b7fdadd911 +639, 0x7e952d23d2a8443 +640, 0xbb2fa5e0704b3871 +641, 0xe5642199be36e2d5 +642, 0x5020b60d54358291 +643, 
0xa0b6317ec3f60343 +644, 0xb57b08b99540bc5c +645, 0x21f1890adc997a88 +646, 0xfcf824200dd9da2d +647, 0x8146293d83d425d1 +648, 0xdadfbf5fbb99d420 +649, 0x1eb9bbc5e6482b7d +650, 0xd40ff44f1bbd0f1c +651, 0xa9f948ba2d08afa5 +652, 0x638cc07c5301e601 +653, 0x1f984baa606e14e8 +654, 0x44e153671081f398 +655, 0xb17882eeb1d77a5d +656, 0x5fd8dbee995f14c +657, 0xff3533e87f81b7fe +658, 0x2f44124293c49795 +659, 0x3bf6b51e9360248 +660, 0x72d615edf1436371 +661, 0x8fc5cf4a38adab9d +662, 0xfa517e9022078374 +663, 0xf356733f3e26f4d8 +664, 0x20ea099cdc6aad40 +665, 0xe15b977deb37637d +666, 0xcc85601b89dae88d +667, 0x5768c62f8dd4905c +668, 0xa43cc632b4e56ea +669, 0xc4240cf980e82458 +670, 0xb194e8ffb4b3eeb6 +671, 0xee753cf2219c5fa1 +672, 0xfe2500192181d44d +673, 0x2d03d7d6493dd821 +674, 0xff0e787bb98e7f9b +675, 0xa05cf8d3bd810ce7 +676, 0x718d5d6dcbbdcd65 +677, 0x8d0b5343a06931c +678, 0xae3a00a932e7eaf9 +679, 0x7ed3d8f18f983e18 +680, 0x3bb778ee466dc143 +681, 0x711c685c4e9062c0 +682, 0x104c3af5d7ac9834 +683, 0x17bdbb671fb5d5cf +684, 0xabf26caead4d2292 +685, 0xa45f02866467c005 +686, 0xf3769a32dc945d2d +687, 0xe78d0007f6aabb66 +688, 0x34b60be4acbd8d4b +689, 0x58c0b04b69359084 +690, 0x3a8bb354c212b1 +691, 0x6b82a8f3d70058d5 +692, 0x405bdef80a276a4a +693, 0xe20ca40ee9195cad +694, 0xf5dd96ba2446fefd +695, 0xc1e180c55fe55e3c +696, 0xa329caf6daa952b3 +697, 0xb4809dd0c84a6b0a +698, 0xd27f82661070cee7 +699, 0xa7121f15ee2b0d8a +700, 0x4bdaea70d6b34583 +701, 0xe821dc2f310f7a49 +702, 0x4c00a5a68e76f647 +703, 0x331065b064a2d5ea +704, 0xac0c2ce3dc04fa37 +705, 0x56b32b37b8229008 +706, 0xe757cdb51534fcfa +707, 0xd3ff183576b2fad7 +708, 0x179e1f4190f197a7 +709, 0xf874c626a7c9aae5 +710, 0xd58514ffc37c80e4 +711, 0xc65de31d33fa7fd3 +712, 0x6f6637052025769b +713, 0xca1c6bdadb519cc0 +714, 0xd1f3534cde37828a +715, 0xc858c339eee4830a +716, 0x2371eacc215e02f4 +717, 0x84e5022db85bbbe9 +718, 0x5f71c50bba48610e +719, 0xe420192dad9c323f +720, 0x2889342721fca003 +721, 0x83e64f63334f501d +722, 0xac2617172953f2c +723, 0xfa1f78d8433938ff +724, 0x5578382760051462 +725, 0x375d7a2e3b90af16 +726, 0xb93ff44e6c07552d +727, 0xded1d5ad811e818c +728, 0x7cf256b3b29e3a8c +729, 0x78d581b8e7bf95e8 +730, 0x5b69192f2caa6ad3 +731, 0xa9e25855a52de3ce +732, 0x69d8e8fc45cc188d +733, 0x5dd012c139ad347d +734, 0xfcb01c07b77db606 +735, 0x56253e36ab3d1cce +736, 0x1181edbb3ea2192 +737, 0x325bef47ff19a08d +738, 0xd3e231ceb27e5f7 +739, 0x8e819dd2de7956d2 +740, 0x34a9689fe6f84a51 +741, 0x3e4eeb719a9c2927 +742, 0x5c3b3440581d0aaf +743, 0x57caf51897d7c920 +744, 0xec6a458130464b40 +745, 0xe98f044e0da40e9b +746, 0xbe38662020eeb8e7 +747, 0x7b8c407c632724ae +748, 0x16c7cfa97b33a544 +749, 0xd23359e2e978ae5a +750, 0x4fdba458250933dd +751, 0x3c9e0713cfe616ba +752, 0x6f0df87b13163b42 +753, 0xc460902cb852cc97 +754, 0x289df8fefd6b0bce +755, 0x4ac2a2a1c3fb8029 +756, 0x2fc3e24d8b68eef7 +757, 0x34564386a59aab9a +758, 0x31047391ebd67ce4 +759, 0x6c23d070a0564d41 +760, 0xba6387b2b72545f7 +761, 0xcdcf1008058387af +762, 0xc9308fa98db05192 +763, 0xdbdbb5abd01a9d84 +764, 0x937088275c7804ab +765, 0x6f6accfefe34ee81 +766, 0x5c33c74c49cfdb2c +767, 0x5e1a771edfb92bd3 +768, 0x6e89b009069ecae7 +769, 0x34d64e17ec0e8968 +770, 0x841203d0cde0c330 +771, 0x7642cc9d7eb9e9cb +772, 0xca01d2e8c128b97e +773, 0x5b8390617b3304ab +774, 0x52ec4ed10de1eb2d +775, 0xb90f288b9616f237 +776, 0x5bd43cd49617b2e2 +777, 0x1a53e21d25230596 +778, 0x36ccd15207a21cd6 +779, 0xc8263d780618fd3c +780, 0x6eb520598c6ce1cb +781, 0x493c99a3b341564f +782, 0xab999e9c5aa8764f +783, 0xab2fa4ceaba84b +784, 0xbbd2f17e5cb2331b +785, 
0xc8b4d377c0cc4e81 +786, 0x31f71a6e165c4b1e +787, 0xd1011e55fb3addaa +788, 0x5f7ec34728dfa59 +789, 0x2aef59e60a84eb0f +790, 0x5dde6f09aec9ad5f +791, 0x968c6cdbc0ef0438 +792, 0x1957133afa15b13a +793, 0xbaf28f27573a64c2 +794, 0xc6f6ddd543ebf862 +795, 0xdd7534315ec9ae1e +796, 0xd2b80cd2758dd3b +797, 0xa38c3da00cc81538 +798, 0x15c95b82d3f9b0f9 +799, 0x6704930287ce2571 +800, 0x9c40cc2f6f4ecb0c +801, 0xc8de91f50b22e94e +802, 0x39272e8fddbfdf0a +803, 0x879e0aa810a117d +804, 0xa312fff4e9e5f3bd +805, 0x10dd747f2835dfec +806, 0xeb8466db7171cdae +807, 0xaa808d87b9ad040a +808, 0xab4d2229a329243a +809, 0x7c622f70d46f789c +810, 0x5d41cef5965b2a8e +811, 0xce97ec4702410d99 +812, 0x5beba2812c91211b +813, 0xf134b46c93a3fec7 +814, 0x76401d5630127226 +815, 0xc55fc9d9eacd4ec1 +816, 0xaec8cefaa12f813f +817, 0x2f845dcfd7b00722 +818, 0x3380ab4c20885921 +819, 0xdb68ad2597691b74 +820, 0x8a7e4951455f563f +821, 0x2372d007ed761c53 +822, 0xcab691907714c4f1 +823, 0x16bc31d6f3abec1a +824, 0x7dff639fbcf1824 +825, 0x6666985fbcff543d +826, 0xb618948e3d8e6d0c +827, 0x77b87837c794e068 +828, 0xcd48288d54fcb5a8 +829, 0x47a773ed6ae30dc3 +830, 0xba85ae44e203c942 +831, 0xa7a7b21791a25b2d +832, 0x4029dd92e63f19e0 +833, 0xc2ad66ab85e7d5aa +834, 0xa0f237c96fdab0db +835, 0xffefb0ab1ca18ed +836, 0x90cb4500785fd7d5 +837, 0xa7dd3120f4876435 +838, 0x53f7872624694300 +839, 0xea111326ff0040d9 +840, 0x5f83cb4cce40c83b +841, 0x918e04936c3b504d +842, 0x87a8db4c0e15e87c +843, 0x7cff39da6a0dedd0 +844, 0x36f7de2037f85381 +845, 0xd1d8d94022a1e9a7 +846, 0x2c9930127dc33ec9 +847, 0x6cb4719dcd0101c6 +848, 0xc01868cde76935f7 +849, 0x6b86f2ec1ab50143 +850, 0x68af607d8d94ae61 +851, 0xe216c5b95feedf34 +852, 0x4b866bd91efe2e4b +853, 0x4bff79df08f92c99 +854, 0x6ff664ea806acfd1 +855, 0x7fce0b3f9ece39bc +856, 0x29bc90b59cb3db97 +857, 0x833c4b419198607d +858, 0xf3573e36ca4d4768 +859, 0x50d71c0a3c2a3fa8 +860, 0xd754591aea2017e7 +861, 0x3f9126f1ee1ebf3 +862, 0xe775d7f4b1e43de8 +863, 0xe93d51628c263060 +864, 0x83e77f6fb32d6d82 +865, 0x43dd7eef823408e4 +866, 0x1c843c2c90180662 +867, 0xe924dafb9a16066b +868, 0x6af3ee96e7b7fbd9 +869, 0x94d5c4f37befcd1f +870, 0x40ffb04bedef4236 +871, 0x71c17bbc20e553e +872, 0x101f7a0a6208729f +873, 0x5ca34570cf923548 +874, 0x8e3139db2e96e814 +875, 0x3ab96d96263d048d +876, 0x97f3c0bbc6755c3c +877, 0x31fc72daedaef3dc +878, 0x71f8d7855d10789b +879, 0xce6dc97b4662333b +880, 0xfddc2aabd342bc61 +881, 0xefbd4007ff8c7d2e +882, 0xf72cd6c689ef8758 +883, 0x932c8b0c0e755137 +884, 0x94cc4dedd58ff69 +885, 0xde4dfd6890535979 +886, 0xdb00dcd2dcb4a50a +887, 0xb0466240b4548107 +888, 0x9cb9264c7b90d1a3 +889, 0x357e378e9be5766b +890, 0x6e0316ef03367bbf +891, 0x201ea18839544ca +892, 0x803ff3406be5f338 +893, 0xf9d5e82fd4144bb2 +894, 0x1b6b88ca701e9f47 +895, 0xd1fe5ab8e1f89cc0 +896, 0x14171fe176c4bece +897, 0x887948bdef78beaa +898, 0x80449ddc3eb9b977 +899, 0x5f4e1f900fb4bcf3 +900, 0xbe30f8701909f8e2 +901, 0xd1f2a2fb5503306d +902, 0x6b1c77238dc23803 +903, 0x102156a6c9860f66 +904, 0x4cd446e099edf4c1 +905, 0xc79ac6cbc911f33b +906, 0x3ee096ffe3384f1c +907, 0xb58f83b18a306dc7 +908, 0x9f76582141de56b2 +909, 0x9ddfa85e02c13866 +910, 0x4d9a19d4ce90a543 +911, 0xbf81ab39fd17d376 +912, 0x5327e5054c6a74f1 +913, 0xd5062dd31db1a9b7 +914, 0x645853735527edc +915, 0x485393967f91af08 +916, 0xeff9667dcf77ca68 +917, 0xd012313f5fbec464 +918, 0xbeae35bdfae55144 +919, 0x302c41ebac8444a0 +920, 0x9ccdb6c2fe58fba8 +921, 0x567753af68ed23f8 +922, 0xff90f790e43efec3 +923, 0x970cc756fb799696 +924, 0xe59239d1c44915 +925, 0x4d2d189fb3941f05 +926, 0x96f23085db165a9c +927, 
0xa1202dec7a37b1a5 +928, 0xc0c1ee74bcd7dc1a +929, 0x9edcf2048b30333a +930, 0xd848588ba7e865fb +931, 0x8d9f0897317cab40 +932, 0x67b96f15e25924fb +933, 0xefc8d8536619ee42 +934, 0xf3f621d22bdde0c2 +935, 0x68610a0de862ae32 +936, 0xa22ca5142de24cbd +937, 0x8815452f4e6b4801 +938, 0x4e9c1b607b2750e5 +939, 0x19b3c09ba6fc9b25 +940, 0x9b2543c8836780ac +941, 0xe702b8f950e56431 +942, 0xb357cc329cac3917 +943, 0x387bf86a17a31e08 +944, 0x9940b983d331b163 +945, 0xf5d89d7fe9095e18 +946, 0x4362682329e5c4d1 +947, 0xd2132573f6ae7b42 +948, 0xc0a5849e23a61606 +949, 0xdadbddf47265bc02 +950, 0x1b96f00339a705f7 +951, 0x94e6642329288913 +952, 0x825ab3f10e6d330b +953, 0x1a1c31ac9d883ea0 +954, 0xb49076b7155c6f47 +955, 0x920cf3085dfe3ccb +956, 0x9743407c9f28e825 +957, 0x6ce8a28622402719 +958, 0xce2fe67e06baf8a6 +959, 0x3a16b34784ecf5e6 +960, 0x140467cc1d162a0c +961, 0x32d4772692ab625 +962, 0xa4f4b28562f43336 +963, 0x885b4335457bd84a +964, 0x499d3ed26c87ad8a +965, 0xc7328bcedb9a545e +966, 0xc6dd76a6cbf5d2b2 +967, 0xba9c22be404ee1aa +968, 0x70e6aee45f23521d +969, 0x61e03a798593c177 +970, 0x171671f809c68213 +971, 0x28d54872fc1d914c +972, 0x43c2fcd9bd098b53 +973, 0x172ad4c4a98b9d37 +974, 0x330860c9460f2516 +975, 0x49547f472df984f4 +976, 0x873b2436d3f0e114 +977, 0x6f99accf4ea050b6 +978, 0x5968ac874ed51613 +979, 0x4939d70d29a3c611 +980, 0x11f381ed28738d3d +981, 0xa97430d36ab3a869 +982, 0xe6fa880801129e22 +983, 0xf84decbd8f48c913 +984, 0x4425c0ed1e9a82a5 +985, 0x7a1f9485e9929d5a +986, 0xc7c51f155dfce1c6 +987, 0x9619a39501d74f2b +988, 0x7c7035955dbf4c1b +989, 0xc61ee569cf57c2c9 +990, 0x3eaf7c5b0df734e1 +991, 0xe71cb4064d1ede05 +992, 0x356e3cec80e418b2 +993, 0xca04306243a15be6 +994, 0x941cf3881fa18896 +995, 0x30dbb0e819d644e0 +996, 0xaae22c0bef02859a +997, 0x7bd30917bbaa8a94 +998, 0x2672547bc8d7d329 +999, 0x4955c92aaa231578 diff --git a/numpy/random/tests/data/pcg64dxsm-testset-2.csv b/numpy/random/tests/data/pcg64dxsm-testset-2.csv new file mode 100644 index 000000000000..878c5ea7c3a5 --- /dev/null +++ b/numpy/random/tests/data/pcg64dxsm-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0xd97e4a147f788a70 +1, 0x8dfa7bce56e3a253 +2, 0x13556ed9f53d3c10 +3, 0x55dbf1c241341e98 +4, 0xa2cd98f722eb0e0a +5, 0x83dfc407203ade8 +6, 0xeaa083df518f030d +7, 0x44968c87e432852b +8, 0x573107b9cb8d9ecc +9, 0x9eedd1da50b9daca +10, 0xb33a6735ca451e3c +11, 0x72830d2b39677262 +12, 0x9da8c512fd0207e8 +13, 0x1fc5c91954a2672b +14, 0xd33479437116e08 +15, 0x9ccdd9390cee46f3 +16, 0x1fd39bb01acd9e76 +17, 0xedc1869a42ff7fe5 +18, 0xbd68ca0b42a6e7e9 +19, 0x620b67df09621b1f +20, 0xfa11d51bd6950221 +21, 0xc8c45b36e7d28d08 +22, 0xe9c91272fbaad777 +23, 0x2dc87a143f220e90 +24, 0x6376a7c82361f49d +25, 0x552c5e434232fe75 +26, 0x468f7f872ac195bc +27, 0x32bed6858125cf89 +28, 0xe4f06111494d09d3 +29, 0xa5c166ffea248b80 +30, 0x4e26605b97064a3f +31, 0xceafd9f6fc5569d +32, 0xb772f2f9eed9e106 +33, 0x672c65e6a93534e2 +34, 0xcdc5e1a28d1bd6a0 +35, 0x1ed9c96daeebd3e3 +36, 0x4d189dcfc0c93c3f +37, 0x50df5a95c62f4b43 +38, 0xcccf4949fa65bbb8 +39, 0x19b8073d53cdc984 +40, 0x6fb40bba35483703 +41, 0xb02de4aef86b515a +42, 0x4d90c63655350310 +43, 0xea44e4089825b16c +44, 0x8d676958b1f9da2b +45, 0x6d313940917ae195 +46, 0x1b1d35a4c1dd19f4 +47, 0x117720f8397337ef +48, 0xcc073cf3ac11eeaa +49, 0x8331ec58a9ff8acb +50, 0xf3dc2a308b6b866f +51, 0x7eba1202663382b6 +52, 0x8269839debeb4e5a +53, 0x87fd3dc0f9181a8e +54, 0xabe62ddd3c925f03 +55, 0x7f56f146944fe8d4 +56, 0xc535972150852068 +57, 0x60b252d453bd3a68 +58, 0x4251f0134634490a +59, 0x338950da210dfeb2 +60, 0xcadfe932971c9471 +61, 
0xfb7049457fab470e +62, 0x9bfb8145a4459dff +63, 0x4a89dda3898f9d8a +64, 0x88cc560151483929 +65, 0x277dc820f4b6796e +66, 0x3524bd07ea0afb88 +67, 0x92eb6ffb2bf14311 +68, 0xf6559be0783f3fe9 +69, 0xf0844f9af54af00d +70, 0xdd5e0b59adcef8a +71, 0x4ff7e4f2ab18554c +72, 0x3fa22c8a02634587 +73, 0x1db8e1a9442fe300 +74, 0x40cf15953ad3d3e7 +75, 0x92af15fe1a9f6f0a +76, 0xab4a0e466fb0cfd +77, 0x944f1555a06cca82 +78, 0x10cf48412f1f6066 +79, 0x7f51f9a455f9e8e1 +80, 0x47ee93530f024c7e +81, 0x36cf2f0413e0f6f2 +82, 0xa315e23731969407 +83, 0xd8e2796327cf5f87 +84, 0xa86072696a555c34 +85, 0xee3f0b8804feaab7 +86, 0x41e80dc858f8360b +87, 0x31ec2e9b78f5b29 +88, 0xd397fb9b8561344c +89, 0x28081e724e649b74 +90, 0x5c135fc3fc672348 +91, 0x9a276ca70ce9caa0 +92, 0x9216da059229050a +93, 0xcf7d375ed68007b0 +94, 0xa68ad1963724a770 +95, 0xd4350de8d3b6787c +96, 0xee7d2c2cc275b6d2 +97, 0x71645ec738749735 +98, 0x45abdf8c68d33dbb +99, 0xe71cadb692c705ea +100, 0x60af6f061fd90622 +101, 0x1eabe2072632c99d +102, 0x947dda995a402cb6 +103, 0xbb19f49a3454f3b +104, 0xe6e43e907407758c +105, 0xfe2b67016bd6873a +106, 0x7fdb4dd8ab30a722 +107, 0x39d3265b0ff1a45b +108, 0xed24c0e4fce8d0c2 +109, 0xf6e074f86faf669d +110, 0x9142040df8dc2a79 +111, 0x9682ab16bc939a9c +112, 0x6a4e80c378d971c8 +113, 0x31309c2c7fc2d3d6 +114, 0xb7237ec682993339 +115, 0x6a30c06bb83dccd9 +116, 0x21c8e9b6d8e7c382 +117, 0x258a24ae6f086a19 +118, 0xb76edb5be7df5c35 +119, 0x3c11d7d5c16e7175 +120, 0xbdfc34c31eff66e1 +121, 0x8af66e44be8bf3a2 +122, 0x3053292e193dec28 +123, 0xd0cc44545b454995 +124, 0x408ac01a9289d56 +125, 0x4e02d34318ec2e85 +126, 0x9413ff3777c6eb6b +127, 0xa3a301f8e37eb3df +128, 0x14e6306bd8d8f9f9 +129, 0xd3ea06ce16c4a653 +130, 0x170abe5429122982 +131, 0x7f9e6fddc6cacb85 +132, 0xa41b93e10a10a4c8 +133, 0x239216f9d5b6d0b5 +134, 0x985fcb6cb4190d98 +135, 0xb45e3e7c68f480c6 +136, 0xc1b2fc2e0446211c +137, 0x4596adb28858c498 +138, 0x2dd706f3458ddc75 +139, 0x29c988c86f75464 +140, 0xac33a65aa679a60 +141, 0xa28fef762d39d938 +142, 0x541e6fa48647f53 +143, 0x27838d56b2649735 +144, 0x8e143d318a796212 +145, 0xaea6097745f586b8 +146, 0x636143330f8ee2e6 +147, 0xc2d05fd8b945b172 +148, 0x6e355f9eb4353055 +149, 0xeb64ca42e8bf282e +150, 0xe8202dfd9da0fe5 +151, 0x7305689c9d790cba +152, 0xf122f8b1bef32970 +153, 0x9562887e38c32ba5 +154, 0xf9cd9be121b738d +155, 0x6238e0c398307913 +156, 0x5f2e79bb07c30f47 +157, 0x8ce8e45c465006e +158, 0x39281fe1e99e2441 +159, 0xafb10c2ca2874fea +160, 0x6e52f91633f83cf +161, 0x8ff12c1ac73c4494 +162, 0xe48608a09365af59 +163, 0xefd9bbc7e76e6a33 +164, 0xbe16a39d5c38ec92 +165, 0x6a6ffbcaf5a2330f +166, 0xdd5d6ac7d998d43d +167, 0x207bf978226d4f11 +168, 0xf8eec56bd2a0f62e +169, 0xa5bccf05dce0d975 +170, 0x93cf3ec1afe457a6 +171, 0x38651466d201f736 +172, 0x3ad21473985c9184 +173, 0xc6407a3bd38c92a6 +174, 0xb1ec42c7afa90a25 +175, 0xbdeca984df8b7dd3 +176, 0xb6926b1d00aa6c55 +177, 0x86141d0022352d49 +178, 0x169316256135ee09 +179, 0xffb1c7767af02a5c +180, 0x502af38ad19f5c91 +181, 0xfbf6cbc080086658 +182, 0x33cf9b219edae501 +183, 0x46e69bebd77b8862 +184, 0xf11e0cc91125d041 +185, 0xb4cd1649f85e078f +186, 0xb49be408db4e952 +187, 0xb0b8db46140cce3c +188, 0xba647f2174012be7 +189, 0x4f0a09e406970ac9 +190, 0xf868c7aec9890a5c +191, 0xde4c8fa7498ea090 +192, 0x872ceb197978c1d4 +193, 0x1eb5cd9c3269b258 +194, 0x3ea189f91724f014 +195, 0x41379656f7746f2c +196, 0x7bd18493aca60e51 +197, 0x5380c23b0cbbf15e +198, 0x920b72835f88246b +199, 0x24d7f734a4548b8e +200, 0x9944edb57e5aa145 +201, 0x4628e136ebb8afe1 +202, 0xb4ee6a776356e2a7 +203, 0x481cbe9744ccf7d7 +204, 0x7e8d67e8b0b995d9 +205, 
0xeeacde100af7b47e +206, 0x103da08f2487dab7 +207, 0x6b9890a91d831459 +208, 0xd0c5beae37b572c7 +209, 0xfdccc371ee73fcc +210, 0x65438f0a367a2003 +211, 0x5d23b2c818a7e943 +212, 0x9a8ed45ac04b58b3 +213, 0xdaf3c3f1695dce10 +214, 0x5960eec706fa2bc0 +215, 0x98ca652facb80d40 +216, 0x72970ae5e2194143 +217, 0x18c6374d878c5c94 +218, 0x20fa51f997381900 +219, 0x3af253dba26d6e1d +220, 0x1b23d65db15c7f78 +221, 0x9f53ae976259b0e3 +222, 0x9a6addb28dc92d49 +223, 0x1e085c4accd0a7d7 +224, 0xe9d3f4cc9bad6ce5 +225, 0xe018fad78b5b1059 +226, 0x5ef7682232b4b95 +227, 0xb2242aa649f5de80 +228, 0x8f3e6d8dd99b9e4e +229, 0xb9be6cc22949d62a +230, 0xecbdc7beaa5ff1fe +231, 0xd388db43a855bdf0 +232, 0xd71ee3238852568d +233, 0x85ab3056304c04b5 +234, 0x2ed7ae7ad3cfc3cb +235, 0x781d1b03d40b6c48 +236, 0x7d3c740886657e6d +237, 0x982cfa6828daa6b0 +238, 0x278579599c529464 +239, 0x773adecfae9f0e08 +240, 0x63a243ea4b85c5d7 +241, 0x59940074fc3709e1 +242, 0xc914a2eed58a6363 +243, 0x2602b04274dd724c +244, 0xdf636eb7636c2c42 +245, 0x891a334d0d26c547 +246, 0xde8cd586d499e22d +247, 0x3ea1aa4d9b7035b6 +248, 0xd085cff6f9501523 +249, 0xe82a872f374959e +250, 0x55cb495bbd42cc53 +251, 0x5f42b3226e56ca97 +252, 0xea463f6f203493a3 +253, 0xeef3718e57731737 +254, 0x1bd4f9d62b7f9f3c +255, 0x19284f5e74817511 +256, 0xaf6e842c7450ca87 +257, 0x1d27d2b08a6b3600 +258, 0xfb4b912b396a52e3 +259, 0x30804d4c5c710121 +260, 0x4907e82564e36338 +261, 0x6441cf3b2900ddb7 +262, 0xd76de6f51988dc66 +263, 0x4f298ef96fd5e6d2 +264, 0x65432960c009f83d +265, 0x65ebed07e1d2e3df +266, 0xf83ee8078febca20 +267, 0x7bb18e9d74fc5b29 +268, 0x597b5fbc2261d91 +269, 0xea4f8ed0732b15b2 +270, 0xba2267f74f458268 +271, 0x3f304acabd746bbb +272, 0x7bd187af85659a82 +273, 0x88e20dbdb7a08ea3 +274, 0x2a2dc948c772fcb4 +275, 0x87784fec2993c867 +276, 0x89163933cd362d4e +277, 0xfd7b24f04302f957 +278, 0x9bdd544405dfb153 +279, 0xddee0fac58ffc611 +280, 0xa8e8993417e71ec1 +281, 0x55e0ab46ff7757af +282, 0x53e7645f08d3d7df +283, 0xbf78e563bc656ba2 +284, 0x1d162253b45ee2de +285, 0x15e2bfefedf29eb4 +286, 0x4e2a4584aa394702 +287, 0xa89fb12b01525897 +288, 0x825bd98f0544e4df +289, 0xfc6c50da6750700 +290, 0xc24aaabde7d28423 +291, 0x79d6f4660fcb19e5 +292, 0xee7d4fb40c8d659f +293, 0x70bc281b462e811d +294, 0x23ed4dc9636519a7 +295, 0xcb7c3f5a5711b935 +296, 0xe73090e0508c5d9d +297, 0xb25a331f375952a6 +298, 0xa64c86e0c04740f6 +299, 0xb8f3ffc8d56ac124 +300, 0x2479266fc5ee6b15 +301, 0x8d5792d27f5ffbcb +302, 0xb064298be946cd52 +303, 0xf0934a98912ffe26 +304, 0xbe805682c6634d98 +305, 0xe0e6e2c010012b4f +306, 0x58c47d475f75976 +307, 0x358c9a6e646b2b4a +308, 0x7e7c4ffca5b17ba7 +309, 0x43585c8c9a24a04c +310, 0x5154ddbcd68d5c2c +311, 0x4a2b062d3742a5e +312, 0xca5691191da2b946 +313, 0x696a542109457466 +314, 0x9eb5d658a5022ba5 +315, 0x8158cf6b599ab8dc +316, 0x1b95391eaa4af4a6 +317, 0x9953e79bd0fc3107 +318, 0x8639690086748123 +319, 0x2d35781c287c6842 +320, 0x393ef0001cd7bc8f +321, 0xe3a61be8c5f2c22a +322, 0x5e4ff21b847cc29b +323, 0x4c9c9389a370eb84 +324, 0xd43a25a8fc3635fa +325, 0xf6790e4a85385508 +326, 0x37edf0c81cb95e1d +327, 0x52db00d6e6e79af8 +328, 0x3b202bceeb7f096 +329, 0x2a164a1c776136bb +330, 0x73e03ee3fd80fd1b +331, 0xd2c58c0746b8d858 +332, 0x2ed2cb0038153d22 +333, 0x98996d0fc8ceeacc +334, 0xa4ed0589936b37f +335, 0x5f61cf41a6d2c172 +336, 0xa6d4afb538c110d7 +337, 0xe85834541baadf1a +338, 0x4c8967107fd49212 +339, 0x49bafb762ab1a8c1 +340, 0x45d540e2a834bf17 +341, 0x1c0ec8b4ed671dac +342, 0x3d503ce2c83fe883 +343, 0x437bfffd95f42022 +344, 0xc82d1e3d5c2bc8d2 +345, 0x7a0a9cbfcb0d3f24 +346, 0xc0a4f00251b7a3be +347, 
0xb5be24e74bb6a1c6 +348, 0xa3104b94b57545b1 +349, 0x86de7d0c4b97b361 +350, 0x879c1483f26538a6 +351, 0xd74c87557f6accfb +352, 0x2f9be40dbf0fe8a1 +353, 0x445a93398f608d89 +354, 0x7b3cb8a7211d7fdc +355, 0xe86cc51290d031e7 +356, 0x33ef3594052ad79f +357, 0xc61911d241dbb590 +358, 0x37cccb0c0e3de461 +359, 0xb75259124080b48b +360, 0xd81e8961beb4abe5 +361, 0xf4542deb84a754e +362, 0x6ea036d00385f02e +363, 0xa7b60b0ac3b88681 +364, 0x108a6c36ca30baf5 +365, 0x4a2adc5bbfe2bf07 +366, 0x4079501f892a5342 +367, 0x55e113963c5448f0 +368, 0x8019ff4903b37242 +369, 0x109c6dcdb7ec6618 +370, 0x1239ac50944da450 +371, 0xe1399c7f94c651c1 +372, 0x5a6bbbae388d365a +373, 0x4d72be57b8810929 +374, 0x3f067df24384e1fb +375, 0x4f8b9e0f7f6c7be +376, 0x202492c342a3b08 +377, 0x250753192af93a3 +378, 0xfba1159d9de2cb8e +379, 0xba964497ab05505c +380, 0x1329ec5d8a709dca +381, 0x32927cacb6cd22bb +382, 0x6b4d7db904187d56 +383, 0xe76adccf8e841e02 +384, 0x8c4bf4b6a788202 +385, 0x3013a3b409831651 +386, 0x7427d125c475412f +387, 0x84dcc4bb2bf43202 +388, 0x117526f1101372a5 +389, 0xfe95d64b8984bd72 +390, 0x524e129934cc55c1 +391, 0xc3db4b0418c36d30 +392, 0xe1cb2047e9c19f7a +393, 0xea43d6c8d8982795 +394, 0xe80ac8a37df89ed +395, 0xfecc2104329ed306 +396, 0xa5c38aac9c1d51ea +397, 0x3abe5d1c01e4fe17 +398, 0x717a805d97fcc7ac +399, 0x94441f8207a1fb78 +400, 0x22d7869c5f002607 +401, 0x349e899f28c3a1b9 +402, 0x5639950cdea92b75 +403, 0x7e08450497c375b +404, 0x94bf898b475d211d +405, 0x75c761a402375104 +406, 0x1930920ec9d2a1e7 +407, 0xb774ba1bc6f6e4e2 +408, 0xf715602412e5d900 +409, 0x87bb995f4a13f0ba +410, 0xa3c787868dfa9c8d +411, 0xa17fd42a5a4f0987 +412, 0x4a9f7d435242b86 +413, 0x240364aff88f8aef +414, 0xe7cd4cf4bf39f144 +415, 0xd030f313ca4c2692 +416, 0xc46696f4e03ec1e9 +417, 0x22c60f1ec21060b3 +418, 0x16c88058fd68986f +419, 0x69ca448e8e6bde3f +420, 0x3466c2cdec218abd +421, 0x837ac4d05e6b117d +422, 0x911210e154690191 +423, 0x9ece851d6fa358b7 +424, 0x42f79cb0c45e7897 +425, 0xbf7583babd7c499b +426, 0x2059fe8031c6e0b9 +427, 0xabbec8fc00f7e51d +428, 0x88809d86a3a256e1 +429, 0xd36056df829fdcb5 +430, 0x515632b6cb914c64 +431, 0xba76d06c2558874 +432, 0x632c54ca4214d253 +433, 0xadec487adf2cb215 +434, 0x521e663e1940513d +435, 0xb1b638b548806694 +436, 0xbe2d5bfbe57d2c72 +437, 0x8b89e7719db02f7 +438, 0x90ba5281c1d56e63 +439, 0x899e1b92fceea102 +440, 0xf90d918e15182fa6 +441, 0x94a489ce96c948c4 +442, 0xad34db453517fcd4 +443, 0xc5264eb2de15930f +444, 0x101b4e6603a21cee +445, 0xef9b6258d6e85fff +446, 0x6075c7d6c048bd7a +447, 0x6f03232c64e438aa +448, 0x18c983d7105ee469 +449, 0x3ffc23f5c1375879 +450, 0xbc1b4a00afb1f9f +451, 0x5afa6b2bb8c6b46e +452, 0xe7fce4af2f2c152a +453, 0x5b00ab5c4b3982c7 +454, 0x2d4b0c9c0eb4bd0c +455, 0x61d926270642f1f2 +456, 0x7219c485c23a2377 +457, 0x7e471c752fecd895 +458, 0x23c4d30a4d17ba1f +459, 0x65cb277fe565ca22 +460, 0xcbb56ed9c701363b +461, 0xfd04ab3a6eba8282 +462, 0x19c9e5c8bab38500 +463, 0xea4c15227676b65b +464, 0x20f3412606c8da6f +465, 0xb06782d3bf61a239 +466, 0xf96e02d5276a9a31 +467, 0x835d256b42aa52a6 +468, 0x25b09151747f39c1 +469, 0x64507386e1103eda +470, 0x51cbc05716ef88e4 +471, 0x998cd9b7989e81cc +472, 0x9d7115416bec28d1 +473, 0xc992ca39de97906b +474, 0xd571e6f7ca598214 +475, 0xafc7fb6ccd9abbf8 +476, 0x88ef456febff7bf4 +477, 0xdbe87ccc55b157d2 +478, 0xaab95e405f8a4f6d +479, 0xad586a385e74af4f +480, 0x23cd15225c8485aa +481, 0x370940bf47900ac7 +482, 0xefd6afda1a4b0ead +483, 0x9cb1a4c90993dd7a +484, 0xff7893e8b2f70b11 +485, 0xb09e1807c0638e8e +486, 0xb10915dcb4978f74 +487, 0x88212ab0051a85eb +488, 0x7af41b76e1ec793f +489, 
0x2e5c486406d3fefd +490, 0xebe54eff67f513cc +491, 0xab6c90d0876a79b8 +492, 0x224df82f93fe9089 +493, 0xc51c1ce053dc9cd2 +494, 0x5ef35a4d8a633ee7 +495, 0x4aca033459c2585f +496, 0xd066932c6eefb23d +497, 0x5309768aab9a7591 +498, 0xa2a3e33823df37f9 +499, 0xcec77ff6a359ee9 +500, 0x784dc62d999d3483 +501, 0x84e789fb8acc985d +502, 0xd590237e86aa60f +503, 0x737e2ffe1c8ad600 +504, 0xc019c3a39a99eab8 +505, 0x6a39e9836964c516 +506, 0xe0fe43129535d9da +507, 0xdfc5f603d639d4de +508, 0x7b9a7d048a9c03b6 +509, 0xbb5aa520faa27fdd +510, 0x2a09b4200f398fa2 +511, 0x38cc88107904064e +512, 0xa9a90d0b2d92bb25 +513, 0x9419762f87e987e3 +514, 0x1a52c525153dedcd +515, 0xc26d9973dd65ae99 +516, 0x8e89bd9d0dc6e6a1 +517, 0x2f30868dc01bfb53 +518, 0x20f09d99b46501c4 +519, 0x78b468a563b8f1e9 +520, 0xcccf34b0b6c380c7 +521, 0xf554e7dc815297e6 +522, 0x332a585cfb4a50ef +523, 0xa9fb64a2b6da41d7 +524, 0xdcd2a5a337391ce0 +525, 0x8a9bd3e324c6463d +526, 0x9f4487d725503bdd +527, 0xf72282d82f1d0ff +528, 0x308f4160abb72d42 +529, 0x648de1db3a601b08 +530, 0x36cab5192e7ebd39 +531, 0x7975fbe4ab6a1c66 +532, 0xd515b4d72243864e +533, 0x43a568f8b915e895 +534, 0x15fa9f2057bdb91d +535, 0x7a43858ef7a222dc +536, 0x17b4a9175ac074fe +537, 0xa932c833b8d0f8f8 +538, 0x1d2db93a9a587678 +539, 0x98abd1d146124d27 +540, 0xf0ab0431671740aa +541, 0xa9d182467540ad33 +542, 0x41c8a6cfc331b7fc +543, 0xa52c6bd0fcd1d228 +544, 0x2773c29a34dc6fa3 +545, 0x3098230746fc1f37 +546, 0xd63311bb4f23fabe +547, 0x6712bf530cd2faec +548, 0x342e8f342e42c4dd +549, 0xfbd83331851cdcad +550, 0xe903be1361bbc34d +551, 0xd94372e5077e3ef9 +552, 0x95aaa234f194bd8 +553, 0x20c0c8fb11e27538 +554, 0xfaf47dc90462b30b +555, 0x8ddc6d144147682a +556, 0xf626833fd926af55 +557, 0x5df93c34290d1793 +558, 0xb06a903e6e9fca5e +559, 0x10c792dc851d77ca +560, 0xd9b1b817b18e56cb +561, 0x3a81730c408eb408 +562, 0x65052c04a8d4b63c +563, 0x3328546598e33742 +564, 0xeca44a13f62d156d +565, 0x69f83d1d86b20170 +566, 0x937764200412027d +567, 0xc57eb1b58df0f191 +568, 0xa1c7d67dce81bc41 +569, 0x8e709c59a6a579ce +570, 0x776a2f5155d46c70 +571, 0xd92906fbbc373aa5 +572, 0xe97ad478a2a98bf6 +573, 0xc296c8819ac815f +574, 0x613ede67ba70e93e +575, 0xe145222498f99cde +576, 0xafcdfa7a3c1cf9bf +577, 0x1c89252176db670d +578, 0xad245eda5c0865ff +579, 0x249463d3053eb917 +580, 0xc9be16d337517c0b +581, 0xefcc82bf67b8f731 +582, 0x1e01577d029e0d00 +583, 0xad9c24b2a4f3d418 +584, 0xed2cceb510db4d0f +585, 0xbddadcdb92400c70 +586, 0x67d6b0476ef82186 +587, 0xbc7662ff7bf19f73 +588, 0x9d94452a729e6e92 +589, 0x6b278d8594f55428 +590, 0x6c4b31cceb1b2109 +591, 0xccc6c3a726701e9 +592, 0x6bc28ece07df8925 +593, 0xc0422b7bf150ccc4 +594, 0xab7158f044e73479 +595, 0xdf3347546d9ed83f +596, 0x3b3235a02c70dff4 +597, 0x2551c49c14ea8d77 +598, 0xee2f7f5bb3cc228e +599, 0x39b87bfe8c882d39 +600, 0x7dd420fad380b51c +601, 0xffe64976af093f96 +602, 0x4a4f48dc6e7eaa5f +603, 0x85f2514d32fdc8cc +604, 0x1ab1215fd7f94801 +605, 0x4cd1200fc795b774 +606, 0xcf8af463a38942ee +607, 0x319caa7ce3022721 +608, 0x8cd9798a76d1aea4 +609, 0x2bd3933ac7afd34e +610, 0x85d4c323403cf811 +611, 0xd7b956d3064efa30 +612, 0x67a078dbf1f13068 +613, 0x665fa6c83e87c290 +614, 0x9333ac2416d2469b +615, 0xdfb1fd21a0094977 +616, 0xa1962a6e2c25f8ff +617, 0x1f3b10a7ed5287cf +618, 0x70641efb3d362713 +619, 0xe527a2cf85d00918 +620, 0x9741e45d3f9890a3 +621, 0x6cb74b5d4d36db4b +622, 0xf24734d622bd2209 +623, 0xadd6d94f78e9d378 +624, 0xc3bbdb59225cca7f +625, 0x5ad36614275b30cd +626, 0x495568dd74eea434 +627, 0xf35de47e0ffe1f2d +628, 0xefa209dca719ab18 +629, 0x844ddcaeb5b99ae8 +630, 0x37449670a1dc7b19 +631, 
0x5a4612c166f845c1 +632, 0xe70f7782f2087947 +633, 0x98d484deac365721 +634, 0x705302198cf52457 +635, 0x7135ae0f5b77df41 +636, 0x342ac6e44a9b6fc3 +637, 0x2713fd2a59af5826 +638, 0x6e1a3f90f84efa75 +639, 0x9fb3b4dd446ca040 +640, 0x530044ae91e6bd49 +641, 0xe984c4183974dc3e +642, 0x40c1fa961997d066 +643, 0xb7868250d8c21559 +644, 0x8bc929fa085fd1de +645, 0x7bdb63288dc8733e +646, 0xac4faad24326a468 +647, 0x1c6e799833aea0b1 +648, 0xcc8a749e94f20f36 +649, 0x4e7abfd0443547c5 +650, 0xb661c73bb8caa358 +651, 0x4a800f5728ff2351 +652, 0x8c15e15189b9f7ed +653, 0xab367846b811362c +654, 0x4ba7508f0851ca2a +655, 0xe9af891acbafc356 +656, 0xbdebe183989601f8 +657, 0x4c665ea496afc061 +658, 0x3ca1d14a5f2ed7c +659, 0xfbdff10a1027dd21 +660, 0xdfd28f77c8cff968 +661, 0xc4fbaadf8a3e9c77 +662, 0xdac7e448b218c589 +663, 0xb26390b5befd19e2 +664, 0xd2ef14916c66dba9 +665, 0xfab600284b0ff86b +666, 0xf04a1c229b58dabb +667, 0xc21c45637e452476 +668, 0xd1435966f75e0791 +669, 0xc1f28522eda4a2d0 +670, 0x52332ae8f1222185 +671, 0x81c6c0790c0bf47e +672, 0xfebd215e7d8ffb86 +673, 0x68c5dce55dbe962b +674, 0x231d09cb0d2531d1 +675, 0x3218fba199dbbc6b +676, 0x8f23c535f8ea0bf6 +677, 0x6c228963e1df8bd9 +678, 0x9843c7722ed153e3 +679, 0xd032d99e419bddec +680, 0xe2dca88aa7814cab +681, 0x4d53fb8c6a59cdc2 +682, 0x8fb3abc46157b68b +683, 0xa3e733087e09b8e +684, 0x6bdc1aee029d6b96 +685, 0x4089667a8906d65b +686, 0x8f3026a52d39dd03 +687, 0x6d2e0ccb567bae84 +688, 0x74bad450199e464 +689, 0xf114fb68a8f300d5 +690, 0xc7a5cc7b374c7d10 +691, 0xf0e93da639b279d1 +692, 0xb9943841ad493166 +693, 0x77a69290455a3664 +694, 0x41530da2ebea054b +695, 0xe8f9fab03ea24abf +696, 0xaa931f0c9f55a57a +697, 0xb4d68a75d56f97ae +698, 0x3d58ff898b6ba297 +699, 0x49d81e08faf5a3f5 +700, 0xfc5207b9f3697f3b +701, 0xa25911abb3cf19b7 +702, 0x6b8908eb67c3a41 +703, 0xd63ef402e2e3fa33 +704, 0x728e75d3f33b14c5 +705, 0x248cb1b8bc6f379a +706, 0x3aa3d6d2b8c72996 +707, 0x49cc50bd2d3d2860 +708, 0xb4e1387647c72075 +709, 0x435a1630a4a81ed3 +710, 0xa5ea13005d2460cf +711, 0xc7a613df37d159ec +712, 0x95721ccc218b857e +713, 0xd4b70d8c86b124d3 +714, 0x2b82bcc4b612d494 +715, 0xaf13062885276050 +716, 0xcbd8fcf571a33d9c +717, 0x3f7f67ca1125fc15 +718, 0xddf4bb45aac81b4c +719, 0x23606da62de9c040 +720, 0xa3a172375666b636 +721, 0x292f87387a6c6c3c +722, 0xd1d10d00c5496fe1 +723, 0x86b0411ce8a25550 +724, 0x38e0487872e33976 +725, 0x363e49f88ddfd42c +726, 0x45bdf1e9f6b66b0a +727, 0x8a6fff3de394f9b5 +728, 0x8502158bb03f6209 +729, 0x22e24d16dba42907 +730, 0x3fe3ba427cc2b779 +731, 0x77144793f66b3d7e +732, 0xcf8912ccb29b8af9 +733, 0xdc856caff2abd670 +734, 0xe6d3ae0b0d9d4c8b +735, 0xb8f5d40e454c539f +736, 0x79ca953114fbc6b7 +737, 0x478d6f4bbfa38837 +738, 0x9babae1a3ffdc340 +739, 0x40edd56802bae613 +740, 0x97a56c2dcccf0641 +741, 0xafc250257f027f8e +742, 0x8da41ef1edf69125 +743, 0x6574b0280ff9d309 +744, 0x197c776151b8f820 +745, 0x6b03e077c9dac3b6 +746, 0x24a40ebbc5c341c5 +747, 0x50e585169a6a1c4b +748, 0x37783a5a6a3e4e02 +749, 0xb3de81ee6fbad647 +750, 0xf4f292f57ca4591e +751, 0x6214e9e7d44d30a +752, 0x5920190c56d21c12 +753, 0x9ac163419b5e0c9b +754, 0xfc2328761ae8ed93 +755, 0xc68f945b545508c6 +756, 0x687c49a17ce0a5e2 +757, 0x276d8f53d30d4ab4 +758, 0x8201804970343ce1 +759, 0x1b5d323cc2e7fb7e +760, 0x6f351ef04fd904b +761, 0x6c793a7d455d5198 +762, 0x46f5d108430ae91f +763, 0xac16a15b2a0cf77f +764, 0xa0d479d9e4122b9d +765, 0x3afd94604307f19 +766, 0x2573ed6d39d38dbf +767, 0xa58e14ba60b4294b +768, 0xe69c1aed5840d156 +769, 0x4cf6fda7f04855c2 +770, 0x2fb65a56ef5f22da +771, 0xf95819434d5dc220 +772, 0x29c65133623dafba +773, 
0x8e997bd018467523 +774, 0xfd08ba9d498461a7 +775, 0xdd52243bc78a5592 +776, 0x39c30108f6db88b3 +777, 0x38af8e1894f259b9 +778, 0x97eedf3b4ae5f6de +779, 0x757825add80c5ece +780, 0xf0fdd90ac14edb14 +781, 0xbbb19d4cc8cac6d4 +782, 0x9a82234edfae05e3 +783, 0x704401c61d1edf1c +784, 0x8b0eb481fb3a1fb2 +785, 0xef6f36e7cc06c002 +786, 0x7a208b17e04b8cd7 +787, 0xf20e33d498838fe9 +788, 0xc2bdb22117058326 +789, 0x6ec31939eb4ca543 +790, 0x6f1654838f507a21 +791, 0xc65ab81a955d2b93 +792, 0x40b1420fdd9531b8 +793, 0xe31f221cab9f4f40 +794, 0x798cdd414c1deb7a +795, 0x9c84e9c7d41cd983 +796, 0x63d6b1ae3b60b7fa +797, 0xb42bfdd1a2f78ffa +798, 0x37e431eaccaaa8e9 +799, 0x7508142a0f73eac9 +800, 0x91662a023df5893a +801, 0x59782070e2fe3031 +802, 0xb2acd589a8ce7961 +803, 0xa224743fa877b292 +804, 0xaa5362aa27e6ed9e +805, 0xa394a4e520c0c1c7 +806, 0xe49b16d2018ffb6f +807, 0xb8074b9f2f1e762b +808, 0xcf5f86143d5c23a7 +809, 0xfd838785db987087 +810, 0x31b1889df389aff8 +811, 0x30aaca876a4383b +812, 0x1731bb71c4c38d4f +813, 0x9a83a65395e05458 +814, 0x99cd0c8d67c8f4fc +815, 0xfbd9fdc849b761a5 +816, 0x82c04834fc466889 +817, 0xdeef9d6e715e8c97 +818, 0x549c281c16da6078 +819, 0x2d70661254ad599d +820, 0x57995793a72acac +821, 0xf1727005116183ba +822, 0xa22bb38945285de3 +823, 0x4f2d687fe45131ff +824, 0x5666c87ddbbc981f +825, 0xbcb4b2d4e7a517d0 +826, 0x5e794dd2e20b785d +827, 0x449ad020149e093c +828, 0x7704ee0412d106f5 +829, 0x83cbdf257b072ac1 +830, 0xae5c4fc9f638b0da +831, 0x7b9e5a64e372ed47 +832, 0x7eddbbb22c2cdf57 +833, 0x3f19ebfa155b08e +834, 0x91d991154dfd7177 +835, 0x611ae74b952d387f +836, 0x3fdf7a335bda36ee +837, 0xdf182433fc7a7c05 +838, 0x62c78598d1f8db0a +839, 0xc3750c69d2c5c1f0 +840, 0xf1318024709efdee +841, 0xaa3fd360d224dc29 +842, 0x62af53b2f307c19 +843, 0xdf527683c58120c2 +844, 0x3281deecc496f93d +845, 0x4f704ad31527ef08 +846, 0x127a14a5e07cfdfc +847, 0x90d0b1f549255c92 +848, 0xbc3406b212c5e1fc +849, 0x4e89f39379dba91d +850, 0x1290ef43c4998e6e +851, 0xecfeb1a1cb1c6e1b +852, 0x2067e90403003bf1 +853, 0x38ae04be30bdbeba +854, 0x8a3537f298baedda +855, 0xd07f3b825cdb2936 +856, 0xea020b5aebae8b45 +857, 0xfcd614ab031132b0 +858, 0x5fb682a4ff2268f5 +859, 0xd1c4662ce65596f4 +860, 0x7026b8270dd0b8dc +861, 0x8101ec4b4beae45a +862, 0xa0e9dc87940610a6 +863, 0x83ec33679d83165b +864, 0x981847ca82e86d41 +865, 0xda84c188a304a0b7 +866, 0x3c37529c5a5bbbb8 +867, 0x34a8491ce3e19a5a +868, 0xd36ad716a2fa6cb8 +869, 0xfd1d1d6a5189a15c +870, 0x9716eb47851e8d8d +871, 0x7dfb13ea3b15c5aa +872, 0xbdf6e707f45113a5 +873, 0xb8118261b04bd097 +874, 0x6191f9895881bec6 +875, 0x7aac257ae11acf9b +876, 0x35a491e1537ff120 +877, 0xe078943432efa71c +878, 0xb3338485dd3dc2b9 +879, 0x456060975d2bb3b5 +880, 0xaddc4c451bdfc44c +881, 0x18bfa7beacf96430 +882, 0x8802ebcaf0f67498 +883, 0xad922a5a825bd780 +884, 0x9fb4587d748f4efa +885, 0xdb2a445136cd5e7 +886, 0xb98b3676ea8e96ac +887, 0xb02d8d244d784878 +888, 0xa1a8442b18860abb +889, 0x6a3029ba1361e5d1 +890, 0xf426d5fac161eb1 +891, 0xfa5ac2b87acecb23 +892, 0xaa659896e50535df +893, 0xf40dd7a3d3c5c8ed +894, 0x3f8367abecb705bc +895, 0x2d60e7525873358f +896, 0xc4a9d3948a0c3937 +897, 0x5ecc04fef6003909 +898, 0x7a865004918cba2 +899, 0x47ae110a678ec10b +900, 0xa0f02f629d91aa67 +901, 0x4848b99e7fac9347 +902, 0xaa858346d63b80ac +903, 0xeb5bf42ee161eeef +904, 0x4d35d723d3c6ba37 +905, 0xdf22ca6ca93b64a7 +906, 0x9d198520f97b25b1 +907, 0x3068415350778efe +908, 0xf3709f2e8793c2fe +909, 0xd1517bac8dd9f16f +910, 0xfb99bccaa15861dc +911, 0xa9ad607d796a2521 +912, 0x55d3793d36bd22e4 +913, 0xf99270d891ff7401 +914, 0x401750a5c4aa8238 +915, 
0xd84b3003e6f28309 +916, 0x8a23798b5fa7c98b +917, 0xadd58bbc8f43e399 +918, 0xbd8c741ada62c6a8 +919, 0xbdc6937bc55b49fa +920, 0x4aefa82201b8502 +921, 0x17adf29a717b303 +922, 0xa6ed2197be168f6c +923, 0x1ba47543f4359a95 +924, 0xe34299949ac01ae9 +925, 0x711c76cffc9b62f3 +926, 0xbac259895508a4b7 +927, 0x3c8b3b3626b0d900 +928, 0x1a8d23fbe2ae71bf +929, 0xca984fa3b5a5c3a1 +930, 0xb1986ab7521a9c93 +931, 0xd6b5b2c8d47a75b5 +932, 0xc7f1c4a88afb4957 +933, 0xdeb58033a3acd6cc +934, 0xabe49ddfe1167e67 +935, 0x8d559c10205c06e3 +936, 0xea07a1a7de67a651 +937, 0xcbef60db15b6fef8 +938, 0xbfca142cff280e7 +939, 0x362693eba0732221 +940, 0x7463237e134db103 +941, 0x45574ddb5035e17a +942, 0xfc65e0cb9b94a1aa +943, 0x3154c55f1d86b36d +944, 0x2d93a96dd6ab2d8b +945, 0xbe3bc1d1f2542a25 +946, 0xdd4b541f7385bdaa +947, 0x3b56b919d914e3f8 +948, 0x82fd51468a21895f +949, 0x8988cf120731b916 +950, 0xa06a61db5fb93e32 +951, 0x6ed66c1b36f68623 +952, 0x875ae844d2f01c59 +953, 0x17ccd7ac912e5925 +954, 0x12fe2a66b8e40cb1 +955, 0xf843e5e3923ad791 +956, 0xa17560f2fd4ef48 +957, 0x27a2968191a8ee07 +958, 0xa9aab4d22ff44a3c +959, 0x63cd0dcc3bb083ae +960, 0x7a30b48c6160bf85 +961, 0x956160fb572503b3 +962, 0xc47f6b7546640257 +963, 0xaf4b625f7f49153 +964, 0x2f5c86a790e0c7e8 +965, 0xb52e0610ae07f0b8 +966, 0x38a589292c3d849e +967, 0xc3e9ef655d30b4ef +968, 0xb5695f765cda998a +969, 0xde5d5e692a028e91 +970, 0x839476721555f72e +971, 0x48b20679b17d9ebf +972, 0xe3d4c6b2c26fb0df +973, 0xce5a9834f0b4e71f +974, 0x533abb253d5d420e +975, 0x9eac5ad9aed34627 +976, 0xc0f2a01ab3c90dbb +977, 0x6528eda93f6a066c +978, 0xc16a1b625e467ade +979, 0x1a4a320fb5e8b098 +980, 0x8819cccd8b4ab32f +981, 0x42daa88531fd0bfd +982, 0xcf732226409be17c +983, 0xfddcdb25ccbf378c +984, 0x9b15b603bf589fc1 +985, 0x2436066b95d366fe +986, 0x8d42eff2e9cbda90 +987, 0x694b2fc8a4e8303c +988, 0x8e207f98aaea3ccd +989, 0x4730d7a620f822d9 +990, 0x468dc9ca30fe2fd4 +991, 0x74b36d8a1c0f031b +992, 0x3c1aac1c488c1a94 +993, 0x19d0101042444585 +994, 0x8ec50c56d0c8adf4 +995, 0x721ec629e4d66394 +996, 0x3ca5ad93abeac4a4 +997, 0xaaebc76e71592623 +998, 0x969cc319e3ed6058 +999, 0xc0a277e3b2bfc3de diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index d602b36b4169..29054b70b95a 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -8,8 +8,8 @@ import pytest from numpy.random import ( - Generator, MT19937, PCG64, Philox, RandomState, SeedSequence, SFC64, - default_rng + Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence, + SFC64, default_rng ) from numpy.random._common import interface @@ -359,6 +359,34 @@ def test_advance_symmetry(self): assert val_big == val_pos +class TestPCG64DXSM(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + 
assert val_neg == val_pos + assert val_big == val_pos + + class TestMT19937(Base): @classmethod def setup_class(cls): diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index 909bfaa8dab6..9becc434d0d1 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -4,7 +4,7 @@ import numpy as np import pytest from numpy.testing import assert_equal, assert_, assert_array_equal -from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64) +from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64) @pytest.fixture(scope='module', params=(np.bool_, np.int8, np.int16, np.int32, np.int64, @@ -774,6 +774,18 @@ def setup_class(cls): cls._extra_setup() +class TestPCG64DXSM(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + class TestDefaultRNG(RNG): @classmethod def setup_class(cls): From 034aedc545c03417c0dae1b20c84098459c4b3a4 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 4 May 2021 08:00:48 +0200 Subject: [PATCH 1035/1270] BLD, BUG: Fix compiler optimization log AttributeError The error appears when option `build` is represented before `bdist_wheel`. --- numpy/distutils/command/build_clib.py | 8 ++++---- numpy/distutils/command/build_ext.py | 9 +++++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index a4f49b00eaa2..e1f15d465e7b 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -132,12 +132,12 @@ def run(self): log.info("Detected changes on compiler optimizations, force rebuilding") self.force = True - import atexit - def report(): + def report(copt): log.info("\n########### CLIB COMPILER OPTIMIZATION ###########") - log.info(self.compiler_opt.report(full=True)) + log.info(copt.report(full=True)) - atexit.register(report) + import atexit + atexit.register(report, self.compiler_opt) if self.have_f_sources(): from numpy.distutils.fcompiler import new_fcompiler diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index 91cb0d897021..ca29ad4c0c19 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -160,11 +160,12 @@ def run(self): log.info("Detected changes on compiler optimizations, force rebuilding") self.force = True - import atexit - def report(): + def report(copt): log.info("\n########### EXT COMPILER OPTIMIZATION ###########") - log.info(self.compiler_opt.report(full=True)) - atexit.register(report) + log.info(copt.report(full=True)) + + import atexit + atexit.register(report, self.compiler_opt) # Setup directory for storing generated extra DLL files on Windows self.extra_dll_dir = os.path.join(self.build_temp, '.libs') From ca2a8035fb924d1f0ec71ab955b75deb12e0f94b Mon Sep 17 00:00:00 2001 From: Chiara Marmo Date: Tue, 4 May 2021 09:00:43 +0200 Subject: [PATCH 1036/1270] Highlight markdown syntax. 
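The `test_advance_symmetry` cases added above all exercise one property of `advance`: it is arithmetic modulo the 2**128 period, so a negative step and the same step offset by a whole number of periods land on the same state. A minimal sketch of that property, assuming a NumPy build that already ships `PCG64DXSM` (the seed value is arbitrary):

    from numpy.random import Generator, PCG64DXSM

    step = -0x9e3779b97f4a7c150000000000000000   # same offset used in the test

    bg = PCG64DXSM(12345)
    saved = bg.state                   # snapshot the bit generator state

    bg.advance(step)                   # step "backwards"
    first = Generator(bg).integers(10)

    bg.state = saved
    bg.advance(2**128 + step)          # same step shifted by one full period
    second = Generator(bg).integers(10)

    assert first == second             # identical draws: advance is mod 2**128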
--- doc/source/dev/reviewer_guidelines.rst | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/source/dev/reviewer_guidelines.rst b/doc/source/dev/reviewer_guidelines.rst index f05014e986ec..ffac85f7720a 100644 --- a/doc/source/dev/reviewer_guidelines.rst +++ b/doc/source/dev/reviewer_guidelines.rst @@ -124,24 +124,24 @@ It may be helpful to store some of these in GitHub's `saved replies `_ for reviewing: **Usage question** - :: + .. code-block:: md You are asking a usage question. The issue tracker is for bugs and new features. I'm going to close this issue, feel free to ask for help via our [help channels](https://numpy.org/gethelp/). **You’re welcome to update the docs** - :: + .. code-block:: md Please feel free to offer a pull request updating the documentation if you feel it could be improved. **Self-contained example for bug** - :: + .. code-block:: md - Please provide a [self-contained example code] (https://stackoverflow.com/help/mcve), including imports and data (if possible), so that other contributors can just run it and reproduce your issue. + Please provide a [self-contained example code](https://stackoverflow.com/help/mcve), including imports and data (if possible), so that other contributors can just run it and reproduce your issue. Ideally your example code should be minimal. **Software versions** - :: + .. code-block:: md To help diagnose your issue, please paste the output of: ``` @@ -150,31 +150,31 @@ replies `_ for reviewing: Thanks. **Code blocks** - :: + .. code-block:: md Readability can be greatly improved if you [format](https://help.github.com/articles/creating-and-highlighting-code-blocks/) your code snippets and complete error messages appropriately. You can edit your issue descriptions and comments at any time to improve readability. This helps maintainers a lot. Thanks! **Linking to code** - :: + .. code-block:: md For clarity's sake, you can link to code like [this](https://help.github.com/articles/creating-a-permanent-link-to-a-code-snippet/). **Better description and title** - :: + .. code-block:: md Please make the title of the PR more descriptive. The title will become the commit message when this is merged. You should state what issue (or PR) it fixes/resolves in the description using the syntax described [here](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword). **Regression test needed** - :: + .. code-block:: md Please add a [non-regression test](https://en.wikipedia.org/wiki/Non-regression_testing) that would fail at main but pass in this PR. **Don’t change unrelated** - :: + .. code-block:: md Please do not change unrelated lines. It makes your contribution harder to review and may introduce merge conflicts to other pull requests. From 516bb1a1faeb4f0c33f2da9a199ecccabb000469 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 4 May 2021 09:23:12 +0200 Subject: [PATCH 1037/1270] BLD: remove unnecessary flag `-faltivec` on macOS Supporting PowerPC/AltiVec on macOS is no longer necessary, even if the Mac/G5 is still running having `-faltivec` or replacing it with the new AltiVec flag `-maltivec` wouldn't increase that much performance without raw SIMD. note: the flag was enabled on non-intel platforms which causes fatal build errors on macOS/arm64. 
--- numpy/distutils/system_info.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 9e192329f635..85a0c932145c 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -2494,8 +2494,6 @@ def calc_info(self): 'accelerate' in libraries): if intel: args.extend(['-msse3']) - else: - args.extend(['-faltivec']) args.extend([ '-I/System/Library/Frameworks/vecLib.framework/Headers']) link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) @@ -2504,8 +2502,6 @@ def calc_info(self): 'veclib' in libraries): if intel: args.extend(['-msse3']) - else: - args.extend(['-faltivec']) args.extend([ '-I/System/Library/Frameworks/vecLib.framework/Headers']) link_args.extend(['-Wl,-framework', '-Wl,vecLib']) From eb3abcbb62e8fa7f20588f33da9ad204677707eb Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 4 May 2021 13:15:37 +0200 Subject: [PATCH 1038/1270] MAINT: Avoid treating _SIMD module build warnings as errors --- numpy/core/src/_simd/_simd.dispatch.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index 3e82c28a40d7..c51e4ce4ec33 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -1,4 +1,4 @@ -/*@targets $werror #simd_test*/ +/*@targets #simd_test*/ #include "_simd.h" #include "_simd_inc.h" From 77c87d5ef657799e0085bc3bbe9cb5340b0970ad Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 4 May 2021 13:35:12 +0200 Subject: [PATCH 1039/1270] CI: treat _SIMD module build warnings as errors within CI only --- tools/travis-test.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/travis-test.sh b/tools/travis-test.sh index ac514541111a..4667db991e9a 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -36,6 +36,11 @@ setup_base() sysflags="$($PYTHON -c "from distutils import sysconfig; \ print (sysconfig.get_config_var('CFLAGS'))")" export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare" + # SIMD extensions that need to be tested on both runtime and compile-time via (test_simd.py) + # any specified features will be ignored if they're not supported by compiler or platform + # note: it almost the same default value of --simd-test execpt adding policy `$werror` to treat all + # warnings as errors + simd_test="\$werror BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F AVX512_SKX VSX VSX2 VSX3 NEON ASIMD" # We used to use 'setup.py install' here, but that has the terrible # behaviour that if a copy of the package is already installed in the # install location, then the new copy just gets dropped on top of it. @@ -53,11 +58,11 @@ setup_base() # #if !HAVE_FFI_PREP_CIF_VAR && defined(__arm64__) && defined(__APPLE__) # export CFLAGS="$CFLAGS -Werror=undef" - $PIP install -v . 2>&1 | tee log + $PYTHON setup.py build --simd-test "$simd_test" install 2>&1 | tee log else # The job run with USE_DEBUG=1 on travis needs this. 
export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized" - $PYTHON setup.py build build_src --verbose-cfg build_ext --inplace 2>&1 | tee log + $PYTHON setup.py build --simd-test "$simd_test" build_src --verbose-cfg build_ext --inplace 2>&1 | tee log fi grep -v "_configtest" log \ | grep -vE "ld returned 1|no files found matching" \ From 06fffd94729a920961648dd86a8b8e86f6c25326 Mon Sep 17 00:00:00 2001 From: Matthew Badin Date: Tue, 4 May 2021 13:08:35 -0700 Subject: [PATCH 1040/1270] DOC: Add release note. --- doc/release/upcoming_changes/18874.change.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 doc/release/upcoming_changes/18874.change.rst diff --git a/doc/release/upcoming_changes/18874.change.rst b/doc/release/upcoming_changes/18874.change.rst new file mode 100644 index 000000000000..c86fed83e739 --- /dev/null +++ b/doc/release/upcoming_changes/18874.change.rst @@ -0,0 +1,11 @@ +Enable Accelerate Framework +---------------------------- +With the release of macOS 11.3, several different issues that +numpy was encountering when using Accelerate Framework's +implementation of BLAS and LAPACK should be resolved. This +change enables the Accelerate Framework as an option on macOS. +If additional issues are found, please file a bug report +against Accelerate using the developer feedback assistant +tool (https://developer.apple.com/bug-reporting/). We +intend to address issues promptly and plan to continue +supporting and updating our BLAS and LAPACK libraries. From 771ce9d1c02f2b4fefcbfc603fd234cceba3d8fe Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 16:34:05 -0400 Subject: [PATCH 1041/1270] DOC: Document PCG64DXSM. --- .../reference/random/bit_generators/index.rst | 12 +- .../random/bit_generators/pcg64dxsm.rst | 32 ++++ doc/source/reference/random/index.rst | 4 + .../reference/random/upgrading-pcg64.rst | 153 ++++++++++++++++++ 4 files changed, 197 insertions(+), 4 deletions(-) create mode 100644 doc/source/reference/random/bit_generators/pcg64dxsm.rst create mode 100644 doc/source/reference/random/upgrading-pcg64.rst diff --git a/doc/source/reference/random/bit_generators/index.rst b/doc/source/reference/random/bit_generators/index.rst index 6f8cf02ca49d..c5c3498068bd 100644 --- a/doc/source/reference/random/bit_generators/index.rst +++ b/doc/source/reference/random/bit_generators/index.rst @@ -15,10 +15,13 @@ Supported BitGenerators The included BitGenerators are: -* PCG-64 - The default. A fast generator that supports many parallel streams - and can be advanced by an arbitrary amount. See the documentation for - :meth:`~.PCG64.advance`. PCG-64 has a period of :math:`2^{128}`. See the `PCG - author's page`_ for more details about this class of PRNG. +* PCG-64 - The default. A fast generator that can be advanced by an arbitrary + amount. See the documentation for :meth:`~.PCG64.advance`. PCG-64 has + a period of :math:`2^{128}`. See the `PCG author's page`_ for more details + about this class of PRNG. +* PCG-64 DXSM - An upgraded version of PCG-64 with better statistical + properties in parallel contexts. See :ref:`upgrading-pcg64` for more + information on these improvements. * MT19937 - The standard Python BitGenerator. Adds a `MT19937.jumped` function that returns a new generator with state as-if :math:`2^{128}` draws have been made. 
@@ -43,6 +46,7 @@ The included BitGenerators are: MT19937 PCG64 + PCG64DXSM Philox SFC64 diff --git a/doc/source/reference/random/bit_generators/pcg64dxsm.rst b/doc/source/reference/random/bit_generators/pcg64dxsm.rst new file mode 100644 index 000000000000..e37efa5d39da --- /dev/null +++ b/doc/source/reference/random/bit_generators/pcg64dxsm.rst @@ -0,0 +1,32 @@ +Permuted Congruential Generator (64-bit, PCG64 DXSM) +---------------------------------------------------- + +.. currentmodule:: numpy.random + +.. autoclass:: PCG64DXSM + :members: __init__ + :exclude-members: __init__ + +State +===== + +.. autosummary:: + :toctree: generated/ + + ~PCG64DXSM.state + +Parallel generation +=================== +.. autosummary:: + :toctree: generated/ + + ~PCG64DXSM.advance + ~PCG64DXSM.jumped + +Extending +========= +.. autosummary:: + :toctree: generated/ + + ~PCG64DXSM.cffi + ~PCG64DXSM.ctypes diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index fc7743c64d0c..96cd47017cca 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -222,6 +222,9 @@ one of three ways: * :ref:`independent-streams` * :ref:`parallel-jumped` +Users with a very large amount of parallelism will want to consult +:ref:`upgrading-pcg64`. + Concepts -------- .. toctree:: @@ -230,6 +233,7 @@ Concepts generator Legacy Generator (RandomState) BitGenerators, SeedSequences + Upgrading PCG64 with PCG64DXSM Features -------- diff --git a/doc/source/reference/random/upgrading-pcg64.rst b/doc/source/reference/random/upgrading-pcg64.rst new file mode 100644 index 000000000000..105d8efcb9be --- /dev/null +++ b/doc/source/reference/random/upgrading-pcg64.rst @@ -0,0 +1,153 @@ +.. _upgrading-pcg64: + +.. currentmodule:: numpy.random + +Upgrading ``PCG64`` with ``PCG64DXSM`` +-------------------------------------- + +Uses of the `PCG64` `BitGenerator` in a massively-parallel context have been +shown to have statistical weaknesses that were not apparent at the first +release in numpy 1.17. Most users will never observe this weakness and are +safe to continue to use `PCG64`. We have introduced a new `PCG64DXSM` +`BitGenerator` that will eventually become the new default `BitGenerator` +implementation used by `default_rng` in future releases. `PCG64DXSM` solves +the statistical weakness while preserving the performance and the features of +`PCG64`. + +Does this affect me? +==================== + +If you + + 1. only use a single `Generator` instance, + 2. only use `RandomState` or the functions in `numpy.random`, + 3. only use the `PCG64.jumped` method to generate parallel streams, + 4. explicitly use a `BitGenerator` other than `PCG64`, + +then this weakness does not affect you at all. Carry on. + +If you use moderate numbers of parallel streams created with `default_rng` or +`SeedSequence.spawn`, in the 1000s, then the chance of observing this weakness +is negligibly small. You can continue to use `PCG64` comfortably. + +If you use very large numbers of parallel streams, in the millions, and draw +large amounts of numbers from each, then the chance of observing this weakness +can become non-negligible, if still small. An example of such a use case would +be a very large distributed reinforcement learning problem with millions of +long Monte Carlo playouts each generating billions of random number draws. 
Such +use cases should consider using `PCG64DXSM` explicitly or another +modern `BitGenerator` like `SFC64` or `Philox`, but it is unlikely that any +old results you may have calculated are invalid. In any case, the weakness is +a kind of `Birthday Paradox `_ +collision. That is, a single pair of parallel streams out of the millions, +considered together, might fail a stringent set of statistical tests of +randomness. The remaining millions of streams would all be perfectly fine, and +the effect of the bad pair in the whole calculation is very likely to be +swamped by the remaining streams in most applications. + +.. _upgrading-pcg64-details: + +Technical Details +================= + +Like many PRNG algorithms, `PCG64` is constructed from a transition function, +which advances a 128-bit state, and an output function, that mixes the 128-bit +state into a 64-bit integer to be output. One of the guiding design principles +of the PCG family of PRNGs is to balance the computational cost (and +pseudorandomness strength) between the transition function and the output +function. The transition function is a 128-bit linear congruential generator +(LCG), which consists of multiplying the 128-bit state with a fixed +multiplication constant and then adding a user-chosen increment, in 128-bit +modular arithmetic. LCGs are well-analyzed PRNGs with known weaknesses, though +128-bit LCGs are large enough to pass stringent statistical tests on their own, +with only the trivial output function. The output function of `PCG64` is +intended to patch up some of those known weaknesses by doing "just enough" +scrambling of the bits to assist in the statistical properties without adding +too much computational cost. + +One of these known weaknesses is that advancing the state of the LCG by steps +numbering a power of two (``bg.advance(2**N)``) will leave the lower ``N`` bits +identical to the state that was just left. For a single stream drawn from +sequentially, this is of little consequence. The remaining ``128-N`` bits provide +plenty of pseudorandomness that will be mixed in for any practical ``N`` that can +be observed in a single stream, which is why one does not need to worry about +this if you only use a single stream in your application. Similarly, the +`PCG64.jumped` method uses a carefully chosen number of steps to avoid creating +these collisions. However, once you start creating "randomly-initialized" +parallel streams, either using OS entropy by calling `default_rng` repeatedly +or using `SeedSequence.spawn`, then we need to consider how many lower bits +need to "collide" in order to create a bad pair of streams, and then evaluate +the probability of creating such a collision. +`Empirically `_, it has been +determined that if one shares the lower 58 bits of state and shares an +increment, then the pair of streams, when interleaved, will fail +`PractRand `_ in +a reasonable amount of time, after drawing a few gigabytes of data. Following +the standard Birthday Paradox calculations for a collision of 58 bits, we can +see that we can create ``2**29``, or about half a billion, streams which is when +the probability of such a collision becomes high. Half a billion streams is +quite high, and the amount of data each stream needs to draw before the +statistical correlations become apparent to even the strict ``PractRand`` tests +is in the gigabytes. But this is on the horizon for very large applications +like distributed reinforcement learning. 
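The 58-bit birthday figure quoted above is easy to sanity-check numerically. The usual approximation for the probability that at least two of n independently seeded streams share the same lower 58 bits of LCG state is 1 - exp(-n*(n-1)/(2*2**58)); the snippet below is only a back-of-the-envelope check of that arithmetic, with the stream counts chosen to match the text:

    import math

    def p_collision(n, bits=58):
        # birthday-bound probability that at least two of n streams
        # collide in a space of 2**bits equally likely values
        return 1.0 - math.exp(-n * (n - 1) / (2.0 * 2.0**bits))

    print(p_collision(10**6))   # ~1.7e-06: millions of streams, small but non-negligible
    print(p_collision(2**29))   # ~0.39:    about half a billion streams, collisions likely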
There are reasons to expect that even +in these applications a collision probably will not have a practical effect in +the total result, since the statistical problem is constrained to just the +colliding pair. + +Now, let us consider the case when the increment is not constrained to be the +same. Our implementation of `PCG64` seeds both the state and the increment; +that is, two calls to `default_rng` (almost certainly) have different states +and increments. Upon our first release, we believed that having the seeded +increment would provide a certain amount of extra protection, that one would +have to be "close" in both the state space and increment space in order to +observe correlations (``PractRand`` failures) in a pair of streams. If that were +true, then the "bottleneck" for collisions would be the 128-bit entropy pool +size inside of `SeedSequence` (and 128-bit collisions are in the +"preposterously unlikely" category). Unfortunately, this is not true. + +One of the known properties of an LCG is that different increments create +*distinct* streams, but with a known relationship. Each LCG has an orbit that +traverses all ``2**128`` different 128-bit states. Two LCGs with different +increments are related in that one can "rotate" the orbit of the first LCG +(advance it by a number of steps that we can compute from the two increments) +such that then both LCGs will always then have the same state, up to an +additive constant and maybe an inversion of the bits. If you then iterate both +streams in lockstep, then the states will *always* remain related by that same +additive constant (and the inversion, if present). Recall that `PCG64` is +constructed from both a transition function (the LCG) and an output function. +It was expected that the scrambling effect of the output function would have +been strong enough to make the distinct streams practically independent (i.e. +"passing the ``PractRand`` tests") unless the two increments were +pathologically related to each other (e.g. 1 and 3). The output function XSL-RR +of the then-standard PCG algorithm that we implemented in `PCG64` turns out to +be too weak to cover up for the 58-bit collision of the underlying LCG that we +described above. For any given pair of increments, the size of the "colliding" +space of states is the same, so for this weakness, the extra distinctness +provided by the increments does not translate into extra protection from +statistical correlations that ``PractRand`` can detect. + +Fortunately, strengthening the output function is able to correct this weakness +and *does* turn the extra distinctness provided by differing increments into +additional protection from these low-bit collisions. To the `PCG author's +credit `_, +she had developed a stronger output function in response to related discussions +during the long birth of the new `BitGenerator` system. We NumPy developers +(mostly me, Robert Kern, to be clear where the blame lies) chose to be +"conservative" and use the XSL-RR variant that had undergone a longer period of +testing at that time. The DXSM output function adopts a "xorshift-multiply" +construction used in strong integer hashes that has much better avalanche +properties than the XSL-RR output function. While there are "pathological" +pairs of increments that induce "bad" additive constants that relate the two +streams, the vast majority of pairs induce "good" additive constants that make +the merely-distinct streams of LCG states into practically-independent output +streams. 
Indeed, now the claim I once made about `PCG64` is actually true of +`PCG64DXSM`: collisions are possible, but both streams have to simultaneously +be both "close" in the 128 bit state space *and* "close" in the 127-bit +increment space, so the negligible chance of colliding in the 128-bit internal +`SeedSequence` pool is more likely. The DXSM output function is more +computationally intensive than XSL-RR, but some optimizations in the LCG more +than make up for the performance hit on most machines, so `PCG64DXSM` is +a good, safe upgrade. There are, of course, an infinite number of stronger +output functions that one could consider, but most will have a greater +computational cost, and the DXSM output function has now received many CPU +cycles of testing via ``PractRand`` at this time. From 819433652f344526b36cf42b2fadf69e4af8eb45 Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 17:08:45 -0400 Subject: [PATCH 1042/1270] DOC: math formatting. --- doc/source/reference/random/upgrading-pcg64.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/random/upgrading-pcg64.rst b/doc/source/reference/random/upgrading-pcg64.rst index 105d8efcb9be..ea7ebcb6bca9 100644 --- a/doc/source/reference/random/upgrading-pcg64.rst +++ b/doc/source/reference/random/upgrading-pcg64.rst @@ -68,7 +68,7 @@ too much computational cost. One of these known weaknesses is that advancing the state of the LCG by steps numbering a power of two (``bg.advance(2**N)``) will leave the lower ``N`` bits identical to the state that was just left. For a single stream drawn from -sequentially, this is of little consequence. The remaining ``128-N`` bits provide +sequentially, this is of little consequence. The remaining :math:`128-N` bits provide plenty of pseudorandomness that will be mixed in for any practical ``N`` that can be observed in a single stream, which is why one does not need to worry about this if you only use a single stream in your application. Similarly, the @@ -84,7 +84,7 @@ increment, then the pair of streams, when interleaved, will fail `PractRand `_ in a reasonable amount of time, after drawing a few gigabytes of data. Following the standard Birthday Paradox calculations for a collision of 58 bits, we can -see that we can create ``2**29``, or about half a billion, streams which is when +see that we can create :math:`2^{29}`, or about half a billion, streams which is when the probability of such a collision becomes high. Half a billion streams is quite high, and the amount of data each stream needs to draw before the statistical correlations become apparent to even the strict ``PractRand`` tests @@ -107,7 +107,7 @@ size inside of `SeedSequence` (and 128-bit collisions are in the One of the known properties of an LCG is that different increments create *distinct* streams, but with a known relationship. Each LCG has an orbit that -traverses all ``2**128`` different 128-bit states. Two LCGs with different +traverses all :math:`2^{128}` different 128-bit states. Two LCGs with different increments are related in that one can "rotate" the orbit of the first LCG (advance it by a number of steps that we can compute from the two increments) such that then both LCGs will always then have the same state, up to an From 32ebda5a11e30e1a57a3ad3bf1798ca651fb570f Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 17:09:39 -0400 Subject: [PATCH 1043/1270] DOC: nuance recommendations for PCG64. 
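Since the document above asks heavily parallel users to opt in to `PCG64DXSM` explicitly rather than wait for `default_rng` to change, it may help to spell out what that opt-in looks like; the following is a minimal sketch (the seed is arbitrary):

    from numpy.random import Generator, PCG64DXSM

    # default_rng() still returns a PCG64-backed Generator in this release;
    # opting in to the upgraded bit generator is a one-line change.
    rng = Generator(PCG64DXSM(seed=42))
    print(rng.standard_normal(3))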
--- doc/source/reference/random/parallel.rst | 9 ++++++--- doc/source/reference/random/performance.rst | 9 ++++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst index 72158401447e..7e875813357c 100644 --- a/doc/source/reference/random/parallel.rst +++ b/doc/source/reference/random/parallel.rst @@ -88,10 +88,11 @@ territory ([2]_). estimate the naive upper bound on a napkin and take comfort knowing that the probability is actually lower. -.. [2] In this calculation, we can ignore the amount of numbers drawn from each - stream. Each of the PRNGs we provide has some extra protection built in +.. [2] In this calculation, we can mostly ignore the amount of numbers drawn from each + stream. See :ref:`upgrading-pcg64` for the technical details about + `PCG64`. The other PRNGs we provide have some extra protection built in that avoids overlaps if the `~SeedSequence` pools differ in the - slightest bit. `PCG64` has :math:`2^{127}` separate cycles + slightest bit. `PCG64DXSM` has :math:`2^{127}` separate cycles determined by the seed in addition to the position in the :math:`2^{128}` long period for each cycle, so one has to both get on or near the same cycle *and* seed a nearby position in the cycle. @@ -156,6 +157,8 @@ are listed below. +-----------------+-------------------------+-------------------------+-------------------------+ | PCG64 | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ +| PCG64DXSM | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | ++-----------------+-------------------------+-------------------------+-------------------------+ | Philox | :math:`2^{256}` | :math:`2^{128}` | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ diff --git a/doc/source/reference/random/performance.rst b/doc/source/reference/random/performance.rst index 74dad4cc37d3..812c719f8750 100644 --- a/doc/source/reference/random/performance.rst +++ b/doc/source/reference/random/performance.rst @@ -5,9 +5,12 @@ Performance Recommendation ************** -The recommended generator for general use is `PCG64`. It is -statistically high quality, full-featured, and fast on most platforms, but -somewhat slow when compiled for 32-bit processes. + +The recommended generator for general use is `PCG64` or its upgraded variant +`PCG64DXSM` for heavily-parallel use cases. They are statistically high quality, +full-featured, and fast on most platforms, but somewhat slow when compiled for +32-bit processes. See :ref:`upgrading-pcg64` for details on when heavy +parallelism would indicate using `PCG64DXSM`. `Philox` is fairly slow, but its statistical properties have very high quality, and it is easy to get assuredly-independent stream by using From 781254a751e6a808c14544de5f010d8a8cfe5d2e Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 17:09:57 -0400 Subject: [PATCH 1044/1270] DOC: Add PCG64DXSM to performance-measuring script. 
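The overlap-probability footnotes in `parallel.rst` assume the streams are derived from a common `SeedSequence`, e.g. via `spawn`. A small sketch of that pattern with `PCG64DXSM` (the entropy value and stream count are arbitrary):

    from numpy.random import Generator, PCG64DXSM, SeedSequence

    ss = SeedSequence(20210504)              # arbitrary entropy for the example
    children = ss.spawn(8)                   # 8 statistically independent child seeds
    streams = [Generator(PCG64DXSM(c)) for c in children]

    # each worker draws from its own stream
    samples = [rng.random(4) for rng in streams]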
--- doc/source/reference/random/performance.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/random/performance.py b/doc/source/reference/random/performance.py index 3267197f53fb..dfd06fee697a 100644 --- a/doc/source/reference/random/performance.py +++ b/doc/source/reference/random/performance.py @@ -3,9 +3,9 @@ import pandas as pd import numpy as np -from numpy.random import MT19937, PCG64, Philox, SFC64 +from numpy.random import MT19937, PCG64, PCG64DXSM, Philox, SFC64 -PRNGS = [MT19937, PCG64, Philox, SFC64] +PRNGS = [MT19937, PCG64, PCG64DXSM, Philox, SFC64] funcs = {} integers = 'integers(0, 2**{bits},size=1000000, dtype="uint{bits}")' @@ -53,7 +53,7 @@ col[key] = 1000 * min(t) table['RandomState'] = pd.Series(col) -columns = ['MT19937','PCG64','Philox','SFC64', 'RandomState'] +columns = ['MT19937','PCG64','PCG64DXSM','Philox','SFC64', 'RandomState'] table = pd.DataFrame(table) order = np.log(table).mean().sort_values().index table = table.T From 5d380b98f9bd029bbc5ac55f8a23bc16cebe076d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 4 May 2021 15:39:25 -0600 Subject: [PATCH 1045/1270] STY: Add spaces after commas. --- doc/source/reference/random/performance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/random/performance.py b/doc/source/reference/random/performance.py index dfd06fee697a..794142836652 100644 --- a/doc/source/reference/random/performance.py +++ b/doc/source/reference/random/performance.py @@ -53,7 +53,7 @@ col[key] = 1000 * min(t) table['RandomState'] = pd.Series(col) -columns = ['MT19937','PCG64','PCG64DXSM','Philox','SFC64', 'RandomState'] +columns = ['MT19937', 'PCG64', 'PCG64DXSM', 'Philox', 'SFC64', 'RandomState'] table = pd.DataFrame(table) order = np.log(table).mean().sort_values().index table = table.T From dc8cd57e51a73fad8ebb8d6f6f76fa9c0abbc445 Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 18:03:38 -0400 Subject: [PATCH 1046/1270] DOC: Add release note. --- .../upcoming_changes/18906.new_function.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 doc/release/upcoming_changes/18906.new_function.rst diff --git a/doc/release/upcoming_changes/18906.new_function.rst b/doc/release/upcoming_changes/18906.new_function.rst new file mode 100644 index 000000000000..38444009d3cd --- /dev/null +++ b/doc/release/upcoming_changes/18906.new_function.rst @@ -0,0 +1,17 @@ +.. currentmodule:: numpy.random + +Add `PCG64DXSM` `BitGenerator` +------------------------------ + +Uses of the `PCG64` `BitGenerator` in a massively-parallel context have been +shown to have statistical weaknesses that were not apparent at the first +release in numpy 1.17. Most users will never observe this weakness and are +safe to continue to use `PCG64`. We have introduced a new `PCG64DXSM` +`BitGenerator` that will eventually become the new default `BitGenerator` +implementation used by `default_rng` in future releases. `PCG64DXSM` solves +the statistical weakness while preserving the performance and the features of +`PCG64`. + +See :ref:`upgrading-pcg64` for more details. + +.. currentmodule:: numpy From 8e4d3e1590f4d850aebeb6a190a4a14a285efb5b Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 18:45:09 -0400 Subject: [PATCH 1047/1270] DOC: fixes in response to comments. 
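The benchmark script touched above depends on pandas and a table of expression strings; for a quick local comparison of the two PCG variants, the same measurement can be approximated with nothing but `timeit`. A rough, machine-dependent sketch (the sizes and repeat counts are arbitrary):

    import timeit

    setup = ("from numpy.random import Generator, {bg}; "
             "rng = Generator({bg}())")
    stmt = "rng.integers(0, 2**64, size=1_000_000, dtype='uint64')"

    for bg in ("PCG64", "PCG64DXSM"):
        best = min(timeit.repeat(stmt, setup.format(bg=bg), number=10, repeat=3))
        print(f"{bg}: {1000 * best / 10:.2f} ms per million uint64 draws")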
--- doc/source/reference/random/parallel.rst | 4 +- .../reference/random/upgrading-pcg64.rst | 37 +++++++++---------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst index 7e875813357c..7f0207bdebb2 100644 --- a/doc/source/reference/random/parallel.rst +++ b/doc/source/reference/random/parallel.rst @@ -151,9 +151,9 @@ BitGenerator, the size of the jump and the bits in the default unsigned random are listed below. +-----------------+-------------------------+-------------------------+-------------------------+ -| BitGenerator | Period | Jump Size | Bits | +| BitGenerator | Period | Jump Size | Bits per Draw | +=================+=========================+=========================+=========================+ -| MT19937 | :math:`2^{19937}` | :math:`2^{128}` | 32 | +| MT19937 | :math:`2^{19937}-1` | :math:`2^{128}` | 32 | +-----------------+-------------------------+-------------------------+-------------------------+ | PCG64 | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ diff --git a/doc/source/reference/random/upgrading-pcg64.rst b/doc/source/reference/random/upgrading-pcg64.rst index ea7ebcb6bca9..8aec703a8a75 100644 --- a/doc/source/reference/random/upgrading-pcg64.rst +++ b/doc/source/reference/random/upgrading-pcg64.rst @@ -132,22 +132,21 @@ additional protection from these low-bit collisions. To the `PCG author's credit `_, she had developed a stronger output function in response to related discussions during the long birth of the new `BitGenerator` system. We NumPy developers -(mostly me, Robert Kern, to be clear where the blame lies) chose to be -"conservative" and use the XSL-RR variant that had undergone a longer period of -testing at that time. The DXSM output function adopts a "xorshift-multiply" -construction used in strong integer hashes that has much better avalanche -properties than the XSL-RR output function. While there are "pathological" -pairs of increments that induce "bad" additive constants that relate the two -streams, the vast majority of pairs induce "good" additive constants that make -the merely-distinct streams of LCG states into practically-independent output -streams. Indeed, now the claim I once made about `PCG64` is actually true of -`PCG64DXSM`: collisions are possible, but both streams have to simultaneously -be both "close" in the 128 bit state space *and* "close" in the 127-bit -increment space, so the negligible chance of colliding in the 128-bit internal -`SeedSequence` pool is more likely. The DXSM output function is more -computationally intensive than XSL-RR, but some optimizations in the LCG more -than make up for the performance hit on most machines, so `PCG64DXSM` is -a good, safe upgrade. There are, of course, an infinite number of stronger -output functions that one could consider, but most will have a greater -computational cost, and the DXSM output function has now received many CPU -cycles of testing via ``PractRand`` at this time. +chose to be "conservative" and use the XSL-RR variant that had undergone +a longer period of testing at that time. The DXSM output function adopts +a "xorshift-multiply" construction used in strong integer hashes that has much +better avalanche properties than the XSL-RR output function. 
While there are +"pathological" pairs of increments that induce "bad" additive constants that +relate the two streams, the vast majority of pairs induce "good" additive +constants that make the merely-distinct streams of LCG states into +practically-independent output streams. Indeed, now the claim we once made +about `PCG64` is actually true of `PCG64DXSM`: collisions are possible, but +both streams have to simultaneously be both "close" in the 128 bit state space +*and* "close" in the 127-bit increment space, so the negligible chance of +colliding in the 128-bit internal `SeedSequence` pool would be more likely. The +DXSM output function is more computationally intensive than XSL-RR, but some +optimizations in the LCG more than make up for the performance hit on most +machines, so `PCG64DXSM` is a good, safe upgrade. There are, of course, an +infinite number of stronger output functions that one could consider, but most +will have a greater computational cost, and the DXSM output function has now +received many CPU cycles of testing via ``PractRand`` at this time. From 13b30e94cce1cdd314395c25d74bfe693c835910 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 4 May 2021 02:05:39 +0200 Subject: [PATCH 1048/1270] ENH: Add improved placeholder annotations for `np.ma` --- numpy/ma/__init__.pyi | 451 ++++++++++++++++++++-------------------- numpy/ma/core.pyi | 468 ++++++++++++++++++++++++++++++++++++++++++ numpy/ma/extras.pyi | 84 ++++++++ numpy/ma/mrecords.pyi | 88 ++++++++ 4 files changed, 867 insertions(+), 224 deletions(-) create mode 100644 numpy/ma/core.pyi create mode 100644 numpy/ma/extras.pyi create mode 100644 numpy/ma/mrecords.pyi diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi index 16e026272605..a9a833e520f2 100644 --- a/numpy/ma/__init__.pyi +++ b/numpy/ma/__init__.pyi @@ -2,228 +2,231 @@ from typing import Any, List from numpy.ma import extras as extras -__all__: List[str] +from numpy.ma.core import ( + MAError as MAError, + MaskError as MaskError, + MaskType as MaskType, + MaskedArray as MaskedArray, + abs as abs, + absolute as absolute, + add as add, + all as all, + allclose as allclose, + allequal as allequal, + alltrue as alltrue, + amax as amax, + amin as amin, + angle as angle, + anom as anom, + anomalies as anomalies, + any as any, + append as append, + arange as arange, + arccos as arccos, + arccosh as arccosh, + arcsin as arcsin, + arcsinh as arcsinh, + arctan as arctan, + arctan2 as arctan2, + arctanh as arctanh, + argmax as argmax, + argmin as argmin, + argsort as argsort, + around as around, + array as array, + asanyarray as asanyarray, + asarray as asarray, + bitwise_and as bitwise_and, + bitwise_or as bitwise_or, + bitwise_xor as bitwise_xor, + bool_ as bool_, + ceil as ceil, + choose as choose, + clip as clip, + common_fill_value as common_fill_value, + compress as compress, + compressed as compressed, + concatenate as concatenate, + conjugate as conjugate, + convolve as convolve, + copy as copy, + correlate as correlate, + cos as cos, + cosh as cosh, + count as count, + cumprod as cumprod, + cumsum as cumsum, + default_fill_value as default_fill_value, + diag as diag, + diagonal as diagonal, + diff as diff, + divide as divide, + empty as empty, + empty_like as empty_like, + equal as equal, + exp as exp, + expand_dims as expand_dims, + fabs as fabs, + filled as filled, + fix_invalid as fix_invalid, + flatten_mask as flatten_mask, + flatten_structured_array as flatten_structured_array, + floor as floor, + floor_divide as floor_divide, + fmod as 
fmod, + frombuffer as frombuffer, + fromflex as fromflex, + fromfunction as fromfunction, + getdata as getdata, + getmask as getmask, + getmaskarray as getmaskarray, + greater as greater, + greater_equal as greater_equal, + harden_mask as harden_mask, + hypot as hypot, + identity as identity, + ids as ids, + indices as indices, + inner as inner, + innerproduct as innerproduct, + isMA as isMA, + isMaskedArray as isMaskedArray, + is_mask as is_mask, + is_masked as is_masked, + isarray as isarray, + left_shift as left_shift, + less as less, + less_equal as less_equal, + log as log, + log10 as log10, + log2 as log2, + logical_and as logical_and, + logical_not as logical_not, + logical_or as logical_or, + logical_xor as logical_xor, + make_mask as make_mask, + make_mask_descr as make_mask_descr, + make_mask_none as make_mask_none, + mask_or as mask_or, + masked as masked, + masked_array as masked_array, + masked_equal as masked_equal, + masked_greater as masked_greater, + masked_greater_equal as masked_greater_equal, + masked_inside as masked_inside, + masked_invalid as masked_invalid, + masked_less as masked_less, + masked_less_equal as masked_less_equal, + masked_not_equal as masked_not_equal, + masked_object as masked_object, + masked_outside as masked_outside, + masked_print_option as masked_print_option, + masked_singleton as masked_singleton, + masked_values as masked_values, + masked_where as masked_where, + max as max, + maximum as maximum, + maximum_fill_value as maximum_fill_value, + mean as mean, + min as min, + minimum as minimum, + minimum_fill_value as minimum_fill_value, + mod as mod, + multiply as multiply, + mvoid as mvoid, + ndim as ndim, + negative as negative, + nomask as nomask, + nonzero as nonzero, + not_equal as not_equal, + ones as ones, + outer as outer, + outerproduct as outerproduct, + power as power, + prod as prod, + product as product, + ptp as ptp, + put as put, + putmask as putmask, + ravel as ravel, + remainder as remainder, + repeat as repeat, + reshape as reshape, + resize as resize, + right_shift as right_shift, + round as round, + round_ as round_, + set_fill_value as set_fill_value, + shape as shape, + sin as sin, + sinh as sinh, + size as size, + soften_mask as soften_mask, + sometrue as sometrue, + sort as sort, + sqrt as sqrt, + squeeze as squeeze, + std as std, + subtract as subtract, + sum as sum, + swapaxes as swapaxes, + take as take, + tan as tan, + tanh as tanh, + trace as trace, + transpose as transpose, + true_divide as true_divide, + var as var, + where as where, + zeros as zeros, +) + +from numpy.ma.extras import ( + apply_along_axis as apply_along_axis, + apply_over_axes as apply_over_axes, + atleast_1d as atleast_1d, + atleast_2d as atleast_2d, + atleast_3d as atleast_3d, + average as average, + clump_masked as clump_masked, + clump_unmasked as clump_unmasked, + column_stack as column_stack, + compress_cols as compress_cols, + compress_nd as compress_nd, + compress_rowcols as compress_rowcols, + compress_rows as compress_rows, + count_masked as count_masked, + corrcoef as corrcoef, + cov as cov, + diagflat as diagflat, + dot as dot, + dstack as dstack, + ediff1d as ediff1d, + flatnotmasked_contiguous as flatnotmasked_contiguous, + flatnotmasked_edges as flatnotmasked_edges, + hsplit as hsplit, + hstack as hstack, + isin as isin, + in1d as in1d, + intersect1d as intersect1d, + mask_cols as mask_cols, + mask_rowcols as mask_rowcols, + mask_rows as mask_rows, + masked_all as masked_all, + masked_all_like as masked_all_like, + median as median, 
+ mr_ as mr_, + notmasked_contiguous as notmasked_contiguous, + notmasked_edges as notmasked_edges, + polyfit as polyfit, + row_stack as row_stack, + setdiff1d as setdiff1d, + setxor1d as setxor1d, + stack as stack, + unique as unique, + union1d as union1d, + vander as vander, + vstack as vstack, +) -core: Any -extras: Any -MAError: Any -MaskError: Any -MaskType: Any -MaskedArray: Any -abs: Any -absolute: Any -add: Any -all: Any -allclose: Any -allequal: Any -alltrue: Any -amax: Any -amin: Any -angle: Any -anom: Any -anomalies: Any -any: Any -append: Any -arange: Any -arccos: Any -arccosh: Any -arcsin: Any -arcsinh: Any -arctan: Any -arctan2: Any -arctanh: Any -argmax: Any -argmin: Any -argsort: Any -around: Any -array: Any -asanyarray: Any -asarray: Any -bitwise_and: Any -bitwise_or: Any -bitwise_xor: Any -bool_: Any -ceil: Any -choose: Any -clip: Any -common_fill_value: Any -compress: Any -compressed: Any -concatenate: Any -conjugate: Any -convolve: Any -copy: Any -correlate: Any -cos: Any -cosh: Any -count: Any -cumprod: Any -cumsum: Any -default_fill_value: Any -diag: Any -diagonal: Any -diff: Any -divide: Any -empty: Any -empty_like: Any -equal: Any -exp: Any -expand_dims: Any -fabs: Any -filled: Any -fix_invalid: Any -flatten_mask: Any -flatten_structured_array: Any -floor: Any -floor_divide: Any -fmod: Any -frombuffer: Any -fromflex: Any -fromfunction: Any -getdata: Any -getmask: Any -getmaskarray: Any -greater: Any -greater_equal: Any -harden_mask: Any -hypot: Any -identity: Any -ids: Any -indices: Any -inner: Any -innerproduct: Any -isMA: Any -isMaskedArray: Any -is_mask: Any -is_masked: Any -isarray: Any -left_shift: Any -less: Any -less_equal: Any -log: Any -log10: Any -log2: Any -logical_and: Any -logical_not: Any -logical_or: Any -logical_xor: Any -make_mask: Any -make_mask_descr: Any -make_mask_none: Any -mask_or: Any -masked: Any -masked_array: Any -masked_equal: Any -masked_greater: Any -masked_greater_equal: Any -masked_inside: Any -masked_invalid: Any -masked_less: Any -masked_less_equal: Any -masked_not_equal: Any -masked_object: Any -masked_outside: Any -masked_print_option: Any -masked_singleton: Any -masked_values: Any -masked_where: Any -max: Any -maximum: Any -maximum_fill_value: Any -mean: Any -min: Any -minimum: Any -minimum_fill_value: Any -mod: Any -multiply: Any -mvoid: Any -ndim: Any -negative: Any -nomask: Any -nonzero: Any -not_equal: Any -ones: Any -outer: Any -outerproduct: Any -power: Any -prod: Any -product: Any -ptp: Any -put: Any -putmask: Any -ravel: Any -remainder: Any -repeat: Any -reshape: Any -resize: Any -right_shift: Any -round: Any -round_: Any -set_fill_value: Any -shape: Any -sin: Any -sinh: Any -size: Any -soften_mask: Any -sometrue: Any -sort: Any -sqrt: Any -squeeze: Any -std: Any -subtract: Any -sum: Any -swapaxes: Any -take: Any -tan: Any -tanh: Any -trace: Any -transpose: Any -true_divide: Any -var: Any -where: Any -zeros: Any -apply_along_axis: Any -apply_over_axes: Any -atleast_1d: Any -atleast_2d: Any -atleast_3d: Any -average: Any -clump_masked: Any -clump_unmasked: Any -column_stack: Any -compress_cols: Any -compress_nd: Any -compress_rowcols: Any -compress_rows: Any -count_masked: Any -corrcoef: Any -cov: Any -diagflat: Any -dot: Any -dstack: Any -ediff1d: Any -flatnotmasked_contiguous: Any -flatnotmasked_edges: Any -hsplit: Any -hstack: Any -isin: Any -in1d: Any -intersect1d: Any -mask_cols: Any -mask_rowcols: Any -mask_rows: Any -masked_all: Any -masked_all_like: Any -median: Any -mr_: Any -notmasked_contiguous: Any 
-notmasked_edges: Any -polyfit: Any -row_stack: Any -setdiff1d: Any -setxor1d: Any -stack: Any -unique: Any -union1d: Any -vander: Any -vstack: Any +__all__: List[str] diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi new file mode 100644 index 000000000000..e7e3f1f36818 --- /dev/null +++ b/numpy/ma/core.pyi @@ -0,0 +1,468 @@ +from typing import Any, List, TypeVar, Callable +from numpy import ndarray, dtype, float64 + +from numpy import ( + amax as amax, + amin as amin, + bool_ as bool_, + expand_dims as expand_dims, + diff as diff, + clip as clip, + indices as indices, + ones_like as ones_like, + squeeze as squeeze, + zeros_like as zeros_like, +) + +from numpy.lib.function_base import ( + angle as angle, +) + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) + +__all__: List[str] + +MaskType = bool_ +nomask: bool_ + +class MaskedArrayFutureWarning(FutureWarning): ... +class MAError(Exception): ... +class MaskError(MAError): ... + +def default_fill_value(obj): ... +def minimum_fill_value(obj): ... +def maximum_fill_value(obj): ... +def set_fill_value(a, fill_value): ... +def common_fill_value(a, b): ... +def filled(a, fill_value=...): ... +def getdata(a, subok=...): ... +get_data = getdata + +def fix_invalid(a, mask=..., copy=..., fill_value=...): ... + +class _MaskedUFunc: + f: Any + __doc__: Any + __name__: Any + def __init__(self, ufunc): ... + +class _MaskedUnaryOperation(_MaskedUFunc): + fill: Any + domain: Any + def __init__(self, mufunc, fill=..., domain=...): ... + def __call__(self, a, *args, **kwargs): ... + +class _MaskedBinaryOperation(_MaskedUFunc): + fillx: Any + filly: Any + def __init__(self, mbfunc, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... + def reduce(self, target, axis=..., dtype=...): ... + def outer(self, a, b): ... + def accumulate(self, target, axis=...): ... + +class _DomainedBinaryOperation(_MaskedUFunc): + domain: Any + fillx: Any + filly: Any + def __init__(self, dbfunc, domain, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... 
+ +exp: _MaskedUnaryOperation +conjugate: _MaskedUnaryOperation +sin: _MaskedUnaryOperation +cos: _MaskedUnaryOperation +arctan: _MaskedUnaryOperation +arcsinh: _MaskedUnaryOperation +sinh: _MaskedUnaryOperation +cosh: _MaskedUnaryOperation +tanh: _MaskedUnaryOperation +abs: _MaskedUnaryOperation +absolute: _MaskedUnaryOperation +fabs: _MaskedUnaryOperation +negative: _MaskedUnaryOperation +floor: _MaskedUnaryOperation +ceil: _MaskedUnaryOperation +around: _MaskedUnaryOperation +logical_not: _MaskedUnaryOperation +sqrt: _MaskedUnaryOperation +log: _MaskedUnaryOperation +log2: _MaskedUnaryOperation +log10: _MaskedUnaryOperation +tan: _MaskedUnaryOperation +arcsin: _MaskedUnaryOperation +arccos: _MaskedUnaryOperation +arccosh: _MaskedUnaryOperation +arctanh: _MaskedUnaryOperation + +add: _MaskedBinaryOperation +subtract: _MaskedBinaryOperation +multiply: _MaskedBinaryOperation +arctan2: _MaskedBinaryOperation +equal: _MaskedBinaryOperation +not_equal: _MaskedBinaryOperation +less_equal: _MaskedBinaryOperation +greater_equal: _MaskedBinaryOperation +less: _MaskedBinaryOperation +greater: _MaskedBinaryOperation +logical_and: _MaskedBinaryOperation +alltrue: _MaskedBinaryOperation +logical_or: _MaskedBinaryOperation +sometrue: Callable[..., Any] +logical_xor: _MaskedBinaryOperation +bitwise_and: _MaskedBinaryOperation +bitwise_or: _MaskedBinaryOperation +bitwise_xor: _MaskedBinaryOperation +hypot: _MaskedBinaryOperation +divide: _MaskedBinaryOperation +true_divide: _MaskedBinaryOperation +floor_divide: _MaskedBinaryOperation +remainder: _MaskedBinaryOperation +fmod: _MaskedBinaryOperation +mod: _MaskedBinaryOperation + +def make_mask_descr(ndtype): ... +def getmask(a): ... +get_mask = getmask + +def getmaskarray(arr): ... +def is_mask(m): ... +def make_mask(m, copy=..., shrink=..., dtype=...): ... +def make_mask_none(newshape, dtype=...): ... +def mask_or(m1, m2, copy=..., shrink=...): ... +def flatten_mask(mask): ... +def masked_where(condition, a, copy=...): ... +def masked_greater(x, value, copy=...): ... +def masked_greater_equal(x, value, copy=...): ... +def masked_less(x, value, copy=...): ... +def masked_less_equal(x, value, copy=...): ... +def masked_not_equal(x, value, copy=...): ... +def masked_equal(x, value, copy=...): ... +def masked_inside(x, v1, v2, copy=...): ... +def masked_outside(x, v1, v2, copy=...): ... +def masked_object(x, value, copy=..., shrink=...): ... +def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... +def masked_invalid(a, copy=...): ... + +class _MaskedPrintOption: + def __init__(self, display): ... + def display(self): ... + def set_display(self, s): ... + def enabled(self): ... + def enable(self, shrink=...): ... + +masked_print_option: _MaskedPrintOption + +def flatten_structured_array(a): ... + +class MaskedIterator: + ma: Any + dataiter: Any + maskiter: Any + def __init__(self, ma): ... + def __iter__(self): ... + def __getitem__(self, indx): ... + def __setitem__(self, index, value): ... + def __next__(self): ... + +class MaskedArray(ndarray[_ShapeType, _DType_co]): + __array_priority__: Any + def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... + def __array_finalize__(self, obj): ... + def __array_wrap__(self, obj, context=...): ... + def view(self, dtype=..., type=..., fill_value=...): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + @property + def dtype(self): ... 
+ @dtype.setter + def dtype(self, dtype): ... + @property + def shape(self): ... + @shape.setter + def shape(self, shape): ... + def __setmask__(self, mask, copy=...): ... + @property + def mask(self): ... + @mask.setter + def mask(self, value): ... + @property + def recordmask(self): ... + @recordmask.setter + def recordmask(self, mask): ... + def harden_mask(self): ... + def soften_mask(self): ... + @property + def hardmask(self): ... + def unshare_mask(self): ... + @property + def sharedmask(self): ... + def shrink_mask(self): ... + @property + def baseclass(self): ... + data: Any + @property + def flat(self): ... + @flat.setter + def flat(self, value): ... + @property + def fill_value(self): ... + @fill_value.setter + def fill_value(self, value=...): ... + get_fill_value: Any + set_fill_value: Any + def filled(self, fill_value=...): ... + def compressed(self): ... + def compress(self, condition, axis=..., out=...): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __add__(self, other): ... + def __radd__(self, other): ... + def __sub__(self, other): ... + def __rsub__(self, other): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __div__(self, other): ... + def __truediv__(self, other): ... + def __rtruediv__(self, other): ... + def __floordiv__(self, other): ... + def __rfloordiv__(self, other): ... + def __pow__(self, other): ... + def __rpow__(self, other): ... + def __iadd__(self, other): ... + def __isub__(self, other): ... + def __imul__(self, other): ... + def __idiv__(self, other): ... + def __ifloordiv__(self, other): ... + def __itruediv__(self, other): ... + def __ipow__(self, other): ... + def __float__(self): ... + def __int__(self): ... + @property # type: ignore[misc] + def imag(self): ... + get_imag: Any + @property # type: ignore[misc] + def real(self): ... + get_real: Any + def count(self, axis=..., keepdims=...): ... + def ravel(self, order=...): ... + def reshape(self, *s, **kwargs): ... + def resize(self, newshape, refcheck=..., order=...): ... + def put(self, indices, values, mode=...): ... + def ids(self): ... + def iscontiguous(self): ... + def all(self, axis=..., out=..., keepdims=...): ... + def any(self, axis=..., out=..., keepdims=...): ... + def nonzero(self): ... + def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... + def dot(self, b, out=..., strict=...): ... + def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... + def cumsum(self, axis=..., dtype=..., out=...): ... + def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... + product: Any + def cumprod(self, axis=..., dtype=..., out=...): ... + def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... + def anom(self, axis=..., dtype=...): ... + def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def round(self, decimals=..., out=...): ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... + def argmin(self, axis=..., fill_value=..., out=...): ... + def argmax(self, axis=..., fill_value=..., out=...): ... + def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... + def min(self, axis=..., out=..., fill_value=..., keepdims=...): ... + # NOTE: deprecated + # def mini(self, axis=...): ... + # def tostring(self, fill_value=..., order=...): ... + def max(self, axis=..., out=..., fill_value=..., keepdims=...): ... 
+ def ptp(self, axis=..., out=..., fill_value=..., keepdims=...): ... + def partition(self, *args, **kwargs): ... + def argpartition(self, *args, **kwargs): ... + def take(self, indices, axis=..., out=..., mode=...): ... + copy: Any + diagonal: Any + flatten: Any + repeat: Any + squeeze: Any + swapaxes: Any + T: Any + transpose: Any + def tolist(self, fill_value=...): ... + def tobytes(self, fill_value=..., order=...): ... + def tofile(self, fid, sep=..., format=...): ... + def toflex(self): ... + torecords: Any + def __reduce__(self): ... + def __deepcopy__(self, memo=...): ... + +class mvoid(MaskedArray[_ShapeType, _DType_co]): + def __new__( + self, + data, + mask=..., + dtype=..., + fill_value=..., + hardmask=..., + copy=..., + subok=..., + ): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def __iter__(self): ... + def __len__(self): ... + def filled(self, fill_value=...): ... + def tolist(self): ... + +def isMaskedArray(x): ... +isarray = isMaskedArray +isMA = isMaskedArray + +# 0D float64 array +class MaskedConstant(MaskedArray[Any, dtype[float64]]): + def __new__(cls): ... + __class__: Any + def __array_finalize__(self, obj): ... + def __array_prepare__(self, obj, context=...): ... + def __array_wrap__(self, obj, context=...): ... + def __format__(self, format_spec): ... + def __reduce__(self): ... + def __iop__(self, other): ... + __iadd__: Any + __isub__: Any + __imul__: Any + __ifloordiv__: Any + __itruediv__: Any + __ipow__: Any + def copy(self, *args, **kwargs): ... + def __copy__(self): ... + def __deepcopy__(self, memo): ... + def __setattr__(self, attr, value): ... + +masked: MaskedConstant +masked_singleton: MaskedConstant +masked_array = MaskedArray + +def array( + data, + dtype=..., + copy=..., + order=..., + mask=..., + fill_value=..., + keep_mask=..., + hard_mask=..., + shrink=..., + subok=..., + ndmin=..., +): ... +def is_masked(x): ... + +class _extrema_operation(_MaskedUFunc): + compare: Any + fill_value_func: Any + def __init__(self, ufunc, compare, fill_value): ... + # NOTE: in practice `b` has a default value, but users should + # explicitly provide a value here as the default is deprecated + def __call__(self, a, b): ... + def reduce(self, target, axis=...): ... + def outer(self, a, b): ... + +def min(obj, axis=..., out=..., fill_value=..., keepdims=...): ... +def max(obj, axis=..., out=..., fill_value=..., keepdims=...): ... +def ptp(obj, axis=..., out=..., fill_value=..., keepdims=...): ... + +class _frommethod: + __name__: Any + __doc__: Any + reversed: Any + def __init__(self, methodname, reversed=...): ... + def getdoc(self): ... + def __call__(self, a, *args, **params): ... + +all: _frommethod +anomalies: _frommethod +anom: _frommethod +any: _frommethod +compress: _frommethod +cumprod: _frommethod +cumsum: _frommethod +copy: _frommethod +diagonal: _frommethod +harden_mask: _frommethod +ids: _frommethod +mean: _frommethod +nonzero: _frommethod +prod: _frommethod +product: _frommethod +ravel: _frommethod +repeat: _frommethod +soften_mask: _frommethod +std: _frommethod +sum: _frommethod +swapaxes: _frommethod +trace: _frommethod +var: _frommethod +count: _frommethod +argmin: _frommethod +argmax: _frommethod + +minimum: _extrema_operation +maximum: _extrema_operation + +def take(a, indices, axis=..., out=..., mode=...): ... +def power(a, b, third=...): ... +def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... +def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... 
+def compressed(x): ... +def concatenate(arrays, axis=...): ... +def diag(v, k=...): ... +def left_shift(a, n): ... +def right_shift(a, n): ... +def put(a, indices, values, mode=...): ... +def putmask(a, mask, values): ... +def transpose(a, axes=...): ... +def reshape(a, new_shape, order=...): ... +def resize(x, new_shape): ... +def ndim(obj): ... +def shape(obj): ... +def size(obj, axis=...): ... +def where(condition, x=..., y=...): ... +def choose(indices, choices, out=..., mode=...): ... +def round_(a, decimals=..., out=...): ... +round = round_ + +def inner(a, b): ... +innerproduct = inner + +def outer(a, b): ... +outerproduct = outer + +def correlate(a, v, mode=..., propagate_mask=...): ... +def convolve(a, v, mode=..., propagate_mask=...): ... +def allequal(a, b, fill_value=...): ... +def allclose(a, b, masked_equal=..., rtol=..., atol=...): ... +def asarray(a, dtype=..., order=...): ... +def asanyarray(a, dtype=...): ... +def fromflex(fxarray): ... + +class _convert2ma: + __doc__: Any + def __init__(self, funcname, params=...): ... + def getdoc(self): ... + def __call__(self, *args, **params): ... + +arange: _convert2ma +empty: _convert2ma +empty_like: _convert2ma +frombuffer: _convert2ma +fromfunction: _convert2ma +identity: _convert2ma +ones: _convert2ma +zeros: _convert2ma + +def append(a, b, axis=...): ... +def dot(a, b, strict=..., out=...): ... +def mask_rowcols(a, axis=...): ... diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi new file mode 100644 index 000000000000..e58e43badf23 --- /dev/null +++ b/numpy/ma/extras.pyi @@ -0,0 +1,84 @@ +from typing import Any, List +from numpy.lib.index_tricks import AxisConcatenator + +from numpy.ma.core import ( + dot as dot, + mask_rowcols as mask_rowcols, +) + +__all__: List[str] + +def count_masked(arr, axis=...): ... +def masked_all(shape, dtype = ...): ... +def masked_all_like(arr): ... + +class _fromnxfunction: + __name__: Any + __doc__: Any + def __init__(self, funcname): ... + def getdoc(self): ... + def __call__(self, *args, **params): ... + +class _fromnxfunction_single(_fromnxfunction): + def __call__(self, x, *args, **params): ... + +class _fromnxfunction_seq(_fromnxfunction): + def __call__(self, x, *args, **params): ... + +class _fromnxfunction_allargs(_fromnxfunction): + def __call__(self, *args, **params): ... + +atleast_1d: _fromnxfunction_allargs +atleast_2d: _fromnxfunction_allargs +atleast_3d: _fromnxfunction_allargs + +vstack: _fromnxfunction_seq +row_stack: _fromnxfunction_seq +hstack: _fromnxfunction_seq +column_stack: _fromnxfunction_seq +dstack: _fromnxfunction_seq +stack: _fromnxfunction_seq + +hsplit: _fromnxfunction_single +diagflat: _fromnxfunction_single + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... +def apply_over_axes(func, a, axes): ... +def average(a, axis=..., weights=..., returned=...): ... +def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... +def compress_nd(x, axis=...): ... +def compress_rowcols(x, axis=...): ... +def compress_rows(a): ... +def compress_cols(a): ... +def mask_rows(a, axis = ...): ... +def mask_cols(a, axis = ...): ... +def ediff1d(arr, to_end=..., to_begin=...): ... +def unique(ar1, return_index=..., return_inverse=...): ... +def intersect1d(ar1, ar2, assume_unique=...): ... +def setxor1d(ar1, ar2, assume_unique=...): ... +def in1d(ar1, ar2, assume_unique=..., invert=...): ... +def isin(element, test_elements, assume_unique=..., invert=...): ... +def union1d(ar1, ar2): ... +def setdiff1d(ar1, ar2, assume_unique=...): ... 
+def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... +def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ... + +class MAxisConcatenator(AxisConcatenator): + concatenate: Any + @classmethod + def makemat(cls, arr): ... + def __getitem__(self, key): ... + +class mr_class(MAxisConcatenator): + def __init__(self): ... + +mr_: mr_class + +def flatnotmasked_edges(a): ... +def notmasked_edges(a, axis=...): ... +def flatnotmasked_contiguous(a): ... +def notmasked_contiguous(a, axis=...): ... +def clump_unmasked(a): ... +def clump_masked(a): ... +def vander(x, n=...): ... +def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi new file mode 100644 index 000000000000..92d5afb897e2 --- /dev/null +++ b/numpy/ma/mrecords.pyi @@ -0,0 +1,88 @@ +from typing import List, Any, TypeVar + +from numpy import dtype +from numpy.ma import MaskedArray + +__all__: List[str] + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) + +class MaskedRecords(MaskedArray[_ShapeType, _DType_co]): + def __new__( + cls, + shape, + dtype=..., + buf=..., + offset=..., + strides=..., + formats=..., + names=..., + titles=..., + byteorder=..., + aligned=..., + mask=..., + hard_mask=..., + fill_value=..., + keep_mask=..., + copy=..., + **options, + ): ... + _mask: Any + _fill_value: Any + @property + def _data(self): ... + @property + def _fieldmask(self): ... + def __array_finalize__(self, obj): ... + def __len__(self): ... + def __getattribute__(self, attr): ... + def __setattr__(self, attr, val): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def view(self, dtype=..., type=...): ... + def harden_mask(self): ... + def soften_mask(self): ... + def copy(self): ... + def tolist(self, fill_value=...): ... + def __reduce__(self): ... + +mrecarray = MaskedRecords + +def fromarrays( + arraylist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., +): ... + +def fromrecords( + reclist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., + mask=..., +): ... + +def fromtextfile( + fname, + delimitor=..., + commentchar=..., + missingchar=..., + varnames=..., + vartypes=..., +): ... + +def addfield(mrecord, newfield, newfieldname=...): ... 
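The masked-array stubs above are almost entirely placeholder signatures, so a type checker mainly learns which names exist and that `MaskedArray` is generic over a shape parameter and a dtype parameter, mirroring `ndarray`. A minimal usage sketch of what that already allows; the function name and the choice of `float64` are illustrative assumptions, and `from __future__ import annotations` keeps the subscripted annotation from being evaluated at runtime:

from __future__ import annotations

from typing import Any

import numpy as np
import numpy.ma as ma

def mask_negatives(
    a: ma.MaskedArray[Any, np.dtype[np.float64]],
) -> ma.MaskedArray[Any, np.dtype[np.float64]]:
    # masked_where is declared in core.pyi above, so checkers see a real callable
    return ma.masked_where(a < 0, a)

data = ma.masked_array([1.0, -2.0, 3.0], mask=[False, False, True])
print(mask_negatives(data))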
From 695df2561f79bbae45a6d090a763ebbe7dc45be6 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 5 May 2021 02:24:50 +0200 Subject: [PATCH 1049/1270] ENH: Add improved placeholder annotations for `np.polynomial` --- numpy/polynomial/__init__.pyi | 16 ++++---- numpy/polynomial/_polybase.pyi | 69 +++++++++++++++++++++++++++++++++ numpy/polynomial/chebyshev.pyi | 51 ++++++++++++++++++++++++ numpy/polynomial/hermite.pyi | 46 ++++++++++++++++++++++ numpy/polynomial/hermite_e.pyi | 46 ++++++++++++++++++++++ numpy/polynomial/laguerre.pyi | 46 ++++++++++++++++++++++ numpy/polynomial/legendre.pyi | 46 ++++++++++++++++++++++ numpy/polynomial/polynomial.pyi | 41 ++++++++++++++++++++ numpy/polynomial/polyutils.pyi | 17 ++++++++ 9 files changed, 370 insertions(+), 8 deletions(-) create mode 100644 numpy/polynomial/_polybase.pyi create mode 100644 numpy/polynomial/chebyshev.pyi create mode 100644 numpy/polynomial/hermite.pyi create mode 100644 numpy/polynomial/hermite_e.pyi create mode 100644 numpy/polynomial/laguerre.pyi create mode 100644 numpy/polynomial/legendre.pyi create mode 100644 numpy/polynomial/polynomial.pyi create mode 100644 numpy/polynomial/polyutils.pyi diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 6a740604184c..bebedb3a6990 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import List from numpy.polynomial import ( chebyshev as chebyshev, @@ -8,13 +8,13 @@ from numpy.polynomial import ( legendre as legendre, polynomial as polynomial, ) +from numpy.polynomial.chebyshev import Chebyshev as Chebyshev +from numpy.polynomial.hermite import Hermite as Hermite +from numpy.polynomial.hermite_e import HermiteE as HermiteE +from numpy.polynomial.laguerre import Laguerre as Laguerre +from numpy.polynomial.legendre import Legendre as Legendre +from numpy.polynomial.polynomial import Polynomial as Polynomial __all__: List[str] -Polynomial: Any -Chebyshev: Any -Legendre: Any -Hermite: Any -HermiteE: Any -Laguerre: Any -set_default_printstyle: Any +def set_default_printstyle(style): ... diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi new file mode 100644 index 000000000000..c4160146947f --- /dev/null +++ b/numpy/polynomial/_polybase.pyi @@ -0,0 +1,69 @@ +import abc +from typing import Any, List, ClassVar + +__all__: List[str] + +class ABCPolyBase(abc.ABC): + __hash__: ClassVar[None] # type: ignore[assignment] + __array_ufunc__: ClassVar[None] + maxpower: ClassVar[int] + coef: Any + @property + @abc.abstractmethod + def domain(self): ... + @property + @abc.abstractmethod + def window(self): ... + @property + @abc.abstractmethod + def basis_name(self): ... + def has_samecoef(self, other): ... + def has_samedomain(self, other): ... + def has_samewindow(self, other): ... + def has_sametype(self, other): ... + def __init__(self, coef, domain=..., window=...): ... + def __format__(self, fmt_str): ... + def __call__(self, arg): ... + def __iter__(self): ... + def __len__(self): ... + def __neg__(self): ... + def __pos__(self): ... + def __add__(self, other): ... + def __sub__(self, other): ... + def __mul__(self, other): ... + def __truediv__(self, other): ... + def __floordiv__(self, other): ... + def __mod__(self, other): ... + def __divmod__(self, other): ... + def __pow__(self, other): ... + def __radd__(self, other): ... + def __rsub__(self, other): ... + def __rmul__(self, other): ... + def __rdiv__(self, other): ... 
+ def __rtruediv__(self, other): ... + def __rfloordiv__(self, other): ... + def __rmod__(self, other): ... + def __rdivmod__(self, other): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def copy(self): ... + def degree(self): ... + def cutdeg(self, deg): ... + def trim(self, tol=...): ... + def truncate(self, size): ... + def convert(self, domain=..., kind=..., window=...): ... + def mapparms(self): ... + def integ(self, m=..., k = ..., lbnd=...): ... + def deriv(self, m=...): ... + def roots(self): ... + def linspace(self, n=..., domain=...): ... + @classmethod + def fit(cls, x, y, deg, domain=..., rcond=..., full=..., w=..., window=...): ... + @classmethod + def fromroots(cls, roots, domain = ..., window=...): ... + @classmethod + def identity(cls, domain=..., window=...): ... + @classmethod + def basis(cls, deg, domain=..., window=...): ... + @classmethod + def cast(cls, series, domain=..., window=...): ... diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi new file mode 100644 index 000000000000..841c0859b1b0 --- /dev/null +++ b/numpy/polynomial/chebyshev.pyi @@ -0,0 +1,51 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: List[str] + +chebtrim = trimcoef + +def poly2cheb(pol): ... +def cheb2poly(c): ... + +chebdomain: ndarray[Any, dtype[int_]] +chebzero: ndarray[Any, dtype[int_]] +chebone: ndarray[Any, dtype[int_]] +chebx: ndarray[Any, dtype[int_]] + +def chebline(off, scl): ... +def chebfromroots(roots): ... +def chebadd(c1, c2): ... +def chebsub(c1, c2): ... +def chebmulx(c): ... +def chebmul(c1, c2): ... +def chebdiv(c1, c2): ... +def chebpow(c, pow, maxpower=...): ... +def chebder(c, m=..., scl=..., axis=...): ... +def chebint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def chebval(x, c, tensor=...): ... +def chebval2d(x, y, c): ... +def chebgrid2d(x, y, c): ... +def chebval3d(x, y, z, c): ... +def chebgrid3d(x, y, z, c): ... +def chebvander(x, deg): ... +def chebvander2d(x, y, deg): ... +def chebvander3d(x, y, z, deg): ... +def chebfit(x, y, deg, rcond=..., full=..., w=...): ... +def chebcompanion(c): ... +def chebroots(c): ... +def chebinterpolate(func, deg, args = ...): ... +def chebgauss(deg): ... +def chebweight(x): ... +def chebpts1(npts): ... +def chebpts2(npts): ... + +class Chebyshev(ABCPolyBase): + @classmethod + def interpolate(cls, func, deg, domain=..., args = ...): ... + domain: Any + window: Any + basis_name: Any diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi new file mode 100644 index 000000000000..8364a5b0fcbc --- /dev/null +++ b/numpy/polynomial/hermite.pyi @@ -0,0 +1,46 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_, float_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +hermtrim = trimcoef + +def poly2herm(pol): ... +def herm2poly(c): ... + +hermdomain: ndarray[Any, dtype[int_]] +hermzero: ndarray[Any, dtype[int_]] +hermone: ndarray[Any, dtype[int_]] +hermx: ndarray[Any, dtype[float_]] + +def hermline(off, scl): ... +def hermfromroots(roots): ... +def hermadd(c1, c2): ... +def hermsub(c1, c2): ... +def hermmulx(c): ... +def hermmul(c1, c2): ... +def hermdiv(c1, c2): ... +def hermpow(c, pow, maxpower=...): ... +def hermder(c, m=..., scl=..., axis=...): ... +def hermint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... 
+def hermval(x, c, tensor=...): ... +def hermval2d(x, y, c): ... +def hermgrid2d(x, y, c): ... +def hermval3d(x, y, z, c): ... +def hermgrid3d(x, y, z, c): ... +def hermvander(x, deg): ... +def hermvander2d(x, y, deg): ... +def hermvander3d(x, y, z, deg): ... +def hermfit(x, y, deg, rcond=..., full=..., w=...): ... +def hermcompanion(c): ... +def hermroots(c): ... +def hermgauss(deg): ... +def hermweight(x): ... + +class Hermite(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi new file mode 100644 index 000000000000..c029bfda7788 --- /dev/null +++ b/numpy/polynomial/hermite_e.pyi @@ -0,0 +1,46 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +hermetrim = trimcoef + +def poly2herme(pol): ... +def herme2poly(c): ... + +hermedomain: ndarray[Any, dtype[int_]] +hermezero: ndarray[Any, dtype[int_]] +hermeone: ndarray[Any, dtype[int_]] +hermex: ndarray[Any, dtype[int_]] + +def hermeline(off, scl): ... +def hermefromroots(roots): ... +def hermeadd(c1, c2): ... +def hermesub(c1, c2): ... +def hermemulx(c): ... +def hermemul(c1, c2): ... +def hermediv(c1, c2): ... +def hermepow(c, pow, maxpower=...): ... +def hermeder(c, m=..., scl=..., axis=...): ... +def hermeint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def hermeval(x, c, tensor=...): ... +def hermeval2d(x, y, c): ... +def hermegrid2d(x, y, c): ... +def hermeval3d(x, y, z, c): ... +def hermegrid3d(x, y, z, c): ... +def hermevander(x, deg): ... +def hermevander2d(x, y, deg): ... +def hermevander3d(x, y, z, deg): ... +def hermefit(x, y, deg, rcond=..., full=..., w=...): ... +def hermecompanion(c): ... +def hermeroots(c): ... +def hermegauss(deg): ... +def hermeweight(x): ... + +class HermiteE(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi new file mode 100644 index 000000000000..2b9ab34e0afa --- /dev/null +++ b/numpy/polynomial/laguerre.pyi @@ -0,0 +1,46 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +lagtrim = trimcoef + +def poly2lag(pol): ... +def lag2poly(c): ... + +lagdomain: ndarray[Any, dtype[int_]] +lagzero: ndarray[Any, dtype[int_]] +lagone: ndarray[Any, dtype[int_]] +lagx: ndarray[Any, dtype[int_]] + +def lagline(off, scl): ... +def lagfromroots(roots): ... +def lagadd(c1, c2): ... +def lagsub(c1, c2): ... +def lagmulx(c): ... +def lagmul(c1, c2): ... +def lagdiv(c1, c2): ... +def lagpow(c, pow, maxpower=...): ... +def lagder(c, m=..., scl=..., axis=...): ... +def lagint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def lagval(x, c, tensor=...): ... +def lagval2d(x, y, c): ... +def laggrid2d(x, y, c): ... +def lagval3d(x, y, z, c): ... +def laggrid3d(x, y, z, c): ... +def lagvander(x, deg): ... +def lagvander2d(x, y, deg): ... +def lagvander3d(x, y, z, deg): ... +def lagfit(x, y, deg, rcond=..., full=..., w=...): ... +def lagcompanion(c): ... +def lagroots(c): ... +def laggauss(deg): ... +def lagweight(x): ... 
+ +class Laguerre(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi new file mode 100644 index 000000000000..86aef179304e --- /dev/null +++ b/numpy/polynomial/legendre.pyi @@ -0,0 +1,46 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +legtrim = trimcoef + +def poly2leg(pol): ... +def leg2poly(c): ... + +legdomain: ndarray[Any, dtype[int_]] +legzero: ndarray[Any, dtype[int_]] +legone: ndarray[Any, dtype[int_]] +legx: ndarray[Any, dtype[int_]] + +def legline(off, scl): ... +def legfromroots(roots): ... +def legadd(c1, c2): ... +def legsub(c1, c2): ... +def legmulx(c): ... +def legmul(c1, c2): ... +def legdiv(c1, c2): ... +def legpow(c, pow, maxpower=...): ... +def legder(c, m=..., scl=..., axis=...): ... +def legint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def legval(x, c, tensor=...): ... +def legval2d(x, y, c): ... +def leggrid2d(x, y, c): ... +def legval3d(x, y, z, c): ... +def leggrid3d(x, y, z, c): ... +def legvander(x, deg): ... +def legvander2d(x, y, deg): ... +def legvander3d(x, y, z, deg): ... +def legfit(x, y, deg, rcond=..., full=..., w=...): ... +def legcompanion(c): ... +def legroots(c): ... +def leggauss(deg): ... +def legweight(x): ... + +class Legendre(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi new file mode 100644 index 000000000000..f779300a9c5a --- /dev/null +++ b/numpy/polynomial/polynomial.pyi @@ -0,0 +1,41 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +polytrim = trimcoef + +polydomain: ndarray[Any, dtype[int_]] +polyzero: ndarray[Any, dtype[int_]] +polyone: ndarray[Any, dtype[int_]] +polyx: ndarray[Any, dtype[int_]] + +def polyline(off, scl): ... +def polyfromroots(roots): ... +def polyadd(c1, c2): ... +def polysub(c1, c2): ... +def polymulx(c): ... +def polymul(c1, c2): ... +def polydiv(c1, c2): ... +def polypow(c, pow, maxpower=...): ... +def polyder(c, m=..., scl=..., axis=...): ... +def polyint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ... +def polyval(x, c, tensor=...): ... +def polyvalfromroots(x, r, tensor=...): ... +def polyval2d(x, y, c): ... +def polygrid2d(x, y, c): ... +def polyval3d(x, y, z, c): ... +def polygrid3d(x, y, z, c): ... +def polyvander(x, deg): ... +def polyvander2d(x, y, deg): ... +def polyvander3d(x, y, z, deg): ... +def polyfit(x, y, deg, rcond=..., full=..., w=...): ... +def polyroots(c): ... + +class Polynomial(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi new file mode 100644 index 000000000000..0befa740eb5d --- /dev/null +++ b/numpy/polynomial/polyutils.pyi @@ -0,0 +1,17 @@ +from typing import List + +__all__: List[str] + +class RankWarning(UserWarning): ... +class PolyError(Exception): ... +class PolyDomainError(PolyError): ... + +# NOTE: Deprecated +# class PolyBase: ... + +def trimseq(seq): ... +def as_series(alist, trim=...): ... +def trimcoef(c, tol=...): ... +def getdomain(x): ... +def mapparms(old, new): ... +def mapdomain(x, old, new): ... 
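With these stubs, `numpy.polynomial` re-exports the convenience classes and `set_default_printstyle` as concrete objects instead of `Any`, and `ABCPolyBase` declares the shared classmethods such as `fit`. A short sketch of the surface they describe; the sample data and degree are arbitrary illustrative choices, and the placeholder signatures still return `Any`:

import numpy as np
from numpy.polynomial import Chebyshev, Polynomial, set_default_printstyle

x = np.linspace(-1, 1, 51)
y = x**2 + 0.1 * np.sin(5 * x)

p = Polynomial([1.0, 2.0, 3.0])    # represents 1 + 2*x + 3*x**2
c = Chebyshev.fit(x, y, deg=3)     # classmethod declared on ABCPolyBase
set_default_printstyle("unicode")  # annotated as a function rather than Any
print(p.deriv(1))
print(c.convert(kind=Polynomial))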
From ad2a73c18dcff95d844c382c94ab7f73b5571cf3 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 4 May 2021 17:43:26 -0500 Subject: [PATCH 1050/1270] MAINT: Adjust NumPy float hashing to Python's slightly changed hash This is necessary, since we use the Python double hash and the semi-private function to calculate it in Python has a new signature to return the identity-hash when the value is NaN. closes gh-18833, gh-18907 --- numpy/core/src/common/npy_pycompat.h | 16 ++++++++++ numpy/core/src/multiarray/scalartypes.c.src | 13 ++++---- numpy/core/tests/test_scalarmath.py | 34 +++++++++++++++++++++ 3 files changed, 57 insertions(+), 6 deletions(-) diff --git a/numpy/core/src/common/npy_pycompat.h b/numpy/core/src/common/npy_pycompat.h index aa0b5c1224d3..9e94a971090a 100644 --- a/numpy/core/src/common/npy_pycompat.h +++ b/numpy/core/src/common/npy_pycompat.h @@ -3,4 +3,20 @@ #include "numpy/npy_3kcompat.h" + +/* + * In Python 3.10a7 (or b1), python started using the identity for the hash + * when a value is NaN. See https://bugs.python.org/issue43475 + */ +#if PY_VERSION_HEX > 0x030a00a6 +#define Npy_HashDouble _Py_HashDouble +#else +static NPY_INLINE Py_hash_t +Npy_HashDouble(PyObject *NPY_UNUSED(identity), double val) +{ + return _Py_HashDouble(val); +} +#endif + + #endif /* _NPY_COMPAT_H_ */ diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index a001500b0a97..9930f7791d6e 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -3172,7 +3172,7 @@ static npy_hash_t static npy_hash_t @lname@_arrtype_hash(PyObject *obj) { - return _Py_HashDouble((double) PyArrayScalar_VAL(obj, @name@)); + return Npy_HashDouble(obj, (double)PyArrayScalar_VAL(obj, @name@)); } /* borrowed from complex_hash */ @@ -3180,14 +3180,14 @@ static npy_hash_t c@lname@_arrtype_hash(PyObject *obj) { npy_hash_t hashreal, hashimag, combined; - hashreal = _Py_HashDouble((double) - PyArrayScalar_VAL(obj, C@name@).real); + hashreal = Npy_HashDouble( + obj, (double)PyArrayScalar_VAL(obj, C@name@).real); if (hashreal == -1) { return -1; } - hashimag = _Py_HashDouble((double) - PyArrayScalar_VAL(obj, C@name@).imag); + hashimag = Npy_HashDouble( + obj, (double)PyArrayScalar_VAL(obj, C@name@).imag); if (hashimag == -1) { return -1; } @@ -3202,7 +3202,8 @@ c@lname@_arrtype_hash(PyObject *obj) static npy_hash_t half_arrtype_hash(PyObject *obj) { - return _Py_HashDouble(npy_half_to_double(PyArrayScalar_VAL(obj, Half))); + return Npy_HashDouble( + obj, npy_half_to_double(PyArrayScalar_VAL(obj, Half))); } static npy_hash_t diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index d91b4a39146d..09a734284a76 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -712,6 +712,40 @@ def test_shift_all_bits(self, type_code, op): assert_equal(res_arr, res_scl) +class TestHash: + @pytest.mark.parametrize("type_code", np.typecodes['AllInteger']) + def test_integer_hashes(self, type_code): + scalar = np.dtype(type_code).type + for i in range(128): + assert hash(i) == hash(scalar(i)) + + @pytest.mark.parametrize("type_code", np.typecodes['AllFloat']) + def test_float_and_complex_hashes(self, type_code): + scalar = np.dtype(type_code).type + for val in [np.pi, np.inf, 3, 6.]: + numpy_val = scalar(val) + # Cast back to Python, in case the NumPy scalar has less precision + if numpy_val.dtype.kind == 'c': + val = complex(numpy_val) + else: + val = float(numpy_val) + 
assert val == numpy_val + print(repr(numpy_val), repr(val)) + assert hash(val) == hash(numpy_val) + + if hash(float(np.nan)) != hash(float(np.nan)): + # If Python distinguises different NaNs we do so too (gh-18833) + assert hash(scalar(np.nan)) != hash(scalar(np.nan)) + + @pytest.mark.parametrize("type_code", np.typecodes['Complex']) + def test_complex_hashes(self, type_code): + # Test some complex valued hashes specifically: + scalar = np.dtype(type_code).type + for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]: + numpy_val = scalar(val) + assert hash(complex(numpy_val)) == hash(numpy_val) + + @contextlib.contextmanager def recursionlimit(n): o = sys.getrecursionlimit() From a1142e3761d152df7e34ad5ad4d9276661b84f71 Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 21:33:15 -0400 Subject: [PATCH 1051/1270] ENH: add the emulated 128-bit math for PCG64DXSM --- numpy/random/src/pcg64/pcg64.h | 60 +++++++++++++++++++++++++++++++++- 1 file changed, 59 insertions(+), 1 deletion(-) diff --git a/numpy/random/src/pcg64/pcg64.h b/numpy/random/src/pcg64/pcg64.h index ea8264b012df..89d94a4f467f 100644 --- a/numpy/random/src/pcg64/pcg64.h +++ b/numpy/random/src/pcg64/pcg64.h @@ -201,6 +201,63 @@ pcg_setseq_128_xsl_rr_64_random_r(pcg_state_setseq_128 *rng) { #endif } +static inline pcg128_t pcg128_mult_64(pcg128_t a, uint64_t b) { + uint64_t h1; + pcg128_t result; + + h1 = a.high * b; + _pcg_mult64(a.low, b, &(result.high), &(result.low)); + result.high += h1; + return result; +} + +static inline void pcg_cm_step_r(pcg_state_setseq_128 *rng) { +#if defined _WIN32 && _MSC_VER >= 1900 && _M_AMD64 + uint64_t h1; + pcg128_t product; + + /* Manually inline the multiplication and addition using intrinsics */ + h1 = rng->state.high * PCG_CHEAP_MULTIPLIER_128; + product.low = + _umul128(rng->state.low, PCG_CHEAP_MULTIPLIER_128, &(product.high)); + product.high += h1; + _addcarry_u64(_addcarry_u64(0, product.low, rng->inc.low, &(rng->state.low)), + product.high, rng->inc.high, &(rng->state.high)); + rng->state = product; +#else + rng->state = pcg128_add(pcg128_mult_64(rng->state, PCG_CHEAP_MULTIPLIER_128), + rng->inc); +#endif +} + +static inline uint64_t pcg_output_cm_128_64(pcg128_t state) { + uint64_t hi = state.high; + uint64_t lo = state.low; + + lo |= 1; + hi ^= hi >> 32; + hi *= 0xda942042e4dd58b5ULL; + hi ^= hi >> 48; + hi *= lo; + return hi; +} + +static inline void pcg_cm_srandom_r(pcg_state_setseq_128 *rng, pcg128_t initstate, pcg128_t initseq) { + rng->state = PCG_128BIT_CONSTANT(0ULL, 0ULL); + rng->inc.high = initseq.high << 1u; + rng->inc.high |= initseq.low >> 63u; + rng->inc.low = (initseq.low << 1u) | 1u; + pcg_cm_step_r(rng); + rng->state = pcg128_add(rng->state, initstate); + pcg_cm_step_r(rng); +} + +static inline uint64_t pcg_cm_random_r(pcg_state_setseq_128* rng) +{ + uint64_t ret = pcg_output_cm_128_64(rng->state); + pcg_cm_step_r(rng); + return ret; +} #else /* PCG_EMULATED_128BIT_MATH */ static inline void pcg_setseq_128_step_r(pcg_state_setseq_128 *rng) { @@ -284,7 +341,8 @@ static inline void pcg_setseq_128_advance_r(pcg_state_setseq_128 *rng, static inline void pcg_cm_advance_r(pcg_state_setseq_128 *rng, pcg128_t delta) { rng->state = pcg_advance_lcg_128(rng->state, delta, - PCG_CHEAP_MULTIPLIER_128, rng->inc); + PCG_128BIT_CONSTANT(0, PCG_CHEAP_MULTIPLIER_128), + rng->inc); } typedef pcg_state_setseq_128 pcg64_random_t; From aa6733bfa78cd8cb463f73f6a71564f8cb4291f4 Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 21:41:12 -0400 Subject: [PATCH 1052/1270] DOC: 
refine wording for clarity --- doc/source/reference/random/upgrading-pcg64.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/reference/random/upgrading-pcg64.rst b/doc/source/reference/random/upgrading-pcg64.rst index 8aec703a8a75..9e540ace9331 100644 --- a/doc/source/reference/random/upgrading-pcg64.rst +++ b/doc/source/reference/random/upgrading-pcg64.rst @@ -142,10 +142,10 @@ constants that make the merely-distinct streams of LCG states into practically-independent output streams. Indeed, now the claim we once made about `PCG64` is actually true of `PCG64DXSM`: collisions are possible, but both streams have to simultaneously be both "close" in the 128 bit state space -*and* "close" in the 127-bit increment space, so the negligible chance of -colliding in the 128-bit internal `SeedSequence` pool would be more likely. The -DXSM output function is more computationally intensive than XSL-RR, but some -optimizations in the LCG more than make up for the performance hit on most +*and* "close" in the 127-bit increment space, so that would be less likely than +the negligible chance of colliding in the 128-bit internal `SeedSequence` pool. +The DXSM output function is more computationally intensive than XSL-RR, but +some optimizations in the LCG more than make up for the performance hit on most machines, so `PCG64DXSM` is a good, safe upgrade. There are, of course, an infinite number of stronger output functions that one could consider, but most will have a greater computational cost, and the DXSM output function has now From d82ef187439081088ab606da712628959e711a3c Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Tue, 4 May 2021 22:12:20 -0400 Subject: [PATCH 1053/1270] BUG: Wrong logic for Windows --- numpy/random/src/pcg64/pcg64.h | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/random/src/pcg64/pcg64.h b/numpy/random/src/pcg64/pcg64.h index 89d94a4f467f..6691a18fcb16 100644 --- a/numpy/random/src/pcg64/pcg64.h +++ b/numpy/random/src/pcg64/pcg64.h @@ -223,7 +223,6 @@ static inline void pcg_cm_step_r(pcg_state_setseq_128 *rng) { product.high += h1; _addcarry_u64(_addcarry_u64(0, product.low, rng->inc.low, &(rng->state.low)), product.high, rng->inc.high, &(rng->state.high)); - rng->state = product; #else rng->state = pcg128_add(pcg128_mult_64(rng->state, PCG_CHEAP_MULTIPLIER_128), rng->inc); #endif From 134063c0353546729f885e37aa28e2e1b13ac4ae Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 4 May 2021 21:15:38 -0500 Subject: [PATCH 1054/1270] BUG: Add self-overlap check and tests Previously we unnecessarily required contiguous inputs for the 1-D "trivially iterable" fast path. That requirement was not needed, but it did guard against certain self-overlaps. This went unnoticed because a self-overlapping array is rarely aligned (although for complex dtypes it typically is).
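As a hedged illustration (not part of the change itself), one way to construct an aligned but self-overlapping 1-D output operand of the kind the new check below rejects; the array size and strides are arbitrary demonstration values:

import numpy as np
from numpy.lib.stride_tricks import as_strided

base = np.zeros(8, dtype=np.complex128)
# A stride of 8 bytes is smaller than the 16-byte itemsize, so neighbouring
# elements overlap in memory, yet each element stays 8-byte aligned on most
# platforms, which is the typical complex-dtype situation described above.
view = as_strided(base, shape=(8,), strides=(8,))
assert view.strides[0] != 0 and view.strides[0] < view.itemsize
print(view.strides, view.itemsize)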
--- numpy/core/src/umath/ufunc_object.c | 6 ++++++ numpy/core/tests/test_mem_overlap.py | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index bf8bd1a8c690..b0654d7b0a9a 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -1212,6 +1212,12 @@ try_trivial_single_output_loop(PyUFuncObject *ufunc, return -2; } } + /* Check self-overlap (non 1-D are contiguous, perfect overlap is OK) */ + if (operation_ndim == 1 && + PyArray_STRIDES(op[nin])[0] < PyArray_ITEMSIZE(op[nin]) && + PyArray_STRIDES(op[nin])[0] != 0) { + return -2; + } } /* Call the __prepare_array__ if necessary */ diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py index ba73dad62c4c..24bdf477f7c7 100644 --- a/numpy/core/tests/test_mem_overlap.py +++ b/numpy/core/tests/test_mem_overlap.py @@ -666,6 +666,11 @@ def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, def test_unary_ufunc_call_fuzz(self): self.check_unary_fuzz(np.invert, None, np.int16) + @pytest.mark.slow + def test_unary_ufunc_call_complex_fuzz(self): + # Complex typically has a smaller alignment than itemsize + self.check_unary_fuzz(np.negative, None, np.complex128, count=500) + def test_binary_ufunc_accumulate_fuzz(self): def get_out_axis_size(a, b, axis): if axis is None: From 46bc1fc554b6560142ed11ab49ad5a380699a243 Mon Sep 17 00:00:00 2001 From: Dong Keun Oh Date: Wed, 5 May 2021 15:26:35 +0900 Subject: [PATCH 1055/1270] BUG FIX for MINGW : threads.h existence test requires GLIBC > 2.12 --- numpy/f2py/cfuncs.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 974062f2617a..f403a66b5d7b 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -545,7 +545,9 @@ """ cppmacros["F2PY_THREAD_LOCAL_DECL"] = """\ #ifndef F2PY_THREAD_LOCAL_DECL -#if defined(_MSC_VER) +#if defined(_MSC_VER) \\ + || defined(_WIN32) || defined(_WIN64) \\ + || defined(__MINGW32__) || defined(__MINGW64__) #define F2PY_THREAD_LOCAL_DECL __declspec(thread) #elif defined(__STDC_VERSION__) \\ && (__STDC_VERSION__ >= 201112L) \\ From d3e666c94230d4b45b9736103446f8b526ef426a Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 5 May 2021 12:11:28 +0200 Subject: [PATCH 1056/1270] BLD, BUG: Fix bdist_wheel duplicate building The bug can occur only if the build option `build` was passed before the option `bdist_wheel`. You may still realize a duplicate printing for the compiler optimization report in the build log, which is normal due to multiple calling of command `build` by setuptools. 
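Sketched roughly in Python, the pattern both build commands adopt below is: flush the cache of any previously created `CCompilerOpt` before building a new instance, so a later run in the same process sees an up-to-date cache instead of forcing a rebuild. The helper name here is hypothetical; in the patch the logic sits directly in the `run()` methods:

from numpy.distutils.ccompiler_opt import new_ccompiler_opt

def _fresh_compiler_opt(cmd, **opt_kwargs):
    # Hypothetical helper illustrating the flush-before-recreate pattern.
    if hasattr(cmd, "compiler_opt"):
        # Write the previous instance's cache now rather than waiting for
        # its atexit hook to run.
        cmd.compiler_opt.cache_flush()
    cmd.compiler_opt = new_ccompiler_opt(**opt_kwargs)
    return cmd.compiler_opt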
--- numpy/distutils/ccompiler_opt.py | 41 ++++++++++++++++----------- numpy/distutils/command/build_clib.py | 20 +++++++++---- numpy/distutils/command/build_ext.py | 19 +++++++++---- 3 files changed, 54 insertions(+), 26 deletions(-) diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index aea9835c78e2..316d3a338142 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -747,12 +747,14 @@ def __init__(self, cache_path=None, *factors): self.cache_me = {} self.cache_private = set() self.cache_infile = False + self._cache_path = None if self.conf_nocache: self.dist_log("cache is disabled by `Config`") return - chash = self.cache_hash(*factors, *self.conf_cache_factors) + self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) + self._cache_path = cache_path if cache_path: if os.path.exists(cache_path): self.dist_log("load cache from file ->", cache_path) @@ -765,7 +767,7 @@ def __init__(self, cache_path=None, *factors): elif not hasattr(cache_mod, "hash") or \ not hasattr(cache_mod, "data"): self.dist_log("invalid cache file", stderr=True) - elif chash == cache_mod.hash: + elif self._cache_hash == cache_mod.hash: self.dist_log("hit the file cache") for attr, val in cache_mod.data.items(): setattr(self, attr, val) @@ -773,10 +775,8 @@ def __init__(self, cache_path=None, *factors): else: self.dist_log("miss the file cache") - atexit.register(self._cache_write, cache_path, chash) - if not self.cache_infile: - other_cache = _share_cache.get(chash) + other_cache = _share_cache.get(self._cache_hash) if other_cache: self.dist_log("hit the memory cache") for attr, val in other_cache.__dict__.items(): @@ -785,32 +785,41 @@ def __init__(self, cache_path=None, *factors): continue setattr(self, attr, val) - _share_cache[chash] = self + _share_cache[self._cache_hash] = self + atexit.register(self.cache_flush) def __del__(self): - # TODO: remove the cache form share on del - pass + for h, o in _share_cache.items(): + if o == self: + _share_cache.pop(h) + break - def _cache_write(self, cache_path, cache_hash): + def cache_flush(self): + """ + Force update the cache. 
+ """ + if not self._cache_path: + return # TODO: don't write if the cache doesn't change - self.dist_log("write cache to path ->", cache_path) - for attr in list(self.__dict__.keys()): + self.dist_log("write cache to path ->", self._cache_path) + cdict = self.__dict__.copy() + for attr in self.__dict__.keys(): if re.match(self._cache_ignore, attr): - self.__dict__.pop(attr) + cdict.pop(attr) - d = os.path.dirname(cache_path) + d = os.path.dirname(self._cache_path) if not os.path.exists(d): os.makedirs(d) - repr_dict = pprint.pformat(self.__dict__, compact=True) - with open(cache_path, "w") as f: + repr_dict = pprint.pformat(cdict, compact=True) + with open(self._cache_path, "w") as f: f.write(textwrap.dedent("""\ # AUTOGENERATED DON'T EDIT # Please make changes to the code generator \ (distutils/ccompiler_opt.py) hash = {} data = \\ - """).format(cache_hash)) + """).format(self._cache_hash)) f.write(repr_dict) def cache_hash(self, *factors): diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index e1f15d465e7b..0e31a7dee5be 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -123,15 +123,20 @@ def run(self): opt_cache_path = os.path.abspath( os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py') ) + if hasattr(self, "compiler_opt"): + # By default `CCompilerOpt` update the cache at the exit of + # the process, which may lead to duplicate building + # (see build_extension()/force_rebuild) if run() called + # multiple times within the same os process/thread without + # giving the chance the previous instances of `CCompilerOpt` + # to update the cache. + self.compiler_opt.cache_flush() + self.compiler_opt = new_ccompiler_opt( compiler=self.compiler, dispatch_hpath=dispatch_hpath, cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, cache_path=opt_cache_path ) - if not self.compiler_opt.is_cached(): - log.info("Detected changes on compiler optimizations, force rebuilding") - self.force = True - def report(copt): log.info("\n########### CLIB COMPILER OPTIMIZATION ###########") log.info(copt.report(full=True)) @@ -212,7 +217,12 @@ def build_a_library(self, build_info, lib_name, libraries): lib_file = compiler.library_filename(lib_name, output_dir=self.build_clib) depends = sources + build_info.get('depends', []) - if not (self.force or newer_group(depends, lib_file, 'newer')): + + force_rebuild = self.force + if not self.disable_optimization and not self.compiler_opt.is_cached(): + log.debug("Detected changes on compiler optimizations") + force_rebuild = True + if not (force_rebuild or newer_group(depends, lib_file, 'newer')): log.debug("skipping '%s' library (up-to-date)", lib_name) return else: diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index ca29ad4c0c19..84ec8aa2cb46 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -151,15 +151,20 @@ def run(self): opt_cache_path = os.path.abspath( os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py') ) + if hasattr(self, "compiler_opt"): + # By default `CCompilerOpt` update the cache at the exit of + # the process, which may lead to duplicate building + # (see build_extension()/force_rebuild) if run() called + # multiple times within the same os process/thread without + # giving the chance the previous instances of `CCompilerOpt` + # to update the cache. 
+ self.compiler_opt.cache_flush() + self.compiler_opt = new_ccompiler_opt( compiler=self.compiler, dispatch_hpath=dispatch_hpath, cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, cache_path=opt_cache_path ) - if not self.compiler_opt.is_cached(): - log.info("Detected changes on compiler optimizations, force rebuilding") - self.force = True - def report(copt): log.info("\n########### EXT COMPILER OPTIMIZATION ###########") log.info(copt.report(full=True)) @@ -360,7 +365,11 @@ def build_extension(self, ext): self.get_ext_filename(fullname)) depends = sources + ext.depends - if not (self.force or newer_group(depends, ext_filename, 'newer')): + force_rebuild = self.force + if not self.disable_optimization and not self.compiler_opt.is_cached(): + log.debug("Detected changes on compiler optimizations") + force_rebuild = True + if not (force_rebuild or newer_group(depends, ext_filename, 'newer')): log.debug("skipping '%s' extension (up-to-date)", ext.name) return else: From 0f7f313d847e8e36e9fb3c4e576619a3af3bfe56 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 5 May 2021 13:29:49 +0200 Subject: [PATCH 1057/1270] CI: fix the GitHub Actions trigger in docker.yml (#18912) Also remove `tags: *`, this is unnecessary. This mirrors the fix in SciPy, so should work. Somehow GitHub Actions no longer likes `./` at the start of a path. --- .github/workflows/docker.yml | 8 +++----- environment.yml | 2 ++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 52d78a38d1a8..cc4950590af0 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -5,10 +5,8 @@ on: branches: - main paths: - - './environment.yml' - tags: - - '*' - + - 'environment.yml' + jobs: build: name: Build base Docker image @@ -56,4 +54,4 @@ jobs: numpy/numpy-dev:${{ steps.getrefs.outputs.date }}-${{ steps.getrefs.outputs.branch}}-${{ steps.getrefs.outputs.sha8 }}, numpy/numpy-dev:latest - name: Image digest # Return details of the image build: sha and shell - run: echo ${{ steps.docker_build.outputs.digest }} \ No newline at end of file + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/environment.yml b/environment.yml index 19fe69a7973b..5e1d4df235d4 100644 --- a/environment.yml +++ b/environment.yml @@ -1,6 +1,8 @@ # To use: +# # $ conda env create -f environment.yml # `mamba` works too for this command # $ conda activate numpy-dev +# name: numpy-dev channels: - conda-forge From d97c634a04e539de2b66ccd0fbd526fea310571a Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Wed, 5 May 2021 10:27:24 -0400 Subject: [PATCH 1058/1270] DOC: more completely describe the implemented variant. --- numpy/random/_pcg64.pyx | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index aaa79b3045b2..8a00dc265f23 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -302,7 +302,10 @@ cdef class PCG64DXSM(BitGenerator): PCG-64 DXSM is a 128-bit implementation of O'Neill's permutation congruential generator ([1]_, [2]_). PCG-64 DXSM has a period of :math:`2^{128}` and supports advancing an arbitrary number of steps as well as :math:`2^{127}` streams. - The specific member of the PCG family that we use is PCG DXSM 128/64. + The specific member of the PCG family that we use is PCG CM DXSM 128/64. 
It + differs from ``PCG64`` in that it uses the stronger DXSM output function, + a 64-bit "cheap multiplier" in the LCG, and outputs from the state before + advancing it rather than advance-then-output. ``PCG64DXSM`` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not From e684d66262626901cd2715e48ac8141c90cbae00 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 5 May 2021 10:11:27 -0500 Subject: [PATCH 1059/1270] Update doc/source/reference/ufuncs.rst Co-authored-by: Ross Barnowski --- doc/source/reference/ufuncs.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 333e445d40af..b2906bcdfd88 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -448,8 +448,8 @@ advanced usage and will not typically be used. indicating the input and output types of a ufunc. This argument allows the user to specify exact DTypes to be used for the - calculation. Casting will be used as necessary and the input DType - is not used unless ``signature`` is ``None`` for that input. + calculation. Casting will be used as necessary. The input DType + is not considered unless ``signature`` is ``None`` for that input. When all DTypes are fixed, a specific loop is chosen or an error raised if no matching loop exists. From 6786740d5382273ec9f875e4586744e35858c206 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 5 May 2021 10:32:22 -0500 Subject: [PATCH 1060/1270] Limit special handling to output dtypes in uniform resolver --- numpy/core/src/umath/ufunc_type_resolution.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 8f974fca0907..0be85c1c4862 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -555,7 +555,7 @@ PyUFunc_SimpleUniformOperationTypeResolver( * one in the tuple), there is no need to check all loops. * Note that this also allows (None, None, float64) to resolve to * (float64, float64, float64), even when the inputs do not match, - * i.e. fixing a single part of the signature can fix all of them. + * i.e. fixing the output part of the signature can fix all of them. * This is necessary to support `nextafter(1., inf, dtype=float32)`, * where it is "clear" we want to cast 1. and inf to float32. 
*/ @@ -565,7 +565,12 @@ PyUFunc_SimpleUniformOperationTypeResolver( for (int i = 0; i < nop; i++) { PyObject *item = PyTuple_GET_ITEM(type_tup, i); if (item == Py_None) { - continue; + if (i < ufunc->nin) { + continue; + } + /* All outputs must be set (this could be relaxed) */ + descr = NULL; + break; } if (!PyArray_DescrCheck(item)) { /* Defer to default resolver (will raise an error there) */ From 8eb83600f819b13370109496d48d0261aa289bbb Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 5 May 2021 11:21:08 -0500 Subject: [PATCH 1061/1270] DOC: Try to clarify release notes a bit [ci-skip] --- doc/release/upcoming_changes/18880.compatibility.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/release/upcoming_changes/18880.compatibility.rst b/doc/release/upcoming_changes/18880.compatibility.rst index 3efac000200d..ed159cdfa247 100644 --- a/doc/release/upcoming_changes/18880.compatibility.rst +++ b/doc/release/upcoming_changes/18880.compatibility.rst @@ -11,8 +11,10 @@ partially provided, for example using ``signature=("float64", None, None)``, this could lead to no loop being found (an error). In that case, it is necessary to provide the complete signature to enforce casting the inputs. -Since NumPy special-cases if only outputs (or ``dtype``) is provided, -this should affect very few users. +If ``dtype="float64"`` is used or only outputs are set (e.g. +``signature=(None, None, "float64")`` the behaviour should remain +unchanged. We expect that very few users are affected by +this change. Further, the meaning of ``dtype="float64"`` has been slightly modified and now strictly enforces only the correct output (and not input) DTypes. From 6a680f8a49ce8a23c55f9580e21977f44aba6224 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 5 May 2021 23:41:36 +0200 Subject: [PATCH 1062/1270] MAINT: Add placeholder annotations for two missing `np.testing` objects Adds placeholders for two `np.testing` objects previously missed in https://github.com/numpy/numpy/pull/18842. --- numpy/testing/__init__.pyi | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index efb6bcd911b1..395626f6bb5a 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -7,12 +7,18 @@ if sys.version_info >= (3, 8): else: from typing_extensions import Final +from unittest import ( + TestCase as TestCase, +) + from unittest.case import ( SkipTest as SkipTest, ) __all__: List[str] +def run_module_suite(file_to_run=..., argv=...): ... + class KnownFailureException(Exception): ... class IgnoreException(Exception): ... 
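
The resolver change in PATCH 1060 and the release-note wording in PATCH 1061 are
easier to follow with a concrete call.  A minimal sketch of the behaviour they
describe (assuming the 1.21 resolution rules above; the specific ``np.add`` and
``np.nextafter`` calls are only illustrative)::

    import numpy as np

    # Fixing only the *output* DType lets the resolver fill in matching
    # input DTypes, i.e. (None, None, float64) resolves to (f8, f8, f8)
    # and the integer inputs are cast.
    res = np.add(3, 4, signature=(None, None, "float64"))
    print(res, res.dtype)        # expected: 7.0 float64

    # The case cited in the C comment above: cast 1.0 and inf to float32
    # by requesting only a float32 result.
    val = np.nextafter(1.0, np.inf, dtype=np.float32)
    print(val.dtype)             # expected: float32
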
From bf531216d3b70f28c462e457dcda1795a3c28476 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 5 May 2021 14:51:54 -0700 Subject: [PATCH 1063/1270] TST: Add test to verify underflow flag is raised for np.exp --- numpy/core/tests/test_umath.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 75e1c99d388e..572e736a0774 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -919,6 +919,12 @@ def test_exp_values(self): assert_raises(FloatingPointError, np.exp, np.float64(800.)) assert_raises(FloatingPointError, np.exp, np.float64(1E19)) + with np.errstate(under='raise'): + assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) + def test_log_values(self): with np.errstate(all='ignore'): x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan] From b2191de9ce77441373b3b7265c700ae91283a677 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 5 May 2021 14:53:28 -0700 Subject: [PATCH 1064/1270] BUG: Detect and report underflow condition in AVX implementation of np.exp --- .../core/src/umath/loops_exponent_log.dispatch.c.src | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src index 1dc24b226666..291ce4518878 100644 --- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src @@ -465,6 +465,7 @@ simd_exp_FLOAT(npy_float * op, @mask@ xmax_mask, xmin_mask, nan_mask, inf_mask; @mask@ overflow_mask = @isa@_get_partial_load_mask_ps(0, num_lanes); + @mask@ underflow_mask = @isa@_get_partial_load_mask_ps(0, num_lanes); @mask@ load_mask = @isa@_get_full_load_mask_ps(); npy_intp num_remaining_elements = array_size; @@ -491,6 +492,7 @@ simd_exp_FLOAT(npy_float * op, inf_mask = _mm@vsize@_cmp_ps@vsub@(x, inf, _CMP_EQ_OQ); overflow_mask = @or_masks@(overflow_mask, @xor_masks@(xmax_mask, inf_mask)); + underflow_mask = @or_masks@(underflow_mask, xmin_mask); x = @isa@_set_masked_lanes_ps(x, zeros_f, @or_masks@( @or_masks@(nan_mask, xmin_mask), xmax_mask)); @@ -539,6 +541,10 @@ simd_exp_FLOAT(npy_float * op, if (@mask_to_int@(overflow_mask)) { npy_set_floatstatus_overflow(); } + + if (@mask_to_int@(underflow_mask)) { + npy_set_floatstatus_underflow(); + } } /* @@ -740,6 +746,7 @@ AVX512F_exp_DOUBLE(npy_double * op, __m512d mTable_tail_3 = _mm512_loadu_pd(&(EXP_Table_tail[8*3])); __mmask8 overflow_mask = avx512_get_partial_load_mask_pd(0, num_lanes); + __mmask8 underflow_mask = avx512_get_partial_load_mask_pd(0, num_lanes); __mmask8 load_mask = avx512_get_full_load_mask_pd(); __mmask8 xmin_mask, xmax_mask, inf_mask, nan_mask, nearzero_mask; @@ -769,6 +776,7 @@ AVX512F_exp_DOUBLE(npy_double * op, nearzero_mask = _mm512_kxor(nearzero_mask, nan_mask); overflow_mask = _mm512_kor(overflow_mask, _mm512_kxor(xmax_mask, inf_mask)); + underflow_mask = _mm512_kor(underflow_mask, xmax_mask); x = avx512_set_masked_lanes_pd(x, zeros_d, _mm512_kor(_mm512_kor(nan_mask, xmin_mask), _mm512_kor(xmax_mask, nearzero_mask))); @@ -828,6 +836,10 @@ AVX512F_exp_DOUBLE(npy_double * op, if (overflow_mask) { npy_set_floatstatus_overflow(); } + + if (underflow_mask) { + npy_set_floatstatus_underflow(); + } } /* * Vectorized implementation of 
log double using AVX512 From 6a673d0ae2c2ffac4c243240fbf237a5db648409 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 6 May 2021 14:06:59 +0300 Subject: [PATCH 1065/1270] NEP: add mailing list thread, fixes from review --- doc/neps/nep-0049.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/doc/neps/nep-0049.rst b/doc/neps/nep-0049.rst index 9adf0ff26290..743dd2ad6e90 100644 --- a/doc/neps/nep-0049.rst +++ b/doc/neps/nep-0049.rst @@ -6,6 +6,7 @@ NEP 49 — Data allocation strategies :Status: Draft :Type: Standards Track :Created: 2021-04-18 +:Resolution: http://numpy-discussion.10968.n7.nabble.com/NEP-49-Data-allocation-strategies-tt49185.html Abstract @@ -101,6 +102,11 @@ time of its instantiation, and these will be used to reallocate or free the data memory of the instance. Internally NumPy may use ``memcpy`` or ``memset`` on the pointer to the data memory. +The name of the handler will be exposed on the python level via a +``numpy.core.multiarray.get_handler_name(arr)`` function. If called as +``numpy.core.multiarray.get_handler_name()`` it will return the name of the +global handler that will be used to allocate data for the next new `ndarrray`. + NumPy C-API functions ===================== @@ -185,7 +191,7 @@ the ``sz`` argument is correct. if (strncmp(real, "originally allocated", 20) != 0) { fprintf(stdout, "uh-oh, unmatched shift_free, " "no appropriate prefix\\n"); - /* Make gcc crash by calling free on the wrong address */ + /* Make the C runtime crash by calling free on the wrong address */ free((char *)p + 10); /* free(real); */ } @@ -194,7 +200,7 @@ the ``sz`` argument is correct. if (i != sz) { fprintf(stderr, "uh-oh, unmatched " "shift_free(ptr, %d) but allocated %d\\n", sz, i); - /* Make gcc crash by calling free on the wrong address */ + /* Make the C runtime crash by calling free on the wrong address */ /* free((char *)p + 10); */ free(real); } From 3c4e5af75601afeee3492d0d019c6ffe44c86d41 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 6 May 2021 09:55:27 -0500 Subject: [PATCH 1066/1270] DOC: Updates based on Marten's review --- doc/release/upcoming_changes/18880.compatibility.rst | 2 +- doc/source/reference/ufuncs.rst | 5 +++-- numpy/core/src/umath/ufunc_type_resolution.c | 12 +++++++++--- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/doc/release/upcoming_changes/18880.compatibility.rst b/doc/release/upcoming_changes/18880.compatibility.rst index ed159cdfa247..a6ae2e968ff9 100644 --- a/doc/release/upcoming_changes/18880.compatibility.rst +++ b/doc/release/upcoming_changes/18880.compatibility.rst @@ -26,7 +26,7 @@ This means it is now always equivalent to:: to no loop being found in some cases, NumPy will normally also search for the loop:: - signature("float64", "float64, "float64") + signature=("float64", "float64", "float64") if the first search failed. In the future, this behaviour may be customized to achieve the expected diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index b2906bcdfd88..27ebf8d1b9c0 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -448,8 +448,9 @@ advanced usage and will not typically be used. indicating the input and output types of a ufunc. This argument allows the user to specify exact DTypes to be used for the - calculation. Casting will be used as necessary. The input DType - is not considered unless ``signature`` is ``None`` for that input. + calculation. Casting will be used as necessary. 
The actual DType of the + input arrays is not considered unless ``signature`` is ``None`` for + that array. When all DTypes are fixed, a specific loop is chosen or an error raised if no matching loop exists. diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 0be85c1c4862..d41b071f4207 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -1922,7 +1922,10 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self, for (; funcdata != NULL; funcdata = funcdata->next) { int *orig_types = funcdata->arg_types; - /* Copy the types into an int array for matching */ + /* + * Copy the types into an int array for matching + * (Mostly duplicated in `type_tuple_type_resolver`) + */ for (j = 0; j < nop; ++j) { if (specified_types[j] == NPY_NOTYPE) { types[j] = orig_types[j]; @@ -2121,7 +2124,7 @@ type_tuple_type_resolver_core(PyUFuncObject *self, /* Error */ case -1: return -1; - /* Found matching loop */ + /* Found matching loop */ case 1: return 0; } @@ -2130,7 +2133,10 @@ type_tuple_type_resolver_core(PyUFuncObject *self, for (i = 0; i < self->ntypes; ++i) { char *orig_types = self->types + i*self->nargs; - /* Check specified types and copy into an int array for matching */ + /* + * Check specified types and copy into an int array for matching + * (Mostly duplicated in `type_tuple_userloop_type_resolver`) + */ for (j = 0; j < nop; ++j) { if (specified_types[j] == NPY_NOTYPE) { types[j] = orig_types[j]; From 00064c72cd44c4e5bc6c627eeac903cd201effa2 Mon Sep 17 00:00:00 2001 From: legoffant <58195095+legoffant@users.noreply.github.com> Date: Fri, 7 May 2021 02:23:51 +0200 Subject: [PATCH 1067/1270] DOC: fix clone over ssh instructions #18917 (#18918) --- doc/source/dev/gitwash/development_setup.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/gitwash/development_setup.rst b/doc/source/dev/gitwash/development_setup.rst index badb73ca0c70..2be7125da032 100644 --- a/doc/source/dev/gitwash/development_setup.rst +++ b/doc/source/dev/gitwash/development_setup.rst @@ -165,7 +165,7 @@ slightly. Instead of :: run :: - git clone git@github.com:numpy/numpy.git + git clone git@github.com:your-user-name/numpy.git and instead of showing an ``https`` URL, ``git remote -v`` will show :: From 2430c7614ac04f4a70840836cd58ba09ce20a690 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 6 May 2021 19:26:58 -0500 Subject: [PATCH 1068/1270] BUG: Fix refcounting in string-promotion deprecation code path This is a workaround to avoid string deprecation warning when it will not make a difference. But it was lacking the decref'ing and clearing (just in case, this code normally does so). 
---
 numpy/core/src/umath/ufunc_type_resolution.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 465deb87b756..a3f97a8f3a71 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -540,7 +540,12 @@ PyUFunc_SimpleUniformOperationTypeResolver(
                 out_dtypes[iop] = PyArray_DESCR(operands[iop]);
                 Py_INCREF(out_dtypes[iop]);
             }
-            return raise_no_loop_found_error(ufunc, out_dtypes);
+            raise_no_loop_found_error(ufunc, out_dtypes);
+            for (iop = 0; iop < ufunc->nin; iop++) {
+                Py_DECREF(out_dtypes[iop]);
+                out_dtypes[iop] = NULL;
+            }
+            return -1;
         }
         out_dtypes[0] = PyArray_ResultType(ufunc->nin, operands, 0, NULL);
     }

From c2f2fa0849836d1b2e16686b5833c650a1574e8b Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 6 May 2021 19:32:59 -0500
Subject: [PATCH 1069/1270] DOC: Update release notes based on Matti's
 review/comments

Co-authored-by: Matti Picus
---
 doc/release/upcoming_changes/18880.compatibility.rst | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/doc/release/upcoming_changes/18880.compatibility.rst b/doc/release/upcoming_changes/18880.compatibility.rst
index a6ae2e968ff9..4951463cf032 100644
--- a/doc/release/upcoming_changes/18880.compatibility.rst
+++ b/doc/release/upcoming_changes/18880.compatibility.rst
@@ -1,7 +1,8 @@
 Ufunc ``signature=...`` and ``dtype=`` generalization and ``casting``
 ---------------------------------------------------------------------
-The behaviour for ``signature=...`` and ``dtype=...`` can differ in
-some cases to the previous behaviour.
+The behaviour for ``np.ufunc(1.0, 1.0, signature=...)`` or
+``np.ufunc(1.0, 1.0, dtype=...)`` can now yield different loops in 1.21
+compared to 1.20 because of changes in promotion.
 When ``signature`` was previously used, the casting check on inputs
 was relaxed, which could lead to downcasting inputs unsafely especially
 if combined with ``casting="unsafe"``.
@@ -12,9 +13,8 @@ this could lead to no loop being found (an error).
 In that case, it is necessary to provide the complete signature
 to enforce casting the inputs.
 If ``dtype="float64"`` is used or only outputs are set (e.g.
-``signature=(None, None, "float64")`` the behaviour should remain
-unchanged. We expect that very few users are affected by
-this change.
+``signature=(None, None, "float64")``) the behaviour is unchanged.
+We expect that very few users are affected by this change.
 Further, the meaning of ``dtype="float64"`` has been slightly modified and
 now strictly enforces only the correct output (and not input) DTypes.

From 6ad46507c59b0a21bf2e0669c8563ed05a5bb273 Mon Sep 17 00:00:00 2001
From: Amrit Krishnan
Date: Thu, 6 May 2021 22:13:30 -0400
Subject: [PATCH 1070/1270] DOC: Clarify isreal docstring for string/obj arrays
 (#18843)

Add Notes and Examples about behavior of isreal for string and object
arrays

Co-authored-by: Ross Barnowski
---
 numpy/lib/type_check.py | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 2a2982ab348e..383fbda3c239 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -262,6 +262,10 @@ def isreal(x):
     out : ndarray, bool
         Boolean array of same shape as `x`.
+    Notes
+    -----
+    `isreal` may behave unexpectedly for string or object arrays (see Examples).
+
     See Also
     --------
     iscomplex
@@ -269,8 +273,28 @@ def isreal(x):

     Examples
     --------
-    >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j])
+    >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
+    >>> np.isreal(a)
     array([False,  True,  True,  True,  True, False])
+
+    The function does not work on string arrays.
+
+    >>> a = np.array([2j, "a"], dtype="U")
+    >>> np.isreal(a)  # Warns about non-elementwise comparison
+    False
+
+    Returns True for all elements in an input array of ``dtype=object`` even
+    if any of the elements is complex.
+
+    >>> a = np.array([1, "2", 3+4j], dtype=object)
+    >>> np.isreal(a)
+    array([ True,  True,  True])
+
+    `isreal` should not be used with object arrays.
+
+    >>> a = np.array([1+2j, 2+1j], dtype=object)
+    >>> np.isreal(a)
+    array([ True,  True])
     """
     return imag(x) == 0

From dc062bd1b4606ed33b5ead81c63eea741b519196 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Fri, 7 May 2021 10:19:59 +0200
Subject: [PATCH 1071/1270] BUG: Fix underflow error in AVX512 of ufunc exp

---
 numpy/core/src/umath/loops_exponent_log.dispatch.c.src | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
index 291ce4518878..41e0bf37b6f8 100644
--- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
+++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
@@ -776,7 +776,7 @@ AVX512F_exp_DOUBLE(npy_double * op,
     nearzero_mask = _mm512_kxor(nearzero_mask, nan_mask);
     overflow_mask = _mm512_kor(overflow_mask,
         _mm512_kxor(xmax_mask, inf_mask));
-    underflow_mask = _mm512_kor(underflow_mask, xmax_mask);
+    underflow_mask = _mm512_kor(underflow_mask, xmin_mask);
     x = avx512_set_masked_lanes_pd(x, zeros_d, _mm512_kor(
                                 _mm512_kor(nan_mask, xmin_mask),
                                 _mm512_kor(xmax_mask, nearzero_mask)));

From cb64227497a3a1eec6358fbfab822e58154b611a Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 7 May 2021 12:51:06 +0200
Subject: [PATCH 1072/1270] MAINT: Add `numpy/random/lib/npyrandom.lib` to
 gitignore

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 736597b6b7af..a2a1f2b68725 100644
--- a/.gitignore
+++ b/.gitignore
@@ -187,6 +187,7 @@ numpy/random/legacy/*.c
 numpy/random/_mtrand/randint_helpers.pxi
 numpy/random/bounded_integers.pyx
 numpy/random/bounded_integers.pxd
+numpy/random/lib/npyrandom.lib
 tools/swig/test/Array_wrap.cxx
 tools/swig/test/Farray_wrap.cxx
 tools/swig/test/Farray.py

From 5355d10b6416ef77d56c634a4f7674b3c3e092ac Mon Sep 17 00:00:00 2001
From: Bas van Beek
Date: Fri, 7 May 2021 12:58:20 +0200
Subject: [PATCH 1073/1270] REL: Add a release note for #18838

---
 doc/release/upcoming_changes/18934.improvement.rst | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 doc/release/upcoming_changes/18934.improvement.rst

diff --git a/doc/release/upcoming_changes/18934.improvement.rst b/doc/release/upcoming_changes/18934.improvement.rst
new file mode 100644
index 000000000000..582062f2f5fb
--- /dev/null
+++ b/doc/release/upcoming_changes/18934.improvement.rst
@@ -0,0 +1,5 @@
+Placeholder annotations have been improved
+------------------------------------------
+All placeholder annotations that were previously annotated as ``typing.Any``
+have been improved. Where appropriate, they have been replaced with explicit
+function definitions, classes or other miscellaneous objects.
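
The exp underflow work above (the tests added in PATCH 1063 and the
``xmin_mask`` fix in PATCH 1071) is visible from Python through the
floating-point error state.  A small sketch, assuming a build that uses
these dispatched loops::

    import numpy as np

    # exp of a very negative argument underflows to 0.0; with the fixes
    # above the underflow flag is also raised, so errstate can report it.
    try:
        with np.errstate(under='raise'):
            np.exp(np.float32(-1000.))
    except FloatingPointError:
        print("underflow reported by np.exp")
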
From a25f1ab93e8d71e39e9226ca3394ff665341a9e6 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 7 May 2021 15:25:39 +0200 Subject: [PATCH 1074/1270] TST: Use almost equal to get rid of object -> float64 comparison error --- numpy/core/tests/test_ufunc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index c13865ce4c28..64ecb3780542 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -175,7 +175,7 @@ def __getattr__(self, attr): ufunc(obj_arr) else: res_obj = ufunc(obj_arr) - assert_array_equal(res_num.astype("O"), res_obj) + assert_array_almost_equal(res_num.astype("O"), res_obj) def _pickleable_module_global(): From 2a9656090656b276112cb758f897b22275385e7a Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 6 May 2021 17:47:50 -0600 Subject: [PATCH 1075/1270] BUG: Make changelog recognize ``gh-`` as PR number prefix. The changelog.py script was only recognizing fast forward merges where the PR number was of the form "(#xxxxx)", but some people were merging using the "(gh-xxxxx)" form. Fix that. --- tools/changelog.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tools/changelog.py b/tools/changelog.py index 920f5b87faa3..9da330500726 100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -66,10 +66,14 @@ def get_authors(revision_range): pre = set(re.findall(pat, this_repo.git.shortlog('-s', lst_release), re.M)) - # Homu is the author of auto merges, clean him out. + # Ignore the bot Homu. cur.discard('Homu') pre.discard('Homu') + # Ignore the bot dependabot-preview + cur.discard('dependabot-preview') + pre.discard('dependabot-preview') + # Append '+' to new authors. authors = [s + u' +' for s in cur - pre] + [s for s in cur & pre] authors.sort() @@ -92,8 +96,8 @@ def get_pull_requests(repo, revision_range): # From fast forward squash-merges commits = this_repo.git.log( '--oneline', '--no-merges', '--first-parent', revision_range) - issues = re.findall(u'^.*\\(\\#(\\d+)\\)$', commits, re.M) - prnums.extend(int(s) for s in issues) + issues = re.findall(u'^.*\\((\\#|gh-|gh-\\#)(\\d+)\\)$', commits, re.M) + prnums.extend(int(s[1]) for s in issues) # get PR data from github repo prnums.sort() From 0edc08484a69e6269a2a0c3e9e4fabc901152f80 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 6 May 2021 17:52:04 -0600 Subject: [PATCH 1076/1270] DOC: Update 1.20.0-changelog.rst. --- doc/changelog/1.20.0-changelog.rst | 144 ++++++++++++++++++----------- 1 file changed, 88 insertions(+), 56 deletions(-) diff --git a/doc/changelog/1.20.0-changelog.rst b/doc/changelog/1.20.0-changelog.rst index f0c2a27234f7..f06bd8a8d22d 100644 --- a/doc/changelog/1.20.0-changelog.rst +++ b/doc/changelog/1.20.0-changelog.rst @@ -193,41 +193,47 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 684 pull requests were merged for this release. +A total of 716 pull requests were merged for this release. * `#13516 `__: ENH: enable multi-platform SIMD compiler optimizations * `#14779 `__: NEP 36 (fair play) * `#14882 `__: DEP: Deprecate aliases of builtin types in python 3.7+ -* `#15037 `__: BUG: `np.resize` negative shape and subclasses edge case fixes -* `#15121 `__: ENH: random: Add the method `permuted` to Generator. +* `#15037 `__: BUG: ``np.resize`` negative shape and subclasses edge case fixes +* `#15121 `__: ENH: random: Add the method ``permuted`` to Generator. 
* `#15162 `__: BUG,MAINT: Fix issues with non-reduce broadcasting axes * `#15471 `__: BUG: Ensure PyArray_FromScalar always returns the requested dtype * `#15507 `__: NEP 42: Technical decisions for new DTypes * `#15508 `__: API: Create Preliminary DTypeMeta class and np.dtype subclasses +* `#15551 `__: DOC: Simd optimization documentation * `#15604 `__: MAINT: Avoid exception in NpzFile destructor if constructor raises... -* `#15666 `__: ENH: Improved `__str__` for polynomials +* `#15666 `__: ENH: Improved ``__str__`` for polynomials * `#15759 `__: BUILD: Remove Accelerate support * `#15791 `__: [DOC] Added tutorial about the numpy.ma module. * `#15852 `__: ENH: Add where argument to np.mean * `#15886 `__: DEP: Deprecate passing shape=None to mean shape=() * `#15900 `__: DEP: Ensure indexing errors will be raised even on empty results * `#15997 `__: ENH: improve printing of arrays with multi-line reprs +* `#16056 `__: DEP: Deprecate inexact matches for mode, searchside * `#16130 `__: DOC: Correct documentation of ``__array__`` when used as output... * `#16134 `__: ENH: Implement concatenate dtype and casting keyword arguments -* `#16156 `__: DEP: Deprecate `numpy.dual`. +* `#16156 `__: DEP: Deprecate ``numpy.dual``. * `#16161 `__: BUG: Potential fix for divmod(1.0, 0.0) to raise divbyzero and... * `#16167 `__: DOC: Increase guidance and detail of np.polynomial docstring * `#16174 `__: DOC: Add transition note to all lib/poly functions * `#16200 `__: ENH: Rewrite of array-coercion to support new dtypes * `#16205 `__: ENH: Add ``full_output`` argument to ``f2py.compile``. +* `#16207 `__: DOC: Add PyArray_ContiguousFromObject C docs * `#16232 `__: DEP: Deprecate ufunc.outer with matrix inputs +* `#16237 `__: MAINT: precompute ``log(2.0 * M_PI)`` in ``random_loggam`` * `#16238 `__: MAINT: Unify cached (C-level static) imports * `#16239 `__: BUG,DOC: Allow attach docs twice but error if wrong * `#16242 `__: BUG: Fix default fallback in genfromtxt * `#16247 `__: ENH:Umath Replace raw SIMD of unary float point(32-64) with NPYV... * `#16248 `__: MRG, ENH: added edge keyword argument to digitize +* `#16253 `__: DOC: Clarify tiny/xmin in finfo and machar +* `#16254 `__: MAINT: Chain exceptions in generate_umath.py * `#16257 `__: DOC: Update the f2py section of the "Using Python as Glue" page. -* `#16260 `__: DOC: Improve `rec.array` function documentation (#15853) +* `#16260 `__: DOC: Improve ``rec.array`` function documentation * `#16266 `__: ENH: include dt64/td64 isinstance checks in ``__init__.pxd`` * `#16267 `__: DOC: Clarifications for np.std * `#16273 `__: BUG: Order percentile monotonically @@ -238,6 +244,7 @@ A total of 684 pull requests were merged for this release. * `#16283 `__: DOC: Add a note about performance of isclose compared to math.isclose * `#16284 `__: MAINT: Clean up the implementation of quantile * `#16285 `__: MAINT: Bump hypothesis from 5.12.0 to 5.14.0 +* `#16288 `__: BLD: Avoid "visibility attribute not supported" warning * `#16291 `__: DOC: Improve "tobytes" docstring. * `#16292 `__: BUG: Fix tools/download-wheels.py. * `#16295 `__: BUG: Require Python >= 3.6 in setup.py @@ -254,14 +261,17 @@ A total of 684 pull requests were merged for this release. * `#16318 `__: MAINT: Stop Using PyEval_Call* and simplify some uses * `#16321 `__: ENH: Improve the ARM cpu feature detection by parsing /proc/cpuinfo * `#16323 `__: DOC: Reconstruct Testing Guideline. +* `#16327 `__: BUG: Don't segfault on bad __len__ when assigning. 
* `#16329 `__: MAINT: Cleanup 'tools/download-wheels.py' * `#16332 `__: DOC: link np.interp to SciPy's interpolation functions (closes... * `#16333 `__: DOC: Fix spelling typo - homogenous to homogeneous. (#16324) * `#16334 `__: ENH: Use AVX-512 for np.isnan, np.infinite, np.isinf and np.signbit * `#16336 `__: BUG: Fix refcounting in add_newdoc * `#16337 `__: CI: Create a link for the circleCI artifact -* `#16348 `__: BUG: Fix dtype leak in `PyArray_FromAny` error path +* `#16346 `__: MAINT: Remove f-strings in setup.py. +* `#16348 `__: BUG: Fix dtype leak in ``PyArray_FromAny`` error path * `#16349 `__: BUG: Indentation for docstrings +* `#16350 `__: BUG: Set readonly flag in array interface * `#16351 `__: BUG: Fix small leaks in error path and ``empty_like`` with shape * `#16362 `__: MAINT: Streamline download-wheels. * `#16365 `__: DOC: Fix an obvious mistake in a message printed in doc/Makefile. @@ -301,6 +311,7 @@ A total of 684 pull requests were merged for this release. * `#16447 `__: DOC: add a "make show" command to doc/Makefile * `#16450 `__: DOC: Add a NEP link to all neps. * `#16452 `__: DOC,ENH: extend error message when Accelerate is detected +* `#16454 `__: TST: Add tests for PyArray_IntpConverter * `#16463 `__: DOC: Improve assert_warns docstring with example * `#16464 `__: MAINT: Bump hypothesis from 5.15.1 to 5.16.0 * `#16465 `__: DOC: Fix development_workflow links @@ -308,7 +319,7 @@ A total of 684 pull requests were merged for this release. * `#16471 `__: BLD: install mingw32 v7.3.0 for win32 * `#16472 `__: DOC: Fixes for 18 broken links * `#16474 `__: MAINT: use zip instead of range in piecewise -* `#16476 `__: ENH: add `norm=forward,backward` to numpy.fft functions +* `#16476 `__: ENH: add ``norm=forward,backward`` to numpy.fft functions * `#16482 `__: SIMD: Optimize the performace of np.packbits in ARM-based machine. * `#16485 `__: BUG: Fix result when a gufunc output broadcasts the inputs. * `#16500 `__: DOC: Point Contributing page to new NEP 45 @@ -340,6 +351,7 @@ A total of 684 pull requests were merged for this release. * `#16574 `__: MAINT: fix name of first parameter to dtype constructor in type... * `#16581 `__: DOC: Added an example for np.transpose(4d_array) * `#16583 `__: MAINT: changed np.generic arguments to positional-only +* `#16589 `__: MAINT: Remove nickname from polynomial classes. * `#16590 `__: DOC: Clarify dtype default for logspace and geomspace * `#16591 `__: DOC: Disallow complex args in arange * `#16592 `__: BUG: Raise TypeError for float->timedelta promotion @@ -364,8 +376,9 @@ A total of 684 pull requests were merged for this release. * `#16633 `__: MAINT: lib: Some code clean up in loadtxt * `#16635 `__: BENCH: remove obsolete goal_time param * `#16639 `__: BUG: Fix uint->timedelta promotion to raise TypeError -* `#16642 `__: MAINT: Replace `PyUString_GET_SIZE` with `PyUnicode_GetLength`. +* `#16642 `__: MAINT: Replace ``PyUString_GET_SIZE`` with ``PyUnicode_GetLength``. * `#16643 `__: REL: Fix outdated docs link +* `#16644 `__: MAINT: Improve performance of np.full * `#16646 `__: TST: add a static typing test for memoryviews as ArrayLikes * `#16647 `__: ENH: Added annotations to 8 functions from np.core.fromnumeric * `#16648 `__: REL: Update master after 1.19.0 release. @@ -377,14 +390,15 @@ A total of 684 pull requests were merged for this release. * `#16664 `__: DOC: Add lib.format.open_memmap to autosummary. * `#16666 `__: BUG: Fix bug in AVX complex absolute while processing array of... 
* `#16669 `__: MAINT: remove blacklist/whitelist terms +* `#16671 `__: DOC: Simplify and update git setup page * `#16674 `__: TST: Add extra debugging information to CPU features detection * `#16675 `__: ENH: Add support for file like objects to np.core.records.fromfile * `#16683 `__: DOC: updated gcc minimum recommend version to build from source -* `#16684 `__: MAINT: Allow `None` to be passed to certain `generic` subclasses +* `#16684 `__: MAINT: Allow None to be passed to certain generic subclasses * `#16690 `__: DOC: fixed docstring for descr_to_dtype -* `#16691 `__: DOC: Remove "matrix" from `triu` docstring. +* `#16691 `__: DOC: Remove "matrix" from ``triu`` docstring. * `#16696 `__: MAINT: add py.typed sentinel to package manifest -* `#16699 `__: MAINT: Fixup quantile tests to not use `np.float` +* `#16699 `__: MAINT: Fixup quantile tests to not use ``np.float`` * `#16702 `__: BLD: Add CPU entry for Emscripten / WebAssembly * `#16704 `__: TST: Disable Python 3.9-dev testing. * `#16706 `__: DOC: Add instruction about stable symlink @@ -393,11 +407,12 @@ A total of 684 pull requests were merged for this release. * `#16710 `__: ENH, BLD: Add RPATH support for AIX * `#16718 `__: DOC: fix typo * `#16720 `__: BUG: Fix PyArray_SearchSorted signature. +* `#16723 `__: NEP: Initial draft for NEP 43 for extensible ufuncs * `#16729 `__: ENH: Add annotations to the last 8 functions in numpy.core.fromnumeric * `#16730 `__: ENH: Use f90 compiler specified in f2py command line args for... * `#16731 `__: DOC: reword random c-api introduction, cython is documented in... * `#16735 `__: DOC: Tweak a sentence about broadcasting. -* `#16736 `__: DOC: Prepend `ma.` to references in ``numpy.ma`` +* `#16736 `__: DOC: Prepend ``ma.`` to references in ``numpy.ma`` * `#16738 `__: DOC: Remove redundant word * `#16742 `__: DOC: add unique() to See Also of repeat() * `#16743 `__: DOC: add example to unique() and make connection to repeat() @@ -414,6 +429,8 @@ A total of 684 pull requests were merged for this release. * `#16770 `__: MAINT: Remove unneeded call to PyUnicode_READY * `#16771 `__: MAINT: Fix deprecated functions in scalarapi.c * `#16775 `__: DOC: switch to logo with text +* `#16777 `__: BUG: Added missing return after raising error in methods.c +* `#16778 `__: NEP: Update NEP 42 to note the issue of circular references * `#16782 `__: ENH, TST: Bring the NumPy C SIMD vectorization interface "NPYV"... * `#16786 `__: BENCH: Add basic benchmarks for scalar indexing and assignment * `#16789 `__: BUG: fix decode error when building and get rid of warn @@ -467,7 +484,8 @@ A total of 684 pull requests were merged for this release. * `#16886 `__: DOC: Fix types including curly braces * `#16887 `__: DOC: Remove the links for ``True`` and ``False`` * `#16888 `__: ENH: Integrate the new CPU dispatcher with umath generator -* `#16894 `__: DOC: Fix wrong markups in `arrays.dtypes` +* `#16890 `__: TST, BUG: Re-raise MemoryError exception in test_large_zip's... +* `#16894 `__: DOC: Fix wrong markups in ``arrays.dtypes`` * `#16896 `__: DOC: Remove links for C codes * `#16897 `__: DOC: Fix the declarations of C fuctions * `#16899 `__: MNT: also use Py_SET_REFCNT instead of Py_REFCNT @@ -480,23 +498,24 @@ A total of 684 pull requests were merged for this release. * `#16919 `__: DOC: Add ufunc docstring to generated docs. * `#16925 `__: REL: Update master after 1.19.1 release. 
* `#16931 `__: Revert "Merge pull request #16248 from alexrockhill/edge" +* `#16935 `__: ENH: implement NEP-35's ``like=`` argument * `#16936 `__: BUG: Fix memory leak of buffer-info cache due to relaxed strides * `#16938 `__: ENH,API: Store exported buffer info on the array * `#16940 `__: BLD: update OpenBLAS build * `#16941 `__: BUG: Allow array-like types to be coerced as object array elements * `#16943 `__: DEP: Deprecate size-one ragged array coercion * `#16944 `__: Change the name of the folder "icons" to "logo". -* `#16949 `__: ENH: enable colors for `runtests.py --ipython` +* `#16949 `__: ENH: enable colors for ``runtests.py --ipython`` * `#16950 `__: DOC: Clarify input to irfft/irfft2/irfftn * `#16952 `__: MAINT: Bump hypothesis from 5.20.2 to 5.23.2 * `#16953 `__: update numpy/lib/arraypad.py with appropriate chain exception * `#16957 `__: MAINT: Use arm64 instead of aarch64 on travisCI. * `#16962 `__: MAINT: Chain exception in ``distutils/fcompiler/environment.py``. -* `#16966 `__: MAINT: Added the `order` parameter to `np.array()` +* `#16966 `__: MAINT: Added the ``order`` parameter to ``np.array()`` * `#16969 `__: ENH: Add Neon SIMD implementations for add, sub, mul, and div * `#16973 `__: DOC: Fixed typo in lib/recfunctions.py * `#16974 `__: TST: Add pypy win32 CI testing. -* `#16982 `__: ENH: Increase the use of `Literal` types +* `#16982 `__: ENH: Increase the use of ``Literal`` types * `#16986 `__: ENH: Add NumPy declarations to be used by Cython 3.0+ * `#16988 `__: DOC: Add the new NumPy logo to Sphinx pages * `#16991 `__: MAINT: Bump hypothesis from 5.23.2 to 5.23.9 @@ -505,18 +524,24 @@ A total of 684 pull requests were merged for this release. * `#16996 `__: DOC: Revise glossary page * `#17002 `__: DOC: clip() allows arguments. * `#17009 `__: NEP: Updated NEP-35 with keyword-only instruction +* `#17010 `__: BUG: Raise correct errors in boolean indexing fast path * `#17013 `__: MAINT: Simplify scalar power * `#17014 `__: MAINT: Improve error handling in umathmodule setup +* `#17022 `__: DOC: Fix non-matching pronoun. * `#17028 `__: DOC: Disclaimer for FFT library * `#17029 `__: MAINT: Add error return to all casting functionality and NpyIter * `#17033 `__: BUG: fix a compile and a test warning -* `#17036 `__: DOC: Clarify that `np.char` comparison functions always return... +* `#17036 `__: DOC: Clarify that ``np.char`` comparison functions always return... * `#17039 `__: DOC: Use a less ambiguous example for array_split * `#17041 `__: MAINT: Bump hypothesis from 5.23.9 to 5.23.12 * `#17048 `__: STY: core._internal style fixups * `#17050 `__: MAINT: Remove _EXTRAFLAGS variable +* `#17050 `__: MAINT: change ``for line in open()`` to ``with open() as f`` +* `#17052 `__: MAINT: Delete obsolete conversion to list * `#17053 `__: BUG: fix typo in polydiv that prevented promotion to poly1d +* `#17055 `__: MAINT: Replace lambda function by list comprehension * `#17058 `__: MAINT: Revert boolean casting back to elementwise comparisons... +* `#17059 `__: BUG: fix pickling of arrays larger than 2GiB * `#17062 `__: API, BUG: Raise error on complex input to i0 * `#17063 `__: MAINT: Remove obsolete conversion to set * `#17067 `__: DEP: lib: Remove the deprecated financial functions. @@ -530,7 +555,7 @@ A total of 684 pull requests were merged for this release. 
* `#17109 `__: MAINT: Split einsum into multiple files * `#17112 `__: BUG: Handle errors from the PyCapsule API * `#17115 `__: DOC: Fix spacing in vectorize doc -* `#17116 `__: API: Remove `np.ctypeslib.ctypes_load_library` +* `#17116 `__: API: Remove ``np.ctypeslib.ctypes_load_library`` * `#17119 `__: DOC: make spacing consistent in NEP 41 bullet points * `#17121 `__: BUG: core: fix ilp64 blas dot/vdot/... for strides > int32 max * `#17123 `__: ENH: allow running mypy through runtests.py @@ -542,13 +567,13 @@ A total of 684 pull requests were merged for this release. * `#17141 `__: MAINT: Make arrayprint str and repr the ndarray defaults. * `#17142 `__: DOC: NEP-42: Fix a few typos. * `#17143 `__: MAINT: Change handling of the expired financial functions. -* `#17144 `__: ENH: Add annotations to 3 functions in `np.core.function_base` +* `#17144 `__: ENH: Add annotations to 3 functions in ``np.core.function_base`` * `#17145 `__: MAINT, BUG: Replace uses of PyString_AsString. * `#17146 `__: MAINT: ``Replace PyUString_*`` by ``PyUnicode_*`` equivalents. * `#17149 `__: MAINT: Replace PyInt macros with their PyLong replacement * `#17150 `__: ENH: Add support for the abstract scalars to cython code * `#17151 `__: BUG: Fix incorrect cython definition of npy_cfloat -* `#17152 `__: MAINT: Clean up some Npy_ vs Py_ macro usage +* `#17152 `__: MAINT: Clean up some ``Npy_`` vs ``Py_`` macro usage * `#17154 `__: DOC: Remove references to PyCObject * `#17159 `__: DOC: Update numpy4matlab * `#17160 `__: Clean up some more bytes vs unicode handling @@ -558,22 +583,23 @@ A total of 684 pull requests were merged for this release. * `#17167 `__: BLD: Merge the npysort library into multiarray * `#17168 `__: TST: Add tests mapping out the rules for metadata in promotion * `#17171 `__: BUG: revert trim_zeros changes from gh-16911 -* `#17172 `__: ENH: Make `np.complexfloating` generic w.r.t. `np.floating` +* `#17172 `__: ENH: Make ``np.complexfloating`` generic w.r.t. ``np.floating`` * `#17176 `__: MAINT/ENH: datetime: remove calls to PyUnicode_AsASCIIString,... -* `#17180 `__: ENH: Added missing methods to `np.flatiter` +* `#17180 `__: ENH: Added missing methods to ``np.flatiter`` * `#17181 `__: DOC: Correct error in description of ndarray.base -* `#17182 `__: DOC: Document `dtype.metadata` +* `#17182 `__: DOC: Document ``dtype.metadata`` * `#17186 `__: MAINT: Use utf8 strings in more of datetime -* `#17188 `__: MAINT: Add placeholder stubs for `ndarray` and `generic` +* `#17188 `__: MAINT: Add placeholder stubs for ``ndarray`` and ``generic`` * `#17191 `__: MAINT: Bump hypothesis from 5.26.0 to 5.30.0 * `#17193 `__: MAINT: Remove some callers of functions in numpy.compat * `#17195 `__: ENH: Make the window functions exactly symmetric * `#17197 `__: MAINT: Improve error handling in npy_cpu_init -* `#17199 `__: DOC: Fix the documented signatures of four `ufunc` methods -* `#17201 `__: MAINT: Make the `NPY_CPU_DISPATCH_CALL` macros expressions not... +* `#17199 `__: DOC: Fix the documented signatures of four ``ufunc`` methods +* `#17201 `__: MAINT: Make the ``NPY_CPU_DISPATCH_CALL`` macros expressions not... * `#17204 `__: DOC: Fixed headings for tutorials so they appear at new theme... 
* `#17210 `__: DOC: Canonical_urls -* `#17214 `__: MAINT: Fix various issues with the `np.generic` annotations +* `#17214 `__: MAINT: Fix various issues with the ``np.generic`` annotations +* `#17215 `__: DOC: Use official MATLAB spelling in numpy-for-matlab-users.rst * `#17219 `__: BLD: enabled negation of library choices in NPY_*_ORDER * `#17220 `__: BUG, DOC: comment out metadata added via javascript * `#17222 `__: MAINT, DOC: move informational files from numpy.doc.*.py to their... @@ -583,7 +609,9 @@ A total of 684 pull requests were merged for this release. * `#17233 `__: DEP: Deprecated ndindex.ndincr * `#17235 `__: MAINT: Remove old PY_VERSION_HEX and sys.version_info code * `#17237 `__: BUG: Avoid using ``np.random`` in typing tests. +* `#17238 `__: DOC: Use SPDX license expressions with correct license * `#17239 `__: DOC: Fix link quick-start in old random API functions +* `#17240 `__: MAINT: added exception chaining in shape_base.py * `#17241 `__: MAINT: ``__array_interface__`` data address cannot be bytes * `#17242 `__: MAINT: Run slow CI jobs earlier so builds finishes sooner * `#17247 `__: ENH: Add tool to help speed up Travis CI @@ -595,9 +623,9 @@ A total of 684 pull requests were merged for this release. * `#17260 `__: MAINT: Bump pydata-sphinx-theme from 0.3.2 to 0.4.0 * `#17263 `__: DOC: add new glossary terms * `#17264 `__: DOC: remove some glosssary terms -* `#17267 `__: TST: Fix the path to `mypy.ini` in `runtests.py` +* `#17267 `__: TST: Fix the path to ``mypy.ini`` in ``runtests.py`` * `#17268 `__: BUG: sysconfig attributes/distutils issue -* `#17273 `__: ENH: Annotate the arithmetic operations of `ndarray` and `generic` +* `#17273 `__: ENH: Annotate the arithmetic operations of ``ndarray`` and ``generic`` * `#17278 `__: MAINT: Merge together index page content into a single file * `#17279 `__: DOC: Fix a typo in shape_base. * `#17284 `__: ENH: Pass optimizations arguments to asv build @@ -616,23 +644,23 @@ A total of 684 pull requests were merged for this release. * `#17304 `__: BUILD: pin pygments to 2.6.1, 2.7.0 breaks custom NumPyC lexer * `#17307 `__: MAINT: Bump hypothesis from 5.33.0 to 5.35.1 * `#17308 `__: MAINT: Bump pytest from 6.0.1 to 6.0.2 -* `#17309 `__: MAINT: Move the `fromnumeric` annotations to their own stub file +* `#17309 `__: MAINT: Move the ``fromnumeric`` annotations to their own stub file * `#17312 `__: MAINT: Syntax-highlight .src files on github * `#17313 `__: MAINT: Mark vendored/generated files in .gitattributes * `#17315 `__: MAINT: Cleanup f2py/cfuncs.py * `#17319 `__: BUG: Set deprecated fields to null in PyArray_InitArrFuncs * `#17320 `__: BUG: allow registration of hard-coded structured dtypes * `#17326 `__: ENH: Add annotations for five array construction functions -* `#17329 `__: DOC: Fix incorrect `.. deprecated::` syntax that led to this... -* `#17330 `__: DOC: improve `issubdtype` and scalar type docs -* `#17331 `__: DOC: Remove the tables of scalar types, and use `..autoclass`... +* `#17329 `__: DOC: Fix incorrect ``.. deprecated::`` syntax that led to this... +* `#17330 `__: DOC: improve ``issubdtype`` and scalar type docs +* `#17331 `__: DOC: Remove the tables of scalar types, and use ``..autoclass``... * `#17332 `__: DOC, BLD: update lexer highlighting and make numpydocs a regular... 
* `#17334 `__: MAINT: Chaining exceptions in npyio.py * `#17337 `__: NEP: Regenerate table in NEP 29 (add numpy 1.18 and 1.19 to list) * `#17338 `__: DOC: Fix syntax errors in docstrings for versionchanged, versionadded * `#17340 `__: SIMD: Add partial/non-contig load and store intrinsics for 32/64-bit * `#17344 `__: ENH, BLD: Support for the NVIDIA HPC SDK nvfortran compiler -* `#17346 `__: BLD,BUG: Fix a macOS build failure when `NPY_BLAS_ORDER=""` +* `#17346 `__: BLD,BUG: Fix a macOS build failure when ``NPY_BLAS_ORDER=""`` * `#17350 `__: DEV: Add PR prefix labeler and numpy prefix mapping * `#17352 `__: DOC: Guide to writing how-tos * `#17353 `__: DOC: How-to guide for I/O @@ -642,7 +670,7 @@ A total of 684 pull requests were merged for this release. * `#17364 `__: MAINT: Finish replacing PyInt_Check * `#17369 `__: DOC: distutils: Remove an obsolete paragraph. * `#17370 `__: NEP: Edit nep-0042 for more clarity -* `#17372 `__: ENH: Add annotations for remaining `ndarray` / `generic` non-magic... +* `#17372 `__: ENH: Add annotations for remaining ``ndarray`` / ``generic`` non-magic... * `#17373 `__: BUG: Fixes module data docstrings. * `#17375 `__: DOC: Fix default_rng docstring * `#17377 `__: BUG: ensure _UFuncNoLoopError can be pickled @@ -654,6 +682,7 @@ A total of 684 pull requests were merged for this release. * `#17391 `__: DOC: Replace "About NumPy" with "Document conventions" * `#17392 `__: DOC: Update info on doc style rules * `#17393 `__: BUG: Fix default void, datetime, and timedelta in array coercion +* `#17394 `__: ENH: Implement sliding window * `#17396 `__: MAINT: Replace append_metastr_to_string function. * `#17399 `__: BLD: Fixed ARGOUTVIEWM memory deallocation. Closes #17398. * `#17400 `__: DOC: rm incorrect alias from recarray user article. @@ -661,12 +690,12 @@ A total of 684 pull requests were merged for this release. * `#17402 `__: DOC: Add arraysetops to an autosummary * `#17404 `__: MAINT: Replace PyUString_ConcatAndDel in nditer_constr.c. * `#17405 `__: MAINT: Replace PyUString_ConcatAndDel in mapping.c. -* `#17406 `__: ENH: Replace the module-level `__getattr__` with explicit type... +* `#17406 `__: ENH: Replace the module-level ``__getattr__`` with explicit type... * `#17407 `__: DOC: in PR template, set expectations for PR review timeline * `#17409 `__: MAINT: Cleanup remaining PyUString_ConcatAndDel use. * `#17410 `__: API: Special case how numpy scalars are coerced to signed integer * `#17411 `__: TST: Mark the typing tests as slow -* `#17412 `__: DOC: Fix a parameter type in the `putmask` docs +* `#17412 `__: DOC: Fix a parameter type in the ``putmask`` docs * `#17418 `__: DOC: adding operational form documentation for array ops * `#17419 `__: DEP: Deprecate coercion to subarray dtypes * `#17421 `__: BUG: Fix memory leak in array-coercion error paths @@ -674,7 +703,7 @@ A total of 684 pull requests were merged for this release. * `#17423 `__: DOC: Remove bogus reference to _a_ * `#17424 `__: DOC: Fix formatting issues in description of .c.src files * `#17427 `__: NEP: nep-0029 typo correction -* `#17429 `__: MAINT: Move aliases for common scalar unions to `numpy.typing` +* `#17429 `__: MAINT: Move aliases for common scalar unions to ``numpy.typing`` * `#17430 `__: BUG: Fix memoryleaks related to NEP 37 function overrides * `#17431 `__: DOC: Fix the links for ``Ellipsis`` * `#17432 `__: DOC: add references to einops and opt_einsum @@ -685,7 +714,7 @@ A total of 684 pull requests were merged for this release. 
* `#17440 `__: DOC: Cleaner template for PRs * `#17442 `__: MAINT: fix exception chaining in format.py * `#17443 `__: ENH: Warn on unsupported Python 3.10+ -* `#17444 `__: ENH: Add `Typing :: Typed` to the PyPi classifier +* `#17444 `__: ENH: Add ``Typing :: Typed`` to the PyPi classifier * `#17445 `__: DOC: Fix the references for macros * `#17447 `__: NEP: update NEP 42 with discussion of type hinting applications * `#17448 `__: DOC: Remove CoC pages from Sphinx @@ -699,16 +728,17 @@ A total of 684 pull requests were merged for this release. * `#17468 `__: DOC: add some missing scalar aliases * `#17472 `__: TST: Fix doctest for full_like * `#17473 `__: MAINT: py3k: remove os.fspath and os.PathLike backports -* `#17474 `__: MAINT: Move the `np.core.numeric` annotations to their own stub... -* `#17479 `__: ENH: type np.unicode_ as np.str_ +* `#17474 `__: MAINT: Move the ``np.core.numeric`` annotations to their own stub... +* `#17479 `__: ENH: type ``np.unicode_`` as ``np.str_`` * `#17481 `__: DOC: Fix the entries for members of structures -* `#17483 `__: DOC: Fix the references for `random.*` +* `#17483 `__: DOC: Fix the references for ``random.*`` * `#17485 `__: BLD: circleCI- merge before build, add -n to sphinx * `#17487 `__: MAINT: Remove duplicate placeholder annotations +* `#17493 `__: DOC: New round of NEP 42 edits * `#17497 `__: DOC: Use consistent lowercase on docs landing page * `#17498 `__: MAINT: fix incompatible type comparison in numpy.lib.utils.info * `#17501 `__: BUG: Fix failures in master related to userdtype registeration -* `#17502 `__: BUG: remove `sys` from the type stubs +* `#17502 `__: BUG: remove ``sys`` from the type stubs * `#17503 `__: DOC: Fix empty 'C style guide' page * `#17504 `__: DOC: Rename 'Quickstart tutorial' * `#17508 `__: ENH: Added the Final feature for all constants @@ -726,15 +756,15 @@ A total of 684 pull requests were merged for this release. * `#17537 `__: MAINT: Bump hypothesis from 5.37.0 to 5.37.1 * `#17538 `__: MAINT: Bump pydata-sphinx-theme from 0.4.0 to 0.4.1 * `#17539 `__: MAINT: Bump mypy from 0.782 to 0.790 -* `#17540 `__: ENH: Make `np.number` generic with respect to its precision +* `#17540 `__: ENH: Make ``np.number`` generic with respect to its precision * `#17541 `__: CI: fix conditional for PR merge command -* `#17546 `__: MAINT: explicit disabling `CCompilerOpt` in F2PY +* `#17546 `__: MAINT: explicit disabling ``CCompilerOpt`` in F2PY * `#17548 `__: BUG: Cygwin Workaround for #14787 on affected platforms * `#17549 `__: DOC: Fix the entries of C functions * `#17555 `__: DOC: Fix wrong blockquotes * `#17558 `__: DOC: MAINT: Add NEP 43 links to NEP 42 * `#17559 `__: DOC: Remove directives for some constants -* `#17564 `__: MAINT: Update the annotations in `np.core.numeric` +* `#17564 `__: MAINT: Update the annotations in ``np.core.numeric`` * `#17570 `__: DOC: Add the entry for ``NPY_FEATURE_VERSION`` * `#17571 `__: DOC: Fix typos * `#17572 `__: ENH: Add annotations for three new constants @@ -742,27 +772,27 @@ A total of 684 pull requests were merged for this release. * `#17577 `__: BUG: Respect dtype of all-zero argument to poly1d * `#17578 `__: NEP36: include additional feedback * `#17580 `__: MAINT: Cleanup swig for Python 3. -* `#17581 `__: MAINT: Move the `np.core.numerictypes` annotations to their own... +* `#17581 `__: MAINT: Move the ``np.core.numerictypes`` annotations to their own... 
* `#17583 `__: MAINT: Bump hypothesis from 5.37.1 to 5.37.3 -* `#17584 `__: ENH: Add annotations for `np.core._type_aliases` +* `#17584 `__: ENH: Add annotations for ``np.core._type_aliases`` * `#17594 `__: DOC: Typo in lexsort docstring * `#17596 `__: DEP,BUG: Coercion/cast of array to a subarray dtype will be fixed * `#17597 `__: TST: Clean up the errors of the typing tests * `#17598 `__: BUG: Fixed file handle leak in array_tofile. -* `#17601 `__: TST: Fix a broken `np.core.numeric` test +* `#17601 `__: TST: Fix a broken ``np.core.numeric`` test * `#17603 `__: MAINT: Mark dead code as intentional for clang. * `#17607 `__: DOC: removed old references to submodule licenses * `#17608 `__: DOC: Fix typos (general documentation) * `#17610 `__: Fully qualify license trove classifier * `#17611 `__: BUG: mac dylib treated as part of extra objects by f2py -* `#17613 `__: ENH: Add annotations for 9 `ndarray`/`generic` magic methods +* `#17613 `__: ENH: Add annotations for 9 ``ndarray``/``generic`` magic methods * `#17614 `__: DOC: Fix the document for arrays interface * `#17618 `__: MAINT: Conversion of some strings to f-strings * `#17619 `__: DOC: Fix some references * `#17621 `__: TST: Valid docstring for config_py function show() * `#17622 `__: MAINT: Conversion of some strings to fstrings, part II * `#17623 `__: MAINT: Conversion of some strings to fstrings, part III -* `#17624 `__: DOC: Tidy up references to str_ / bytes_ +* `#17624 `__: DOC: Tidy up references to ``str_`` / ``bytes_`` * `#17625 `__: MAINT: Conversion of some strings to fstrings, part iv * `#17627 `__: DOC: Fix the references for ``__array_*__`` * `#17628 `__: DOC: Add entries for macros @@ -773,8 +803,8 @@ A total of 684 pull requests were merged for this release. * `#17639 `__: MAINT: Bump hypothesis from 5.37.3 to 5.38.0 * `#17641 `__: MAINT, BLD: update to OpenBLAS v0.3.12 * `#17642 `__: DOC: Fix reference to atleast_1d -* `#17643 `__: ENH: Add annotations for `np.core._ufunc_config` -* `#17644 `__: ENH: Add annotations for `np.core.shape_base` +* `#17643 `__: ENH: Add annotations for ``np.core._ufunc_config`` +* `#17644 `__: ENH: Add annotations for ``np.core.shape_base`` * `#17645 `__: BUG: fix np.timedelta64('nat').__format__ throwing an exception * `#17654 `__: BUG: f2py incorrectly translates dimension declarations. * `#17655 `__: BLD: Fix installing Numpy on z/OS @@ -799,7 +829,9 @@ A total of 684 pull requests were merged for this release. * `#17700 `__: Fix small typos. * `#17701 `__: BUG: Fixed an issue where ``.pyi`` files were ignored by numpy... * `#17703 `__: Fix Doc Typos & Added Example +* `#17706 `__: BUG: Raise promotion error if a DType was provided in array coercion * `#17708 `__: Improve the einsum bench by adding new bench cases and variable... +* `#17711 `__: ENH: adds type hints to numpy.version * `#17715 `__: REV: Revert gh-17654 - f2py incorrectly translates dimension... * `#17717 `__: MAINT: Add more files to ``.gitgnore`` * `#17720 `__: API: Do not import sliding_window_view to main namespace @@ -836,9 +868,9 @@ A total of 684 pull requests were merged for this release. * `#17830 `__: TST: Add back durations flag for DEBUG builds. 
* `#17832 `__: BUG: Fix subarray dtype used with too large count in fromfile * `#17833 `__: BUG: Fix pickling of scalars with NPY_LISTPICKLE -* `#17838 `__: DOC: Update the `numpy.typing` documentation +* `#17838 `__: DOC: Update the ``numpy.typing`` documentation * `#17841 `__: DOC: Fixing boilerplate code example -* `#17844 `__: MAINT: Add ``__all__`` to `numpy.typing` +* `#17844 `__: MAINT: Add ``__all__`` to ``numpy.typing`` * `#17848 `__: DOC: Add release note for gh-16161. * `#17855 `__: BUG: Fix incorrect C function prototypes/declarations. * `#17857 `__: MAINT: Prepare for the NumPy 1.20.x branch. From cf56aeb90161d8656513d1abecd3a7076243aec7 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 7 May 2021 09:16:35 -0600 Subject: [PATCH 1077/1270] DOC: Update 1.19.0-changelog.rst. --- doc/changelog/1.19.0-changelog.rst | 100 ++++++++++++++++++++--------- 1 file changed, 68 insertions(+), 32 deletions(-) diff --git a/doc/changelog/1.19.0-changelog.rst b/doc/changelog/1.19.0-changelog.rst index bd743832a33b..bde00249972a 100644 --- a/doc/changelog/1.19.0-changelog.rst +++ b/doc/changelog/1.19.0-changelog.rst @@ -136,25 +136,28 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 452 pull requests were merged for this release. +A total of 488 pull requests were merged for this release. * `#8255 `__: ENH: add identity kwarg to frompyfunc +* `#10600 `__: DOC: Do not complain about contiguity when mutating ``ndarray.shape`` * `#12646 `__: TST: check exception details in refguide_check.py * `#13421 `__: ENH: improve runtime detection of CPU features * `#14326 `__: TST: Add assert_array_equal test for big integer arrays. * `#14376 `__: MAINT: Remove unnecessary 'from __future__ import ...' statements * `#14530 `__: MAINT: Fix typos and copy edit NEP-0030. * `#14546 `__: DOC: NumPy for absolute beginners tutorial -* `#14715 `__: NEP: Proposal for array creation dispatching with `__array_function__` +* `#14715 `__: NEP: Proposal for array creation dispatching with ``__array_function__`` * `#14867 `__: ENH: Use AVX-512F for np.maximum and np.minimum * `#14924 `__: BUG: Fix numpy.random.dirichlet returns NaN for small 'alpha'... -* `#14933 `__: API: Use `ResultType` in `PyArray_ConvertToCommonType` +* `#14933 `__: API: Use ``ResultType`` in ``PyArray_ConvertToCommonType`` +* `#14940 `__: BUG: pickle the content of a scalar containing objects, not the... * `#14942 `__: MAINT,API: ignore and NULL fasttake/fastputmask ArrFuncs slots * `#14981 `__: BUG: Make ``ediff1d`` kwarg casting consistent * `#14988 `__: DOC: linalg: Include information about scipy.linalg. * `#14995 `__: BUG: Use ``__array__`` during dimension discovery * `#15011 `__: MAINT: cleanup compat.py3k.py * `#15022 `__: ENH: f2py: improve error messages +* `#15024 `__: DOC: clarify documentation for transpose() * `#15028 `__: [DOC] LaTeX: fix preamble (closes #15026) * `#15035 `__: BUG: add endfunction, endsubroutine to valid fortran end words * `#15040 `__: TST: Add test for object method (and general unary) loops @@ -165,6 +168,7 @@ A total of 452 pull requests were merged for this release. * `#15052 `__: MAINT: follow-up cleanup for blas64 PR * `#15054 `__: DOC: add docstrings to refguide-check * `#15066 `__: Revert "DEP: issue deprecation warning when creating ragged array... 
+* `#15068 `__: ENH: Add support to sort timedelta64 ``NaT`` to end of the array * `#15069 `__: ENH: add support for ILP64 OpenBLAS (without symbol suffix) * `#15070 `__: DOC: correct version for NaT sort * `#15072 `__: TST: Check requires_memory immediately before the test @@ -201,6 +205,7 @@ A total of 452 pull requests were merged for this release. * `#15187 `__: MAINT: unskip test on win32 * `#15189 `__: ENH: Add property-based tests using Hypothesis * `#15194 `__: BUG: test, fix for c++ compilation +* `#15195 `__: MAINT: refactoring in np.core.records * `#15196 `__: DOC: Adding instructions for building documentation to developer... * `#15197 `__: DOC: NEP 37: A dispatch protocol for NumPy-like modules * `#15203 `__: MAINT: Do not use private Python function in testing @@ -215,6 +220,8 @@ A total of 452 pull requests were merged for this release. * `#15227 `__: DOC: typo in release.rst * `#15228 `__: NEP: universal SIMD NEP 38 * `#15229 `__: MAINT: Remove unused int_asbuffer +* `#15230 `__: BUG: do not emit warnings for np.sign, np.equal when using nan +* `#15231 `__: MAINT: Remove Python2 specific C module setup [part2] * `#15232 `__: MAINT: Cleaning up PY_MAJOR_VERSION/PY_VERSION_HEX * `#15233 `__: MAINT: Clean up more PY_VERSION_HEX * `#15236 `__: MAINT: Remove implicit inheritance from object class @@ -224,14 +231,14 @@ A total of 452 pull requests were merged for this release. * `#15241 `__: MAINT: Remove references to non-existent sys.exc_clear() * `#15242 `__: DOC: Update HOWTO_RELEASE.rst * `#15248 `__: MAINT: cleanup use of sys.exc_info -* `#15249 `__: MAINT: Eliminate some calls to `eval` +* `#15249 `__: MAINT: Eliminate some calls to ``eval`` * `#15251 `__: MAINT: Improve const-correctness of shapes and strides * `#15253 `__: DOC: clarify the effect of None parameters passed to ndarray.view * `#15254 `__: MAINT: Improve const-correctness of string arguments * `#15255 `__: MAINT: Delete numpy.distutils.compat * `#15256 `__: MAINT: Implement keyword-only arguments as syntax * `#15260 `__: MAINT: Remove FIXME comments introduced in the previous commit -* `#15261 `__: MAINT: Work with unicode strings in `dtype('i8,i8')` +* `#15261 `__: MAINT: Work with unicode strings in ``dtype('i8,i8')`` * `#15262 `__: BUG: Use PyDict_GetItemWithError() instead of PyDict_GetItem() * `#15263 `__: MAINT: Remove python2 array_{get,set}slice * `#15264 `__: DOC: Add some missing functions in the list of available ufuncs. @@ -248,8 +255,9 @@ A total of 452 pull requests were merged for this release. * `#15280 `__: BENCH: Add basic benchmarks for take and putmask * `#15281 `__: MAINT: Cleanup most PY3K #ifdef guards * `#15282 `__: DOC: BLD: add empty release notes for 1.19.0 to fix doc build... +* `#15283 `__: MAINT: Cleanup more NPY_PY3K * `#15284 `__: MAINT: Use a simpler return convention for internal functions -* `#15285 `__: MAINT: Simplify np.int_ inheritance +* `#15285 `__: MAINT: Simplify ``np.int_`` inheritance * `#15286 `__: DOC" Update np.full docstring. * `#15287 `__: MAINT: Express PyArray_DescrAlignConverter in terms of _convert_from_any * `#15288 `__: MAINT: Push down declarations in _convert_from_* @@ -261,7 +269,7 @@ A total of 452 pull requests were merged for this release. 
* `#15304 `__: MAINT: Remove NPY_PY3K constant * `#15305 `__: MAINT: Remove sys.version checks in tests * `#15307 `__: MAINT: cleanup sys.version dependant code -* `#15310 `__: MAINT: Ensure `_convert_from_*` functions set errors +* `#15310 `__: MAINT: Ensure ``_convert_from_*`` functions set errors * `#15312 `__: MAINT: Avoid escaping unicode in error messages * `#15315 `__: MAINT: Change file extension of ma README to rst. * `#15319 `__: BUG: fix NameError in clip nan propagation tests @@ -271,22 +279,25 @@ A total of 452 pull requests were merged for this release. * `#15329 `__: TST: move _no_tracing to testing._private, remove testing.support * `#15333 `__: BUG: Add some missing C error handling * `#15335 `__: MAINT: Remove sys.version checks -* `#15336 `__: DEP: Deprecate `->f->fastclip` at registration time +* `#15336 `__: DEP: Deprecate ``->f->fastclip`` at registration time * `#15338 `__: DOC: document site.cfg.example * `#15350 `__: MAINT: Fix mistype in histogramdd docstring * `#15351 `__: DOC, BLD: reword release note, upgrade sphinx version * `#15353 `__: MAINT: Remove unnecessary calls to PyArray_DATA from binomial... * `#15354 `__: MAINT: Bump pytest from 5.3.2 to 5.3.3 +* `#15355 `__: MAINT: Const qualify UFunc inner loops * `#15358 `__: MAINT: Remove six * `#15361 `__: MAINT: Revise imports from collections.abc module * `#15362 `__: MAINT: remove internal functions required to handle Python2/3... * `#15364 `__: MAINT: Remove other uses of six module * `#15366 `__: MAINT: resolve pyflake F403 'from module import *' used +* `#15367 `__: DOC: Fix Multithreaded Generation example docs * `#15368 `__: MAINT: Update tox for supported Python versions * `#15369 `__: MAINT: simd: Avoid signed comparison warning -* `#15370 `__: DOC: Updating Chararry Buffer datatypes #15360 +* `#15370 `__: DOC: Updating Chararry Buffer datatypes +* `#15373 `__: MAINT: Remove sys.version checks * `#15374 `__: TST: Simplify unicode test -* `#15375 `__: MAINT: Use `with open` when possible +* `#15375 `__: MAINT: Use ``with open`` when possible * `#15377 `__: MAINT: Cleanup python2 references * `#15379 `__: MAINT: Python2 Cleanups * `#15381 `__: DEP: add PendingDeprecation to matlib.py funky namespace @@ -303,11 +314,14 @@ A total of 452 pull requests were merged for this release. * `#15407 `__: MAINT: Replace basestring with str. * `#15408 `__: ENH: Use AVX-512F for complex number arithmetic, absolute, square... * `#15414 `__: MAINT: Remove Python2 workarounds +* `#15415 `__: MAINT: Revert f2py Python 2.6 workaround * `#15417 `__: MAINT: Cleanup references to python2 * `#15418 `__: MAINT, DOC: Remove use of old Python __builtin__, now known as... * `#15421 `__: ENH: Make use of ExitStack in npyio.py * `#15422 `__: MAINT: Inline gentype_getreadbuf * `#15423 `__: MAINT: Use f-strings for clarity. +* `#15425 `__: MAINT: dir(numpy) returns duplicate "testing" +* `#15426 `__: MAINT: Use the PyArrayScalar_VAL macro where possible * `#15427 `__: DEP: Schedule unused C-API functions for removal/disabling * `#15428 `__: DOC: Improve ndarray.ctypes example * `#15429 `__: DOC: distutils: Add a docstring to show_config(). @@ -315,14 +329,15 @@ A total of 452 pull requests were merged for this release. * `#15434 `__: MAINT: Updated polynomial to use fstrings * `#15435 `__: DOC: Fix Incorrect document in Beginner Docs * `#15436 `__: MAINT: Update core.py with fstrings (issue #15420) -* `#15439 `__: DOC: fix docstrings so `python tools/refguide-check --rst ... 
+* `#15439 `__: DOC: fix docstrings so ``python tools/refguide-check --rst ``... * `#15441 `__: MAINT: Tidy macros in scalar_new * `#15444 `__: MAINT: use 'yield from ' for simple cases * `#15445 `__: MAINT: Bump pytest from 5.3.3 to 5.3.4 * `#15446 `__: BUG: Reject nonsense arguments to scalar constructors * `#15449 `__: DOC: Update refguide_check note on how to skip code -* `#15451 `__: MAINT: Simplify `np.object_.__new__` +* `#15451 `__: MAINT: Simplify ``np.object_.__new__`` * `#15452 `__: STY,MAINT: avoid 'multiple imports on one line' +* `#15463 `__: ENH: expose ``bit_generator`` and random C-API to cython * `#15464 `__: MAINT: Cleanup duplicate line in refguide_check * `#15465 `__: MAINT: cleanup unused imports; avoid redefinition of imports * `#15468 `__: BUG: Fix for SVD not always sorted with hermitian=True @@ -330,21 +345,22 @@ A total of 452 pull requests were merged for this release. * `#15474 `__: MAINT: Eliminate messy _WORK macro * `#15476 `__: update result of rng.random(3) to current rng output * `#15480 `__: DOC: Correct get_state doc -* `#15482 `__: MAINT: Use `.identifier = val` to fill type structs +* `#15482 `__: MAINT: Use ``.identifier = val`` to fill type structs * `#15483 `__: [DOC] Mention behaviour of np.squeeze with one element * `#15484 `__: ENH: fixing generic error messages to be more specific in multiarray/descriptor.c * `#15487 `__: BUG: Fixing result of np quantile edge case * `#15491 `__: TST: mark the top 3 slowest tests to save ~10 seconds * `#15493 `__: MAINT: Bump pytest from 5.3.4 to 5.3.5 * `#15500 `__: MAINT: Use True/False instead of 1/0 in np.dtype.__reduce__ -* `#15503 `__: MAINT: Do not allow `copyswap` and friends to fail silently +* `#15503 `__: MAINT: Do not allow ``copyswap`` and friends to fail silently * `#15504 `__: DOC: Remove duplicated code in true_divide docstring * `#15505 `__: NEP 40: Informational NEP about current DTypes +* `#15506 `__: NEP 41: First steps towards improved Datatype Support * `#15510 `__: DOC: Update unique docstring example * `#15511 `__: MAINT: Large overhead in some random functions * `#15516 `__: TST: Fix missing output in refguide-check * `#15521 `__: MAINT: Simplify arraydescr_richcompare -* `#15522 `__: MAINT: Fix internal misuses of `NPY_TITLE_KEY` +* `#15522 `__: MAINT: Fix internal misuses of ``NPY_TITLE_KEY`` * `#15524 `__: DOC: Update instructions for building/archiving docs. * `#15526 `__: BUG: Fix inline assembly that detects cpu features on x86(32bit) * `#15532 `__: update doctests, small bugs and changes of repr @@ -354,13 +370,15 @@ A total of 452 pull requests were merged for this release. * `#15543 `__: NEP: edit and move NEP 38 to accepted status * `#15547 `__: MAINT: Refresh Doxyfile and modernize numpyfilter.py * `#15549 `__: TST: Accuracy test float32 sin/cos/exp/log for AVX platforms -* `#15550 `__: DOC: Improve the `numpy.linalg.eig` docstring. +* `#15550 `__: DOC: Improve the ``numpy.linalg.eig`` docstring. +* `#15553 `__: BUG: Added missing error check in ``ndarray.__contains__`` * `#15554 `__: NEP 44 - Restructuring the NumPy Documentation * `#15556 `__: TST: (Travis CI) Use full python3-dbg path for virtual env creation * `#15560 `__: BUG, DOC: restore missing import * `#15566 `__: DOC: Removing bad practices from quick start + some PEP8 * `#15574 `__: TST: Do not create symbolic link named gfortran. 
* `#15575 `__: DOC: Document caveat in random.uniform +* `#15577 `__: TST: Test division by zero both with scalar and with array * `#15579 `__: DOC: numpy.clip is equivalent to minimum(..., maximum(...)) * `#15582 `__: MAINT: Bump cython from 0.29.14 to 0.29.15 * `#15583 `__: MAINT: Bump hypothesis from 5.3.0 to 5.5.4 @@ -369,6 +387,7 @@ A total of 452 pull requests were merged for this release. * `#15600 `__: TST: use manylinux2010 docker instead of ubuntu * `#15610 `__: TST: mask DeprecationWarning in xfailed test * `#15612 `__: BUG: Fix bug in AVX-512F np.maximum and np.minimum +* `#15614 `__: DOC: Reword docstring for assert_equal * `#15615 `__: BUG: Remove check requiring natural alignment of float/double... * `#15616 `__: DOC: Add missing imports, definitions and dummy file * `#15619 `__: DOC: Fix documentation for apply_along_axis @@ -376,9 +395,10 @@ A total of 452 pull requests were merged for this release. * `#15631 `__: MAINT: Pull identical line out of conditional. * `#15633 `__: DOC: remove broken link in f2py tutorial * `#15639 `__: BLD: update openblas download to new location, use manylinux2010-base +* `#15644 `__: DOC: Update to clarify actual behavior real_if_(all elements)_close * `#15648 `__: MAINT: AVX512 implementation with intrinsic for float64 input... * `#15653 `__: BLD: update OpenBLAS to pre-0.3.9 version -* `#15662 `__: DOC: Refactor `np.polynomial` docs using `automodule` +* `#15662 `__: DOC: Refactor ``np.polynomial`` docs using ``automodule`` * `#15665 `__: BUG: fix doctest exception messages * `#15672 `__: MAINT: Added comment pointing FIXME to relevant PR. * `#15673 `__: DOC: Make extension module wording more clear @@ -386,16 +406,18 @@ A total of 452 pull requests were merged for this release. * `#15680 `__: DOC: Improve Benchmark README with environment setup and more... * `#15682 `__: MAINT: Bump hypothesis from 5.5.4 to 5.6.0 * `#15683 `__: NEP: move NEP 44 to accepted status +* `#15685 `__: ENH: Add ``subok`` parameter to np.copy function (cf. #6509) * `#15694 `__: DOC: Fix indexing docs to pass refguide * `#15695 `__: MAINT: Test during import to detect bugs with Accelerate(MacOS)... * `#15696 `__: MAINT: Add a fast path to var for complex input * `#15701 `__: MAINT: Convert shebang from python to python3 (#15687) * `#15702 `__: MAINT: replace optparse with argparse for 'doc' and 'tools' scripts * `#15703 `__: DOC: Fix quickstart doc to pass refguide +* `#15705 `__: DOC: Change list to tuple in example description. * `#15706 `__: MAINT: Fixing typos in f2py comments and code. * `#15710 `__: DOC: fix SVD tutorial to pass refguide * `#15714 `__: MAINT: use list-based APIs to call subprocesses -* `#15715 `__: ENH: update numpy.linalg.multi_dot to accept an `out` argument +* `#15715 `__: ENH: update numpy.linalg.multi_dot to accept an ``out`` argument * `#15716 `__: TST: always use 'python -mpip' not 'pip' * `#15717 `__: DOC: update datetime reference to pass refguide * `#15718 `__: DOC: Fix coremath.rst to fix refguide_check @@ -403,6 +425,7 @@ A total of 452 pull requests were merged for this release. * `#15723 `__: BUG: fix logic error when nm fails on 32-bit * `#15724 `__: TST: Remove nose from the test_requirements.txt file. * `#15733 `__: DOC: Allow NEPs to link to python, numpy, scipy, and matplotlib... +* `#15735 `__: DOC: LICENSE 2019 -> 2020 * `#15736 `__: BUG: Guarantee array is in valid state after memory error occurs... * `#15738 `__: MAINT: Remove non-native byte order from _var check. 
* `#15740 `__: MAINT: Add better error handling in linalg.norm for vectors and... @@ -418,28 +441,30 @@ A total of 452 pull requests were merged for this release. * `#15769 `__: ENH: Allow toggling madvise hugepage and fix default * `#15771 `__: DOC: Fix runtests example in developer docs * `#15773 `__: DEP: Make issubdtype consistent for types and dtypes -* `#15774 `__: MAINT: remove useless `global` statements +* `#15774 `__: MAINT: remove useless ``global`` statements * `#15778 `__: BLD: Add requirements.txt file for building docs * `#15781 `__: BUG: don't add 'public' or 'private' if the other one exists -* `#15784 `__: ENH: Use TypeError in `np.array` for python consistency +* `#15784 `__: ENH: Use TypeError in ``np.array`` for python consistency * `#15794 `__: BUG: Add basic __format__ for masked element to fix incorrect... * `#15797 `__: TST: Add unit test for out=None of np.einsum * `#15799 `__: MAINT: Cleanups to np.insert and np.delete * `#15800 `__: BUG: Add error-checking versions of strided casts. -* `#15802 `__: DEP: Make `np.insert` and `np.delete` on 0d arrays with an axis... -* `#15803 `__: DOC: correct possible list lengths for `extobj` in ufunc calls +* `#15802 `__: DEP: Make ``np.insert`` and ``np.delete`` on 0d arrays with an axis... +* `#15803 `__: DOC: correct possible list lengths for ``extobj`` in ufunc calls * `#15804 `__: DEP: Make np.delete on out-of-bounds indices an error -* `#15805 `__: DEP: Forbid passing non-integral index arrays to `insert` and... +* `#15805 `__: DEP: Forbid passing non-integral index arrays to ``insert`` and... * `#15806 `__: TST: Parametrize sort test * `#15809 `__: TST: switch PyPy job with CPython * `#15812 `__: TST: Remove code that is not supposed to warn out of warning... * `#15815 `__: DEP: Do not cast boolean indices to integers in np.delete * `#15816 `__: MAINT: simplify code that assumes str/unicode and int/long are... +* `#15827 `__: BUG: Break on all errors when performing strided casts. * `#15830 `__: MAINT: pathlib and hashlib are in stdlib in Python 3.5+ -* `#15832 `__: ENH: improved error message `IndexError: too many indices for... +* `#15832 `__: ENH: improved error message ``IndexError: too many indices for``... +* `#15834 `__: NEP: Add paragraph to NEP 41 about no array-object use and fix... * `#15836 `__: BUG: Fix IndexError for illegal axis in np.mean * `#15839 `__: DOC: Minor fix to _hist_bin_fd documentation -* `#15840 `__: BUG,DEP: Make `scalar.__round__()` behave like pythons round +* `#15840 `__: BUG,DEP: Make ``scalar.__round__()`` behave like pythons round * `#15843 `__: DOC: First steps towards docs restructuring (NEP 44) * `#15848 `__: DOC, TST: enable refguide_check in circleci * `#15850 `__: DOC: fix typo in C-API reference @@ -447,8 +472,9 @@ A total of 452 pull requests were merged for this release. * `#15866 `__: MAINT: Bump cython from 0.29.15 to 0.29.16 * `#15867 `__: DEP: Deprecate ndarray.tostring() * `#15868 `__: TST: use draft OpenBLAS build +* `#15870 `__: ENH: Add keepdims argument to count_nonzero * `#15872 `__: BUG: Fix eigh and cholesky methods of numpy.random.multivariate_normal -* `#15876 `__: BUG: Check that `pvals` is 1D in `_generator.multinomial`. +* `#15876 `__: BUG: Check that ``pvals`` is 1D in ``_generator.multinomial``. 
* `#15877 `__: DOC: Add missing signature from nditer docstring * `#15881 `__: BUG: Fix empty_like to respect shape=() * `#15882 `__: BUG: Do not ignore empty tuple of strides in ndarray.__new__ @@ -456,29 +482,34 @@ A total of 452 pull requests were merged for this release. * `#15884 `__: BUG: Setting a 0d array's strides to themselves should be legal * `#15885 `__: BUG: Respect itershape=() in nditer * `#15887 `__: MAINT: Clean-up 'next = __next__' used for Python 2 compatibility +* `#15891 `__: DOC: Clarify docs on mixed advanced indexing and slicing * `#15893 `__: TST: Run test_large_zip in a child process * `#15894 `__: DOC: Add missing doc of numpy.ma.apply_over_axes in API list. * `#15899 `__: DOC: Improve record module documentation * `#15901 `__: DOC: Fixed order of items and link to mailing list in dev docs... * `#15903 `__: BLD: report clang version on macOS -* `#15904 `__: MAINT: records: Remove private `format_parser._descr` attribute +* `#15904 `__: MAINT: records: Remove private ``format_parser._descr`` attribute +* `#15907 `__: DOC: Update documentation w.r.t. NPY_RELAXED_STRIDES_CHECKING * `#15914 `__: BUG: random: Disallow p=0 in negative_binomial +* `#15920 `__: DOC: Improve docstring for numpy.linalg.lstsq * `#15921 `__: ENH: Use sysconfig instead of probing Makefile * `#15928 `__: DOC: Update np.copy docstring to include ragged case * `#15931 `__: DOC: Correct private function name to PyArray_AdaptFlexibleDType -* `#15936 `__: MAINT: Fix capitalization in error message in `mtrand.pyx` +* `#15936 `__: MAINT: Fix capitalization in error message in ``mtrand.pyx`` +* `#15938 `__: BUG: Add _LARGE_FILES to def_macros[] when platform is AIX. * `#15939 `__: DOC: Update np.rollaxis docstring * `#15949 `__: BUG: fix AttributeError on accessing object in nested MaskedArray. -* `#15951 `__: BUG: Alpha parameter must be 1D in `generator.dirichlet` +* `#15951 `__: BUG: Alpha parameter must be 1D in ``generator.dirichlet`` * `#15953 `__: NEP: minor maintenance, update filename and fix a cross-reference * `#15964 `__: MAINT: Bump hypothesis from 5.8.0 to 5.8.3 * `#15967 `__: TST: Add slow_pypy support * `#15968 `__: DOC: Added note to angle function docstring about angle(0) being... * `#15982 `__: MAINT/BUG: Cleanup and minor fixes to conform_reduce_result -* `#15985 `__: BUG: Avoid duplication in stack trace of `linspace(a, b, num=1.5)` -* `#15988 `__: BUG: Fix inf and NaN-warnings in half float `nextafter` +* `#15985 `__: BUG: Avoid duplication in stack trace of ``linspace(a, b, num=1.5)`` +* `#15988 `__: BUG: Fix inf and NaN-warnings in half float ``nextafter`` * `#15989 `__: MAINT: Remove 0d check for PyArray_ISONESEGMENT * `#15990 `__: DEV: Pass additional runtests.py args to ASV +* `#15991 `__: BUG: max/min of a masked array dtype fix * `#15993 `__: DOC: Fix method documentation of function sort in MaskedArray * `#16000 `__: NEP: Improve Value Based Casting paragraph in NEP 40 * `#16001 `__: DOC: add note on flatten ordering in matlab page @@ -496,6 +527,8 @@ A total of 452 pull requests were merged for this release. * `#16038 `__: MAINT,TST: Move _repr_latex tests to test_printing. * `#16041 `__: BUG: missing 'f' prefix for fstring * `#16042 `__: ENH: Fix exception causes in build_ext.py +* `#16043 `__: DOC: Add converters example to the loadtxt docstring +* `#16051 `__: DOC: Add missing bracket * `#16053 `__: DOC: Small typo fixes to NEP 40. * `#16054 `__: DOC, BLD: update release howto and walkthrough for ananconda.org... 
* `#16061 `__: ENH: Chained exceptions in linalg.py and polyutils.py @@ -508,6 +541,7 @@ A total of 452 pull requests were merged for this release. * `#16077 `__: BLD: fix path to libgfortran on macOS * `#16078 `__: DOC: Add axis to random module "new or different" docs * `#16079 `__: DOC,BLD: Limit timeit iterations in random docs. +* `#16080 `__: BUG: numpy.einsum indexing arrays now accept numpy int type * `#16081 `__: DOC: add note on type casting to numpy.left_shift(). * `#16083 `__: DOC: improve development debugging doc * `#16084 `__: DOC: tweak neps/scope.rst @@ -517,10 +551,12 @@ A total of 452 pull requests were merged for this release. * `#16097 `__: MAINT, DOC: Improve grammar on a comment in the quickstart * `#16100 `__: NEP 41: Accept NEP 41 and add DType<->scalar duplication paragraph * `#16101 `__: BLD: put openblas library in local directory on windows +* `#16102 `__: ENH: correct identity for logaddexp2 ufunc: -inf * `#16113 `__: MAINT: Fix random.PCG64 signature * `#16119 `__: DOC: Move misplaced news fragment for gh-13421 * `#16122 `__: DOC: Fix links for NEP 40 in NEP 41 * `#16125 `__: BUG: lib: Fix a problem with vectorize with default parameters. +* `#16128 `__: ENH: Add equal_nan keyword argument to array_equal * `#16129 `__: ENH: Better error message when ``bins`` has float value in ``histogramdd``. * `#16133 `__: MAINT: Unify casting error creation (outside the iterator) * `#16141 `__: BENCH: Default to building HEAD instead of master @@ -571,7 +607,7 @@ A total of 452 pull requests were merged for this release. * `#16344 `__: BUG: Allow attaching documentation twice in add_docstring * `#16355 `__: MAINT: Remove f-strings in setup.py. (gh-16346) * `#16356 `__: BUG: Indentation for docstrings -* `#16358 `__: BUG: Fix dtype leak in `PyArray_FromAny` error path +* `#16358 `__: BUG: Fix dtype leak in ``PyArray_FromAny`` error path * `#16383 `__: ENH: Optimize Cpu feature detect in X86, fix for GCC on macOS... * `#16398 `__: MAINT: core: Use a raw string for the fromstring docstring. * `#16399 `__: MAINT: Make ctypes optional on Windows From 4daba31061aed482b25219faf5acda5b1cd1b8fa Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 7 May 2021 09:35:08 -0600 Subject: [PATCH 1078/1270] DOC: Update 1.18.0-changelog.rst. --- doc/changelog/1.18.0-changelog.rst | 55 +++++++++++++++++------------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/doc/changelog/1.18.0-changelog.rst b/doc/changelog/1.18.0-changelog.rst index b86b3614ad3a..266ff08077ac 100644 --- a/doc/changelog/1.18.0-changelog.rst +++ b/doc/changelog/1.18.0-changelog.rst @@ -123,20 +123,20 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 406 pull requests were merged for this release. +A total of 413 pull requests were merged for this release. * `#9301 `__: DOC: added note to docstring of numpy.savez * `#10151 `__: BUG: Numpy scalar types sometimes have the same name * `#12129 `__: DOC: Improve axes shift description and example in np.tensordot -* `#12205 `__: MAINT: avoid relying on `np.generic.__name__` in `np.dtype.name` +* `#12205 `__: MAINT: avoid relying on ``np.generic.__name__`` in ``np.dtype.name`` * `#12284 `__: ENH: supply our version of numpy.pxd, requires cython>=0.29 * `#12633 `__: BUG: General fixes to f2py reference counts (dereferencing) * `#12658 `__: BUG: NaT now sorts to ends of arrays * `#12828 `__: DOC: Updates to nditer usage instructions -* `#13003 `__: BUG: Do not crash on recursive `.dtype` attribute lookup. 
+* `#13003 `__: BUG: Do not crash on recursive ``.dtype`` attribute lookup. * `#13368 `__: ENH: Use AVX for float32 implementation of np.sin & np.cos * `#13605 `__: DEP: Deprecate silent ignoring of bad data in fromfile/fromstring -* `#13610 `__: ENH: Always produce a consistent shape in the result of `argwhere` +* `#13610 `__: ENH: Always produce a consistent shape in the result of ``argwhere`` * `#13673 `__: DOC: array(obj, dtype=dt) can downcast * `#13698 `__: DOC: Document ma.filled behavior with non-scalar fill_value * `#13710 `__: DOC: Add note to irfft-like functions about the default sizes @@ -144,6 +144,7 @@ A total of 406 pull requests were merged for this release. * `#13766 `__: MAINT: Update NEP template. * `#13794 `__: ENH: random: Add the multivariate hypergeometric distribution. * `#13799 `__: DOC: Fix unrendered links +* `#13802 `__: BUG: Fixed maximum relative error reporting in assert_allclose * `#13812 `__: MAINT: Rewrite Floyd algorithm * `#13825 `__: DOC: Add missing macros to C-API documentation * `#13829 `__: ENH: Add axis argument to random.permutation and random.shuffle @@ -162,6 +163,7 @@ A total of 406 pull requests were merged for this release. * `#13892 `__: DOC : Refactor Array API documentation -- Array Structure and... * `#13895 `__: DOC: Fix typo in "make_mask" documentation * `#13896 `__: MAINT: Delete unused _aliased_types.py +* `#13899 `__: MAINT: Change the type of error raised in set_printoptions * `#13901 `__: BLD: Remove Trusty dist in Travis CI build * `#13907 `__: BUG: Handle weird bytestrings in dtype() * `#13908 `__: ENH: use towncrier to build the release note @@ -174,7 +176,7 @@ A total of 406 pull requests were merged for this release. * `#13926 `__: DOC: Remove explicit .next method calls with built-in next function... * `#13928 `__: DOC: Don't override MaskedArray.view documentation with the one... * `#13930 `__: BUG: Fix incorrect GIL release in array.nonzero -* `#13935 `__: MAINT: Warn if `_add_newdocs.py` is used to add docstrings to... +* `#13935 `__: MAINT: Warn if ``_add_newdocs.py`` is used to add docstrings to... * `#13943 `__: MAINT: Revert #13876, "MAINT,BUG,DOC: Fix errors in _add_newdocs" * `#13944 `__: MAINT,BUG,DOC: Fix errors in _add_newdocs * `#13945 `__: DOC, MAINT: emphasize random API changes, remove Generator.randint @@ -184,7 +186,7 @@ A total of 406 pull requests were merged for this release. * `#13950 `__: Fixing failure on Python 2.7 on Windows 7 * `#13952 `__: Fix a typo related to the range of indices * `#13959 `__: DOC: add space between words across lines -* `#13964 `__: BUG, DOC: add new recfunctions to `__all__` +* `#13964 `__: BUG, DOC: add new recfunctions to ``__all__`` * `#13967 `__: DOC: Change (old) range() to np.arange() * `#13968 `__: DOC: improve np.sort docstring * `#13970 `__: DOC: spellcheck numpy/doc/broadcasting.py @@ -226,7 +228,7 @@ A total of 406 pull requests were merged for this release. * `#14076 `__: TST: Add 3.8-dev to travisCI testing. * `#14085 `__: DOC: Add blank line above doctest for intersect1d * `#14086 `__: ENH: Propose standard policy for dropping support of old Python... -* `#14089 `__: DOC: Use `pip install .` where possible instead of calling setup.py +* `#14089 `__: DOC: Use ``pip install .`` where possible instead of calling setup.py * `#14091 `__: MAINT: adjustments to test_ufunc_noncontigous * `#14092 `__: MAINT: Improve NEP template * `#14096 `__: DOC: fix documentation of i and j for tri. @@ -236,6 +238,7 @@ A total of 406 pull requests were merged for this release. 
* `#14106 `__: MAINT: remove duplicate variable assignments * `#14108 `__: BUG: initialize variable that is passed by pointer * `#14110 `__: DOC: fix typo in c-api/array.rst doc +* `#14115 `__: DOC: fix markup of news fragment readme * `#14121 `__: BUG: Add gcd/lcm definitions to npy_math.h * `#14122 `__: MAINT: Mark umath accuracy test xfail. * `#14124 `__: MAINT: Use equality instead of identity check with literal @@ -249,36 +252,39 @@ A total of 406 pull requests were merged for this release. * `#14153 `__: TST: Allow fuss in testing strided/non-strided exp/log loops * `#14170 `__: NEP: Proposal for __duckarray__ protocol * `#14171 `__: BUG: Make advanced indexing result on read-only subclass writeable +* `#14174 `__: BUG: Check for existence of ``fromstr`` which used in ``fromstr_next_element`` * `#14178 `__: TST: Clean up of test_pocketfft.py * `#14181 `__: DEP: Deprecate np.alen +* `#14183 `__: DOC: Fix misleading ``allclose`` docstring for ``equal_nan`` * `#14185 `__: MAINT: Workaround for Intel compiler bug leading to failing test -* `#14190 `__: DOC: Fix hermitian argument docs in `svd` +* `#14190 `__: DOC: Fix hermitian argument docs in ``svd`` * `#14195 `__: MAINT: Fix a docstring typo. -* `#14196 `__: DOC: Fix links in `/.github/CONTRIBUTING.md`. +* `#14196 `__: DOC: Fix links in ``/.github/CONTRIBUTING.md``. * `#14197 `__: ENH: Multivariate normal speedups * `#14203 `__: MAINT: Improve mismatch message of np.testing.assert_array_equal * `#14204 `__: DOC,MAINT: Move towncrier files and fixup categories * `#14207 `__: BUG: Fixed default BitGenerator name * `#14209 `__: BUG: Fix uint-overflow if padding with linear_ramp and negative... * `#14216 `__: ENH: Enable huge pages in all Linux builds -* `#14217 `__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`... +* `#14217 `__: BUG: Fix leak in the f2py-generated module init and ``PyMem_Del``... * `#14219 `__: DOC: new nan_to_num keywords are from 1.17 onwards * `#14223 `__: TST: Add tests for deprecated C functions (PyArray_As1D, PyArray_As1D) -* `#14224 `__: DOC: mention `take_along_axis` in `choose` +* `#14224 `__: DOC: mention ``take_along_axis`` in ``choose`` * `#14227 `__: ENH: Parse complex number from string * `#14231 `__: DOC: update or remove outdated sourceforge links * `#14234 `__: MAINT: Better error message for norm * `#14235 `__: DOC: add backlinks to numpy.org * `#14240 `__: BUG: Don't fail when lexsorting some empty arrays. -* `#14241 `__: BUG: Fix segfault in `random.permutation(x)` when x is a string. +* `#14241 `__: BUG: Fix segfault in ``random.permutation(x)`` when x is a string. * `#14245 `__: Doc: fix a typo in NEP21 * `#14249 `__: DOC: set status of NEP 28 (website redesign) to Accepted * `#14250 `__: BLD: MAINT: change default behavior of build flag appending. * `#14252 `__: BUG: Fixes StopIteration error from 'np.genfromtext' for empty... 
-* `#14255 `__: BUG: fix inconsistent axes ordering for axis in function `unique` +* `#14255 `__: BUG: fix inconsistent axes ordering for axis in function ``unique`` * `#14256 `__: DEP: Deprecate load/dump functions in favour of pickle methods * `#14257 `__: MAINT: Update NEP-30 * `#14259 `__: DEP: Deprecate arrayprint formatting functions +* `#14263 `__: DOC: Make Py3K docs C code snippets RST literal blocks * `#14266 `__: DOC: remove scipy.org from the breadcrumb formattiong * `#14270 `__: BUG: Fix formatting error in exception message * `#14272 `__: DOC: Address typos in dispatch docs @@ -293,9 +299,9 @@ A total of 406 pull requests were merged for this release. * `#14313 `__: DOC: Clarify rules about broadcasting when empty arrays are involved. * `#14321 `__: TST, MAINT: bump to OpenBLAS 0.3.7 stable * `#14325 `__: DEP: numpy.testing.rand -* `#14335 `__: DEP: Deprecate class `SafeEval` +* `#14335 `__: DEP: Deprecate class ``SafeEval`` * `#14341 `__: BUG: revert detecting and raising error on ragged arrays -* `#14342 `__: DOC: Improve documentation of `isscalar`. +* `#14342 `__: DOC: Improve documentation of ``isscalar``. * `#14349 `__: MAINT: Fix bloated mismatch error percentage in array comparisons. * `#14351 `__: DOC: Fix a minor typo in dispatch documentation. * `#14352 `__: MAINT: Remove redundant deprecation checks @@ -344,7 +350,7 @@ A total of 406 pull requests were merged for this release. * `#14475 `__: DOC: add timedelta64 signature * `#14477 `__: MAINT: Extract raising of MemoryError to a helper function * `#14483 `__: BUG,MAINT: Some fixes and minor cleanup based on clang analysis -* `#14484 `__: MAINT: Add `NPY_UNUSED` and `const` qualified suggested by clang +* `#14484 `__: MAINT: Add ``NPY_UNUSED`` and ``const`` qualified suggested by clang * `#14485 `__: MAINT: Silence integer comparison build warnings in assert statements * `#14486 `__: MAINT: distutils: Add newline at the end of printed warnings. * `#14490 `__: BUG: random: Revert gh-14458 and refix gh-14557. @@ -383,7 +389,7 @@ A total of 406 pull requests were merged for this release. * `#14567 `__: DEP: remove deprecated (and private) numpy.testing submodules. * `#14568 `__: BLD, DOC: fix gh-14518, add release note * `#14570 `__: BUG: importing build_src breaks setuptools monkeypatch for msvc14 -* `#14572 `__: DOC: Note runtests.py `-- -s` method to use pytests `-s` +* `#14572 `__: DOC: Note runtests.py ``-- -s`` method to use pytests ``-s`` * `#14573 `__: DOC: update submodule docstrings, remove info.py files * `#14576 `__: DOC: Document the NPY_SCALARKIND values as C variables. * `#14582 `__: MAINT: Bump pytest from 5.1.2 to 5.1.3 @@ -405,30 +411,31 @@ A total of 406 pull requests were merged for this release. * `#14614 `__: MAINT: Bump pytest from 5.1.3 to 5.2.0 * `#14615 `__: MAINT: Add "MAINT" tag to dependabot commit msg * `#14616 `__: DOC: Updated sphinx directive formatting -* `#14620 `__: DEP: Finish deprecation of non-integer `num` in linspace +* `#14620 `__: DEP: Finish deprecation of non-integer ``num`` in linspace * `#14621 `__: DOC: s/OR/AND/ in np.logical_and docstring * `#14623 `__: DOC: misleading np.sinc() documentation * `#14629 `__: DOC: clarify residual in np.polyfit * `#14630 `__: BUILD: change to build_src --verbose-cfg, runtests.py --debug-info * `#14631 `__: BUG: always free clean_sep -* `#14634 `__: DOC: Create `class Extension` docstring and add it to documentation. 
-* `#14636 `__: DOC: add `printoptions` as a context manager to `set_printoptions` +* `#14634 `__: DOC: Create ``class Extension`` docstring and add it to documentation. +* `#14636 `__: DOC: add ``printoptions`` as a context manager to ``set_printoptions`` * `#14639 `__: DOC: Fix typo in NEP 29 * `#14643 `__: MAINT: Use scalar math power function directly * `#14649 `__: DOC: Add IPython to dependencies needed to build docs. * `#14652 `__: MAINT: Bump pytest-cov from 2.7.1 to 2.8.1 * `#14653 `__: MAINT: Bump pytest from 5.2.0 to 5.2.1 * `#14654 `__: MAINT: Bump pytz from 2019.2 to 2019.3 -* `#14656 `__: MAINT: Use `extract_unit` throughout datetime +* `#14656 `__: MAINT: Use ``extract_unit`` throughout datetime * `#14657 `__: BUG: fix fromfile behavior when reading sub-array dtypes * `#14662 `__: BUG: random: Use correct length when axis is given to shuffle. * `#14669 `__: BUG: Do not rely on undefined behaviour to cast from float to... * `#14674 `__: NEP: add default-dtype-object-deprecation nep 34 * `#14681 `__: MAINT: Remove unused boolean negative/subtract loops -* `#14682 `__: DEP: ufunc `out` argument must be a tuple for multiple outputs -* `#14693 `__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux +* `#14682 `__: DEP: ufunc ``out`` argument must be a tuple for multiple outputs +* `#14693 `__: BUG: Fix ``np.einsum`` errors on Power9 Linux and z/Linux * `#14696 `__: DOC: Note release notes process changes on devdocs start page * `#14699 `__: Doc warnings +* `#14703 `__: TST: Adding CI stages, with one initial job to the Travis CI * `#14705 `__: DOC: Switch Markdown link to RST in NEP 29 * `#14709 `__: TST: Divide Azure CI Pipelines into stages. * `#14710 `__: DEP: Finish the out kwarg deprecation for ufunc calls @@ -526,7 +533,7 @@ A total of 406 pull requests were merged for this release. * `#15058 `__: API, DOC: change names to multivariate_hypergeometric, improve docs * `#15059 `__: REL: Prepare for NumPy 1.18.0 release. * `#15109 `__: TST: Check requires_memory immediately before the test -* `#15111 `__: ENH: Add support to sort timedelta64 `NaT` to end of the array +* `#15111 `__: ENH: Add support to sort timedelta64 ``NaT`` to end of the array * `#15112 `__: MAINT: follow-up cleanup for blas64 PR * `#15113 `__: ENH: f2py: add --f2cmap option for specifying the name of .f2py_f2cmap * `#15114 `__: ENH: add support for ILP64 OpenBLAS (without symbol suffix) From af1344d8250717087bcd01000f0819f7b79486b7 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 7 May 2021 09:43:00 -0600 Subject: [PATCH 1079/1270] DOC: Update 1.17.0-changelog.rst. --- doc/changelog/1.17.0-changelog.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/changelog/1.17.0-changelog.rst b/doc/changelog/1.17.0-changelog.rst index 4177c848fd7e..8179c180bad3 100644 --- a/doc/changelog/1.17.0-changelog.rst +++ b/doc/changelog/1.17.0-changelog.rst @@ -159,7 +159,7 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 531 pull requests were merged for this release. +A total of 532 pull requests were merged for this release. * `#4808 `__: ENH: Make the `mode` parameter of np.pad default to 'constant' * `#8131 `__: BUG: Fix help() formatting for deprecated functions. @@ -174,6 +174,7 @@ A total of 531 pull requests were merged for this release. * `#10855 `__: ENH: Adding a count parameter to np.unpackbits * `#11230 `__: MAINT: More cleanup of einsum * `#11233 `__: BUG: ensure i0 does not change the shape. 
+* `#11358 `__: MAINT: Rewrite numpy.pad without concatenate * `#11684 `__: BUG: Raise when unravel_index, ravel_multi_index are given empty... * `#11689 `__: DOC: Add ref docs for C generic types. * `#11721 `__: BUG: Make `arr.ctypes.data` hold onto a reference to the underlying... From 9bb6a0c202e8c72b3b388ef35312ad8fda391e80 Mon Sep 17 00:00:00 2001 From: Robert Kern Date: Fri, 7 May 2021 15:22:28 -0400 Subject: [PATCH 1080/1270] ENH: manually inline code for performance. --- numpy/random/src/pcg64/pcg64.h | 43 +++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/numpy/random/src/pcg64/pcg64.h b/numpy/random/src/pcg64/pcg64.h index 6691a18fcb16..90a83fd5edf9 100644 --- a/numpy/random/src/pcg64/pcg64.h +++ b/numpy/random/src/pcg64/pcg64.h @@ -229,17 +229,6 @@ static inline void pcg_cm_step_r(pcg_state_setseq_128 *rng) { #endif } -static inline uint64_t pcg_output_cm_128_64(pcg128_t state) { - uint64_t hi = state.high; - uint64_t lo = state.low; - - lo |= 1; - hi ^= hi >> 32; - hi *= 0xda942042e4dd58b5ULL; - hi ^= hi >> 48; - hi *= lo; - return hi; -} static inline void pcg_cm_srandom_r(pcg_state_setseq_128 *rng, pcg128_t initstate, pcg128_t initseq) { rng->state = PCG_128BIT_CONSTANT(0ULL, 0ULL); @@ -253,9 +242,35 @@ static inline void pcg_cm_srandom_r(pcg_state_setseq_128 *rng, pcg128_t initstat static inline uint64_t pcg_cm_random_r(pcg_state_setseq_128* rng) { - uint64_t ret = pcg_output_cm_128_64(rng->state); - pcg_cm_step_r(rng); - return ret; + /* Lots of manual inlining to help out certain compilers to generate + * performant code. */ + uint64_t hi = rng->state.high; + uint64_t lo = rng->state.low; + + /* Run the DXSM output function on the pre-iterated state. */ + lo |= 1; + hi ^= hi >> 32; + hi *= 0xda942042e4dd58b5ULL; + hi ^= hi >> 48; + hi *= lo; + + /* Run the CM step. */ +#if defined _WIN32 && _MSC_VER >= 1900 && _M_AMD64 + uint64_t h1; + pcg128_t product; + + /* Manually inline the multiplication and addition using intrinsics */ + h1 = rng->state.high * PCG_CHEAP_MULTIPLIER_128; + product.low = + _umul128(rng->state.low, PCG_CHEAP_MULTIPLIER_128, &(product.high)); + product.high += h1; + _addcarry_u64(_addcarry_u64(0, product.low, rng->inc.low, &(rng->state.low)), + product.high, rng->inc.high, &(rng->state.high)); +#else + rng->state = pcg128_add(pcg128_mult_64(rng->state, PCG_CHEAP_MULTIPLIER_128), + rng->inc); +#endif + return hi; } #else /* PCG_EMULATED_128BIT_MATH */ From acf249e0c663f148ee6389327f0b35298fc14833 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 8 May 2021 14:52:03 +0200 Subject: [PATCH 1081/1270] TST: xfail `TestCond.test_nan` unconditionally This is happening on too many build configurations, and it's not completely clear if it's just an OpenBLAS version or also depends on something else. Reported as happening mostly on macOS, but also on Fedora. 
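
For reference, a minimal sketch of the marker this change applies (taken from
the diff below); with a constant `True` condition and `run=False`, pytest
records the test as an expected failure without executing it at all, which is
why the flaky platform-dependent behaviour can no longer break CI:

    @pytest.mark.xfail(True, run=False,
                       reason="Platform/LAPACK-dependent failure, "
                              "see gh-18914")
    def test_nan(self):
        ...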
--- numpy/linalg/tests/test_linalg.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 8a270f194147..c6e8cdd039f1 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -684,7 +684,7 @@ def hermitian(mat): axes = list(range(mat.ndim)) axes[-1], axes[-2] = axes[-2], axes[-1] return np.conj(np.transpose(mat, axes=axes)) - + assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) assert_equal(np.sort(s)[..., ::-1], s) @@ -766,6 +766,9 @@ def test_singular(self): for A, p in itertools.product(As, p_neg): linalg.cond(A, p) + @pytest.mark.xfail(True, run=False, + reason="Platform/LAPACK-dependent failure, " + "see gh-18914") def test_nan(self): # nans should be passed through, not converted to infs ps = [None, 1, -1, 2, -2, 'fro'] @@ -981,7 +984,7 @@ def test_incompatible_dims(self): linalg.lstsq(A, y, rcond=None) -@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) +@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) class TestMatrixPower: rshft_0 = np.eye(4) @@ -1010,7 +1013,7 @@ def tz(M): mz = matrix_power(M, 0) assert_equal(mz, identity_like_generalized(M)) assert_equal(mz.dtype, M.dtype) - + for mat in self.rshft_all: tz(mat.astype(dt)) if dt != object: From 8609939e7db393519075ba69840cebed9cb56bdd Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 6 May 2021 03:14:49 +0200 Subject: [PATCH 1082/1270] ENH: Add annotations for `np.lib.utils` --- numpy/lib/utils.pyi | 100 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 90 insertions(+), 10 deletions(-) diff --git a/numpy/lib/utils.pyi b/numpy/lib/utils.pyi index 5a1594149d59..0518655c6ce5 100644 --- a/numpy/lib/utils.pyi +++ b/numpy/lib/utils.pyi @@ -1,4 +1,19 @@ -from typing import List +import sys +from ast import AST +from typing import ( + Any, + Callable, + List, + Mapping, + Optional, + overload, + Sequence, + Tuple, + TypeVar, + Union, +) + +from numpy import ndarray, generic from numpy.core.numerictypes import ( issubclass_ as issubclass_, @@ -6,14 +21,79 @@ from numpy.core.numerictypes import ( issubsctype as issubsctype, ) +if sys.version_info >= (3, 8): + from typing import Protocol +else: + from typing_extensions import Protocol + +_T_contra = TypeVar("_T_contra", contravariant=True) +_FuncType = TypeVar("_FuncType", bound=Callable[..., Any]) + +# A file-like object opened in `w` mode +class _SupportsWrite(Protocol[_T_contra]): + def write(self, __s: _T_contra) -> Any: ... + __all__: List[str] -def get_include(): ... -def deprecate(*args, **kwargs): ... -def deprecate_with_doc(msg): ... -def byte_bounds(a): ... -def who(vardict=...): ... -def info(object=..., maxwidth=..., output=..., toplevel=...): ... -def source(object, output=...): ... -def lookfor(what, module=..., import_modules=..., regenerate=..., output=...): ... -def safe_eval(source): ... +class _Deprecate: + old_name: Optional[str] + new_name: Optional[str] + message: Optional[str] + def __init__( + self, + old_name: Optional[str] = ..., + new_name: Optional[str] = ..., + message: Optional[str] = ..., + ) -> None: ... + # NOTE: `__call__` can in principle take arbitrary `*args` and `**kwargs`, + # even though they aren't used for anything + def __call__(self, func: _FuncType) -> _FuncType: ... + +def get_include() -> str: ... 
+ +@overload +def deprecate( + *, + old_name: Optional[str] = ..., + new_name: Optional[str] = ..., + message: Optional[str] = ..., +) -> _Deprecate: ... +@overload +def deprecate( + __func: _FuncType, + old_name: Optional[str] = ..., + new_name: Optional[str] = ..., + message: Optional[str] = ..., +) -> _FuncType: ... + +def deprecate_with_doc(msg: Optional[str]) -> _Deprecate: ... + +# NOTE: In practice `byte_bounds` can (potentially) take any object +# implementing the `__array_interface__` protocol. The caveat is +# that certain keys, marked as optional in the spec, must be present for +# `byte_bounds`. This concerns `"strides"` and `"data"`. +def byte_bounds(a: Union[generic, ndarray[Any, Any]]) -> Tuple[int, int]: ... + +def who(vardict: Optional[Mapping[str, ndarray[Any, Any]]] = ...) -> None: ... + +def info( + object: object = ..., + maxwidth: int = ..., + output: Optional[_SupportsWrite[str]] = ..., + toplevel: str = ..., +) -> None: ... + +def source( + object: object, + output: Optional[_SupportsWrite[str]] = ..., +) -> None: ... + +def lookfor( + what: str, + module: Union[None, str, Sequence[str]] = ..., + import_modules: bool = ..., + regenerate: bool = ..., + output: Optional[_SupportsWrite[str]] =..., +) -> None: ... + +def safe_eval(source: Union[str, AST]) -> Any: ... From f3d0a30de0f1b2c7f016518867cb19ffc9d4b182 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 8 May 2021 17:13:04 +0200 Subject: [PATCH 1083/1270] TST: Add typing tests for `np.lib.utils` --- numpy/typing/tests/data/fail/lib_utils.py | 13 +++++++++ numpy/typing/tests/data/pass/lib_utils.py | 26 ++++++++++++++++++ numpy/typing/tests/data/reveal/lib_utils.py | 30 +++++++++++++++++++++ 3 files changed, 69 insertions(+) create mode 100644 numpy/typing/tests/data/fail/lib_utils.py create mode 100644 numpy/typing/tests/data/pass/lib_utils.py create mode 100644 numpy/typing/tests/data/reveal/lib_utils.py diff --git a/numpy/typing/tests/data/fail/lib_utils.py b/numpy/typing/tests/data/fail/lib_utils.py new file mode 100644 index 000000000000..e16c926aa645 --- /dev/null +++ b/numpy/typing/tests/data/fail/lib_utils.py @@ -0,0 +1,13 @@ +import numpy as np + +np.deprecate(1) # E: No overload variant + +np.deprecate_with_doc(1) # E: incompatible type + +np.byte_bounds(1) # E: incompatible type + +np.who(1) # E: incompatible type + +np.lookfor(None) # E: incompatible type + +np.safe_eval(None) # E: incompatible type diff --git a/numpy/typing/tests/data/pass/lib_utils.py b/numpy/typing/tests/data/pass/lib_utils.py new file mode 100644 index 000000000000..c602923d9104 --- /dev/null +++ b/numpy/typing/tests/data/pass/lib_utils.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from io import StringIO +from typing import Any + +import numpy as np + +FILE = StringIO() +AR: np.ndarray[Any, np.dtype[np.float64]] = np.arange(10).astype(np.float64) + +def func(a: int) -> bool: ... 
+ +np.deprecate(func) +np.deprecate() + +np.deprecate_with_doc("test") +np.deprecate_with_doc(None) + +np.byte_bounds(AR) +np.byte_bounds(np.float64()) + +np.info(1, output=FILE) + +np.source(np.interp, output=FILE) + +np.lookfor("binary representation", output=FILE) diff --git a/numpy/typing/tests/data/reveal/lib_utils.py b/numpy/typing/tests/data/reveal/lib_utils.py new file mode 100644 index 000000000000..d820127078a3 --- /dev/null +++ b/numpy/typing/tests/data/reveal/lib_utils.py @@ -0,0 +1,30 @@ +from io import StringIO +from typing import Any, Dict + +import numpy as np + +AR: np.ndarray[Any, np.dtype[np.float64]] +AR_DICT: Dict[str, np.ndarray[Any, np.dtype[np.float64]]] +FILE: StringIO + +def func(a: int) -> bool: ... + +reveal_type(np.deprecate(func)) # E: def (a: builtins.int) -> builtins.bool +reveal_type(np.deprecate()) # E: _Deprecate + +reveal_type(np.deprecate_with_doc("test")) # E: _Deprecate +reveal_type(np.deprecate_with_doc(None)) # E: _Deprecate + +reveal_type(np.byte_bounds(AR)) # E: Tuple[builtins.int, builtins.int] +reveal_type(np.byte_bounds(np.float64())) # E: Tuple[builtins.int, builtins.int] + +reveal_type(np.who(None)) # E: None +reveal_type(np.who(AR_DICT)) # E: None + +reveal_type(np.info(1, output=FILE)) # E: None + +reveal_type(np.source(np.interp, output=FILE)) # E: None + +reveal_type(np.lookfor("binary representation", output=FILE)) # E: None + +reveal_type(np.safe_eval("1 + 1")) # E: Any From e377d071ea502f396a7da299633bad74922c04eb Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 8 May 2021 19:02:27 +0200 Subject: [PATCH 1084/1270] TST: Exclude to-be ignored files when computing the diff The `diff` and `exclude` options of pycodestyle seem to be incompatible, so instead just ignore the necessary files when computing the diff itself. --- tools/lint_diff.ini | 1 - tools/linter.py | 18 +++++++++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini index 710138b4a705..3b66d3c3e900 100644 --- a/tools/lint_diff.ini +++ b/tools/lint_diff.ini @@ -2,4 +2,3 @@ max_line_length = 79 statistics = True ignore = E121,E122,E123,E125,E126,E127,E128,E226,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504 -exclude = numpy/__config__.py,numpy/typing/tests/data diff --git a/tools/linter.py b/tools/linter.py index b1bab2b70fce..fd229dbef8f9 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -9,6 +9,14 @@ 'lint_diff.ini', ) +# NOTE: The `diff` and `exclude` options of pycodestyle seem to be +# incompatible, so instead just exclude the necessary files when +# computing the diff itself. 
+EXCLUDE = ( + "numpy/typing/tests/data/", + "numpy/__config__.py", +) + class DiffLinter: def __init__(self, branch): @@ -29,11 +37,15 @@ def get_branch_diff(self, uncommitted = False): print(f"Branch with name `{self.branch}` does not exist") sys.exit(1) + exclude = [f':(exclude){i}' for i in EXCLUDE] if uncommitted: - diff = self.repo.git.diff(self.head, '--unified=0', '***.py') + diff = self.repo.git.diff( + self.head, '--unified=0', '***.py', *exclude + ) else: - diff = self.repo.git.diff(commit, self.head, - '--unified=0', '***.py') + diff = self.repo.git.diff( + commit, self.head, '--unified=0', '***.py', *exclude + ) return diff def run_pycodestyle(self, diff): From 6f34b4e4380e3afbccb21a3378addd0251498111 Mon Sep 17 00:00:00 2001 From: EvaJau Date: Sun, 9 May 2021 16:48:10 +0200 Subject: [PATCH 1085/1270] DOC: Update beginners docu for sum function with axis Hopefully clarifies how summing over an axis is done. Issue #16697 --- doc/source/user/absolute_beginners.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 0de08c3f0a9f..d97b92617b77 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -771,12 +771,12 @@ If you start with this array:: >>> b = np.array([[1, 1], [2, 2]]) -You can sum the rows with:: +You can sum over the axis of rows with:: >>> b.sum(axis=0) array([3, 3]) -You can sum the columns with:: +You can sum over the axis of columns with:: >>> b.sum(axis=1) array([2, 4]) From e372d06eb8b4cf8e156e2a5b177f5bf222b4584a Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 9 May 2021 17:09:40 +0200 Subject: [PATCH 1086/1270] DOC: add an extra example in runtests.py help test This was a little confusing in the sprint. To verify, look at the output of `python runtests.py --help` [ci skip] --- runtests.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/runtests.py b/runtests.py index ea16845e327f..452ccbc64c5a 100755 --- a/runtests.py +++ b/runtests.py @@ -8,7 +8,10 @@ $ python runtests.py $ python runtests.py -s {SAMPLE_SUBMODULE} + $ # Run a standalone test function: $ python runtests.py -t {SAMPLE_TEST} + $ # Run a test defined as a method of a TestXXX class: + $ python runtests.py -t {SAMPLE_TEST2} $ python runtests.py --ipython $ python runtests.py --python somescript.py $ python runtests.py --bench @@ -43,6 +46,7 @@ PROJECT_MODULE = "numpy" PROJECT_ROOT_FILES = ['numpy', 'LICENSE.txt', 'setup.py'] SAMPLE_TEST = "numpy/linalg/tests/test_linalg.py::test_byteorder_check" +SAMPLE_TEST2 = "numpy/core/tests/test_memmap.py::TestMemmap::test_open_with_filename" SAMPLE_SUBMODULE = "linalg" EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache', From b5e09ff7b3143227ced311e714c40b588c74ea4a Mon Sep 17 00:00:00 2001 From: Kasia Date: Sun, 9 May 2021 15:17:54 +0000 Subject: [PATCH 1087/1270] DOC: Improve datetime64 docs #14299 --- doc/source/reference/arrays.datetime.rst | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index c5947620efe7..7c789b8e8c38 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -20,9 +20,9 @@ Basic Datetimes =============== The most basic way to create datetimes is from strings in -ISO 8601 date or datetime format. 
The unit for internal storage -is automatically selected from the form of the string, and can -be either a :ref:`date unit ` or a +ISO 8601 date, datetime format or from integer to UNIX time. +The unit for internal storage is automatically selected from the +form of the string, and can be either a :ref:`date unit ` or a :ref:`time unit `. The date units are years ('Y'), months ('M'), weeks ('W'), and days ('D'), while the time units are hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and @@ -36,6 +36,11 @@ letters, for a "Not A Time" value. >>> np.datetime64('2005-02-25') numpy.datetime64('2005-02-25') + + UNIX time, by providing integer and a date unit: + + >>> np.datetime64(1, 'Y') + numpy.datetime64('1971') Using months for the unit: From 4deb8eef02e7d9df7ebb4275d9a9a9c441122846 Mon Sep 17 00:00:00 2001 From: Kasia Date: Sun, 9 May 2021 15:24:11 +0000 Subject: [PATCH 1088/1270] DOC: Improve datetime64 docs #14299 --- doc/source/reference/arrays.datetime.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 7c789b8e8c38..fcdb9d934d82 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -37,7 +37,7 @@ letters, for a "Not A Time" value. >>> np.datetime64('2005-02-25') numpy.datetime64('2005-02-25') - UNIX time, by providing integer and a date unit: + UNIX time, by providing integer and a date unit : >>> np.datetime64(1, 'Y') numpy.datetime64('1971') From eb231979b90cfe7f434bd598aefe4196df74b32e Mon Sep 17 00:00:00 2001 From: lbogula Date: Sun, 9 May 2021 14:37:25 +0000 Subject: [PATCH 1089/1270] DOC: change copyright SciPy to NumPy Co-authored-by: Lima Tango <19208585+lima-tango@users.noreply.github.com> --- doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 4b60800d250a..bf2fbdce9623 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -99,7 +99,7 @@ class PyTypeObject(ctypes.Structure): # General substitutions. project = 'NumPy' -copyright = '2008-2021, The SciPy community' +copyright = '2008-2021, The NumPy community' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. From 353cb87cc42f8623cca76bdce6dbec84e4abfd17 Mon Sep 17 00:00:00 2001 From: Kasia Date: Sun, 9 May 2021 15:41:21 +0000 Subject: [PATCH 1090/1270] DOC: resolves #14299 - deleting the note --- doc/source/reference/arrays.datetime.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index fcdb9d934d82..2bdbc2bfa11c 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -13,8 +13,6 @@ support datetime functionality. The data type is called "datetime64", so named because "datetime" is already taken by the datetime library included in Python. -.. note:: The datetime API is *experimental* in 1.7.0, and may undergo changes - in future versions of NumPy. Basic Datetimes =============== @@ -37,7 +35,7 @@ letters, for a "Not A Time" value. 
>>> np.datetime64('2005-02-25') numpy.datetime64('2005-02-25') - UNIX time, by providing integer and a date unit : + UNIX time, by providing integer and a date unit: >>> np.datetime64(1, 'Y') numpy.datetime64('1971') From 00aea7a1dec6a7a4e03d9e92c41683be4ace4d9f Mon Sep 17 00:00:00 2001 From: Kasia Date: Sun, 9 May 2021 15:56:35 +0000 Subject: [PATCH 1091/1270] DOC resolves #14299 rephrasing --- doc/source/reference/arrays.datetime.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 2bdbc2bfa11c..e3b8d270dbc4 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -17,8 +17,9 @@ included in Python. Basic Datetimes =============== -The most basic way to create datetimes is from strings in -ISO 8601 date, datetime format or from integer to UNIX time. +The most basic way to create datetimes is from strings in ISO 8601 date +or datetime format. It is also possible to create datetimes from an integer by +offset relative to the Unix epoch (00:00:00 UTC on 1 January 1970). The unit for internal storage is automatically selected from the form of the string, and can be either a :ref:`date unit ` or a :ref:`time unit `. The date units are years ('Y'), @@ -35,7 +36,7 @@ letters, for a "Not A Time" value. >>> np.datetime64('2005-02-25') numpy.datetime64('2005-02-25') - UNIX time, by providing integer and a date unit: + From an integer and a date unit, 1 year since the UNIX epoch: >>> np.datetime64(1, 'Y') numpy.datetime64('1971') From 8fa54380655f964f3d7d15c4e8adf71169f2dc58 Mon Sep 17 00:00:00 2001 From: Marysia Winkels Date: Sun, 9 May 2021 18:06:19 +0200 Subject: [PATCH 1092/1270] DOC: improve numpy.histogram2d() documentation Clarify necessity of H.T in code example. Resolves issue #18929 Co-authored-by: Laura Kopf <60775505+lkopf@users.noreply.github.com>> --- numpy/lib/twodim_base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 58a097f99ba5..fd8c73573290 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -705,7 +705,9 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, >>> x = np.random.normal(2, 1, 100) >>> y = np.random.normal(1, 1, 100) >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) - >>> H = H.T # Let each row list bins with common y range. + >>> # Histogram does not follow Cartesian convention (see Notes), + >>> # therefore transpose H for visualization purposes. + >>> H = H.T :func:`imshow ` can only display square bins: From 4fe183b2d444534cad4a95ee94d02f2219542d64 Mon Sep 17 00:00:00 2001 From: Kasia Date: Sun, 9 May 2021 16:23:44 +0000 Subject: [PATCH 1093/1270] DOC: Improve datetime64 docs #14299 - scalars --- numpy/core/_add_newdocs_scalars.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py index d31f0037d834..eb1c794f36b5 100644 --- a/numpy/core/_add_newdocs_scalars.py +++ b/numpy/core/_add_newdocs_scalars.py @@ -218,10 +218,14 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): add_newdoc_for_scalar_type('datetime64', [], """ - A datetime stored as a 64-bit integer, counting from ``1970-01-01T00:00:00``. + A datetime is stored as a 64-bit integer. + If created from integer, counting from ``1970-01-01T00:00:00``. + If created from string, shown as ISO 8601 date or datetime format. 
>>> np.datetime64(10, 'Y') numpy.datetime64('1980') + >>> np.datetime64('1980', 'Y') + numpy.datetime64('1980') >>> np.datetime64(10, 'D') numpy.datetime64('1970-01-11') From 38bdff05da0859f45bebf5a0c32869d58365b02f Mon Sep 17 00:00:00 2001 From: Misal Raj <34789277+misalraj@users.noreply.github.com> Date: Sun, 9 May 2021 23:04:55 +0530 Subject: [PATCH 1094/1270] DOC: Update a page title with proper case (#18962) --- doc/source/dev/releasing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/releasing.rst b/doc/source/dev/releasing.rst index 61fa1951426c..75081aec1675 100644 --- a/doc/source/dev/releasing.rst +++ b/doc/source/dev/releasing.rst @@ -1,5 +1,5 @@ =================== -Releasing a Version +Releasing a version =================== ------------------------ From 7aee0fa0805c55d0aed2d26dec8e1153757c69ff Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sun, 9 May 2021 21:33:37 +0200 Subject: [PATCH 1095/1270] MAINT, SIMD: Hardened the AVX compile-time tests To avoid optimizing it out by the compiler so we make sure that the assembler is getting involved. --- numpy/distutils/checks/cpu_avx.c | 4 ++-- numpy/distutils/checks/cpu_avx2.c | 4 ++-- numpy/distutils/checks/cpu_avx512_clx.c | 5 +++-- numpy/distutils/checks/cpu_avx512_cnl.c | 7 ++++--- numpy/distutils/checks/cpu_avx512_icl.c | 5 +++-- numpy/distutils/checks/cpu_avx512_knl.c | 5 +++-- numpy/distutils/checks/cpu_avx512_knm.c | 6 +++--- numpy/distutils/checks/cpu_avx512_skx.c | 5 +++-- numpy/distutils/checks/cpu_avx512cd.c | 4 ++-- numpy/distutils/checks/cpu_avx512f.c | 4 ++-- numpy/distutils/checks/cpu_f16c.c | 6 +++--- numpy/distutils/checks/cpu_fma3.c | 5 +++-- numpy/distutils/checks/cpu_fma4.c | 5 +++-- 13 files changed, 36 insertions(+), 29 deletions(-) diff --git a/numpy/distutils/checks/cpu_avx.c b/numpy/distutils/checks/cpu_avx.c index 737c0d2e9492..cee4f36ab3f4 100644 --- a/numpy/distutils/checks/cpu_avx.c +++ b/numpy/distutils/checks/cpu_avx.c @@ -1,7 +1,7 @@ #include -int main(void) +int main(int argc, char **argv) { - __m256 a = _mm256_add_ps(_mm256_setzero_ps(), _mm256_setzero_ps()); + __m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1])); return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); } diff --git a/numpy/distutils/checks/cpu_avx2.c b/numpy/distutils/checks/cpu_avx2.c index dfb11fd79967..15b6c919b089 100644 --- a/numpy/distutils/checks/cpu_avx2.c +++ b/numpy/distutils/checks/cpu_avx2.c @@ -1,7 +1,7 @@ #include -int main(void) +int main(int argc, char **argv) { - __m256i a = _mm256_abs_epi16(_mm256_setzero_si256()); + __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1])); return _mm_cvtsi128_si32(_mm256_castsi256_si128(a)); } diff --git a/numpy/distutils/checks/cpu_avx512_clx.c b/numpy/distutils/checks/cpu_avx512_clx.c index 71dad83a79f0..4baa8fea0475 100644 --- a/numpy/distutils/checks/cpu_avx512_clx.c +++ b/numpy/distutils/checks/cpu_avx512_clx.c @@ -1,8 +1,9 @@ #include -int main(void) +int main(int argc, char **argv) { /* VNNI */ - __m512i a = _mm512_dpbusd_epi32(_mm512_setzero_si512(), _mm512_setzero_si512(), _mm512_setzero_si512()); + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a); return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); } diff --git a/numpy/distutils/checks/cpu_avx512_cnl.c b/numpy/distutils/checks/cpu_avx512_cnl.c index dfab4436d07e..f2ff3725ea93 100644 --- a/numpy/distutils/checks/cpu_avx512_cnl.c +++ 
b/numpy/distutils/checks/cpu_avx512_cnl.c @@ -1,10 +1,11 @@ #include -int main(void) +int main(int argc, char **argv) { + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); /* IFMA */ - __m512i a = _mm512_madd52hi_epu64(_mm512_setzero_si512(), _mm512_setzero_si512(), _mm512_setzero_si512()); + a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512()); /* VMBI */ - a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), _mm512_setzero_si512()); + a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a); return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); } diff --git a/numpy/distutils/checks/cpu_avx512_icl.c b/numpy/distutils/checks/cpu_avx512_icl.c index cf2706b3b9ff..085b947e05bf 100644 --- a/numpy/distutils/checks/cpu_avx512_icl.c +++ b/numpy/distutils/checks/cpu_avx512_icl.c @@ -1,9 +1,10 @@ #include -int main(void) +int main(int argc, char **argv) { + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); /* VBMI2 */ - __m512i a = _mm512_shrdv_epi64(_mm512_setzero_si512(), _mm512_setzero_si512(), _mm512_setzero_si512()); + a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512()); /* BITLAG */ a = _mm512_popcnt_epi8(a); /* VPOPCNTDQ */ diff --git a/numpy/distutils/checks/cpu_avx512_knl.c b/numpy/distutils/checks/cpu_avx512_knl.c index 0699f37a6346..10ba52bcc5a7 100644 --- a/numpy/distutils/checks/cpu_avx512_knl.c +++ b/numpy/distutils/checks/cpu_avx512_knl.c @@ -1,10 +1,11 @@ #include -int main(void) +int main(int argc, char **argv) { int base[128]; + __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]); /* ER */ - __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(_mm512_setzero_pd())); + __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad)); /* PF */ _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1); return base[0]; diff --git a/numpy/distutils/checks/cpu_avx512_knm.c b/numpy/distutils/checks/cpu_avx512_knm.c index db61b4bfa674..d03b0fe8beb3 100644 --- a/numpy/distutils/checks/cpu_avx512_knm.c +++ b/numpy/distutils/checks/cpu_avx512_knm.c @@ -1,9 +1,9 @@ #include -int main(void) +int main(int argc, char **argv) { - __m512i a = _mm512_setzero_si512(); - __m512 b = _mm512_setzero_ps(); + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]); /* 4FMAPS */ b = _mm512_4fmadd_ps(b, b, b, b, b, NULL); diff --git a/numpy/distutils/checks/cpu_avx512_skx.c b/numpy/distutils/checks/cpu_avx512_skx.c index 1d5e15b5e5b8..04761876295f 100644 --- a/numpy/distutils/checks/cpu_avx512_skx.c +++ b/numpy/distutils/checks/cpu_avx512_skx.c @@ -1,9 +1,10 @@ #include -int main(void) +int main(int argc, char **argv) { + __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1])); /* VL */ - __m256i a = _mm256_abs_epi64(_mm256_setzero_si256()); + __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1)); /* DQ */ __m512i b = _mm512_broadcast_i32x8(a); /* BW */ diff --git a/numpy/distutils/checks/cpu_avx512cd.c b/numpy/distutils/checks/cpu_avx512cd.c index 61bef6b8270e..52f4c7f8be0d 100644 --- a/numpy/distutils/checks/cpu_avx512cd.c +++ b/numpy/distutils/checks/cpu_avx512cd.c @@ -1,7 +1,7 @@ #include -int main(void) +int main(int argc, char **argv) { - __m512i a = _mm512_lzcnt_epi32(_mm512_setzero_si512()); + __m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1])); return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); } diff --git a/numpy/distutils/checks/cpu_avx512f.c b/numpy/distutils/checks/cpu_avx512f.c index 
f60cc09dd094..22d861471ced 100644 --- a/numpy/distutils/checks/cpu_avx512f.c +++ b/numpy/distutils/checks/cpu_avx512f.c @@ -1,7 +1,7 @@ #include -int main(void) +int main(int argc, char **argv) { - __m512i a = _mm512_abs_epi32(_mm512_setzero_si512()); + __m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1])); return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); } diff --git a/numpy/distutils/checks/cpu_f16c.c b/numpy/distutils/checks/cpu_f16c.c index a5a343e2dd59..678c582e410c 100644 --- a/numpy/distutils/checks/cpu_f16c.c +++ b/numpy/distutils/checks/cpu_f16c.c @@ -1,9 +1,9 @@ #include #include -int main(void) +int main(int argc, char **argv) { - __m128 a = _mm_cvtph_ps(_mm_setzero_si128()); - __m256 a8 = _mm256_cvtph_ps(_mm_setzero_si128()); + __m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1])); + __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2])); return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8))); } diff --git a/numpy/distutils/checks/cpu_fma3.c b/numpy/distutils/checks/cpu_fma3.c index cf34c6cb1572..2f879c3b357f 100644 --- a/numpy/distutils/checks/cpu_fma3.c +++ b/numpy/distutils/checks/cpu_fma3.c @@ -1,8 +1,9 @@ #include #include -int main(void) +int main(int argc, char **argv) { - __m256 a = _mm256_fmadd_ps(_mm256_setzero_ps(), _mm256_setzero_ps(), _mm256_setzero_ps()); + __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]); + a = _mm256_fmadd_ps(a, a, a); return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); } diff --git a/numpy/distutils/checks/cpu_fma4.c b/numpy/distutils/checks/cpu_fma4.c index 1ad717033e24..0ff17a483385 100644 --- a/numpy/distutils/checks/cpu_fma4.c +++ b/numpy/distutils/checks/cpu_fma4.c @@ -5,8 +5,9 @@ #include #endif -int main(void) +int main(int argc, char **argv) { - __m256 a = _mm256_macc_ps(_mm256_setzero_ps(), _mm256_setzero_ps(), _mm256_setzero_ps()); + __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]); + a = _mm256_macc_ps(a, a, a); return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); } From d5134ae507d49970b5212596c1d603746b806b6d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 9 May 2021 18:00:10 -0600 Subject: [PATCH 1096/1270] ENH: Include co-authors in changelog. Closes #18969 --- tools/changelog.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/tools/changelog.py b/tools/changelog.py index 9da330500726..1f116cc20fd4 100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -57,25 +57,34 @@ def get_authors(revision_range): - pat = u'^.*\\t(.*)$' lst_release, cur_release = [r.strip() for r in revision_range.split('..')] + authors_pat = u'^.*\\t(.*)$' # authors, in current release and previous to current release. - cur = set(re.findall(pat, this_repo.git.shortlog('-s', revision_range), - re.M)) - pre = set(re.findall(pat, this_repo.git.shortlog('-s', lst_release), - re.M)) + cur = this_repo.git.shortlog('-s', revision_range) + pre = this_repo.git.shortlog('-s', lst_release) + authors_cur = set(re.findall(authors_pat, cur, re.M)) + authors_pre = set(re.findall(authors_pat, pre, re.M)) + + # include co-authors + grp = '--group=trailer:co-authored-by' + cur = this_repo.git.shortlog('-s', grp, revision_range) + pre = this_repo.git.shortlog('-s', grp, lst_release) + authors_cur |= set(re.findall(authors_pat, cur, re.M)) + authors_pre |= set(re.findall(authors_pat, pre, re.M)) # Ignore the bot Homu. 
- cur.discard('Homu') - pre.discard('Homu') + authors_cur.discard('Homu') + authors_pre.discard('Homu') # Ignore the bot dependabot-preview - cur.discard('dependabot-preview') - pre.discard('dependabot-preview') + authors_cur.discard('dependabot-preview') + authors_pre.discard('dependabot-preview') # Append '+' to new authors. - authors = [s + u' +' for s in cur - pre] + [s for s in cur & pre] + authors_new = [s + u' +' for s in authors_cur - authors_pre] + authors_old = [s for s in authors_cur & authors_pre] + authors = authors_new + authors_old authors.sort() return authors From c00091e4265f5eaabc943c6990c65903522c6e45 Mon Sep 17 00:00:00 2001 From: Yashasvi Misra Date: Sun, 9 May 2021 16:02:14 +0000 Subject: [PATCH 1097/1270] DOC: add note and examples to `isrealobj` docstring Clarify that `isrealobj` returns True for non-array input rather than raise an exception, which may be surprising. Closes gh-12652 Co-authored-by: Mukulikaa <60316606+Mukulikaa@users.noreply.github.com> --- numpy/lib/type_check.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py index 383fbda3c239..56afd83ce335 100644 --- a/numpy/lib/type_check.py +++ b/numpy/lib/type_check.py @@ -364,6 +364,19 @@ def isrealobj(x): -------- iscomplexobj, isreal + Notes + ----- + The function is only meant for arrays with numerical values but it + accepts all other objects. Since it assumes array input, the return + value of other objects may be True. + + >>> np.isrealobj('A string') + True + >>> np.isrealobj(False) + True + >>> np.isrealobj(None) + True + Examples -------- >>> np.isrealobj(1) From b6eb3d8e64e8408cdcac80caacbda9b978545ee0 Mon Sep 17 00:00:00 2001 From: Constanza Date: Mon, 10 May 2021 07:25:02 +0200 Subject: [PATCH 1098/1270] BUG: fix np.ma.masked_where(copy=False) when input has no mask (#18967) Fixes gh-18946 --- numpy/ma/core.py | 4 ++++ numpy/ma/tests/test_core.py | 11 ++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 14dc218047e5..63d42eccad08 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -1936,6 +1936,10 @@ def masked_where(condition, a, copy=True): result = a.view(cls) # Assign to *.mask so that structured masks are handled correctly. result.mask = _shrink_mask(cond) + # There is no view of a boolean so when 'a' is a MaskedArray with nomask + # the update to the result's mask has no effect. 
+ if not copy and hasattr(a, '_mask') and getmask(a) is nomask: + a._mask = result._mask.view() return result diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 9bfb82d1ff1d..b71fa9069f60 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -5175,6 +5175,16 @@ def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) assert_equal(np.argwhere(a), [[1], [3]]) +def test_masked_array_no_copy(): + # check nomask array is updated in place + a = np.ma.array([1, 2, 3, 4]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [False, False, True, False]) + # check masked array is updated in place + a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [True, False, True, False]) + def test_append_masked_array(): a = np.ma.masked_equal([1,2,3], value=2) b = np.ma.masked_equal([4,3,2], value=2) @@ -5213,7 +5223,6 @@ def test_append_masked_array_along_axis(): assert_array_equal(result.data, expected.data) assert_array_equal(result.mask, expected.mask) - def test_default_fill_value_complex(): # regression test for Python 3, where 'unicode' was not defined assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) From e75537773bd3af1b4f2f4846c8e6cb4b80ff20ea Mon Sep 17 00:00:00 2001 From: HowJmay Date: Mon, 3 May 2021 14:44:21 +0800 Subject: [PATCH 1099/1270] ENH: Add max/min value comparison for integers --- numpy/core/src/_simd/_simd.dispatch.c.src | 38 ++++++++----- numpy/core/src/common/simd/avx2/math.h | 30 ++++++++++ numpy/core/src/common/simd/avx512/math.h | 32 +++++++++++ numpy/core/src/common/simd/neon/math.h | 30 ++++++++++ numpy/core/src/common/simd/sse/math.h | 68 +++++++++++++++++++++++ numpy/core/src/common/simd/vsx/math.h | 18 ++++++ numpy/core/tests/test_simd.py | 19 ++++++- 7 files changed, 220 insertions(+), 15 deletions(-) diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index 3e82c28a40d7..243d06fef6e7 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -309,7 +309,7 @@ SIMD_IMPL_INTRIN_2IMM(shri_@sfx@, v@sfx@, v@sfx@, @shr_imm@) #endif // shl_imm /**begin repeat1 - * #intrin = and, or, xor# + * #intrin = and, or, xor# */ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@) /**end repeat1**/ @@ -317,7 +317,7 @@ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@) SIMD_IMPL_INTRIN_1(not_@sfx@, v@sfx@, v@sfx@) /**begin repeat1 - * #intrin = cmpeq, cmpneq, cmpgt, cmpge, cmplt, cmple# + * #intrin = cmpeq, cmpneq, cmpgt, cmpge, cmplt, cmple# */ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@bsfx@, v@sfx@, v@sfx@) /**end repeat1**/ @@ -334,14 +334,14 @@ SIMD_IMPL_INTRIN_1(expand_@esfx@_@sfx@, v@esfx@x2, v@sfx@) * Arithmetic ***************************/ /**begin repeat1 - * #intrin = add, sub# + * #intrin = add, sub# */ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@) /**end repeat1**/ #if @sat_sup@ /**begin repeat1 - * #intrin = adds, subs# + * #intrin = adds, subs# */ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@) /**end repeat1**/ @@ -362,7 +362,7 @@ SIMD_IMPL_INTRIN_2(divc_@sfx@, v@sfx@, v@sfx@, v@sfx@x3) #if @fused_sup@ /**begin repeat1 - * #intrin = muladd, mulsub, nmuladd, nmulsub# + * #intrin = muladd, mulsub, nmuladd, nmulsub# */ SIMD_IMPL_INTRIN_3(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@, v@sfx@) /**end repeat1**/ @@ -381,15 +381,21 @@ SIMD_IMPL_INTRIN_1(sumup_@sfx@, @esfx@, v@sfx@) 
***************************/ #if @fp_only@ /**begin repeat1 - * #intrin = sqrt, recip, abs, square# + * #intrin = sqrt, recip, abs, square# */ SIMD_IMPL_INTRIN_1(@intrin@_@sfx@, v@sfx@, v@sfx@) /**end repeat1**/ #endif +/**begin repeat1 + * #intrin = max, min# + */ +SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@) +/**end repeat1**/ + #if @fp_only@ /**begin repeat1 - * #intrin = max, maxp, min, minp# + * #intrin = maxp, minp# */ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@) /**end repeat1**/ @@ -546,7 +552,7 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@) #endif // shl_imm /**begin repeat1 - * #intrin = and, or, xor, not, cmpeq, cmpneq, cmpgt, cmpge, cmplt, cmple# + * #intrin = and, or, xor, not, cmpeq, cmpneq, cmpgt, cmpge, cmplt, cmple# */ SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ @@ -563,14 +569,14 @@ SIMD_INTRIN_DEF(expand_@esfx@_@sfx@) * Arithmetic ***************************/ /**begin repeat1 - * #intrin = add, sub# + * #intrin = add, sub# */ SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ #if @sat_sup@ /**begin repeat1 - * #intrin = adds, subs# + * #intrin = adds, subs# */ SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ @@ -591,7 +597,7 @@ SIMD_INTRIN_DEF(divc_@sfx@) #if @fused_sup@ /**begin repeat1 - * #intrin = muladd, mulsub, nmuladd, nmulsub# + * #intrin = muladd, mulsub, nmuladd, nmulsub# */ SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ @@ -609,15 +615,21 @@ SIMD_INTRIN_DEF(sumup_@sfx@) ***************************/ #if @fp_only@ /**begin repeat1 - * #intrin = sqrt, recip, abs, square# + * #intrin = sqrt, recip, abs, square# */ SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ #endif +/**begin repeat1 + * #intrin = max, min# + */ +SIMD_INTRIN_DEF(@intrin@_@sfx@) +/**end repeat1**/ + #if @fp_only@ /**begin repeat1 - * #intrin = max, maxp, min, minp# + * #intrin = maxp, minp# */ SIMD_INTRIN_DEF(@intrin@_@sfx@) /**end repeat1**/ diff --git a/numpy/core/src/common/simd/avx2/math.h b/numpy/core/src/common/simd/avx2/math.h index 19e770ebf8aa..22659e21b1c0 100644 --- a/numpy/core/src/common/simd/avx2/math.h +++ b/numpy/core/src/common/simd/avx2/math.h @@ -55,6 +55,21 @@ NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) __m256d max = _mm256_max_pd(a, b); return _mm256_blendv_pd(a, max, nn); } +// Maximum, integer operations +#define npyv_max_u8 _mm256_max_epu8 +#define npyv_max_s8 _mm256_max_epi8 +#define npyv_max_u16 _mm256_max_epu16 +#define npyv_max_s16 _mm256_max_epi16 +#define npyv_max_u32 _mm256_max_epu32 +#define npyv_max_s32 _mm256_max_epi32 +NPY_FINLINE npyv_u64 npyv_max_u64(npyv_u64 a, npyv_u64 b) +{ + return _mm256_blendv_epi8(b, a, npyv_cmpgt_u64(a, b)); +} +NPY_FINLINE npyv_s64 npyv_max_s64(npyv_s64 a, npyv_s64 b) +{ + return _mm256_blendv_epi8(b, a, _mm256_cmpgt_epi64(a, b)); +} // Minimum, natively mapping with no guarantees to handle NaN. 
#define npyv_min_f32 _mm256_min_ps @@ -74,5 +89,20 @@ NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b) __m256d min = _mm256_min_pd(a, b); return _mm256_blendv_pd(a, min, nn); } +// Minimum, integer operations +#define npyv_min_u8 _mm256_min_epu8 +#define npyv_min_s8 _mm256_min_epi8 +#define npyv_min_u16 _mm256_min_epu16 +#define npyv_min_s16 _mm256_min_epi16 +#define npyv_min_u32 _mm256_min_epu32 +#define npyv_min_s32 _mm256_min_epi32 +NPY_FINLINE npyv_u64 npyv_min_u64(npyv_u64 a, npyv_u64 b) +{ + return _mm256_blendv_epi8(b, a, npyv_cmplt_u64(a, b)); +} +NPY_FINLINE npyv_s64 npyv_min_s64(npyv_s64 a, npyv_s64 b) +{ + return _mm256_blendv_epi8(a, b, _mm256_cmpgt_epi64(a, b)); +} #endif diff --git a/numpy/core/src/common/simd/avx512/math.h b/numpy/core/src/common/simd/avx512/math.h index da94faaeb1ad..b75651962675 100644 --- a/numpy/core/src/common/simd/avx512/math.h +++ b/numpy/core/src/common/simd/avx512/math.h @@ -62,6 +62,22 @@ NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) __mmask8 nn = _mm512_cmp_pd_mask(b, b, _CMP_ORD_Q); return _mm512_mask_max_pd(a, nn, a, b); } +// Maximum, integer operations +#ifdef NPY_HAVE_AVX512BW + #define npyv_max_u8 _mm512_max_epu8 + #define npyv_max_s8 _mm512_max_epi8 + #define npyv_max_u16 _mm512_max_epu16 + #define npyv_max_s16 _mm512_max_epi16 +#else + NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_max_u8, _mm256_max_epu8) + NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_max_s8, _mm256_max_epi8) + NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_max_u16, _mm256_max_epu16) + NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_max_s16, _mm256_max_epi16) +#endif +#define npyv_max_u32 _mm512_max_epu32 +#define npyv_max_s32 _mm512_max_epi32 +#define npyv_max_u64 _mm512_max_epu64 +#define npyv_max_s64 _mm512_max_epi64 // Minimum, natively mapping with no guarantees to handle NaN. #define npyv_min_f32 _mm512_min_ps @@ -79,5 +95,21 @@ NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b) __mmask8 nn = _mm512_cmp_pd_mask(b, b, _CMP_ORD_Q); return _mm512_mask_min_pd(a, nn, a, b); } +// Minimum, integer operations +#ifdef NPY_HAVE_AVX512BW + #define npyv_min_u8 _mm512_min_epu8 + #define npyv_min_s8 _mm512_min_epi8 + #define npyv_min_u16 _mm512_min_epu16 + #define npyv_min_s16 _mm512_min_epi16 +#else + NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_min_u8, _mm256_min_epu8) + NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_min_s8, _mm256_min_epi8) + NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_min_u16, _mm256_min_epu16) + NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_min_s16, _mm256_min_epi16) +#endif +#define npyv_min_u32 _mm512_min_epu32 +#define npyv_min_s32 _mm512_min_epi32 +#define npyv_min_u64 _mm512_min_epu64 +#define npyv_min_s64 _mm512_min_epi64 #endif diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h index c99ef3299cbc..a5508c96b78d 100644 --- a/numpy/core/src/common/simd/neon/math.h +++ b/numpy/core/src/common/simd/neon/math.h @@ -102,6 +102,21 @@ NPY_FINLINE npyv_f32 npyv_recip_f32(npyv_f32 a) #if NPY_SIMD_F64 #define npyv_maxp_f64 vmaxnmq_f64 #endif // NPY_SIMD_F64 +// Maximum, integer operations +#define npyv_max_u8 vmaxq_u8 +#define npyv_max_s8 vmaxq_s8 +#define npyv_max_u16 vmaxq_u16 +#define npyv_max_s16 vmaxq_s16 +#define npyv_max_u32 vmaxq_u32 +#define npyv_max_s32 vmaxq_s32 +NPY_FINLINE npyv_u64 npyv_max_u64(npyv_u64 a, npyv_u64 b) +{ + return vbslq_u64(npyv_cmpgt_u64(a, b), a, b); +} +NPY_FINLINE npyv_s64 npyv_max_s64(npyv_s64 a, npyv_s64 b) +{ + return vbslq_s64(npyv_cmpgt_s64(a, b), a, b); +} // Minimum, natively mapping with no guarantees to handle NaN. 
#define npyv_min_f32 vminq_f32 @@ -122,5 +137,20 @@ NPY_FINLINE npyv_f32 npyv_recip_f32(npyv_f32 a) #if NPY_SIMD_F64 #define npyv_minp_f64 vminnmq_f64 #endif // NPY_SIMD_F64 +// Minimum, integer operations +#define npyv_min_u8 vminq_u8 +#define npyv_min_s8 vminq_s8 +#define npyv_min_u16 vminq_u16 +#define npyv_min_s16 vminq_s16 +#define npyv_min_u32 vminq_u32 +#define npyv_min_s32 vminq_s32 +NPY_FINLINE npyv_u64 npyv_min_u64(npyv_u64 a, npyv_u64 b) +{ + return vbslq_u64(npyv_cmplt_u64(a, b), a, b); +} +NPY_FINLINE npyv_s64 npyv_min_s64(npyv_s64 a, npyv_s64 b) +{ + return vbslq_s64(npyv_cmplt_s64(a, b), a, b); +} #endif // _NPY_SIMD_SSE_MATH_H diff --git a/numpy/core/src/common/simd/sse/math.h b/numpy/core/src/common/simd/sse/math.h index e43c4116770d..1f82b546f276 100644 --- a/numpy/core/src/common/simd/sse/math.h +++ b/numpy/core/src/common/simd/sse/math.h @@ -55,6 +55,40 @@ NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) __m128d max = _mm_max_pd(a, b); return npyv_select_f64(_mm_castpd_si128(nn), max, a); } +// Maximum, integer operations +#ifdef NPY_HAVE_SSE41 + #define npyv_max_s8 _mm_max_epi8 + #define npyv_max_u16 _mm_max_epu16 + #define npyv_max_u32 _mm_max_epu32 + #define npyv_max_s32 _mm_max_epi32 +#else + NPY_FINLINE npyv_s8 npyv_max_s8(npyv_s8 a, npyv_s8 b) + { + return npyv_select_s8(npyv_cmpgt_s8(a, b), a, b); + } + NPY_FINLINE npyv_u16 npyv_max_u16(npyv_u16 a, npyv_u16 b) + { + return npyv_select_u16(npyv_cmpgt_u16(a, b), a, b); + } + NPY_FINLINE npyv_u32 npyv_max_u32(npyv_u32 a, npyv_u32 b) + { + return npyv_select_u32(npyv_cmpgt_u32(a, b), a, b); + } + NPY_FINLINE npyv_s32 npyv_max_s32(npyv_s32 a, npyv_s32 b) + { + return npyv_select_s32(npyv_cmpgt_s32(a, b), a, b); + } +#endif +#define npyv_max_u8 _mm_max_epu8 +#define npyv_max_s16 _mm_max_epi16 +NPY_FINLINE npyv_u64 npyv_max_u64(npyv_u64 a, npyv_u64 b) +{ + return npyv_select_u64(npyv_cmpgt_u64(a, b), a, b); +} +NPY_FINLINE npyv_s64 npyv_max_s64(npyv_s64 a, npyv_s64 b) +{ + return npyv_select_s64(npyv_cmpgt_s64(a, b), a, b); +} // Minimum, natively mapping with no guarantees to handle NaN. 
#define npyv_min_f32 _mm_min_ps @@ -74,5 +108,39 @@ NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b) __m128d min = _mm_min_pd(a, b); return npyv_select_f64(_mm_castpd_si128(nn), min, a); } +// Minimum, integer operations +#ifdef NPY_HAVE_SSE41 + #define npyv_min_s8 _mm_min_epi8 + #define npyv_min_u16 _mm_min_epu16 + #define npyv_min_u32 _mm_min_epu32 + #define npyv_min_s32 _mm_min_epi32 +#else + NPY_FINLINE npyv_s8 npyv_min_s8(npyv_s8 a, npyv_s8 b) + { + return npyv_select_s8(npyv_cmplt_s8(a, b), a, b); + } + NPY_FINLINE npyv_u16 npyv_min_u16(npyv_u16 a, npyv_u16 b) + { + return npyv_select_u16(npyv_cmplt_u16(a, b), a, b); + } + NPY_FINLINE npyv_u32 npyv_min_u32(npyv_u32 a, npyv_u32 b) + { + return npyv_select_u32(npyv_cmplt_u32(a, b), a, b); + } + NPY_FINLINE npyv_s32 npyv_min_s32(npyv_s32 a, npyv_s32 b) + { + return npyv_select_s32(npyv_cmplt_s32(a, b), a, b); + } +#endif +#define npyv_min_u8 _mm_min_epu8 +#define npyv_min_s16 _mm_min_epi16 +NPY_FINLINE npyv_u64 npyv_min_u64(npyv_u64 a, npyv_u64 b) +{ + return npyv_select_u64(npyv_cmplt_u64(a, b), a, b); +} +NPY_FINLINE npyv_s64 npyv_min_s64(npyv_s64 a, npyv_s64 b) +{ + return npyv_select_s64(npyv_cmplt_s64(a, b), a, b); +} #endif diff --git a/numpy/core/src/common/simd/vsx/math.h b/numpy/core/src/common/simd/vsx/math.h index 7c5301032af0..b2e393c7cf77 100644 --- a/numpy/core/src/common/simd/vsx/math.h +++ b/numpy/core/src/common/simd/vsx/math.h @@ -41,6 +41,15 @@ NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a) // - Only if both corresponded elements are NaN, NaN is set. #define npyv_maxp_f32 vec_max #define npyv_maxp_f64 vec_max +// Maximum, integer operations +#define npyv_max_u8 vec_max +#define npyv_max_s8 vec_max +#define npyv_max_u16 vec_max +#define npyv_max_s16 vec_max +#define npyv_max_u32 vec_max +#define npyv_max_s32 vec_max +#define npyv_max_u64 vec_max +#define npyv_max_s64 vec_max // Minimum, natively mapping with no guarantees to handle NaN. #define npyv_min_f32 vec_min @@ -50,5 +59,14 @@ NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a) // - Only if both corresponded elements are NaN, NaN is set. 
#define npyv_minp_f32 vec_min #define npyv_minp_f64 vec_min +// Minimum, integer operations +#define npyv_min_u8 vec_min +#define npyv_min_s8 vec_min +#define npyv_min_u16 vec_min +#define npyv_min_s16 vec_min +#define npyv_min_u32 vec_min +#define npyv_min_s32 vec_min +#define npyv_min_u64 vec_min +#define npyv_min_s64 vec_min #endif // _NPY_SIMD_VSX_MATH_H diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index c48f20b7d5e5..3be28c3bb1dd 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -208,6 +208,19 @@ def test_arithmetic_subadd_saturated(self): subs = self.subs(vdata_a, vdata_b) assert subs == data_subs + def test_math_max_min(self): + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_max = [max(a, b) for a, b in zip(data_a, data_b)] + simd_max = self.max(vdata_a, vdata_b) + assert simd_max == data_max + + data_min = [min(a, b) for a, b in zip(data_a, data_b)] + simd_min = self.min(vdata_a, vdata_b) + assert simd_min == data_min + class _SIMD_FP32(_Test_Utility): """ To only test single precision @@ -334,7 +347,8 @@ def test_max(self): pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() max_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10), (pinf, pinf, pinf), (pinf, 10, pinf), (10, pinf, pinf), - (ninf, ninf, ninf), (ninf, 10, 10), (10, ninf, 10)) + (ninf, ninf, ninf), (ninf, 10, 10), (10, ninf, 10), + (10, 0, 10), (10, -10, 10)) for case_operand1, case_operand2, desired in max_cases: data_max = [desired]*self.nlanes vdata_a = self.setall(case_operand1) @@ -364,7 +378,8 @@ def test_min(self): pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() min_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10), (pinf, pinf, pinf), (pinf, 10, 10), (10, pinf, 10), - (ninf, ninf, ninf), (ninf, 10, ninf), (10, ninf, ninf)) + (ninf, ninf, ninf), (ninf, 10, ninf), (10, ninf, ninf), + (10, 0, 0), (10, -10, -10)) for case_operand1, case_operand2, desired in min_cases: data_min = [desired]*self.nlanes vdata_a = self.setall(case_operand1) From e4ed09d58e47b1c120cb6d1f16110bf19fb711d2 Mon Sep 17 00:00:00 2001 From: Holly Corbett Date: Sun, 9 May 2021 18:24:52 +0200 Subject: [PATCH 1100/1270] DEP: Remove PolyBase from np.polynomial.polyutils Addresses issue #15658 by removing PolyBase class. Happy to submit alternative PR using getattr approach instead if desired. --- numpy/polynomial/polyutils.py | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 01879ecbcebb..f76426a60f4e 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -21,14 +21,6 @@ RankWarning raised in least-squares fit for rank-deficient matrix. -Base class ----------- - -.. autosummary:: - :toctree: generated/ - - PolyBase Obsolete base class for the polynomial classes. Do not use. - Functions --------- @@ -51,7 +43,7 @@ __all__ = [ 'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq', - 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase'] + 'trimcoef', 'getdomain', 'mapdomain', 'mapparms'] # # Warnings and Exceptions @@ -74,21 +66,6 @@ class PolyDomainError(PolyError): """ pass -# -# Base class for all polynomial types -# - -class PolyBase: - """ - Base class for all polynomial types. - - Deprecated in numpy 1.9.0, use the abstract - ABCPolyBase class instead. 
Note that the latter - requires a number of virtual functions to be - implemented. - - """ - pass # # Helper functions to convert inputs to 1-D arrays From 7a9c663c2b6b1b636de56607485260e935ed6450 Mon Sep 17 00:00:00 2001 From: Holly Corbett Date: Sun, 9 May 2021 18:34:44 +0200 Subject: [PATCH 1101/1270] DEP: Remove PolyBase from np.polynomial.polyutils Additionally removes associated PolyError classes. These appear to me to be unused outside this file. --- numpy/polynomial/polyutils.py | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index f76426a60f4e..3b0f0a9e57ee 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -4,15 +4,6 @@ This module provides: error and warning objects; a polynomial base class; and some routines used in both the `polynomial` and `chebyshev` modules. -Error objects -------------- - -.. autosummary:: - :toctree: generated/ - - PolyError base class for this sub-package's errors. - PolyDomainError raised when domains are mismatched. - Warning objects --------------- @@ -42,7 +33,7 @@ import numpy as np __all__ = [ - 'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq', + 'RankWarning', 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms'] # @@ -53,20 +44,6 @@ class RankWarning(UserWarning): """Issued by chebfit when the design matrix is rank deficient.""" pass -class PolyError(Exception): - """Base class for errors in this module.""" - pass - -class PolyDomainError(PolyError): - """Issued by the generic Poly class when two domains don't match. - - This is raised when an binary operation is passed Poly objects with - different domains. - - """ - pass - - # # Helper functions to convert inputs to 1-D arrays # From 76097abea70b5f58301493d57016713d3bef4edb Mon Sep 17 00:00:00 2001 From: Holly Corbett Date: Sun, 9 May 2021 20:05:06 +0200 Subject: [PATCH 1102/1270] DEP: Remove PolyBase from np.polynomial.polyutils Update stubs --- numpy/polynomial/polyutils.pyi | 5 ----- 1 file changed, 5 deletions(-) diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 0befa740eb5d..52c9cfc4a607 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -3,11 +3,6 @@ from typing import List __all__: List[str] class RankWarning(UserWarning): ... -class PolyError(Exception): ... -class PolyDomainError(PolyError): ... - -# NOTE: Deprecated -# class PolyBase: ... def trimseq(seq): ... def as_series(alist, trim=...): ... From 7ef9da0c0fea51597903a154348cdcaf60c8ae36 Mon Sep 17 00:00:00 2001 From: Holly Corbett Date: Mon, 10 May 2021 09:51:19 +0200 Subject: [PATCH 1103/1270] DEP: Remove PolyBase from np.polynomial.polyutils Note removal in release notes --- doc/release/upcoming_changes/18963.expired.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 doc/release/upcoming_changes/18963.expired.rst diff --git a/doc/release/upcoming_changes/18963.expired.rst b/doc/release/upcoming_changes/18963.expired.rst new file mode 100644 index 000000000000..b792fe0c8b5a --- /dev/null +++ b/doc/release/upcoming_changes/18963.expired.rst @@ -0,0 +1 @@ +* The class ``PolyBase`` has been removed (deprecated in numpy 1.9.0). Please use use the abstract ``ABCPolyBase`` class instead. 
\ No newline at end of file From 7046be97350733c484fad8f35c1758816761c90a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 May 2021 08:26:50 +0000 Subject: [PATCH 1104/1270] MAINT: Bump sphinx from 3.5.4 to 4.0.0 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 3.5.4 to 4.0.0. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/4.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v3.5.4...v4.0.0) Signed-off-by: dependabot[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 97cba369d207..2606f6270dd3 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx==3.5.4 +sphinx==4.0.0 numpydoc==1.1.0 ipython scipy From 4dea85b64392ed11a625c273b48e2ee55da26430 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 May 2021 08:27:45 +0000 Subject: [PATCH 1105/1270] MAINT: Bump pytest from 6.2.3 to 6.2.4 Bumps [pytest](https://github.com/pytest-dev/pytest) from 6.2.3 to 6.2.4. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/6.2.3...6.2.4) Signed-off-by: dependabot[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 4421c20b58ae..4982c9035a4f 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -2,7 +2,7 @@ cython==0.29.23 wheel<0.36.3 setuptools<49.2.0 hypothesis==6.10.1 -pytest==6.2.3 +pytest==6.2.4 pytz==2021.1 pytest-cov==2.11.1 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' From 71cf786c1ebb3d67d32f2fab6b1b8889a84f3ab5 Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 10 May 2021 11:50:46 +0100 Subject: [PATCH 1106/1270] DOC: Add troubleshoot items --- doc/source/dev/development_gitpod.rst | 26 +++++++++++++----- .../dev/gitpod-imgs/gitpod-dashboard-stop.png | Bin 0 -> 26217 bytes 2 files changed, 19 insertions(+), 7 deletions(-) create mode 100644 doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index 282103cc57c3..f0c1c18f5e68 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -135,12 +135,12 @@ before working on your contributions. When using Gitpod, git is pre configured for you: #. You do not need to configure your git username, and email as this should be -#. done for you as you authenticated through GitHub. You can check the git -#. configuration with the command ``git config --list`` in your terminal. + done for you as you authenticated through GitHub. You can check the git + configuration with the command ``git config --list`` in your terminal. #. As you started your workspace from your own NumPy fork, you will by default -#. have both "upstream "and "origin "added as remotes. You can verify this by -#. typing ``git remote`` on your terminal or by clicking on the **branch name** -#. on the status bar (see image below). + have both "upstream "and "origin "added as remotes. You can verify this by + typing ``git remote`` on your terminal or by clicking on the **branch name** + on the status bar (see image below). .. 
image:: ./gitpod-imgs/NumPy-gitpod-branches.png
     :alt: Gitpod workspace branches plugin screenshot
 
@@ -199,8 +199,8 @@ uses the rst extension with docutils.
 If you want to see the final output with the ``html`` theme you will need
 to rebuild the docs with ``make html`` and use Live Serve as described in
 option 1.
 
-FAQ's
------
+FAQ's and troubleshooting
+----------------------------
 
 #. How long is my Gitpod workspace kept for?
 
    Your stopped workspace will be kept for 14 days and deleted afterwards if
@@ -234,6 +234,18 @@ FAQ's
    it will shut down after 30 minutes. If you close the browser tab, it will
    shut down after 3 minutes.
 
+#. My terminal is blank - there is no cursor and it's completely unresponsive
+   Unfortunately this is a known-issue on Gitpod's side. You can sort this issue in two ways:
+
+   #. Create a new Gitpod workspace altogether.
+   #. Head to your `Gitpod dashboard `_ and locate
+      the running workspace. Hover on it and click on the **three dots menu**
+      and then click on **Stop**. When the workspace is completely stopped you
+      can click on its name to restart it again.
+
+   .. image:: ./gitpod-imgs/gitpod-dashboard-stop.png
+      :alt: Gitpod dashboard and workspace menu screenshot
+
 .. _Gitpod: https://www.gitpod.io/
 .. _NumPy repository on GitHub: https://github.com/NumPy/NumPy
 .. _create your own fork: https://help.github.com/en/articles/fork-a-repo
diff --git a/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png b/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png
new file mode 100644
index 0000000000000000000000000000000000000000..db3a570c1da8175b8df9aff8a1b1a20547d3347e
GIT binary patch
[binary PNG image data (literal 26217, base85-encoded) omitted]
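
As a quick check on the behaviour change made by the ``np.ma.masked_where(copy=False)``
fix in PATCH 1098 above, the following minimal sketch (an editorial illustration, not
part of any patch in this series) mirrors the new ``test_masked_array_no_copy`` test;
it assumes a NumPy build that already contains that fix.

    import numpy as np

    # Input MaskedArray that starts out with nomask.
    a = np.ma.array([1, 2, 3, 4])
    # With copy=False the computed mask is now written back into `a` itself.
    np.ma.masked_where(a == 3, a, copy=False)
    print(a.mask)   # expected: [False False  True False]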
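Similarly, the co-author handling added to ``tools/changelog.py`` in PATCH 1096 can be
exercised on its own. The sketch below is a standalone approximation (the helper name
``coauthors`` is made up for this illustration); it assumes a NumPy git checkout and a
git new enough (2.29+) to support ``--group=trailer:co-authored-by``, and it reuses the
same regular expression as the patched script.

    import re
    import subprocess

    def coauthors(revision_range):
        # Count commits grouped by the Co-authored-by trailer, then keep just
        # the names, matching what tools/changelog.py now does.
        out = subprocess.run(
            ["git", "shortlog", "-s", "--group=trailer:co-authored-by",
             revision_range],
            capture_output=True, text=True, check=True,
        ).stdout
        return set(re.findall(r"^.*\t(.*)$", out, re.M))

    # For example, co-authors between two release tags:
    # print(coauthors("v1.20.0..v1.21.0"))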
zlny5g3F9~RMRj%Jn6+|fmvk*$4K=qpo<<%S?}v0nZtsD}Ot8pG27HlwmsNGcZLYu1 z)@sfAzo}z{J~;r^xMY2M-I%Slm`P9i)b)fd&)aSb@&GM5MYQkYtvbzWtrQd4!2AQZSlHC08 z0V0em1{pj)7+O3Q{Zwa`##Gy1_R?5xktTlA8^BdLt^6L>C9B#F|K1cL8j-Wb_%Q`U z9-WwYw|Qh4lvNyEb$%DEYK|AVQ}fL9Mz-y#yVgNbV4)J*-#ACOcC_^=(i zY3aM?eEf$8ZIFRo0RbY!nMpk7N0(x{1}}@9xCY8k8Z!{KL$bnGoE6|a4ZH>N!E>i+ zt{Z_}rqa^IE`3f;PRu_JOF6tJ$J^JN>b{lCKx28iWrx?}93hku8uU#Frt!I)kte0bT#RaRSDTd-P26>x$T z1gvILJn(dE^uOz!Uqs5L+ys&IB%Fw5kdCbvqUPo`@gP{m-$j*;SiMqt_R>XGeG0nU+6VNkdDI6YY#dyYbQbgODYo^5-dg zvW*o-NI{@95UEZ0yJsmuNvMhS^>--Ev&8c6uzbA|M z7CXpv7--q);5Fkox|eVJ*jSJeR;PqvfH+!8^gqhImj^1mn3OB>kqA<}9SrG?(;p0q^^2li`{^b; zqCaA?DY6d{={&aY%bLbJ$el#hz|mA;WoE}0OYUSKR=MUeI0P33m?;{2n6>KV{X_OkdR~ z7xVP>LH6~PTzvRr3~}H#1Oe$ht<9c7rHwW_Pd9bfi{Mb11Ju%Hcc-fieP~E@cC9u$ zP@~aE2$rE8WupjXBhlI@(v5annck$Oaw`T@RLLo8t%{Duk=ji8%fz+VF)pW~GLez4 zb{igtIhMCjBZ3CSPj8-1>mEi}1WEc@qF>BKY;@F$hYN%!l*~Z*9o$DeU=6&#F(!K< zy7zgM8n_LGBH+)JDtsTmAk9ucYr6+sRN;I=)6rN!8F_6iG4YkPhFRgH!CM(o4w+es zxuF2%_K(@pT?tKmY_tVe7*w7d1rW2I1b;>b=bYa=TT(YchThWz0Ny62j6N1eWUY#N ztH6Im&QEIJOm1zbHXp@eKHSaZK3QU;c^vgJBa}qk_NXKzFezCs9#l^jkn zT)J4ms=93#5>DkNn??Q>aR{@4A?4~8Waox;T-ij;?bRcMHZn?>pzJ0dm=c9%;sD69g^x-3aG39g7%$<)gCx!QHA|CYb*5<= zae}=Zj!vziYfqFS7~;KlK8qR+z_`b=^#|-V9#Dd!O_KNtD{HWHnYNK2djg6~@spF1 z%p)CenlMmx*K}xUM*2P%qRbiqD@pseDsI-`w_vXc*jVPAQqvRtTQ8xl*c4Ev>{Fen zB?Zo5STA0*pefS1o%r5hprnOHxIW+Iz0PYv*JBzI)VZRZck|3?s0hRLYMegK z;?eT(2UnMAf2sb;o{h7AJS#r2%Ed+nrlFQw!}hoTbP@zio3pHk$Yj!9blvg(MoRVE zypWt<7iyU&ptyZE1j(_H&&!7LF$F0b&V`#7ChWnXz${JSc$$8?wwdZIC&*+k9|tB*q&q%71CG>saIktzy9i3}{&bnTw<+n@ugPk_ zG`?)g7BYQq*xtbb1ih}Q2hY4KhCqruRT3F?a*|iSPzg)F@~1&cj2(kuIP#EXie`?A zS2?o+8r4{&zdi9x$e`6MN*-F4LIxx!_(*+ysSNr>V&&ZDhNRx-P~nXK87N3J-E&C2}18G&w%+ZF9b)SotrqyW!_rwYV>i&~`&7Ux4o<|&+5Z}QsW-Re?zaaU1GrruA-2wiTo zcyski``&sN){94W8H%Jc7$h%m@|84(VXehzUHv3u4dpX4J@RkVIn_iay0vy2CskFG z4*$SC7J?iDr3AlD4rwzqj`v0^$~qh*tV5w=4&z<(lk6x+$*lg!6Q@0!7*!b(Qp0jb z824go=|vkdF19jC&ylx6J{i>+hId*m^y403da4s;W@03x;@KWe7eTYbzrs?Eq(NjV zg?WSX3Hqk3Ze{y6%|E`^@eEVLw6v7TlD?xSYh>kVIYlc+pG;pe;izOpb^5@ zm?o_l$OU?*dfYEmhu){o6neeBvYA3jiO;zhR9;41?~C%_)OF>%+=M)D7B*R|{Qwn} z2-Ac|RH)jbPLuh<5WdhbCe(Ay?g7W0%&pDxF9b1C8zR8am;7cm>+GQ0?Ao?m(OcBupV1}6ftt!FEKYi`kc<2;Y}{E{kE zVQLI?ba4d?;V0vT89hvC8TB;4B)G^>;o?AM-ZI;Rm0^7}Ii?#^v-8jGQ_lT1KQd4P zDT0%5VGh1;#Z~Hr^nChi^#mW7bZcFqb*0Wj5+$+()fzg9lpYxgC!fg`Q!=!WHjtlc z!c}Z7VgI>Ld9E(7L;^&&l7hr6FI}d%tVqZ@dTq}>H8I}8twbY=_ifUrrw5`S)WcS46yyveF%Mwgzkxb=3BEz0wu3G@y*x*CIwHiV%*l(&#P zBPGD(u*M$k<*c-OaSRcU+lzVIYN0%*#Mu1HP)C6_%Ggxaq;;4E zUNX-&JlEG~W#Udl`8Rj}g%Xvi)UrqZ>V4!o-tLC+m*(TqDU>%Ja;;xfVqUWHm5s1l z&AY~Q6d}b*{2upxUmB{qdUt8O#G^X4D0}KVk`%%}&5CBptu#E$pm!eGPASjT{6GM| z{`jUdP!C)?UJef0+tcX=aY?r2uYz5Boc&72RNmf7K;u{aQjY)L(?G?Pb2iH^lA?UR z-XLymKxG+L9?T0w8sH$`MmuWHl|)igRWr00IBS18SbfLPZ^5bUw8xyc=yj`5G~eNs zur@WbvI^X%wssF#YpUWBph126G-};>6oW!S=5ZtTD=fgANtRV?hK4&D84Fzsin=bm0*)~$pNsb) z4iUpVLnUHCQ9yE|>+lk*%N^1hnWviC$HE4jOhhBB~9Iv0^k0v&}I1z|g zqTM_;D|Mi_TO$uQLqm)3F3Y#~V=q4tgs$}3}4=CsH#7W7?JmLJQIjLMfY(-!w zGPJZ(XR9l-kcG4^DxE3vS^zTsWP7w6H`s8(@!aiL>GAz2r3_77}|U@yo;&WhSlLriOPbl;cp?)r`Q9d#dxyI8w>59 zw4Wq@3^u-Jk>gh&9BOL{gqbxJ){Ky2MCv;h%W27BW z>)t|LTPfP7pS^<+jj4C`Tt&lo%?TBXi3&TR@3+7Fx!hrQ9$bc?jgRj&AvBDUuA=AG zBApeEQgtRg$G_4@naLmM?BJ#KxYOmIZxsQjD)ZF{od<^2-u#4t3J(+frGcZtO~ME$ zw5*Ht4Wt*!g$yC1ImMY`mFqlqY3Jw&YmyQsP;=2p-ZNVqfrlcE2XJl>+sJOJW24iG zSB-*jW$2-$C3Gx=fu;;YbU-F&3_X>dQ-ij()qlc?<7pIat&QK>1HC=m7Z80EJq^&H zSnvJodB}^xY^vrbUUfqT4DegWfyKk)QweEwwS1SCmmh>2fdM%Fgo*8Is@6rAN3>fI zfKa+SSq!-nh46<7)_oDf3Bv9sX1K)bM^~YvuBhlM{s`F0+Po7N7uT?XiB%6O{&)z$ 
zemmCN5C_Cn?ed`1Vx!w$C<}hG$D<~%_{^bavB;02;9EDKSE=F%0TYv5&Ea7EGuqMi-YGb=S{im&9%~E39Y8FyPU!mK ziA-}azDZj#uF+_OGVJPrUS59OZ7eV&EO_*2l@g8g(kZT2Q!HGtiP(eCTK0QlDpBcr zKPmuk+EOKrxICaJ}s#23u@C2kHN4>*>6xw(+x^LUs%{6=*_?g(_``)2>;JG5LZWD8Ts zCx&RphU)s`H9@!z9WQ_-MbZ8gMX(r-wIIll?4W%&vE!YyO2Pb~{n(GK_(&h3Sq2=m z75^0e?bKBJD~KbQE+p@{@o^$#1t7jy`L$$060A5MNZi*h$b%*c&R(trt*vU(U4MX8guYTdxF{xipI z1`N``kl3J=()@e>6mPe|`Nx|e0fa9CVitspQ2LE1Rt6dLdypn*cd$s73j#F?`feiD zJdOt-*5phNVfQ9CaR9>ygdcn`vXhZ8iQwx2fg#a~pA1!^<<6cE3BaWD>r`M(GYzo$ z0AKwJar7Lo!W}}+%cvq2+ST=odP!N!^_s!Z6LO%Cg9S4h+=|0MxI`VeOt)|CfiVY2 z2PFDMB4>Flj19_pDj)TgGWJsn{}~NLQmY=OsPHwa3XAbX%$|d?g3Zi z%FhQTLe;!u@_P|sofr(sK4VR1EFq6YQ={Fw+uLfhZG3yMuZ@|g&tO=oUhxPj(qLC% zOkXbHN~jB0ogfDzC$gaZL7{`p!J5@b5z(8ufVuW%aJT}hw1)(bxjug^WjyL&=hmqT z>rExA>&^TW`|E0m553_=QGi|3w%Y0a^sKmYPS1r$%&8$t%Rn%;^qt zKS1QwkF+NqBSYRTOjzg~9Wxq}{{m-V6XfMGo=#KufR2LyCm1N-uM2*IoFBuP(UikE zq(fTO9A_eGl$H$9o`&h7;KC3M<<+tnN*8*?tT}FGhXhCwtB6C3_=5#$FsghNLC75G`OpELjB z)+K9r`WW#Ee{_|Fi8_69vR5dGdaK&g1+a*HMXU@?TvkM}d-=7x(L(ZkES!9#1VCs zXmD`g?0g$Q|2Yo`6FYDR%c4?oBhsyV*^AM&YwO-y zGW^4z^|fnPY4`aUGzJAM3$K?J-2=}YLJ4j3xA1rPqf}+Z7OAN{%+=rXlr1Jc}w==BQhcT_`m{k;5iOUe_YE~_#sf$3W~E% z6(7wNq!8uRWdd_*hvSl?L6H&tt&1e+9@#Gs1_-)XI&;obx_e!{x;KbkC{m>2tioi) zm(6Wn> zc!=bV&gkfR}5U1Ebua5j8~%*L(PE{F>vhMlroVbT2ZKD|0dYkO13FpoD$RpwFt zZZUjOas&K0P4rZ6&0E{6xchYkHw>N~9KdMmspDL!$vWDDA`lD->GloK5Xa{iyS#g$ zqg->*+lfLcwH5Vf@8;1>`Vu^iq(J7b!*7d#yHla#oXyIVVNw$NJTh|_5djB;n%PNB zbuhs}dP2*l5_KD03FHE^0lk4RG{810IG8kqYX1lX@0mqcYDvsc11ES9QY?zensjt> z3Z;kntv-}Ymu!f;kUeEXGBP}~0EO zRhcCdI2Q>MD=nxjhA25V7d+KO%W(Ol4Osn~FwJ|bB$@fCDy1SQ4tHM_t0}=zzBf24 zoN0Om@7$3Y8YwbQh?J(gXs2M?x`~0}`o;aAB>$kE8*kmW_dcW~=+J^IGF~{_wRctc z=JTa=R#!*ERi{3(EPr*LwmGo)-Pj~8^gFeW7>;;w7`s)maIOKju` zVK3#L8sSChtx_#s1h3uJk;7DxsgI>@oc$hUQ1lK1-s1S#AtTAls!ppHgHZv#fIdIG zo5z$;Hm^}P94Fy6A?b_lP`Qol#gy)DFN80KpRukTnQB13$--#im~?VMN|3UnRi2{F zJObkVrXrTXVBQA0|PUw3*Y>R4oXdlFW>d=nU_royAiojiCB0nHNB|S6bK{a(3 zEjcL5pn*^rWDqe;lw5E*4lO3sTY+XomQ{qV=y^7>SI6n^_E#tT0gIHctC%f-#4H13(7kCaJk|fU_8(Te!yW5_$PK8Shn)#E`bd{w}&X4+2 zgU^+C9tM_InhFXEY^kgIJ<&-b*pAN5p<0Nir4F1f_Y8~o!o$6?yiptx`NEB5`jO)Z z{Loq`9Ra#44c*hYTBS|xp3I!YGZ~%4S`xA%h{vnA?0v^7zPP)oU?f<1!Sq$DDuu0E zuh96Ch9P7Hoq%G%OGvNv*X-8-ZtkzmphwN+crVUa5^hJK#Z);!_|*&EZdX*GKbh-l zHw9uF)kR{_Jd)m&kW6*dp5GH(|MRppA~>;}QXrI_PAgP_5{_Spzq_diR4*6bv7$9U zq^LSI-C!t1bWCU>#7z)J;|1Ri;?&@mJb`e@A$FCQRMobYr013pxst84gREsXuI0rojeHoYe62XLz z&6?b2j1+w|=$bs!^r-^@4yhlTCZp;>V?pDIpo_^TbZM0-s=TbM)0MOi@}d`;v;ejT z`b?Ny!jOf44eP@YbyG1)Fkb*AnC9SIF!z|=d+%IO$UiFK7NVdfu7>odd{zN}&jx}OTlBQTB)SvLvTk10UVDfM1lW6_sY=`1wq%f zb2B>y2vRGjfoN23R{a$kLa_`!@G+g2!!C#~IyL#=NVz6qho(J$8T*xfr|paLej&&A zc#$I@Th)7!uLpyMH<4A6Ay1GOZ0z09n6?ye z(0(4Kt&f13UtLitP44n~j~0G43=yL)M=-(9UNYpaI0c)paDEzk(1bW0U3 z8Z*8I`{ybD>_=*}Hz=v$gG3k}>|>Gr-T4!9LckrT>5^ywTvwn=p4{*5)AZwgWn!EG z0_sl`!cSef=*V2Bc{k55?c3NVdkAtw4f)ea(FWeXw~qwe9;7J1aIu$%VKp9AIC#G( zY4JWH&IC17qI zgok)LzBViqEJA@tlEh>%6*qw3z1MIsUA&C?ZGR7M!C)r0-nGhEzY`!V|MJlO-^JB?zD>2rBZa5a5PFJSJ3M_`>G%c zE5EILat=dD}#4yAY?Itja)mVI3v(zzR5cOjyH)TT?zm7*HV2ynjz{b#r6f&@b3}8d)V!8=u zKdlp|b%#^i?XQDo{jSEIJ}VvC>2RpbQLw)Ah~ln^8PRQi!KrjfVsk);2GMkFQH)a= z>arxJsV!bJe0HZc1)-)j=p0SeGZb#|t918%bi3h9ShbeKkn+D^Ni5;b^0FvF1ij4} zgijgk-N(#~qPLO-IA8N|%tF*mvHnXaL82INT3$`nV$E$uPNB6y(Pn+x-{Q$-E+<(` z&mZAXAD=DwFZfl14OAt}K`T=KBY_q356<=jS)3IjM6Q5o#``xs*^e96AQ7ByuV~q; zDMy5^#%O#sHTe^|pLD8*DxAdXyrG^1^LTY;kc}0UgwcH5n)kV?E z4C^U}tn{UnSqY~jBh_HmKDG==stJsx7S*>o;emY|i17dAbQ4uq`|5$)MxMibZK^dd z0Yb|w!n)8oN5)Pw*6e^MpM@k%IM7G4ml!ms}o`B7v>H^B} z@-NpE-WL$3%?P;2w&GB$&w~Y+#0dl(M0FRi-!XQ6s1+L9`~f#-!J>8AT4!X8{WR#) 
z+=D{vWFSA_LzL~!o1*Sd#b89I`U^_!1%^Y&>Lqy4&&VKSzzo5}jdV>-U%m15>P@2Sqhy)lpSlEQ{b)w zx- ze`#?s zmEUQbtnn(z;1SVR%=@^;Z{M6$>_F!;|1+$!q|p|MvB7a|CpWVrf4$ue=)a{rzkxw} zhN|m(F$fBFNM9M`SEhmleO9siZKbkr3V`uLCZ3@K0U-ze`4_;H>ItpaSf3fhKdxF3 zFn4b5kL8!jdJ=^Rui2%gz4Yeu6`l21cJkL36$L(?(*|Cj%TR0X_w4{eo^Ic*0b-~I3nx-qS`JhF5B%o0 ziVo%+?RzVF%$}dALLOQrW|3G6Kd7zon7GG)M#akRHx@f$Bp6`Z6fHGc%rGkO)C`1w zTZ1fFutZEui{j#*kl63WT(dJ1f+S7egW)@pTOWSP(c#fF-e*K)g^4Mnfxf=pw!5S2 zPoMgloDFoccjJmZEqDGD0TOzvXB@9Az35njKx6wOMqD2?Iw-BO4LZS=NgQ!7*(ZgUEy-qlf z3`xH0`~6+lcg}nM_%WB+&))0VYu#(zd#`)stcRDJW!uGIQ`P!AL7;;cU;5UcfzZ!G zGaFNlR$K8;XA!dt(4gR4uQlBx^#=c)^UbuAL1PYh<{2S3ZFcrblQ?|=BU*pf_Eu2c zcgbHT%5R3BrlT7eLO82U*pl;2vpTf`F|NADUy(R@x<1Ijz{nT{f&s3ffk7eczQ7Tn71itk0H952`}tnCzrVR)KXz}8>CU2K7S_Ilz_=lNr|{qL^^Ecgs8;# z;Ya_~yJVN1s>6dit5rcm15?ixD|!?HhU?iGs>V$*h1M77B{BR= zD!1f*ds`k2THK)DogX6ZiR|S}+*u-xV;&)?dB(ciI4rjA( z3`#zGOakeOrU%Y(Hgo+ElB}XOke0GsZ-&+Vs$~o16>>y#d8KlKn7GL4nfRfRNpf2vaQNjGiQa7u$bz%BGcxY{Bd0+L2zBr? z+gl=E#BWKDE@lwlfEQ;v@!v3?_7adL;BT|k`UBuBCK7<=M|5|>f0x0&XY%pt~vaCa)S2nkxeR6(qraxE!j)Lf#szI9+BGEzoHW}cRW z7h{ZP2A7h3XX8ZLHINV6{P7bie$m%sdsv0Zsk)X$Z(^$ZJ)XWnW(3jdCRwjEOK6S# zx-iSW-4squ?$XxMaoPBu$yM*JBZQo*8Sx7bC#p55dH>DylmcuPZ7sbzif-vAa-JZq zKY0?U9uWPwQw!GR|Fb|jl01rH=(5F6qySuRs4f>*JK_@MwG9}?+c^#5j{ zx~Sm4PqU^7S|}(m>y9nXQpVppgwxD)j*b!wKXB9`rJQQGkdoOvFk3|;zsmhN71M>X z%1;S7J~L9hC{|XU6NfXIcvnL-W@dLladQ7j;EmYfhSF48H6zN6#Q zi@Ho?d-`~L=QC_>ZdMLtxFe1tdV70wQ_lA$dXveksw^c=psJd$qbWp*QO}M~b7{qE zjC%(*M=Rl~nhpmC2VpB#xKehbw|K?HeW%9371z+@<7mb0k9d!E8BonGHk=MiIzDp4 z6=11QNX@4j`~PTm)s~aPU|*&uL}Bb77jxeq6-jhhg2Xo)s)4AOoNCloUI#6KIxlwbZ;oQJZqU%tQBnD;j7O2IbL&$$wlxG~oL)=YltAz8 z9UdO_SEZqH7+cOR8mm(s^9wM&hj}|4sI^XWFzDP1S8tm)OR|{Q0`b5=*U`+~uE0$s z)nzk+l0UYKK6!0iQiNi&s!vAeNs(WHp!atKMOrYFR^!oKC&~h@< z2zQ5mM|GhnU0YngCh7>!fuwR9+71_B$SX@BO#JOtRN7Y2MU?t_PafPLysv~4`RUm5 z%khT~Uo(Yxj@QbzNp7>p+*}_mLAPEQDmh9QMeZFrVkajjuk&^S|8sEXw5Y2K&vIUL z{8*0nnfIgbB_*iMr~AIE$K>#al^oyA=9Qn?R?X)#zG)KSjfDHa{AOnd)kA0mJ>2Y7 zU_0=D)}QXe(@ql*Pwapk#a1?S`_i(qm;^$St7Ny2WvlheEB(H{hc&g+65(QEhMJ53 zx>nug*FQP>K4r%)3DE7dG&GD`5$JurybF43^Ye^F=hM}LVWFY-_O{w}%-?!PEn{&) z3fkGR`g!`#8&xb4cH}{sQ%%n}-}Pr?kDx{MF#Wy5`R?P#{oTWf^v1HX%<0)j&w8G* z)KWwybFW@F(cH9%*-LY=OOSFRMWv_DUH*Ena%TYpn<5$IqqBVHmyXMjH+MIP+{kac zD)C&gmE|HE!PdH^^tU>AeG34=Zz1WD|A1f!0D_4o76t!+pe+D`DynI#|0w$?VgZQy ze+{PmM3cnw1o_PrnY{9bh8IyY`bu^>(xSE=?H}h7G@`5Lx}FXrXBVwx;_=9zAF<7V zT0{sSx`tlDJ`$WtLIYpLsB(EiRne{!IaPh*z7Nil-slD)M;=$6lfRwHO!`yw$&+z? 

From 0bd55555177160b1b953711f93f09f0a42fa7214 Mon Sep 17 00:00:00 2001
From: Tania Allard
Date: Mon, 10 May 2021 12:16:54 +0100
Subject: [PATCH 1107/1270] DOC: Fix lists and add images

---
 doc/source/dev/development_gitpod.rst         | 34 +++++++++++-------
 .../dev/gitpod-imgs/gitpod-dashboard-stop.png | Bin 26217 -> 14185 bytes
 .../gitpod-edit-permissions-repo.png          | Bin 0 -> 5505 bytes
 3 files changed, 22 insertions(+), 12 deletions(-)
 create mode 100644
doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index f0c1c18f5e68..619bfba3ad80 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -216,15 +216,12 @@ FAQ's and troubleshooting Absolutely! Any extensions you installed will be installed in your own workspace and preserved. -#. I registered on Gitpod but I still cannot see a ``Gitpod`` button in my - repositories - +#. I registered on Gitpod but I still cannot see a ``Gitpod`` button in my repositories. Head to https://gitpod.io/integrations and make sure you are logged in. Hover over GitHub and click on the three buttons that appear on the right. Click on edit permissions and make sure you have ``user:email``, - ``read:user``, and ``public_repo`` checked. - Click on **Update Permissions** and confirm the changes in the - GitHub application page. + ``read:user``, and ``public_repo`` checked. Click on **Update Permissions** + and confirm the changes in the GitHub application page. .. image:: ./gitpod-imgs/gitpod-edit-permissions-gh.png :alt: Gitpod integrations - edit GH permissions screenshot @@ -235,16 +232,29 @@ FAQ's and troubleshooting shut down after 3 minutes. #. My terminal is blank - there is no cursor and it's completely unresponsive - Unfortunately this is a known-issue on Gitpod's side. You can sort this issue in two ways: + Unfortunately this is a known-issue on Gitpod's side. You can sort this + issue in two ways: #. Create a new Gitpod workspace altogether. #. Head to your `Gitpod dashboard `_ and locate - the running workspace. Hover on it and click on the **three dots menu** - and then click on **Stop**. When the workspace is completely stopped you - can click on its name to restart it again. + the running workspace. Hover on it and click on the **three dots menu** + and then click on **Stop**. When the workspace is completely stopped you + can click on its name to restart it again. + + .. image:: ./gitpod-imgs/gitpod-dashboard-stop.png + :alt: Gitpod dashboard and workspace menu screenshot + +#. I authenticated through GitHub but I still cannot commit to the repository + through Gitpod. + + Head to https://gitpod.io/integrations and make sure you are logged in. + Hover over GitHub and click on the three buttons that appear on the right. + Click on edit permissions and make sure you have ``public_repo`` checked. + Click on **Update Permissions** and confirm the changes in the + GitHub application page. - .. image:: ./gitpod-imgs/gitpod-dashboard-stop.png - :alt: Gitpod dashboard and workspace menu screenshot + .. image:: ./gitpod-imgs/gitpod-edit-permissions-repo.png + :alt: Gitpod integrations - edit GH repository permissions screenshot .. _Gitpod: https://www.gitpod.io/ .. 
_NumPy repository on GitHub: https://github.com/NumPy/NumPy diff --git a/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png b/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png index db3a570c1da8175b8df9aff8a1b1a20547d3347e..40f137745941e26e738dc237c0ca39b423bbdae5 100644 GIT binary patch literal 14185
[... base85-encoded binary image data omitted (literal 14185 and literal 26217 blocks) ...]
diff --git a/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png b/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png new file mode 100644 index 0000000000000000000000000000000000000000..8bfaff81cfb69e8b3f4834d5899110f79beb8452 GIT binary patch literal 5505
[... base85-encoded binary image data omitted ...]
literal 0 HcmV?d00001 From 6721946a9580c8032a45c178ae4b8730b32d6c83 Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 10 May 2021 13:48:24 +0100 Subject: [PATCH 1108/1270] DOC: Fix list indentation --- doc/source/dev/development_gitpod.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index 619bfba3ad80..147cf369cac5 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -46,10 +46,8 @@ The best way to work on NumPy as a contributor is by making a fork of the repository first. #. Browse to the `NumPy repository on GitHub`_ and `create your own fork`_. - #. Browse to your fork. Your fork will have a URL like -#. https://github.com/melissawm/NumPy, except with your GitHub username in -#. place of "melissawm". + https://github.com/melissawm/NumPy, except with your GitHub username in place of ``melissawm``. Starting Gitpod ---------------- From 4fe5a40791258dab9d065d9c8cdeb47d8b25e4ad Mon Sep 17 00:00:00 2001 From: Kasia Date: Mon, 10 May 2021 13:03:18 +0000 Subject: [PATCH 1109/1270] DOC Improve datetime64 docs - deleting space --- numpy/core/_add_newdocs_scalars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py index eb1c794f36b5..b1038f1d05ea 100644 --- a/numpy/core/_add_newdocs_scalars.py +++ b/numpy/core/_add_newdocs_scalars.py @@ -224,7 +224,7 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): >>> np.datetime64(10, 'Y') numpy.datetime64('1980') - >>> np.datetime64('1980', 'Y') + >>> np.datetime64('1980', 'Y') numpy.datetime64('1980') >>> np.datetime64(10, 'D') numpy.datetime64('1970-01-11') From e3035738627cb4ca8fe0dfe358bc93aa06ed5fb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 May 2021 13:17:37 +0000 Subject: [PATCH 1110/1270] MAINT: Bump hypothesis from 6.10.1 to 6.12.0 Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.10.1 to 6.12.0.
- [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.10.1...hypothesis-python-6.12.0) Signed-off-by: dependabot[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index 4982c9035a4f..bba27a7b1912 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,7 @@ cython==0.29.23 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.10.1 +hypothesis==6.12.0 pytest==6.2.4 pytz==2021.1 pytest-cov==2.11.1 From cf8988470e6e3fb216b9db5708e90cb83ade0255 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 10 May 2021 08:19:40 -0600 Subject: [PATCH 1111/1270] MAINT: Cleanup tools/changelog.py * Combine author/co-author search. * Get rid of 'u' prefix, it is no longer needed. * Use raw strings in regular expressions --- tools/changelog.py | 44 ++++++++++++++++++++------------------------ 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/tools/changelog.py b/tools/changelog.py index 1f116cc20fd4..2bd7cde08d83 100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -22,6 +22,7 @@ - gitpython - pygithub +- git >= 2.29.0 Some code was copied from scipy `tools/gh_list.py` and `tools/authors.py`. @@ -58,21 +59,16 @@ def get_authors(revision_range): lst_release, cur_release = [r.strip() for r in revision_range.split('..')] - authors_pat = u'^.*\\t(.*)$' + authors_pat = r'^.*\t(.*)$' - # authors, in current release and previous to current release. - cur = this_repo.git.shortlog('-s', revision_range) - pre = this_repo.git.shortlog('-s', lst_release) + # authors and co-authors in current and previous releases. + grp1 = '--group=author' + grp2 = '--group=trailer:co-authored-by' + cur = this_repo.git.shortlog('-s', grp1, grp2, revision_range) + pre = this_repo.git.shortlog('-s', grp1, grp2, lst_release) authors_cur = set(re.findall(authors_pat, cur, re.M)) authors_pre = set(re.findall(authors_pat, pre, re.M)) - # include co-authors - grp = '--group=trailer:co-authored-by' - cur = this_repo.git.shortlog('-s', grp, revision_range) - pre = this_repo.git.shortlog('-s', grp, lst_release) - authors_cur |= set(re.findall(authors_pat, cur, re.M)) - authors_pre |= set(re.findall(authors_pat, pre, re.M)) - # Ignore the bot Homu. authors_cur.discard('Homu') authors_pre.discard('Homu') @@ -82,7 +78,7 @@ def get_authors(revision_range): authors_pre.discard('dependabot-preview') # Append '+' to new authors. - authors_new = [s + u' +' for s in authors_cur - authors_pre] + authors_new = [s + ' +' for s in authors_cur - authors_pre] authors_old = [s for s in authors_cur & authors_pre] authors = authors_new + authors_old authors.sort() @@ -95,17 +91,17 @@ def get_pull_requests(repo, revision_range): # From regular merges merges = this_repo.git.log( '--oneline', '--merges', revision_range) - issues = re.findall(u"Merge pull request \\#(\\d*)", merges) + issues = re.findall(r"Merge pull request \#(\d*)", merges) prnums.extend(int(s) for s in issues) # From Homu merges (Auto merges) - issues = re. findall(u"Auto merge of \\#(\\d*)", merges) + issues = re. 
findall(r"Auto merge of \#(\d*)", merges) prnums.extend(int(s) for s in issues) # From fast forward squash-merges commits = this_repo.git.log( '--oneline', '--no-merges', '--first-parent', revision_range) - issues = re.findall(u'^.*\\((\\#|gh-|gh-\\#)(\\d+)\\)$', commits, re.M) + issues = re.findall(r'^.*\((\#|gh-|gh-\#)(\d+)\)$', commits, re.M) prnums.extend(int(s[1]) for s in issues) # get PR data from github repo @@ -122,31 +118,31 @@ def main(token, revision_range): # document authors authors = get_authors(revision_range) - heading = u"Contributors" + heading = "Contributors" print() print(heading) - print(u"="*len(heading)) + print("="*len(heading)) print(author_msg % len(authors)) for s in authors: - print(u'* ' + s) + print('* ' + s) # document pull requests pull_requests = get_pull_requests(github_repo, revision_range) - heading = u"Pull requests merged" - pull_msg = u"* `#{0} <{1}>`__: {2}" + heading = "Pull requests merged" + pull_msg = "* `#{0} <{1}>`__: {2}" print() print(heading) - print(u"="*len(heading)) + print("="*len(heading)) print(pull_request_msg % len(pull_requests)) for pull in pull_requests: - title = re.sub(u"\\s+", u" ", pull.title.strip()) + title = re.sub(r"\s+", " ", pull.title.strip()) if len(title) > 60: - remainder = re.sub(u"\\s.*$", u"...", title[60:]) + remainder = re.sub(r"\s.*$", "...", title[60:]) if len(remainder) > 20: - remainder = title[:80] + u"..." + remainder = title[:80] + "..." else: title = title[:60] + remainder print(pull_msg.format(pull.number, pull.html_url, title)) From 011028e96625291cb5ee402eec1df478adf45619 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 10 May 2021 09:59:10 -0600 Subject: [PATCH 1112/1270] REL: Update main after 1.20.3 release. --- doc/changelog/1.20.3-changelog.rst | 35 +++++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/1.20.2-notes.rst | 3 +- doc/source/release/1.20.3-notes.rst | 43 +++++++++++++++++++++++++++++ 4 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 doc/changelog/1.20.3-changelog.rst create mode 100644 doc/source/release/1.20.3-notes.rst diff --git a/doc/changelog/1.20.3-changelog.rst b/doc/changelog/1.20.3-changelog.rst new file mode 100644 index 000000000000..df7f1056521a --- /dev/null +++ b/doc/changelog/1.20.3-changelog.rst @@ -0,0 +1,35 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Anne Archibald +* Bas van Beek +* Charles Harris +* Dong Keun Oh + +* Kamil Choudhury + +* Sayed Adel +* Sebastian Berg + +Pull requests merged +==================== + +A total of 15 pull requests were merged for this release. + +* `#18763 `__: BUG: Correct ``datetime64`` missing type overload for ``datetime.date``... +* `#18764 `__: MAINT: Remove ``__all__`` in favor of explicit re-exports +* `#18768 `__: BLD: Strip extra newline when dumping gfortran version on MacOS +* `#18769 `__: BUG: fix segfault in object/longdouble operations +* `#18794 `__: MAINT: Use towncrier build explicitly +* `#18887 `__: MAINT: Relax certain integer-type constraints +* `#18915 `__: MAINT: Remove unsafe unions and ABCs from return-annotations +* `#18921 `__: MAINT: Allow more recursion depth for scalar tests. +* `#18922 `__: BUG: Initialize the full nditer buffer in case of error +* `#18923 `__: BLD: remove unnecessary flag ``-faltivec`` on macOS +* `#18924 `__: MAINT, CI: treats _SIMD module build warnings as errors through... 
+* `#18925 `__: BUG: for MINGW, threads.h existence test requires GLIBC > 2.12 +* `#18941 `__: BUG: Make changelog recognize gh- as a PR number prefix. +* `#18948 `__: REL, DOC: Prepare for the NumPy 1.20.3 release. +* `#18953 `__: BUG: Fix failing mypy test in 1.20.x. diff --git a/doc/source/release.rst b/doc/source/release.rst index 8cc4a57507ad..b9cdfb9c8b57 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release Notes :maxdepth: 3 1.21.0 + 1.20.3 1.20.2 1.20.1 1.20.0 diff --git a/doc/source/release/1.20.2-notes.rst b/doc/source/release/1.20.2-notes.rst index cdf45b65ea26..10d39f7f6389 100644 --- a/doc/source/release/1.20.2-notes.rst +++ b/doc/source/release/1.20.2-notes.rst @@ -4,10 +4,9 @@ NumPy 1.20.2 Release Notes ========================== -NumPy 1,20.2 is a bugfix release containing several fixes merged to the main +NumPy 1.20.2 is a bugfix release containing several fixes merged to the main branch after the NumPy 1.20.1 release. - Contributors ============ diff --git a/doc/source/release/1.20.3-notes.rst b/doc/source/release/1.20.3-notes.rst new file mode 100644 index 000000000000..8c25b3cc3215 --- /dev/null +++ b/doc/source/release/1.20.3-notes.rst @@ -0,0 +1,43 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.20.3 Release Notes +========================== + +NumPy 1.20.3 is a bugfix release containing several fixes merged to the main +branch after the NumPy 1.20.2 release. + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Anne Archibald +* Bas van Beek +* Charles Harris +* Dong Keun Oh + +* Kamil Choudhury + +* Sayed Adel +* Sebastian Berg + +Pull requests merged +==================== + +A total of 15 pull requests were merged for this release. + +* `#18763 `__: BUG: Correct ``datetime64`` missing type overload for ``datetime.date``... +* `#18764 `__: MAINT: Remove ``__all__`` in favor of explicit re-exports +* `#18768 `__: BLD: Strip extra newline when dumping gfortran version on MacOS +* `#18769 `__: BUG: fix segfault in object/longdouble operations +* `#18794 `__: MAINT: Use towncrier build explicitly +* `#18887 `__: MAINT: Relax certain integer-type constraints +* `#18915 `__: MAINT: Remove unsafe unions and ABCs from return-annotations +* `#18921 `__: MAINT: Allow more recursion depth for scalar tests. +* `#18922 `__: BUG: Initialize the full nditer buffer in case of error +* `#18923 `__: BLD: remove unnecessary flag ``-faltivec`` on macOS +* `#18924 `__: MAINT, CI: treats _SIMD module build warnings as errors through... +* `#18925 `__: BUG: for MINGW, threads.h existence test requires GLIBC > 2.12 +* `#18941 `__: BUG: Make changelog recognize gh- as a PR number prefix. +* `#18948 `__: REL, DOC: Prepare for the NumPy 1.20.3 release. +* `#18953 `__: BUG: Fix failing mypy test in 1.20.x. From 12c2f00bfa34b8257ca9f118179cadbb82bc0428 Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 10 May 2021 20:20:02 +0100 Subject: [PATCH 1113/1270] DOC: Fix indentation Co-authored-by: Ross Barnowski --- doc/source/dev/development_gitpod.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index 147cf369cac5..9dc7230fb8d3 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -47,7 +47,7 @@ repository first. #. Browse to the `NumPy repository on GitHub`_ and `create your own fork`_. #. 
Browse to your fork. Your fork will have a URL like - https://github.com/melissawm/NumPy, except with your GitHub username in place of ``melissawm``. + https://github.com/melissawm/NumPy, except with your GitHub username in place of ``melissawm``. Starting Gitpod ---------------- From a44e51125e0db90d57c5a2701ebb8e669a67e03d Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 10 May 2021 20:20:44 +0100 Subject: [PATCH 1114/1270] DOC: Fix lists indentation Co-authored-by: Ross Barnowski --- doc/source/dev/development_gitpod.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index 9dc7230fb8d3..09c4eabd015d 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -230,6 +230,7 @@ FAQ's and troubleshooting shut down after 3 minutes. #. My terminal is blank - there is no cursor and it's completely unresponsive + Unfortunately this is a known-issue on Gitpod's side. You can sort this issue in two ways: From 9e37c8eb966e2d1160a55a797ad059de7f1ee8c2 Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 10 May 2021 20:20:57 +0100 Subject: [PATCH 1115/1270] DOC: Remove extra -- Co-authored-by: Ross Barnowski --- doc/source/dev/development_gitpod.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index 09c4eabd015d..ba80bb266df9 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -198,7 +198,7 @@ If you want to see the final output with the ``html`` theme you will need to rebuild the docs with ``make html`` and use Live Serve as described in option 1. FAQ's and troubleshooting ----------------------------- +------------------------- #. How long is my Gitpod workspace kept for? Your stopped workspace will be kept for 14 days and deleted afterwards if From 70d3224e5207e66565803b7f9563b81ea995fc2c Mon Sep 17 00:00:00 2001 From: Tania Allard Date: Mon, 10 May 2021 21:00:29 +0100 Subject: [PATCH 1116/1270] DOC: Make FAQ titles --- doc/source/dev/development_gitpod.rst | 118 ++++++++++++++------------ 1 file changed, 64 insertions(+), 54 deletions(-) diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst index ba80bb266df9..92cca81fca69 100644 --- a/doc/source/dev/development_gitpod.rst +++ b/doc/source/dev/development_gitpod.rst @@ -35,7 +35,7 @@ Make sure to select **All repositories** access option to avoid issues with permissions later on. Click on the green **Install** button .. image:: ./gitpod-imgs/installing-gitpod-io.png - :alt: Gitpod repository access and installation screenshot + :alt: Gitpod repository access and installation screenshot This will install the necessary hooks for the integration. @@ -57,7 +57,7 @@ which will add a **Gitpod** button next to the **Code** button in the repository: .. image:: ./gitpod-imgs/NumPy-github.png - :alt: NumPy repository with Gitpod button screenshot + :alt: NumPy repository with Gitpod button screenshot #. If you install the extension - you can click the **Gitpod** button to start a new workspace. @@ -79,7 +79,7 @@ repository: #. When your workspace is ready, you can :ref:`test the build` by entering:: - $ python runtests.py -v + $ python runtests.py -v ``runtests.py`` is another script in the NumPy root directory. 
It runs a suite of tests that make sure NumPy is working as it should, and ``-v`` activates the
@@ -93,11 +93,11 @@ can check the Getting started `VSCode docs`_ to familiarize yourself with it.
 Your workspace will look similar to the image below:
 .. image:: ./gitpod-imgs/gitpod-workspace.png
-   :alt: Gitpod workspace screenshot
+   :alt: Gitpod workspace screenshot
 .. note:: By default, VSCode initializes with a light theme. You can change to
-   a dark theme by with the keyboard shortcut :kbd:`Cmd-K Cmd-T` in Mac or
-   :kbd:`Ctrl-K Ctrl-T` in Linux and Windows.
+   a dark theme by with the keyboard shortcut :kbd:`Cmd-K Cmd-T` in Mac or
+   :kbd:`Ctrl-K Ctrl-T` in Linux and Windows.
 We have marked some important sections in the editor:
@@ -109,7 +109,7 @@ We have marked some important sections in the editor:
 #. GitHub Pull Requests extension - you can use this to work with Pull
    Requests from your workspace.
 #. Marketplace extensions - we have added some essential extensions to the NumPy
-#. Gitpod. Still, you can also install other extensions or syntax highlighting
+   Gitpod. Still, you can also install other extensions or syntax highlighting
    themes for your user, and these will be preserved for you.
 #. Your workspace directory - by default, it is ``/workspace/numpy``. **Do not
    change this** as this is the only directory preserved in Gitpod.
@@ -136,12 +136,12 @@ When using Gitpod, git is pre configured for you:
    done for you as you authenticated through GitHub. You can check the git
    configuration with the command ``git config --list`` in your terminal.
 #. As you started your workspace from your own NumPy fork, you will by default
-   have both "upstream "and "origin "added as remotes. You can verify this by
+   have both ``upstream`` and ``origin`` added as remotes. You can verify this by
    typing ``git remote`` on your terminal or by clicking on the **branch name**
    on the status bar (see image below).
-.. image:: ./gitpod-imgs/NumPy-gitpod-branches.png
-   :alt: Gitpod workspace branches plugin screenshot
+   .. image:: ./gitpod-imgs/NumPy-gitpod-branches.png
+      :alt: Gitpod workspace branches plugin screenshot
 Rendering the NumPy documentation
 ----------------------------------
@@ -188,8 +188,7 @@ uses the rst extension with docutils.
 .. image:: ./gitpod-imgs/vscode-rst.png
    :alt: Gitpod workspace VSCode open rst screenshot
-#. As you work on the document, you will see a live rendering of it on the
-   editor.
+#. As you work on the document, you will see a live rendering of it on the editor.
 .. image:: ./gitpod-imgs/rst-rendering.png
    :alt: Gitpod workspace VSCode rst rendering screenshot
@@ -200,60 +199,71 @@ rebuild the docs with ``make html`` and use Live Serve as described in option 1.
 FAQ's and troubleshooting
 -------------------------
-#. How long is my Gitpod workspace kept for?
-   Your stopped workspace will be kept for 14 days and deleted afterwards if
-   you do not use them.
+How long is my Gitpod workspace kept for?
+*****************************************
-#. Can I come back to a previous workspace?
-   Yes, let's say you stepped away for a while and you want to carry on working
-   on your NumPy contributions. You need to visit https://gitpod.io/workspaces
-   and click on the workspace you want to spin up again. All your changes will
-   be there as you last left them.
+Your stopped workspace will be kept for 14 days and deleted afterwards if you do
+not use them.
-#. Can I install additional VSCode extensions?
-   Absolutely! Any extensions you installed will be installed in your own
-   workspace and preserved.
+Can I come back to a previous workspace?
+*****************************************
-#. I registered on Gitpod but I still cannot see a ``Gitpod`` button in my repositories.
-   Head to https://gitpod.io/integrations and make sure you are logged in.
-   Hover over GitHub and click on the three buttons that appear on the right.
-   Click on edit permissions and make sure you have ``user:email``,
-   ``read:user``, and ``public_repo`` checked. Click on **Update Permissions**
-   and confirm the changes in the GitHub application page.
+Yes, let's say you stepped away for a while and you want to carry on working on
+your NumPy contributions. You need to visit https://gitpod.io/workspaces and
+click on the workspace you want to spin up again. All your changes will be there
+as you last left them.
-   .. image:: ./gitpod-imgs/gitpod-edit-permissions-gh.png
-      :alt: Gitpod integrations - edit GH permissions screenshot
+Can I install additional VSCode extensions?
+*******************************************
-#. How long does my workspace stay active if I'm not using it?
-   If you keep your workspace open in a browser tab but don't interact with it,
-   it will shut down after 30 minutes. If you close the browser tab, it will
-   shut down after 3 minutes.
+Absolutely! Any extensions you installed will be installed in your own workspace
+and preserved.
-#. My terminal is blank - there is no cursor and it's completely unresponsive
+I registered on Gitpod but I still cannot see a ``Gitpod`` button in my repositories.
+*************************************************************************************
-   Unfortunately this is a known-issue on Gitpod's side. You can sort this
-   issue in two ways:
+Head to https://gitpod.io/integrations and make sure you are logged in.
+Hover over GitHub and click on the three buttons that appear on the right.
+Click on edit permissions and make sure you have ``user:email``,
+``read:user``, and ``public_repo`` checked. Click on **Update Permissions**
+and confirm the changes in the GitHub application page.
-   #. Create a new Gitpod workspace altogether.
-   #. Head to your `Gitpod dashboard `_ and locate
-      the running workspace. Hover on it and click on the **three dots menu**
-      and then click on **Stop**. When the workspace is completely stopped you
-      can click on its name to restart it again.
+.. image:: ./gitpod-imgs/gitpod-edit-permissions-gh.png
+   :alt: Gitpod integrations - edit GH permissions screenshot
-   .. image:: ./gitpod-imgs/gitpod-dashboard-stop.png
-      :alt: Gitpod dashboard and workspace menu screenshot
+How long does my workspace stay active if I'm not using it?
+***********************************************************
-#. I authenticated through GitHub but I still cannot commit to the repository
-   through Gitpod.
+If you keep your workspace open in a browser tab but don't interact with it,
+it will shut down after 30 minutes. If you close the browser tab, it will
+shut down after 3 minutes.
-   Head to https://gitpod.io/integrations and make sure you are logged in.
-   Hover over GitHub and click on the three buttons that appear on the right.
-   Click on edit permissions and make sure you have ``public_repo`` checked.
-   Click on **Update Permissions** and confirm the changes in the
-   GitHub application page.
+My terminal is blank - there is no cursor and it's completely unresponsive
+**************************************************************************
-   .. image:: ./gitpod-imgs/gitpod-edit-permissions-repo.png
-      :alt: Gitpod integrations - edit GH repository permissions screenshot
+Unfortunately this is a known-issue on Gitpod's side. You can sort this
+issue in two ways:
+
+#. Create a new Gitpod workspace altogether.
+#. Head to your `Gitpod dashboard `_ and locate
+   the running workspace. Hover on it and click on the **three dots menu**
+   and then click on **Stop**. When the workspace is completely stopped you
+   can click on its name to restart it again.
+
+.. image:: ./gitpod-imgs/gitpod-dashboard-stop.png
+   :alt: Gitpod dashboard and workspace menu screenshot
+
+I authenticated through GitHub but I still cannot commit to the repository through Gitpod.
+******************************************************************************************
+
+Head to https://gitpod.io/integrations and make sure you are logged in.
+Hover over GitHub and click on the three buttons that appear on the right.
+Click on edit permissions and make sure you have ``public_repo`` checked.
+Click on **Update Permissions** and confirm the changes in the
+GitHub application page.
+
+.. image:: ./gitpod-imgs/gitpod-edit-permissions-repo.png
+   :alt: Gitpod integrations - edit GH repository permissions screenshot
 .. _Gitpod: https://www.gitpod.io/
 .. _NumPy repository on GitHub: https://github.com/NumPy/NumPy

From 32322042ec59a07541bd4fba252fbcf9914d94b5 Mon Sep 17 00:00:00 2001
From: Tania Allard
Date: Mon, 10 May 2021 21:05:52 +0100
Subject: [PATCH 1117/1270] DOC: Make screenshot labels bigger

---
 .../dev/gitpod-imgs/gitpod-workspace.png | Bin 118160 -> 201069 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/doc/source/dev/gitpod-imgs/gitpod-workspace.png b/doc/source/dev/gitpod-imgs/gitpod-workspace.png
index 618b7063b8ebd7496a7d7975e2e8472192d12cc9..a65c9bd7e152a6795dd5b139b1986814404317ec 100644
GIT binary patch
literal 201069
zAz}wVo{E@JvA6UO=-=_-6I))k*?#fIh|&lB=(xPtK}j*AcYDi_5b-Ka2XuK4U|qnJOM9_K zp|N>3SCv}T56L_IU6v>Nq@(>%3RT#Dew7n`!}N7Kf%%rlsz;5j^pyydpBv?^Cvh?T zh=>ji=NlKEtb9SSAxr%fF&Y)r%i(U*7d~UIqhw&W!O)Ks`?=~-t11vI5$sKARTF!n z%G1|=L~EstlT>&A>ixS>20o;Sb_-lb(W2gvl-j?RJ4Sgt2>&wA%^9NaWzU25Y!pNzqQhFYy7U=l1E0sFBcrUwgS#N z#_1=wDc&sA&eX=uN$)*%4?16rSFNO$Ixr3oAzF4NmrF~h?t%fC7?`#Mu{!!(Ni;ts z{cy&AbKvDM?lyl|%)Zsu1DZ;(R0jJG^M0}s)ufN~NCI@{19MQIbD6YZYN}+35u|kL z3v=_4audcK20sLc1%pM&kaqrYw2a+me~tPt&B%mINn~+;5sw6bY_ECGd>3kbwe9Z6 zBQRu|25v3Q*4%|J_%U>(L)ag7JA&} zl11ZqXB&~vQkF`Q zq5nY3Idv3N1|s#q5BW$wA$7(H3rX@yJBwXXgPV=`RIUrw`5wRRtlWywWu~3224=d& zrqG^V85N`w%F4wiVfW)9cuqs}M4Ge4e#~e8dTuGM24)< z}!*@yFbu80U0$x`T&-ZEKBV@n+MyWHIoz*fJ5?3o>JkIm;2Y05=S z6=H&l!i!TW#L8hqOS_5sh1D)kh^B5nmEL)njXsuBJ!k>D+~z&ITiDrd`?YVcDEp+9 z1?BA0R`vdxPq835y~w(G^#n#1Bry^;MEDPbrFSX@=D$?f%9DlO(x6^Qan<4%#;oba zsp54?6X<5lZppA;c!`rwfu5Zc(f%pP%R*B=fWv*!Mp_>;L&x^*#^XOQ)j?Ibiu=B@ z#YREG_!i-vcsI2q=w1!JlL-MQJg;-0d4r~_rgVO^< z;RK6Jd~92SnN^AXpjwvZzm7_elIm=};7_7Zm|tu6?<%_1|9g~rvHy&}~ z&^&kf82zGq9YhOVeJ$hngCO1q}l%!nS6&gd!vzdh1D zc&{t_!Giq^)zT#Ck#zV3!wsA@WlpsiD$_rrrw3)>npJz#7CQ->;#OIic6Oxt$s2Y& zl@@v~BS=T;Z4s3pi|1{B>19r-dn;u&!z5jr+{rHK(I`Rws*dF^ab1;&yB8|WsHakU zv1FQl#+27K?Z}>EWP5SjTf4PC#ctv#EJ&l6d1^6byC80%=C4emi-^4;Loay~o0fA= zI!W1FHk!(oyiz@aD|hGnNonzbzu=c1_tqM_=0}j3h$#I=2_%*p!G)YqUr|SALLIg$AFWC7TMwpDx0!INg$jd{G2m z;NO!uWn1k2DMY7K5yPl5Y`IozQ_nC`*gaEjp+TCRUs`*~-qV8k zWs-m;?(GYOS<@t6WLJwGn_(Objcb|x+B3kyQWAMB?(CHzG2tC)Jszf$p+ybpZ-nys z94m5i9x=j|S?os0LKr!cak$C{4p6b_T8PVeU?S=YTv_D7pFeIz#3$bT?Y#_;O>1M8 z=LPpkx%R@st>~8rQynY7;RQkp<3t|YVDso`=86fb>WI3{&CT7rT$RhG>g)Z^X>`XX zgwxV@+VTWH5Al#DztkIS3R)O^-e@x4%9xpTRG!|9aT--qL%1@ybITq4PdgHiywco*+JrPJ!Mo4XNV z+V-g1=B5IYS)_N#;7x=r4+}j{h6TUH76pdQQOCQ(E0>0;G*@j&Pp9RNFdJR%71h$6 z+8TPFf3wTGXh6FuCRl8x`D+~Fi0CKkvNL5_|2B7|Mpl>enzC$OHX%B#iNs@)~0>vh81JZ&-$uZz9`B~=PjI<1b%Bx(S!h%Fp^{bcZxS{%m=hb=QxEiKTp=ngE&IWR_P6# zm<2=A+gEP2Qe8ZWE;sm{VNFj5@6d^6p!eqp=l9PfUX3fxy+43O$I+Fy&4FJPqx#zk zJBOZ!GozXG*8}}WP59SOV))^D5wt6p{x#V6g`xcYkN5~_4zgjV=3gh;-~Sr%n))|v zHvPf>!IO{P5B4II<~Z=={BL9p#1F;i5wHve-|K%+_5VOJeqTKo;3}Z|G5iP6@n@Nc z>)(pkk@CjeD$xZ^x87~{L-3WEZ@u7`S)i0EY_r^#`Obc}5ys1A{aMp3EcsJ{u<$^X zbr*gW27UPn9M&0sU7){TvY2|rBoTp-{P+W=82BBgc%==#mp}mqVSOUEeU@|4>AgI!nrV8NXb|(n;7Rbg*SiypMJg0!G%E@Mg#XC zFF2wB<6}R|3UXNLvEq)lsxD@WKw35;HM8|h*21-@IU5w;pq>S0KXE4ANYTG<(65_1 zgp2^jzGz@uMhQ4AE*i+Qj{Gkajk=d01~+xN3EBTb28=a&#<^Hars^C$?J?FWYm>oR zZ~BeH0zA90#ZCaY08PN8jxQMGWr=vghp2|ZJK$6h>>hjwDd$G)W?cyY5=@g0@WHkI zw$iT=FM4KhnYA}VAU-FKwZWtvyQ-LBl7FbiA-C=L%3@)&c`_8a?LTAbs$LJljS(7bn^0Q}s=!)QKnTpX08?PmdimKbSx*a8U+Y0g1hq#n zb7b%M`l-a}ruFE{gR-huMlGm}Xm7ne4@-5LqU!e7hOYzg2msj+Xx$yM;4R5`tP3EXF1S`_@aS8E$6v`kHrNWa1E1V` z=VEKpLxcdj@)|)^e3e=AnI8)D&jyv3=V6W}=zH$9UWAS{9ll%|Y%*>% zhQTR4upSJi67euJw943y@B;qpjXE%s$Sjt#l{TaVIVo*HaVofBw_kqGYd=$uiu^sS zzWxaz6D`e9r^X-0^Iy*(JhcAwbg=z*suhkzVG!PkX}QiLq$9lRF%vHxOdqoQUUogv8!wcJMMH_|)!GsZMgPrP>)=`h{(g=Lp?ukGTXqaNSK{fCN268^YVnd5Hy-AlJdv_WLyIc5d3bMR! 
zse-}tQKQQJoB0eLZB=Un8kckm9aYS2{Z<@Db3W!3DI5_~6_6K)zi)x5xN8X)M*fc=OB~kOzIC(*%D#uPotfVOi(MX zB=tAZ!rzYs@jCz^q$X5rm>Nib&g6eQ>AHTvpiB~ zPf<);$U_NUZu*E=X`3Jc$4CY7Wg?DnX+o`+_hZ%Cp>oX>^(H&Rj*QdMOsgq#oxfR3 zgFjppv&>4`g%*pJpqnM&6!J_)!+w^-kV|g(0M<^;`*6(kZZKFO?e+^)&8Bflix@46 zDY)vN0rOBEGtPlno=_}um+8l2Ko-BfJ|#L7)vn##qt8iIA1kJ^CBP9aZoZ<-XpN=$ zj|bw%Rc+_h_;5k)t?!q%@H>Yx2EdDA*x{oY7#7G4f1m z>)e4aUHExe14kJzKbGUNUSq1m8QK%C~{-e7u6PTkdOztGaUI zzYl;->1C-H<}2X~FM8R9u-0a z^IZ}|_1^9~aCpb2Uq~I}{|V|``FAz%Xz>r;*w2c7RZL|5_)cLRQ0gm*c{cHDN(_@c z_;{Acw_+KK9U5cEHp`+$CeLm}VqIQVqjtl+nm7iRVavw4dq1kVy*uNH$ye8n&qi3t z*fTw0;|S?b)>WxW-nY!^sl0k3K-EK$+!F-j))%y$6E?P;`bjmpewQTq-772^l{$Vb z?>~y#s*s4?Cm9ID{I93HG_@m&B!=nJsaJ~v9%vMzj&3c>%053GZn7-UyOT=b=10U= zmP%oJGaAQgSSv}>kW1WI#w3b+tsuy2PWbTk!$Dd}vT%%QTV>wr@tN4$bY?zqQe_C6 z*r*B5y01`n?b_b7)FVBMb*kv~XCH}|6#*M6YuraWXt5DkdWnIrY;T(L8MmVCy(jGi z)cG8;v!L4fFl81hx1yi48kcdaFZ@eQX^FTjZ|Ke=L0(`6mvKM3IFR4+Hsddwn12?b zw93HTie=20KO<;&J=oB08lFp(<7yi}*BZ646L9Fo0i?}4*jbNR1-=5w8 zhz?H`3vm^QxE|!)WFu#YYSBT(BGZJ!bSQq@x)K~@ZbU!y)hJY92#DLa&{T;k!%nI1 zK~3Plj+7y7sgj<lnZ zuJLE0=7A|jXzb^Qufe!CqVu5(6?I@j9h?=sF2Zg*)SR?A39BCT&eue}QV1=L$sJ@= zxKEy~j}T8LSNUy`db%8bX>l_l!h8v*>Wx5p4L%q;ro11%a@lA&V?6dzq*fT}1k5No zi!D{|2vjoerq_2M$1ySz+R3dl8%JqN$p>g|uw)HBA*vuXb6I0t3%`PCiMbG=phAmyEaY$8TeMvX|** z!3@p^UHa!XUsH?L-#~#snleW`H7gg#*n3Uq=+DIPn^%A8BU`I0=rYq|tIfMH3=A7A zU&0{Vu_Jl3L-JW(*5Kaq2<7Yv*fL4di3~W5kw$%;d}{SS9`NTV&qW;gpT{RV{iCt| z$6@22hhT(R@|O_*&)<)qD}eF0(RH&NVi2B(rrQ0lwjM0m z&|?RW>!b&Z4z7J}^6aPDM!Fgv&ZP5J$&0t?>;M>1!*AX}`vHuzj~2^%P0*+@C?+ie zMUE_}gDy1sba&+%aN75&siO)`b zlUTGL)Y_ear{O)2`t@%T-v7SQY|)=7bU5R=o!?wx-SJnY^7jTiMi^1G;2aW;Socu~ z923v`LBDzJ`DLE}^(4p-BA%7iXf`HPU*KPIS@gg0=*qlVdsq;|`U5y#lp-G0K+&qE zmGFda5?+ZoE_S?qydIVi5guOm^+kzpY9D-g5La!(KW~|aNkbilW9M?B4Mlk9aG@%+ zf_1=V6L4z7C|DmWzaIS#?h|w!)uk}7@LcX8#c$DTFl2|$%fRP2A1dq;ty1X!2=JZ( zBPfUJ`Ws`CH!q-K4aa9FYBdCcp82p6Z)0A;?+x{{XGx!q~<{rIq$=; zmsTh(F#z8A*gjzVu=5|4G5vl>l#-Etaq@pNh`shq*hE#T4N1thgM zGRfSxjlo+UO$G+(12EXoD%ZF51iGnMC?;tDhBlyCV8q1%6ND>7EGaM!c_tC451MLd z%<(+J`(Y|7e6~Lqp_4RWta)eln$02uLl#*wRy-=_E;~i z4V$7btNN>N-A!>0C7`{bSLXZ&s`KwfJEuskuh%xF$zvHW{)<(0Q0Mfhx0i1=ViFu; zm;1A`S7r7^hp)uZ2E+AFy&x-zH%Rp3`+Des1?B*&!d728-!||>Q)r}6nZu2w%jtDv zM=bZsVVOLA6#hnGe;#^8%dn37L*F_mQ3KhnPC<#Dh2Lr0qY7FSL@GPT) ztW6scQxzQ;vQ(ggQ!v7ELSVW8)ixGc`*YinW?JvH&R4Qw^eMZIKep)XcAr9mFjSNB z1F~t=Gp9uGUS{h6hjhCQZ~(32@rXiP`5*;@&;wECf9)QBoW`_>$s$U1^{2((O@yje$6SCNT#ov z%(#vSY{^qd&R@qz8W|n%94pgfEbC( zt67e{d^l*#5o}Vwa!qado84A@x`2~8Yj_lZY->q@rKQ@iOE5Vpxnv4Hw#jFL4YPmr z8EqP8yHWhA?Rp^7T&h&9Kux-?J$4BV!720?K9Gs=nUgbRxe^b&l$sAQPpa9if0K^? 
zqllnJ>|=~%dJim95KYc*_Ab5)1`K;3eFOKMIR517)F(Wqfdex-0x()Jyr~xqXGP^F zy4Z*yRmVqIFKE%{Sjo2)=V@c<+#Pta3zuFwFh#R6b;Xp!wLD-J_j5rm+`Dg@`6T?5d9Q=}?7k@#%AXMxWBV?YYW z$iyy(H;zOlRbL5JE;`5ap(qSVsfOyq80}>As(dSBfaHxt2`}nBMh!g zU7T=vh1Bl^Ms*(Z!mw!W(I%wBM3eDgh#{gpX6ox^a~xw}t2BWFcozngx-R2p+%$FM z-QXHV=IN~P;nrm=GHDMtXjb{hCd;q=&d>kPh5hHrcmrP}$oG8vlHR(K5wDpAmR&|- z26>BJ1R(OA2b+^)lf!_%wPlGs{hsp zj56{-L;wJ;V&0t)IktZqxBl~!U-ozAog8Vo!DOE)<}1qd?y0i@c0;{c+ydgq$Qi3rsK-ewpL|(M~zcx?2#w>Kl};}f>}!|{y%FJ_+)5^$Ino<@*f}a&#Lv;3sAJH z^kU;ZOZzvB1Rh_Z&EYU|hMNkz4+N>f;7m|hWl07lE?pdc0v3QNP?Er~e+6!MU1;vP z47C6PO;kDgs`DSXUf?`Wn z+8AoY=rc&M?LWW&m+O%J$NIbthRC=+V|zD{)x|H^X5It91)EZ^8lFZthh^SB<3_=g zM};?uM|%5TZq_T5u;3mjZ#CtwNC#^w`S}@CcK~Udh)rMEvf%jBJp)Mm+69W9YT8yt z1BHO2DNK6%()`S-C!k?xex|*OYJ;*12kt501fzFThs<8wEl*6W>2U)z4l?A{HL(E_mATd(68Mn1Ilt>Oq5l)p6jN1e_$w zs}z<)AX*8kxm|Pyq<#hJnv%OvotNA#S$;8M=7v}$Y^v@A|GcMgyX7w+&BuKv=0w6W zWkzPozA)?Fc&tDt%}9L9m?oAG&=+r0wJ{R3Avl^V+e@KJIIeLNK;Ol`m7?#aEe6np zR9XVArN49$oKKR5OIrVNKdIS~uy@t?O%0VQ<@HebC4PWwRI#%pir3CCrwR-ym28B+5|VdlVVXY=L7-I+0-M=;H5Fe=GhA;0*|ThaeTt> zfF0EFtdRW^=cm@^S--xox>%1UPcuvu@I%%zhHBm<%VF)tS*nH67iKNVf$}NSG-K)F zW$c|zVEZ;zaUNAR9=@-N`>l5uzk~Z3Na3=Q?!g}HO^zrU!PR2Hwsu+z?8LS>5u|YkB2H@{v04FM$0%)%5MdPUp8=fON&m$)kd^{ko$h@Q~El4Yeo zTg+qsp_W_0Ck<7RyWlczQ3A(yl2;j!8CtXjpnZXnT`dgVplfj$t(wFWy977HSxffq z#wqp=UEA0>v=j>#<1hTnvdx6DU})9=`_P8@ci_EF+~yqP*V}))Ubb*K{&r2g$<}EP zLUuJ8@&WD>Ox-MeyNZrsSPRePZIK_3cJD4)oI;cryI5dfNR1byn?WsOH&Gpqd~j+A z*ilk8p0zkliUHfQYe3aiF=scQ)tpeJb4yrbNWLlrht^#J^ocUm9%wrRcp`z}6R|ti z*#-|mX|ktw8K2M1M=DNIr^;OU-JRPR*{hZ^F1MPFL7hXj6MJVbN_+N{e)(E*=ycSn zHPOYs2%A|W6qAKX=1(K9vZSAS!971auaO8gT~|G~`4S!6syv=ql+Dj(#qc}2_=WZn z^n9AALwAjhjrm0gDlpYi-O+MFS%KQ#JPl8C=DYeL5DE8&Ui0ZNIC8mwaE<4IKrBO% zNX($hmTX^hG`qZ)e9}SZEo&;;OX^IL1&^j!6KxleZ;vR+ur1} z5u#WSbU8;>w|Dfu_qzYwislk+n6BsM>o8}VJKtbiyaujzpV&8&SF<*HB8@*9Py&uC z!D2FcBT!|R7`<)4<$K?hz+&v}R%#({#wC_-J{Oz$oquLt{GIdQ~} z_TQa%B~C>0y@>M73FC|OX__Opf*GEC4$4vV22_eqsrW4VHvBZ)AXVyBb-9jRUGQb$ z)38f6B*CaKcCa5V!0L{86>U4#r{}gz{P?p-=on3t@bRawN_mm}Vv5-2rrF_b76m8V zDt_L~3Ztk2_B8IDJ0t!y3?|E>f-ER%~Kpjg_J2Lh4@aa#R*g1}V zOwxQ~&$&U6iTbOdSKCde4M^7Sxc&ex9+aD+kH2anXGR!MO-$zQ50L45?aG{Q@NsxQ zWMxd)2)~}iE|I`3k#M@28y$j8c>+E7gdfY~CDhfFFozB-*rB=g-0D}llgnMVXQ)u& z2%vNBwwxPLD7Fwjy(*n|)ghrRqA3;-&_D*oWZb@G8a#O1gP4L`Qt^(PDxp>KJvfNO!v#=~4??FKyC&<{&CqAnZvSMu#i zKVsdY(jGocDwX&+Q(+G!>nFOd|yNx&g&sSxg$ zl$M_H^ZWcHj>?sz?c8(!Yhoe91im#8PZK9r5itp}-jTMH*0IRXsIPzWMMr(2<(Ck!G zmQJ({CJbm*Xegyg2(Jv!<4U}tVH%Kbbie8qF&YE4Ugn0T1uurA0yG_@S&Fx?Ott|U zwuB&GYy}yd+{qiRAF~xu7WaDTr#@j{?JIh45QiK+AIp+yfU}~fv3x~I_mP)_SDX37 z{%1dpxK?-9O(}R zwzI`#%d2Z~X~*Ee?_#_^+7UiGGrjNmj&w$0_o`p#RbtC(dD#wBN^;@iwv=8ZOA9$KSa|uRV=SI*RXkXK;_ee&J{f|NZ-Z za$kIYtSAc64;L{kA`zTVJO;z(ox-EBC^DQEMp$RGsh?}VBG0szHxHEDdi%9*(IR6G z4I_x*WJdWRmV__AsRiw1%nsIhea`vMaUq-w4NqQd&%SFYJj}w46Gujp+@^Py#zkoj zC}(?e=_B$D4Cb!Tz0RAfq4mr+jFDnlDUneyGsS_-nRlo>uIy5E=F`Nr;%;uAWubGj zwLB$UC*%rMAJ8*U*QZj@c+O%+=aN-J^4C7qO z-B$7{u6*v)dx}%uC+RHC<#JX+DEBm8qu}oN2dhRzGikYY5E4%icH6)Cf-3Av(l@&p z<9@Y(D_d2T#P&4UU53+5Kt{onztxtMYre;ZymWqE^SLf%{$!k@T<&@UQF17zSxE1` zTZ0g`DF3Hvd~C|a$3zn~V)~3K86B^@Wz~2)-`1R>e13XpNpa%}Ir1hhl4!R>LKYNr zc#yKg7k!;d6r(2}H`<0%NeqScF}>v&b`{^w=fz`0n(1DO87*Gi`JwO6b>Zm5Y1EYb7| z_w*j$JsuX~$8c~?3WEQ>P2Al|Wq8;Oqz7OwzL>*@?w6(V=ZC|uo zl{kk;#(cHx+l6z8I9dD<4nu<`dWHP&p?SoLIPA*o%N$cV3aoM46s*h>rEAlAYcE{f zF)=-z$VZLuI28*DyC+YYVAK*L^-DV9&6n>qYklKIv(@ zL9jP{LPY7kwd(!+Md@Aq*~|58?&)m}p*|i9s_s~`kH_>%muj4)pgX;Uw$t0yjB?;$ z7wga~h(nh>_Q<_j>1gKT5(dWypWalrUBM^U`~27wsZ5?&HWa<6>J#M3Ob@f(G4fy} ztysrFM+|O!5^!%DUR!1RB4uE9bh}{bqW`?e#H%D(!tB1oY1eBetw*BfZ)G2}r%Hp|W+_c%vKK?5 
zS94(RD^kPA%Umh8)#d-3{mNy&)B~G#I9^gcNa=W5t&z<6J=T5excHs?oO$Elp z`kc0*;KljrCsRL|q^c4>JlxwFz^`53mHd%0>;UOygT_BQRej>_XmzXl={aWCoTgaO zg)o|@kyD!A6d(PwR6UP|x#Qdql)I1glXV@@v!<2Q zTG~U(uN?9#W>;7UaOx|(WRaLA4eC?8EYcH*9$5UrwDm$;u$oy&k(7-0*}6fy<3?ec z*YmuWCo+0&QbQw%J%sW4lq4G`4N**=N7+dj8Ma>s|ZPee^hH@67v} z7sec8oWCD!)*e%_bu}|PBR-l zBB)q?5Xt45%k-7R$P$au!Kn%2W9CCk(!f44Zpfq+Z`kuwi9JGIB`6FMm=DK`N4M!h zL?W`zYfXA+LwR|+dmy$oQ_q1eoV|E7b}4928KXU>t0~Sq{G+mW;aHRjOWu3EgsZZe z6xZ9LR#Mk;Eu!^Z)_~NiP^aU&AJvXoF5f9MwBQ@;44ln4L_%lFSf?1iR;;07CI9;4 z7DdC8=mO{fFT{*K8^R2$wc_X z^dEC@vaEc2$`{Fx_9Nv7kAE(y3Ag;m?>L{2aK1kLG!Y&i*wTqAT$yP)PdU6!(3a@$V4!o_(umC4Gx^9iROa(MjCL zV!`5z@Q}$t%gV=?U>7f2AMfG#E2Edx4sYzo1MwQf8m-!K=pq!!jaC6e3{_|`HIy2s?jfLR5v76^-i*a!ZW)*^1k9*cZv^u5dY7dJ! z#KQ|#-Vdh@V{I}<4kI!{MR{h03$naxL1$>NrT|>*(Q%wl(EKS~jo@F{ye_SG2Rxy_ zz}n`{u~+=cV3iL-65cNeQp=~8(Szw4Vcmb+iM{w;MGaaacPZ2R-G9FP4VmAuI`1up z0x!6W_DL|mA7w5ejo3FR`abfZ;HlEnb0}z&PE?5TjN|$`V_WEF`a$j%&NnfqXr%zj z0+0}0V4cXIf-awt^YdO}UqZ(D&S4=_ ziq@lihF!2{7^fM(u`obguc01=7Q{%My2&QQ{$WXh{$6rf_-@awB~-_9GN)!b74CWn*|$A1m8L|Tj0 z5$LWTofK;X#Z0BoZ%pA7TU4`5hZ+HuIlOBcP7qVxCb}-0Y4Af`8qCKKeNCh@Whr)b zW{3r8TzZIE$YIyjaKW~S^&&&WaHa|W^U1S>x5uQX4B~*9hiuQc9j4+hl1QXJs6jKu z8rgnO(8!6ML5*0Sm}~tziI_7J)F$-OAGoQRYGwN;1wdO)1QoQZSZf4JGsms3LWQNo zzjacr!zLR-9*UlO;X(4^L-C^$WMKaa0}u02Zw$-bf!9}i0wV>vD6M0`V`eeR9C(02 zy7u9}%4^f#sgx2j223~-M8zvvjecxb z(LpUi9qdI&uhF?N>4MP-M*wF5l1aI68$u?XV|W+LV^5Dzf>niJxChQNLld*{FKe%I zs6SQdMh>yj`BFu&-WU+p+(P?752!zk*6BDlEh@a4LIwxOb)70<-^OT3nlh>_%NosSOGr@Z*i2(yG!13X%UlgT#jdmK(C0yYL>-cIBAxNPfY{l*s&vFhRix{tpWfrvr`i zj?jIQ;^!dNFY>7@Ba9;s4J|GkhJp_{n9xLg<^zx2zuRk!Bz$l$v5HG}g-|^dt~K-h zl{y=ZVKZ3q;>x&DM~&szXPOMYhKB2ZH{JXBewW{Ew8iU*zH?kx2W^R8pq%ONM240d zE4^;hgz!eO$ZcA0LbpK{ZKN#Ek%CPpPLWgPD)ihgal0pLMQaYxS4>t7mjXwcc!59U z`)f8cwtnm;MOcSd5FU4}^l@!V4_Y-*EE0s6g49z3yiS9c#na>S0d-x!(ff6pAm8=! zH^odbAD2^v;v*=%XwrHCj=#^9sp&(7t{;9s%UAa;DU!?Xt5rrLiu>+$8Pg!w1p z#oe0l_Fu(sw*(gd(hO5t2>>}oE~+6VbCL-PH73yXab$w~Tz+YBLD*Rii3C^gK!pu;!lvRd$Goujb zibtLoJfkrT4pzC+cBR@IJT_ z^>aB+^hBDwtf7E;C>COQ8n{onYN26zSRAtLw=_=Ba#mvEFEm#){S{znGi7M`@R0&v zY+l8#21^a{5wY$t2TcOi?{eY}WHY=}2e+pXpJ(=)3rQ{mqpZq~KvfPmWQpWc3VaVj z?l2seD=D9I@^Qafa@)MB)HN$-SHS1d9BXb*{sQ5b<3&#e`9rdyFkqMUaR? zUsKp`kQO5$1buYvhRrf3)zEI2?)aX8P|#j?;@ZqdyrUxxtb(s#pI}y}kY^kSWLs`D zGednL0*=R4(#~IukFwpE-+jG&4a~&K7mk0C1#L*KPQ?Mo+2iaQ*2j5X|k&c8nXNd?r z{AeqN7#E6ix3CmzetaBq1JDU87qx}Pogz%e@nAn+42ty`#uWzQu{xM!^Zhy(5+S

      $~A&QYa^ zDr|DwZY`qN#-;S`=#H;|`UjBI9RJiLDiuA29{eJ>#sjyu|n{pMv8d6n2iCrMy}b z=AjwNA&J2#+7i@N$s|YJEe$!qJXO?Yc!{O1of`>;B{lGCQwO#RTq{4buUr4T>ZODi z;4cSLCIn|p*Di9npjN87M=Ic%w=GgZ@P@>1Sk7E$eD1*Uk_o(-TkrTDH|zAEut4Za zeAD9J_^{(gx5ggO8SyaOt85dy(hC3bh*POOR>r4fw*46_KQ$Fh$uIY5LcWjhkCHO} zQeEHal|=XnwS1jTWO1T3FRyq~6E~t*#I{OqA%vfTn&;`|gh?@^TmwldA*pLZ;+Ppb zWtzL6wdDP?M|4`1lCIM6(-u-@h!ksg2LG2Ip_<_KO~o1lHE;twnl&N0rchq}qI25) z9=tklZO#~?JI8^dHFQ!SOo`aMgzEab6RI{0=6b^H8LkkHKdl2o)IImeepESkBpMJU z^@$edX`*3JfLg~t4lf{K{OMp`MWShD$iWXd?W*o7%#LiR?5p+6&{@42>!l9&mhauV z>;J~th2UXxk!WF))t{XT-c@#QyERu{l=~Ux2^4bDf+CkI!m`+Ex)I!`Z3aR#F3#jN zMAZ)=^OvA-rf^sE(Sgr<$iZ-|Ey4PfZ&eL>7uyliLcpAlB!?Ba14eWiRf=~mshLp6 zyOc6x7lW@7xta7`Kg=h@5l9+2-@!h@Lpx@ly~9?>ADJ*cuUuNGgZN;C*r3limmh<) zXtAMsadq}XL(W-Zz5RHOJ2wWk7$C{MU1?bf;@o^{PPq~zJ<5(h%gZnm;_>Q{owPl| zD};=_(9S_<{gT&K)+(ED+wN2auVzPUCJ&e`&FDUH=e^*_cRk|`!2j7Zp(-p)3rU4d zzoami=WT)~M(0m`bEMP7xV4|L3z{xmj2Gni;qm@0H#U4jrpmqbJ%^)i;nY>?hV0Um ze_6m)FcHMwAT#hX2w_6$bHS{UL{-Ys4-Z0+FZ^0nCX!&+!4H!vp*N0no`eH+_7gjJ zW?Nv9Aoi|vEQ>OoU4CuOcli&jo&1{@7~Ef1`frDN(YCuzH%yaZw6dIwSmgscC1ZMj z<#qII&+?Qxh_7d)-4i95Y`H{=p)i1eBjqo(M_tis`ct0o(bu*G-cvZp^21r{x|Hp> z%EM<<(!S;p)p3?ZcP1uCHPLzI&S;G}LO3WjRLR);cMU8AvP%xp9%<%**taXu_K}}P zt=0}FPg?+QfaoMWpIFxN9@M@MVkt4Yp$9&iZ~yF(d(O_)L;Tpou{-rL7P znM@0HzrWe4xo|Sf_rp++q~xzUq7s4ahUe7{EQNTJjK*H9MPi;llh-bjq7eQG^CMIc z)05zeM#58Ug~~_ExVVGwP%k2kLO{b0(%v^{XLwU8?dEY0{2ME-AtR`L=4|{HYUS1K zFp9D_%r|6+JU`T{rvVHO`-~hljic_E)PY88ZQ=`8*?{(;%kceT7p%FEh%L`r@OhZ3 zPN=f23NM1DAUIsZimn`FT>p{YojqCWaVTt9jOBX0tS0;K1LnL;UAFH5*|CW;}6Kt8z=XD;Ywlex$5{w$C4M5hx$>`4~G0H2T<$8Ad4M#hl$ zZgcDifNqUk|BJI8T zgX<8VOJLW}gsb(j-kKlcZ62CV>>QHntKgCPUlpEB9c;;>l29{)i<|1{MP+ z-g^??Z>Vox)N3>S+B{0gHC}Ao%W|P>(J!?DmOvuPtvV zipVC?nDo|I&efQA%BI9|f}Wf&8!JFam_F3UHum*LHzT5#Bq5n!`Ye)+vL|i=Vmz3P zM~pU@?h^4fiL5G_aX_UZxX5U+7un96SB8e=A}u9@`qXBO7tvRa(eoJ}XeB&?eVy!m zFssvmp0sLw+lqAHgnU3YeiRztV!lz`ID{eLX_Ds;>+JVS*la zx-~D_$>WkMyixaYMVH_yiqe#j0e7NFSE(z*P12z>o>(v-B~pzs>N3R>MuhsTeB`;k zLoB55Y}tk2-X~{DF?2bwnKIcqq?M-$J9eZaLZl=xIk*G=oH$z4#M#@Q)J@<6t`;s$ zNZ(c_<$qJ)ii`Wf&gbl%DDCQHaEHWZBg7JLpu}@xVaTPlg@I=d8>MFO* zdKgWtyNq%O0dv?VrGV6XuTQ}ykJV(B!b#>l?0C}E-k0yavAuo%bcNUL^D)tW!BAIO zF7O(ifM=wf$aj1R>XgwE>^q$TXY1{^;?he}f) zIW~zUuB*n$c>XatHkMleZ~CzR(d-&=7a3Mr91iuU3|x)XLlTcC*e#JE7xn5iID(ao z4HBKtE(``nYxb3zFiqfHf!2~;JB5)5mbpI^8cR@_A6ivVNskFk;1PScd9a_6qNF_J z+5E=y^XLF$*{8smbr^=-9l<@+Etj)c5G6A_E?2EhA+WDIGI<_4KBYG)ao(I8+f5uJ ziPz%SeG^Dq{em-KvUH6}&%Pp^%))BodgNDR4vu>KLtz;MV~v)AY_-AFg1N$DH2efg zT^BN4$sWr<>JMbJJWTnEo!}_+Va4@52M=Ou+L}UuW?cZ)g~b;HF`DI9lO#~xeSNQXV~{hzJY9Hcf(+J^ zmaV8G!~;8n2-xRe$z)gJ@jaUsmi*#xJ>cq@B7ZTjGJ2~{rMMM32(A2MqyYYkhC*_W zw0;xXEtYGAZ2|<|L~3pzBI%H_e(sO-vZ$-DS=KFq>-Y~Fj==cO)yGJ*t^HgsD0@Ti}Xk|H$A&Q0+AIXY54VV^y(dmD0AR1+-yF{*Kc`K)HDoZ63@^(NX8c8yGY*_Ga7ey zEexKE(04equdd0orA^npY#bc??nS7PDE0LWgx*R*!t&bv^&1vgqJ>{o#z}Ry?Jpk9 z%_(O67b>STAyqv-IZ&^xcUAGzC#ceZ)f~&C&L|Npaof<@xcTwoJt3?~kItp`V~md$ zvN|DT{VGB%d%Sg_8=dLeZ_w@{ouNhDB+so8(2B7#er0W=R@jqCpVLS(bRjnS43C};>Us0|IFm{k%GtL@3ng1mcGbBU>e)wv&X zk?mAoqF6m!u8mTLKqK+x1}J=ypou4C^iX-;%tvW zG5QSmliI94;>%rVLyheAtU0~&DghcM5b4LeHLdH{iJhFnjPCho?Rp9;0R8?sK-9n%+vAhMB0<*5vl$j4^)8XBB!Q!_?%J|cwi_oO z_pO$+L^7FzE1Vo;#UI#r{e4P}C8l&J)1qyopCG2Ge7S8_C98JWz48Wb9}3ex^9UDo zz3doaMj}t)F@}-5H%QV6eM4B(y4m2CqihmZ%lCFP zwfzW9*36!n=gzorb1cYZdxGGs#z>E@0@iLwP`MRci0JS&+*@AH_44km-$Dvs zr=AzJ+PvO`DgF;nPYBynjMgU%FB73d(TXca5JDpmmo&3aDGT&rpEVC>eyvyj1p@z} zbc99;BQ~!OnzC0?q9w;r?gNpzzYXp|+ineKwSg)^NEfCsjASdOr68}9cNT#^h65Qx zVA~?UQJN)|bAMiR^|TerFw?h6G8uB3VIRGnyPD#a3}hJ)+o+o`6Ro3_7#>}t(m<8L 
zDs}CpfC`=YDWyOvdBkt0xV{Y(HvVIre}sS_LOhQ*XVkaaSvTuzw-es8SZjEOq2xa! zgg7`8MI)K`d$T7lI;Pr%y%?F9yh{U@95Vk%i#$Q#yW6w1qk2FlKUxyTEyNxNi=HVAbL=xAk5e5r+AtI8 z9?W(Ey-SFW+z%_-!Fd$9TztJxhRfF@J|%xPQ3ftf4fixK8*+JQ7Jy^HUl+W7zdfpg4@#L|eq4PmB6EwDfa!sFsxKTPM&I7LedXrb~MX z0kM$e5HyGb7W$3!Z^q}pKD;48yO%Q#v!~~=^e^utO|C?Bn5g{PkV8K+l^{^9LW?YG zXc#tnA$F77XSmvGrC56*1l$LGtG=|(BAuZC18T8%ge%Q^MtP(LT(#>H9%odX#o}x^ zaNsEUm&T+{`(aKu{DcI!J{k9cY)0U>xb*yoHkf0bM}p)&w1pK=XhzR)$2V%;6RFQeb8=QW9sPZjXVeh^n>EGN<+toUd}pvkp+tO^YG? zq{+B+N}IWp@c8uc2=r#omkEZfQuToCPy65Ba3O)rlG%Ov_W=ECoKA5e9N~?NE8Q|K zaGPq9e?);W>@HT;)F>|e_!`qRhZyiDOgNqG()}T2UUHI?vx3RPeqvd0aE0yj;Cl1x zRv7WU-R61YE>I#5sW;o>93tEaoO(QY92mNC&L9l^j3{Fd@5-&XEr&CG-tWpZ$fw>u zwUkzSu_)k1Xp=ImkQhg_+yR99b&(*ypI8Dlf)j49qJIvZv04}+n`#GJLHnH2f?2BB z#)Mw@9PlkqKf4aFD~DuQw|qr`;4X^S&#f-b#!DFzb$kIpqV{QgVx7$ZL5aZNq@`} zD>xzUvA}?YH{)*2n!&dofaF&PUiHhZxyP*({_7YHl*IcjZVkTDp^J2TX?P?_rE0p7X=n5-tWX-Q6wimm7WwwwK9 zZ(C-Cu!|ib?60Y5&m^#0U7YY1cbJ|)ZI`Mv4FQ&M50OOvo6yER4S{C*e~i?BEpuU8 zn5KqvN88_wfBtbQ{p0)paa1j$vXC{nMa%d7|6J*g21-u$6-{{D;eT5v|7%HC z{ok*f`a4=Vc_i%9|Ni6uIamJmuR{bF9Ms>qQ-uq?|JUoR5B^>^r4$cjJpcD`{I8$; zuMbx!V7{FqSj`^)V;leH1^@M*f!_3HJXLjQpP-J1;nxPEG-uX4szyVFLpL zdwctFN^=Vf3oENuhQ=QifI;gP(A@5Q3Rr8lZuqTceeK+(1$a-i06u05;J>ue7s3AL zK>z2>HxUAlXf<|0kgnb4u5wT=twsOo{p~phhpWpX<4nPz-nbVqMTSNuYU+#_0pv@r zfJQWlTB#dohHqUO`9oo3XlSUT16W8X;jI5r4+fU<|6z>(HHVFn$&ERNiJ-c6FqcZ? z(oDoco9w=-s5p&-o$=hM+-Z8zIT}voHQxK$9f^D{lm3o2|rE6KMYtOZ>+$ zx&Ld9LH+P0`y%56`%_VWp?_^Lg5Fx=e;(F+0U(Aj_vVH_TY(ya(NfnePG4@8TRO|c zxGqUETmh{0GOTi+ruZ&cp2!VTu?<{Nc{9E_1L6rlv%L&J^8%oWxZO{e6B8|Erc*H$xN0C4D7fZ{Y?uHw{4YXNk*mXwrG5CSe)0$#T>^T03x9L9qY@;HE( z^Z_^ytoz(3O!V{tn$<6WJ_2a#&tWkUjjROh8j51{ixZsyQHqklojL%)isd~n_lUrA zs67X~Xf$m{1pxsMV3I*TO>(tl=p7S@;s@ZVdTjvY5l(jPqUrzi5PDD!wg1lJ?^8>L z_3m$>cSkPbL>%t=X=ge)`@gO7{&K|t!5JVmkRbGWI7dX)(a>-MVC8)XHZ_2O;8}EZ zG{B%FGG+Ra%Lvb2v90jwk2q>3(7*+p4!hKXR}p}JMOymv^E2N?7y6=-+Fx%Hz_R2c z7+vx4^*~4oq0cb{917uEI6xSD`lTfF1Hc`cZwhGS2CQQ1JNDvQWfz3S2LThgGxWC@ zcCPant+g@JI90?5)rsORAl9pvW!VH8qw{Zx-!H=e6PN*z7%NOduxbo*@nSGj@Be9< z9M1gx(FIByxh&lPd1UajSR*_tv8;M*P=y?Fl z#^77moX45{uA;WL?WuC?G#&v+ctA*SZ`bXd+|}Z%@jkS2OuaXCjw>2dD!bKGaESR? 
z+84i@tkWy=*FP724*<(Zl%!9nR12bY5|ClBeWj3Li6&^sN36WQzTTwqe_7o?n|59w z5Y`m@c)b}Zrk9C`So%>|2!@5Wc&E4VjJ03{V8QrlF({~V04*VjBjBTBysOoHodSrS zzsX0^@p1f*%PpUr44C9n{I=fn`(11Nex1c5fcD26@C&Q7xjSB{j`{;MI{%GFGBPs6 zx5-C)%tQHioYS{;+kl6k8UmnW9>tJ&Ao2$K`b5=xSqcw|;si|@?tx$_mYOOdx_;;+ zzDER_F!+MBze*!LJ(*ER=%aJvt+rMh5tAEW(1nIVh4*dshwZe;;4s}q8NC+ni*Ba< zeBJviXfCAKV=??%O@@htApik1T3^3i_~#|8ICf*eQL*F>u=8g-MiK!(4mi!#xYGXA zJ3pm{J}WAxP3eX>WaxZ+?b(D4k;rKdxB(W$Hc?c#Hp3x z1!g0y4y>y4zYmG}3o_#k;g|FFFJ_bDV`E$?Y0V7!`s!;x^jcFbq%AaXn2RO|1*3V1 z3y0Xvvp~8`;81^8ve=Q}I%NkNnsvZnprU6e+Pam#zh?yq0j`5An(!9@B7M(Cfcpj;XS~F1v)Zt>-ZK6^V%7{0PxLpoz$UF zM~b#gh^yFUotDTQdt-IalsKJ8K_I#txNH1L4?N&{L8w9QAO9G<= z4}yWLd{tk4$=}w*7fBV^govZymeN>*I}{=hEKqTYqjfj)t;yXGbe(@p1~(0q-t32m!~X=f4y+ zhZU7`j>{v4KU0@4DQcQ1E@+yOenk@-M*<^WQvYIaJu*GQ!tu{NYIzAb)t!y_c2*5G zK*6EzPgd*p?Bu^LHwb&#s3b%`n)%v~X0zLCNX6s`y`y_9+ z`{M(L{tulsBm9?@CRi*qw%h%zFZXht!kne$7NFTcd>ujz@AxtO@rrDi`WG_TC{r5V z>@&iVz(CHv6NGCv9CMclD4%nYTLr!uQMgjzlaB#LGa^{Wy;kgs;%FQ0_EbQmfQ5@P)<&i{1>aO%nE zhH)G31M-DI4Lbe08e7IAvcK^xKf;7w0|u>LaU8oyUT%-XUXfUQfg_RyS7$RzWF14x zz~BvNdHi18!W`8DoTxa|Q+<3_BOZ@HAUQyUHMJ;hnLz+BEKI&Axw~_P0m#a@gNFM5 zUdsR9z={wL^d|gtrtFInj**_89uSb2%TRFQZlp>}D)~Q+8+D)5cKY3{ZfstT#9KXf z0>YyG{r%Dx=@tfr?<2yWEx&XDZ{j*(nS(aksl0-^*`S*j0Qi9L>sr7IDZQH5<#$rY z0aU4`!TB}>qJ(Pyw`o9}SLg7Hf@~*hK^diLuUfD59H2f%9^9Fvo=Jj79Wdy(IRYMO z(sTfDZfVHV#h<}Cg>H7IX{~Z0sa-BFEqi#&S_N%VP|90=(aGi)OrQtrg3Xq7^z)UA^;-= z5C)5X#|hkPXCrRCcb$+ir5ZhrY2j4IIIdNi7p!`~*%o7)<=8IEhnfKzkSq&(^YcyUa_$khyfz+bDt(p1+-ub}MbHlLrpN*m1=$6I>l^ii?Cz?=#~xIaF*j}G6gZ_xQI zJ|N?JT8o%;gviGoTWz~k$fx6lqT$Oq^_uIO`It#N11L7cN?ZKB7_iYpdrXM)nPHh8 z;(S9k%D6WXGL#UyRqf!$05LY3gJ|6mL6a=@nKM^hcn<62UDBjv8a{0LPWzptIi@%{ z=K}LdH0YXF&+lEQC*xS8uCh*}IYD9Qj)SsH$8@?mSF0-|mI<1_P*Ro_$@9`ORFZQl z_A4})ijvSeE9%rz46np&$47s9!uWu z6NOzF8*Bb78EG}!Y=*vy$lW=qIfHAaWR$cu-B9g}_!Q&5TJ0w~6!dFJSy|Dj@l=Km zS_+9CS~t#w94&NudIPb!e0#X<9+_vohFP>SK^;C?vJ!`PJVj(~%E{9q|J#5m)Tj6K z?~rq=nbk^>&z22(P5)fMgoFY1{6BfUEGFKOB0-9Yc8&3mTCjo(Zk_>V0!2mH2+II| z0HGzRN&~>e>tfE>npFaAHy=d{mrfjtBfhx$0uLX-;v&LU-o+DS;>pq$Q~y{9G)uXH zK@gx8n2Q8zwLN=lDRKMsb`#X^yZ$ zgS0^$z$-4yuk3!$gI`4Gd{`*(&+vT8qOEs(1`1Mi$t4fZmV|ix?S)v0u5%2J}Blgs2bx21S~3<(xUw z8|cNQK49`>N{o*rMOsZS#R^1{`7TV=URzUBSJ#gq(Ga4aYW|ORY+)&>e7CZiSVOVK z4V2{&7?u)pe6dRZ+ahAt27F?9$+RMWfOQpxp3qq@u4Vyq8U@3h+nWI12hE~$Os`g& z?RqT(K!ecUg|`WmdW?85M0-#fkm#_<<%5Pgxlzkzr`I9zkR^h~<<-qngPn(sxQF_r zV5$-+@bF@iCkj`p=D=RpDI-bOxnNatU%$FS@yU??^#TBd)m(3s@_GqLIpm^KHj}v% z+vzA<3xp$%ua=q_~AO1WPoz$vr$<%Mce6MFXv<%RFo1n^Y7=q71Wf_AIdmF!%R(C8`n2<{iZ< z!NoOol}|(d`~1z;1||-RlTxP-8_)J*YOx~ za2J=Lz=t3P1@ues5|SB4$+98M(GfFOH8e(;Dq4jPVs6#b`uonZ>8#|QIyM))gSrwA zN0cBb1xkX8zhM=I;YWFXIiLR^*H?*(=Bc@KD66*ojJ7#6RU{go{%awf zx7`w&md+Xtf;BM`(A>b}iQ^gPF#`!-R~OLJ{`5V1P5!Ihq!36*`MHJ~$jQA%kAH_* zK*Em&Y#;S^fKqNow-!pplVo#!R1`<;k|I*Y9)2Qh5!H5dc@s2JcCaf_ z!FZIEBHG}SWmmACvxSMHFvC=o%L(S$!@Zs3V(k#7~q z=q04Plo<;%6z*{jk<6K&o`SsZ+JcaP;`igtv|y*{ARObWg>^rY00zlSX9L=TRuF~i zSZagnr>zWqCeDV2?(=gV`l$2VxtCn6pHF%15s`fO2EGu^WwMc5c~waeBZ{=T!u%wA zpWR2dWK}KUNk_jEW-SUQQ@($x0=2N8J+O0kr;e_vL*}ZD7WCoKWu+dON{W;&Az*;r z!Q+%;kWxkbs!52G*dNh}@pbYLGl69|WhVfQ{LgerM^YjN38`nMc$E5=Ro*B8B&*SJ ze{l%-h<+#}3}Y2$>}t6re#JyzwmO(c?yvc{B|a*9bL!uRMdtZ1Ss4QZz@s44w`sLI z7@i$vn{2wpRF#rP;EhL5HZZp$69SjTYu0oi2I#cV3-wFQCDW+zC<1a`PIe85R7hxP zxgE2WnOF1`%K}Kk5+zLa{-8C`5kwdjCsPbIYlu}zr2iu`iSt+KcN1Rp&X9D^nlM`EVVqohQL!P`8oZ3lO*W7zFK+ysxHfR2!YO*c`^6?RmL!2 znj$%Xf;#0y7q^oJV{`;Ym3Ty@Q{@5|#~3PDJI`-y1D{#O*0n&jT_Blt za!m=5Bj3nJo4_{DB~`>@hE_U^5a-O+XZGUWzPJ3w-tXvvogwi_WGUR2lt7LWJoE&= zGBCfolnFLD`J@_yJZTUnI*|IQgrHSUT`d^R;-wlE7CVmxCM8oY*!0>ty*nbn+%aywXZt6d^n$`Rk?|q$a&R+IB#MjXTFYh*l1Z 
zvV~_^aFgu(;=E@nC{D6Zd@!U0hu2{2|544t^J|IZS&@sMn&<<|z=PzoTK~(`+ z2>rq+t29mVLPOY~L|lC2Bvw_0vd?-H^jwlrr6IpPRUQIVkbY5hk}dNjWhWq!SJeNi zXA}g3NK}O{%RL?KH$U6;bJK1UmhZuwg+M|AgO1uJ(OFX(bK2)2r<|X|g0!&M1H$kr z0Ot*8!*jE;Ce4|vU55j`0i=g5ZBfZTv&}OJs4Q_Ku|D?`?*G~xc&Wi+A00Kk9}B`B zy{`t-XtGEqec$0<609$y)N$-q+A2D1s)|4e*hw%#h(-%6$)aHBosy&}81?hgy;SGq zmeP0+So2mbdZ?=k3YwBB%1BDr2s?A=sWin!#R^A?5&OPM?>#;#q>rw2tRUilqA2uW z=Q|gxq*;qFAEHY=UTG6C4=*Hn_$AH}DN4-zJuxh2H_TylY(nsA0KL)KIoIpD;`+KF z7>s&pZYqWOzAu(bysAa>OGOp7@3owr4J@QZ2rhXimd{R)h>?JC&u8#XzU&;aJofP= zyi|=(Wg;9Z{$W|BXq@7+shJ%LG7WRyW^h!Z){?e#wp?0EeE4WA&&Nv%x*BTam6+VL zV_B7Zr56?Qz6JtFpKU4v@zKdUI0@+jd8WvY)rdnyGOufu@{Ap2=P*S~S{(;WU{SJTgVlWvw10%KOC#n$ z&2C>)Ey#dHW;=*Tjo#y>8Q|v(`WNZ?-=_`rw)Co&Qv7*rvyCJ$$^p3=?~IEp$=+^956QSfYoY zjPJaief(d=kHapQnfb3VuW^E}D03$B?=SFto8_Pd*rHUi*)KrS@1V^rj_>pm*8lO1 zIF-Qwj+5nRJ_0Dli>aufLiDcjxcFZN5fgaMt1S6`KwrYi2LHu@0YJ`=o4ohzthARS zC4VmV)tu-+g4c~Sbh@u$xJ1NT?i$7Prh|K4s=RAkfzkp3SV zlhyog75w-&H5koO)KR+6j%}o>@#AjY`(q_)ls-wKTEBh!di-a0TH3=Ud0hFA*oJ(c zEwN;jt1?6=>pvaMpFXzJ)Bt7Z1PnA1!R;Qizm96jxq6b2ioL-kkeYA<5)Vvz-O(ep zlm<*M+ZO4)-%jS-Y=6F{)a@Q%zdlX-k4=xK>0i>`xdC{%dw|q5G0+T@=oCbGA_z&T z&HyBJAb@{i*7ccr3_=f)HktflDyh6QbGg=l_;EKEmcrSPK4kKvlVIuRY7Tqao@MX` z-lxY7*#5Cf+pss^O7_eSsP>EuE94`mhf!!4Gjs`;ak@CqU3)M`-@@ANtprHhTy2IE zq`Qgn_qPZ$q+=D=LHb4`!|&g{9qt#x5ZX_QQf|2HO7p?JG5)xW1(!2}#n_6B|pJgv^x+sLoCRc_%*cV^x? z3}y~zDrn7Cb-y&CKL~{{gQWINXeeaF4?LO7FdqcnxH$`=!Dbl9-9{TLKGq!mqwUy8>0u8l874h?I|E@RM zs{f82O(Gaqr1iabIfArQ6rRbCS^Lp&rbKC38QV@fwpm2ckL5Prc;f2ZIXY0GDiE0p zI!hBCn^un|iR;{7oyDWN-i5%FLIz(ejGbBvUz3t%r25pj2Xrfd8j1&!Oj!qOA_I@< zBfd(`br(~-9~n`S5FM)4U%%c7KVVFux9}k-`!wG+FF#*g(N966%~g`+VlvlrqYv!rqdg~vxb z2|Imx9jZ;I-beZHH^<)2K!Xd?tw;8ZDH82R#dw2* z`tEr!oqMB_GxOH+A1BM_zbA{Lw@r=d|6AYFhG;S&o?lw>=l14`mi+()C!nOjbmo^E z5LE%ZM7(!rrTlOEiSN$;c&{ykqolv~MLn#!AhWR$E@&(`#+IDOrpV3t3@{jVS9gOB z-q|kJ=5X2TDk{3Sp8pB|0l4dGm-$`h_ns`)`aW)j&DOiwE&w#{kEb4?f^sC@Ly|@B zit_3%fVcvzGp@*y$x&QB2L>*F;BUkpp|pUn=6b(-Ipij#$U6>(D!&iZJPVd)rb1x z^-oUg`ZK-uWM5$+;Ly#a*K!N=gNftRsJFD#o`;Mw+X2k3(>5nC@xY%k+8|aV@x3a( zZ*vxMgJ_(6kh;rTW{=OA4N-YHqzuL5?Dh)3zpNYS>uJS(-3`sug$psRuCXNT?7 zU#wZxrRSg<4429(rqUiUmr-o%E^H|%tR)KTnycX)sIDmQusqf`AvF*ouHA4x|HUp1 zRt{;Q7w30)R)$^Yf+P1F$*8g6?fc$ZHdPQ`<97GJtnK@Te`8E4Q(0N_GQ6gt@A+~AN*+HR0(F=> zy(AkPCWbJ{^ygFV95;o7uIDcM)5Ow6W=W$;KnbkVh(u%_@=Mvwab8^8lT;Ie!}VKfXb!Ou_|y@kadg5ut?Q3>j7O_c zUR<0DpI02uhI_j{?Yc`z*)vj@LA0n6;qQ_znYe*?P5gzNrlMD%D2g;)|I7=Y(U6$u#1Hq z-&4!lmM@oxa*z`6Z@~wslxrO#17sdj1bzdo!vueHHRb-9Y0L*FHzrCTeB8}A`hQoZ zB7}94m~?ROF=y42p0f6Gv#pj1w)N$>VjK9@>vG}d>c-Q(mCysvcCOcR#or{&Uw4>K zKRruq7wEalQAE+$&<3Pw?r_NAd8Yq1Npy6krR!FM3`-lz6TYbIeGPOXK zU89;_m6S9g7E z|2_L$;Ndldvw@^gvsV9UaJ>8S_SbZ_?>!Ggs@rizQt3}2La6(JB*ju!n$((<6lO&~ zRzCWZU34YcWbxnfd>IL>9-AcP9RMy0E?S&2aWK8Km8QJX_HoZUih>j2lH+xMIY5cY zP)M&`G!5DuzOkaet@X z_MJ|EcVd!d#Se{4M9YA8J}`~Eznp3eI8;{ru$qcwCT6*e%^1JzOlD};NsA+@x4v2Q z_wQ<{t=&q9Tk>`Icn61b@n<)SNL^+WI_NU_L1fhCAm$bmb=AT7Bf?ivaSG@JdguZF z*tHY98&LaOVrHM_|5axs2e#nCKCzmiF&u+@fgL&HvJ0pdmx#1XFvjupWzgXz{yW}PEHbW+7}UxWnda9 zZ{Sp)R`h0zT}ur!nxBq)6M{^%t#7WxnXe6Tkj>otPvYg7)Sf&t4my!nYQ5jU&W2VC zFT*ifj1YU@OB{_p8$B1)gaz$yQr?_MeSHjMWJ$u9ed|%^*)Zl1{8ZS^Of4MakKQt9 z@L{OmJY*fs`q}6vbW1)A)wYtB!NYr9p9YK?4&pcKUHV>4ne<4HG^gW85N%sRrTd&y zZ)58#ZS&%rtu+~8)_wVx>&bfd4TJC#su`kErvfQsjrluov26d8-XYw;2fZOd2iKb> zT?WM`;tzr*z0w!(7GczlvOLRf+sWMeUBM+7DyTMdv`#;)y19 zf+%6(;Cp`jVSPT2!aQ4}*H`Eu^7Czr^2z`)lyIMjRros(NC}f~p87NKl)f0#Ql0hvsH36sM z1R`!nV-e73E9`4FVZCd<79_<@w44Gq7{Joge!` zHyW2)3kmSM+Ps2ht)bZIb{gHyJLkT|Pg2?LcpOVkyQ=Qm{Dqy8>^ce~7Ct-edwx^y zycpKp-2CzW@YAKC(U@Nfff%VSP`)k~Smqx-uVhC4J8scJ_vt$}CnttE&v}y;doY`) 
z5m{DlZg&M?1aoCd#AQ;S39FG_?oXCbI2jBZr0eCuw#_rwK_%?xFOQu?$m!X4&Xa~- zJ^C)org4A9xMJ*ESiYP0UE(9*zaOEBGB@z-Qh-P;(lZ~Fp-bxfFJCIf9!eni947d7 ztZ|zR#DvAQ-ES83%fTg5%gN(yG#)0fBfSqMkpE<{1nJXgMx18csBbTQU-x9=ZsGp) z@e6fe`NHikb+B?N^m}EXWS7T2}ph*Ppnh-!Y_`5Z{fivcD@LrT-f4*X}KExT-*GeRkiTZoXblg zSU2h#T~>C&*FCvQ3izaU1rd?hL~Nt|50L!~I9!C3W8;UUUklkJTd8VI&3;W?D=Cy3&N|lF&nCOW6cC#s|B#f>cWxQ0NA(bfT=r2~k_~t{x2&VmJH0blD>b)OGx77j| zKa+s+{fIK9q~zkMxH(5aKh{4ebyMU>e*#x~tcp+nJ{~C`0wa8(^k$gS_R%yymzHDK z*pb&J-;s5RSBp+&<=f8{Ud`v;nl4U|M-lLuVehFH)VkfOwNzU z#MBhd7&&OY&3Uz5UF3t~kdKMRm~grp5!JvF_@HzugE1grZ6vhS14q;T#@bW|Hr8CT z8|rqDS`_w&P$Q;M=RV(Z=&cZ_C9*A4uQVtH)c63=yZBMna=p?)dBQJoyJD?zFXzd0 znkI@?kFL`MM`BvcxPYoj&60KGda(bi>-M0p+)Tr0z9LEIuVNbvT?8p;8F!0$w10DWK*}Ym%vqIGRDDx~IWRW< zx~K|W_c|7d^6Z>h>&d;_dT3Lwrk<)+78Hr%o@`_i=mqBx?oijLG-~HSU)?{ht&{@J z!VrI8KltU_82zA@*Zo75&qd4Tx*I9t_uGSU!|zk*PTBlZ=sXk#Fv-SsLd3g{9v_oX zzh6O|v5T!ijV?kuEB74Lc*7ZbH`yR!8o*;$6V7y!Ri7 z8Fzp3u6{r=ja!(=fz3ixz#rxD6n<>j(dkY>(c&SMM5w#zx$sKerguH}F0RkMg!b0= z#yI;HtU%BmNk;n7e}=sOk!xD95zDfl2#Pj5Lq`Loc(^9Jrk)!cqHk??|b zLP|Uw%T5{Su_)A4Z4WUNEJBorL6KX**-kCOh5P4#>H2H8^WxhR z0HFY!HI)jDdq7N*`)!VqG|ROox$~|7utI6bSXB5gixuF*HUFW8*-vJw|BEflM=A{R zyK5RTAOwB~=+2frwK%O+i}L@~k%yn78E6O{&>etmmo7Ov9HijH&P>CPS1cqT=M+0@ zjY)?t;A4aommW`9qXUai8XA*thKeru8v}iGg`AD0^z7I)3l#YYkjU4*Kevs4p6dOm z`1`nMHy^V)cnn|&5#fYb37K_$>2BEHuag*9VJ;)nRawN>7=yjNxfK@Zsir_EQ>pAz zzg*glK3^$_ps@cA|1$oW-T=aE&&X7k!+9n_wGeRhcmg;utL+6lPZ(JtUECMVhF@|B z$cXEi=XyO9_5rew1hVy#o{>Zo30r4*WVJ_@T|=rqTdgFhQ2rJn%P=lXKTAjoXA(q7 zWE#GF-`00glAQCb6c=;)#VQSO3U0G6Ra5wDjZPq>_98bZ1ZVgAa6S5SRYIHE@1!LwQXh*MHM^=A zF4a8zK#Xpm+P9a$wB&jYw0mYmcG@lt+D;3a7Hwg0@fZu^r#a(}CzbGiC6|)2Eva3pZe%6OtC*c#nzELd&p_<^4r(#|d zdtj}}llpDHsTlhxIC5m$fkVuQMD{Za)5Io1u(20(Q1&ZdMlEQbK!zrMvSpcI;lx@y z{t+Y78SQTEYW=kow$Ih$s%@LsMSF`+%Y)8qNov$KFx$EV($q?eCQmNT{yDZU-; zYJJBcmbOXo1L^UeD3o<%Xh4{5k3!2Z9tiLOPPeDpeN*GRp>OfX27+^P8rrjjtm*fb z=n3q+%XCJ^=anrx^8<}JX>NpsTm+U_xiL+lP>|!qjg1kx$unqGB1=9su`PHEpQ7F25}V4CtDgCC7`ZYCeU;Q^yw{KUO%+RR zsnc7y?GcKRk4-hU7R^uXUFIJ%3U94SC{7hl#F^C~L#cy8w z8N!wal>s1rYf3pC;UNl=O?UPN%yzVhr8Iu_gUlmxcJW*Kid7V^zF9us)?vms~D_NWK!} zo9S?hB`cZsuG}sd!O*0+n1rBoD_QN|qo^^tr|Wt>NAie# z(XlV7nq;Y~wfYzWBLIGk4bjITA%7It_{upib7hYkOe*gmz5 zWal;YWx|4S>EEpWY(_#n2!91|fQ-x=*d2~WH7lv)KOM?^haC_dfg~4*s9)G9pmov% zh-EM<)^RQ&rUW<^5IVz zJ{zQArVN9mS&ys=K%w78fGKCneA9$TT8ALfI5Rdl7@)ql))Ilq z*5mgE_x$i~H^YZ&m70Lv{?=yz>u;}bBJPQwYVQCfBbdpVsfk`I=0$>bVo>C&0o8P5 zx=_gM&;$yn5*88ISgm`ZZDg9(f>(1p&PY;}6qe{VrA|*u+#`LiVfLKK2~rs4K}`F! 
zHj#Y}U1=){U+AZVW0y0p(prxy8urDiYsZp=3)O_Ao}qdqlc}C~S=XHw)J=gdE!IDP zq@hU6(=`IG_f8mT)l0XtAtD>dhq*hg>(AuEbkLafZDtw?il5=xhYBbkFUE$M;@)~s zM(nPgxIhInj@5jF$O$otqYUv*Tek#KbjfH!j8FGw6f)t(Lk|796M|B{j3LTVYt>5* z!*q^+0i&QPa6^_Xal%E4hkz4IikqGPLv*38{oJCbicnpzij{Ynby&Swl z`U{G2I4*ICyc_wv6lz>?`s*~FR)rKXkf^|O@iDf+4TO!>XaPm0NL@jp z54q+HZN~Bs)th3bF>2G#g##_MMxx4q;|{(1-tT#3P(;D;^C2xo*O#g?Hg76``Q z8weA!`x+W#+xdjg;T7R))I)s7V3Es1biwg_JoOe@9P1#AvZxC+ZVoU2UB#`+mYoTH2pR0bZcB zFf;Mk%FZ?R?02o&FTgz956pHPA1Tg2SF@3hl<{^aZcKehf#`m*o&J4}bc6F3@-@1V zB6@jc&UR`+flpf94XvX}N4;kUkke2~pZndR_f zGCQShX6Ig5jIooay~+Xc%foqA`g$H;2En3j#Zy%>@AK{%+B^3?A>wLtjpx%DyW>}{ zS9{m-2-@H9a0%}KL-TEqbn<+r03zGBl-dl`QQNuRrG#p}$6_L2P@(y%-vO-|;akr| zz*V&&5iKK~2sonNyvg?c=b^Fo*mAC3rO1wu56lhQt^EuEM5e8c3>w_38D#kM9(FTA zscqRtlar|=&tJro>zTF@LUoP2>CCjI8h*;9GG@hGl?#C?9j65uq!C^U5#Mr+^iMow zo1rpF%5A{zO4h^pPS3l6|4?PEa)UqMA+RW-%la zP^vF3gvaI6)q9TnhteNNRV;mb&vuY(e$C4+9oy$u9O6o$(vWUZBJ}^P-Ne+#SRcD@ z78u$mRnl&?*G2Wz+hQ;89}Yq|ZR4m_1YMo-n1rC9JXfimA%&j3bhj~8&6y7^?dvAB zQ|%^LZxFrJ#pZ4?vcvvFjPGbE;Reb@Zt3Z0DU^uDaP*aRQb#b;Tt>F6t>isb0!V(w z8TN@Fb4~k4&@QEYa6b%#f*aM4tp1Z|ev|;cG<7E9rct5r@2pr40FB|k5$nioKLO(P zi`qV~i-2C<=KI^`)9@3@mv}rOUqkS~I58vpb2p0#{Y4sWQXH41w%J1Mp%r=zt?H8U zvwjQ`QZnda@^=)XK&Ul-LTY}o9b|ah*JD}US3C6H8lXo@-$0gkckEC2?f(KaZ)tm8 z_y2r@o($S4<55cKIfLYygQ-2yeaIb z6|9?lA9-);2*#&@POKf6!JjuHWf;0n*QutN({-ws^JOah?nyRFSLwQ&3xEMEl@Za6 zpW)z+WWT_qI{UI|rk9J2P9-Xd3v{@ef}-jRfDr>N4W1W{KSu2dCAAGsIg@-^0Pb%L zwT5Dpq!DUIgTyP6v*HRAZ9i^DgWhkEpV_f$Kl43Lm8&L1%608G`U#)AuJ%kI^4ic2 zj7;a|Smx#yHs7`#6eJz$-tA^mdOecK@cQQF=GRBDn1?GQWfss3Ke!B!cRm9*q0tx& z-Aqg`tMU|P8cm9`mwK-(LO82wDZIco40$~}hg4(pcuOiPnwY=q+xRhOZGipp^Tq_s z+w!Ch*Y+cdnh>-Yztm(ECo)Nt7XlQc^e^$v^52_KD= zGGlL{)^1@>?}=)9;M3A)-5FAVZkAzE#Xnu!(w(tr#BB*^B2*KUmwm4-rb&kI3HJ~& zjE>rH^DY7%Sjw&_opLg({5cqeS6V6zTryU`tf?rW8o! 
zHH#M}Bk=$MnB)OvjtrNDZhM6-xf-5~C(ml9V5ztk{Y1F|;*J0Vb@}1UHM)_@PcFEh zt<}EL6SS>@EKIu$WGjp!$EU3yOs0RE#QohxR0s5Nt<}eg&XKU$XOXh8)oCfzEmQro ziP8@-8bnv>ztz-pXi)Q>+s!yo;eKJ=#~us{O3ExRmGw`C)UOb?cvAi*`fK!{Z`X$} zi{3wFFE{MKkV5|u^J#6L`nUL{V#t>9E|K_n;+E}^YaKzUby0=4#Yr>!Ea*ORKTfTG z+w9Qa9bg1hbAwN-WIqq#F6ROkKgYAKET{ z?@o0G0Nf(`Mcm7@-@iRJ0Uf}ga**$1>FLQ2w?EeJ8mhb+JRP6FKXP5QasJ-y{m4D0 zKtcM|#}h!H>ClbY>h|{!vg{91arTW&GaW{RauT+*rU=5X>>?ViXnY>yLL?<5CU{P% z!jM1Ft1GkHxCp*@Yu{|P9Tm&QoPn3Q^2OxZM&UY4|E`UMQtnfY<1@H&iRbA16esK| zCx0PW^o1)&7UM{amNMhJ8o>bSo{^Ww>(uL+5Qce-f5uN4E;~K_ucNy-Qx#?>Pa!cE z@_UTHI`sB<&GwPP5|y%-SxEkP^j-iCMWVpS7>zx3euuv!w48)+VhS`Un*MNhp{GQK zv6&zXiQ^-C*OV9J=#0Wv4QgiXp+n>yJG=&EzaIMmy`P+kegBm}17_QX%RWYQk$YhOssv@D%Xc9xbZOj3m(n<0w`iCaR= zaCurLp=CQ=^8QKCb}9RI62nlR-O{5STXehobn7uy4G||VnKpWbE(zs%HOOdpk+?mi zc3#|I^@R&G{b2J=16&aA7ZyJl6#9r3s)Dbn>iU~;+i@7s1<>Hl4u0j~0geXzHB(h& zIh})AsCU@-I)SG=(MjXcER_XTn8#Xk!k%=jm42-7ux%oOA8$aOeN_@2@`XB5*9s=g zu#>9NlhwMp80uENeAP zdU625{gAb%mpm#cMRA687f(*rYp_tcyje2Csdd*>o9X?03=|xj1|!+^fTmdhyy+?x z{K$R>s0_SqJai78oL3k%8vV0K!a)V*X+LVJ`Lg^uWx+dg*-i4QZpn?fO&R>$QOniE z61i{}LEhO~`#d}8b@hA2 z0f*?_^#-&E@r=hqQP@R9+8q4aut zR3cqb$Bpw-ni_p>b}AsF(?@OcO@Pb1nA5Y-YF2p^1BI(2f7JE^f@CN}ZTRSF%m~|Q zP6)JW!+D_yh4X<`(wMrbqPO1s~N)%1L5fSCgo z#WYP34rj2`@9$yEYM)=)3xKZV;me-dxt`_{5T2CVO+@sc7|s62Ja{PSyglJy-k39LwVn8}2si;fz>EmO5MT<0Ve z=m*Oif?_JT!=GqW>EvUCRCINGJ*i1|-?wi&rN;ZxcpW#PlO_?NqQek5MZ>SaaF|-6 z-j4z*_)s7&qcAmnA54&_v>PMEP1JFYfBrs=h&p0QT_*S+4E7P4=oOK}Teuvm4Gt6v zmOe;*bj6-@ctVCsW2JVlQo@+tk6f{kZpYzhDyL1t)MK{y1hADKcq~T`Hsc2TjWV() zJr4lkqTfLOn9&|dyQ94PbSH?OCLMk1uP_j5PhKW$xVD6ZHP_uJM&6Vb``+NM+#V8w zo70M6zVRx-{9L(#7IwolL$ffjGxFd)^+dy;atwzqhAb zUT*Vfj?hQKdBs6h(=42s`2wKLIeK~wb?%NMCg1u{&b9eKuJ7z&Qaxhx+2)l3(Bj`E2TWwp7D)`7B8-PI=*1xD zLQsb(2bT^{$}ls9YCFY3%c1*;;~sp)E+TzlaBtOWp>u?mWvX!OZ!*N$v`mD9xcz&+ zf`t-=8WTv9JgfD4)TEp7t-!DmYI`H1fGEi6Pf0#wWk8LemCF;>)edaaf1(^=jJBB6_%AU85k;R#>lmJAGQm_5*(^_>)&8v3YGc}N;K+=qTXJXb5HRrE&bXnG{l87WFXkYY8=*v0XP{iWa5RJabnX<06qYW^bvgJ*zT2A%B2H^-R0PDm$0tW`tQk(9#-gjllV?TWNiAuJgEb$T@fZrwSPm5?S&X z`jmk0QA(z#ER>*8=B#w8V~tj!q0z6Q`J`T!i-wcP>r6?3w*~S+@z+K=5*P$@;QjlW z$M^Wr_`P94^hvK9ZwXwW>mDHRc&*1?c{&E47c1uI*d#M@ns4V%FJW-WX zW2Hf2zI1=YV$Gby{Oh7U7AOP~9|-W{ss`l8iP!5@c+n{q)Lb!tz?QYS^ZOPg?Asa8 za$CZdSx2L}*ICby>0hx@noDLgt?O&salZgq1ib2$G>F6=V3Ae+5c9gy$icI@N4+27(ZB#p2hpLIvZl%3GY?+A9t;waunk9@w~2J@5POsf>Q=$c zwHJE{&hxbBw*E}dFDLtRR|0Yjz#W9yr^ejM&vOq`Am&a63{c<7Nmp z#h*k!rHvhF9uQOL>)80L!)0QvrCH-389Yay7Q2oE zO4QXa!pcdUWGW60?9Pr#-r8gfE+S^KC;Ci^R_+$7O&)%&>9jbvp6^{B)R)kh^W8NN zR*oVm>WEa`G^fl$A_PbYs1#d?IgpNBf?6zyOA)<|4_;B=_E{XZ7$Yr^SKIp-Hw*2KU0xI$?JleW8WG9 zR33gm9i7p_8QaMLC#qeib-%dS!dTMFUEQOoL-Ei|!lm5Q7E5_&jb9}}vE$pt)Ong> zi(=RLs`--)ZY_>Q0wuTc)zdL)CU$9TLrm1F{H$nG(%41o_SYhrON5lw$#&9ANB4ae zeF={Bq1*k1GpCJ~Ddx%crm^q<2yv&N4SRlp~98DN;YOceGjl)Z!Ik{Bf%s#NhSB|5aLS@w2ij74kt5-B7~3YewcB_rfpT1R`S zzcS+sgU1v(9BAxp=h;)$IqFnpV%9trb2Pi=V$~Bh>nyhoj1%lA9I)nd8kfyyFO|x4 zS~!%7KNV09ts-)X%F)yN+filEv>~LgGTlRZu|jqWC`N{EMY#KH!9Bi&4y#?-s{m{&H>ukBsa;|t95Ens9`vjbf$u!y) zoRo#^Re~XiB?kZt(X471aDTOcn`2lR!>=pd=v11#uqU^74=ihU3Jl$25?Lu`)a<(Tpz~1fNibh1z>9#JK+522W78cYieT+v*V6F)C|vGk-|R z($b9#v;sZ!L!fz2#o{>qvQ0uabyY-W;~E76$Ju}kgDq9v^O$4nMoX8QW9?dH>$?eE>*6U4FM28z`!iRR_*+tr99CAS^HemOG>t_pIS!e&b&k;OKCVfw zc^1Jm^+-`Yuz;U9(T*Kc*O|-`hs>3zE4zUkv1_L!9wvOV8EP47;mGr`smeHUD!(mM zX|hc%SZ`z)F*RdXzX-CCZbWE;ncqnoImMxerB8pFfOf z?Qt|WxR#C-a}D`q4Ce95h-P4ookm&=wTHg{y>Prg0Sl+7$XVq%t(}$q1uO1+rM|+G zq;2+3Oh`^n4p;!IwK|nt2q5!85}>KfLZN#6I-`Fm*vi|n1dOW{a3tg_{uueOvoE(n z*_jLz@L{zi07#kM#M6PDydUr70vqLtEtHp3I`j3Fp@bI;W)wXY-3z4c@ ze92MHg;CT#X0%;M35qASSJv3+#w`RN|m=1 
zF_9q-vk2yPgz9p^fMzUm!gY0oxT5S!##o`U#0Qz^*}~btS-AM* z<<&7w)lSW>^H9e)M+}>!^K_4qith2r@(Ha&B`e8R)+FbnjL~+YJT(*2x|{VZ6Xp}0 z_|x=NG)Gb!gkPOtSmPC7MV<@mE$a=L-)uL89V#baLt3m&n95OXYT(tHo~S+)FgP&S zjyR@cQk&LaFm1Q5RegLI#giO3SWR~*WmIknE9c1Q9g{Koy*O+)J0X5}F=lVl)1iCV z{^oCgvFvs*;sj?*q}0RiWRPIf!+q?XNxtd@ne3#UF_5{V{!OpS`FYDz(~h$Q+*bTW zZf!vXt7GF3olRw|(0vCHPwA#kGv8-!$y)B1X zWj#Q{z${)jKoK_rAc98i%c!L>-#`xA+*rWwuz?_~&E;I73Y83=41Fi(;^OJ>hw3C+ zLB~xvXl9*1Z$Z27~3dP$`EBIoa;K9>5NF z1_X*Kuy9Lh(QKQsR_289dE?!H3HHYlJI$aRIxSVLf-Z@@VECs* zv9@rVlX8p0zDku&1gf|)Jl#5 zGOutKg~dFB^b?R8I8ftpEp#tScK3LHd$4pWg0-A!9e|=3u%Lynxo!01>rt{v>P1Nd z%MACXzyjK)cDH&?V!mcrD55HWbD_1U^vO+>(q+S3<}Av|su$haq2;UO@&`cusS+|P zYAToTx~stkQ|K}dp1-==D(C<8mi}6sd|en|5CmS=DHA57%w5)-esA3#a`cr3^AyVd z$s-z@&T>=!{Hy{Wcu@Ot)(Cp^g2)qhq6qNxI zuXC|pk_cfb6eKHiyF6nMvx005b<_)BO25cNCOax;(IBLq>lg2?I_thzZ)U4ZK??zi zG@ctzvDJb6<=}B9!EXQIXu3CRekaUAm~qNl<_Kmz6xQ;4bC^IH9J9t0b|~;(T9`|R zs=@?HeNFN#_=-JIhv)UPdPpqF!+xcWR4CIz7FQlD#121TQCyT1HgN3m{=WVgRNBy) z4;I~r696tUuq;R_i^CRIgT-gGCJ(XJzdgoGL$IY2zJtDY+%D5EJ{UbIHohBTDDULU z@kV2}mbr5N(3buFSGL6031mwL>6Q6fu6Xxd2h^JWo7U%jeR5!NCR*twBSH&IzvZiZ zN6=ywWI_}2=DkdHvoAMTzWrM|)O8ALE;xPBf&;y;- zYK1*%nH7BbGUOF+u zo;sK`r;*B1(Fg3Z?IA1`9!eUVF{?PlGqbbOs0!v$3smJ39_e()TtPu4QBF*eN+_*p z>I>~JKJPD^SYJxcZE6>j45K^YQJd#nmHUIx@D}RdOtaiXZcowhCTh4+JJF|fKk7dG z40P(0LX9C05&eRT9WO|u4xN$1*HUcuJ3KQ7v;RM93{L<8T1aFlfU0#hG&(#yn)y;zWoB=|?Of9ES1^mbWL=r_ubxtgT^uy3(F%$>xHB*mRMoEf`t!MLYIdI>_g^-4M&vNEIa z%7I4}_1eTLi4=jXzM>SN$o-5&X6ty3;oyjh;#L5BIJ}u~jb0Gpy@H*}>blt>L52G< zf)EshH8{=TlqeXha#;=eo7qWNr1gc@-0k<-d5RkPMO8^M_6O|eOqU9|>rSjNUR)m3 zXW|4QJU%=Yijz_bWy7TmlCB`Z|3Fb>m|#J;dK8#8#78vozlhGAnh1uyCmSs0inHXn zJ-o@-Noos~p@C-gl|pN5?OeL}#^MDM(r_I>!&H_%?MK;FZlmpj9j|Z+lPAMq8d{k` z-wB{3IU^@pSDh___($tiQe4~(XmxEUiKtXylC!6`)~J9s41z^V$h-aqD`dfGRUnP0 z0Ha*Mw+E=M{Q&lW7!$ZJQ8$WG()@zw`_>LhR0)fa7+TpSBUSA;BS5V-n=ebmsT1f_ z(XgD+DHAK0!73_|Pc_M^Mek#B?6t;Br_<74QpAFtC)ZUC8%3APMdG}ySeU~RQp;m& z0BtQav;@h+#I_u>CSE}lQ#|_s|2cNGSi$;?(d}mMV!bULIU!s-4a;h8I+8-W&LoPJ z#{86Yo6}DRjdHyF$V#dY4wM)6wb6a+$b)wS(o5tIp!e@HZL1s_DU^%y%-cdKKH?)T zPljQPGsWPm90M9c`+kxps)UFB8C>`*F%c1&R>4$`LG*@S6c%KOUCzKK3qy32y%C)j z6CKLn0=!+|6_!$~pbpGziBeA&!z{886w2iRd(fawZZ2E)iUR$;5I>V*1(-foZ!t)2 zZpccvy1gLePo_6@PTe2!s0t0{2DKMrhTwydn1&?Ml#16vbsM=ez&Bfzay(!2TGak# z&iTa}F7I>kPQ^+7apZ$G_Ie}^`%NaGpnX5DidHxKS#~{-!^Bc$Ti~p{$-E!{F5NCC z8?{3@RtZ^w8&10bw8DgKFM_tScvVV5q7|S;f?2SHl==Yrqq^Qt1;L&tB_;Vg6&wX2 zHn2zfoW7+H`?Qg#6ax?Hc^hl_Pfmy6D$+z7-k9QkBfO=uoP;ln3qtec*B zt_32*;TmQuQ_P|#I6BQ z2n=|z#}-2Q%WqsB<0I1~0zW(-?zo*xK zCYle7(DHDM{*3MUPh=8q@&ucVW4cxW^Sek2Hy|_-+?3yov_R~r4jwg^sLhpUIYWDH z0407nUoFb;MaTvp33M$+uw$a<8a%g`LU`6-#`ZL=lH(US&&Z=IbLqdfQOSF2Mw3ts z9T#D7uCUSV7FzcX)ukr6Ia;6R@c3|Z=J`ufU<`(!+?E7ujEueoa;{K6iveb6%)6Mr zv6R2pa2zZR6q4zKRxh=%##L3kZyNj9557>{ny0divj41;zg7%^EZ*f5-`+aoH&h!i zSnXqFvJQ#_r2J!X8#t^LZwlCw%*^u~)|}}etoP!Y42|y!CD^SZMV*0doorkHvQg8lw82$?;LqY@s zs@`q#sSmoaKrAS_TDQ0495lv+@dDT%tN`usfWR-ObxxjNpvH`R(4Vu9`Y~l!+30Vq z;J;TyrUGWKKQ2v^NYudr^pLhIw6oE0oLteR|-$DGJ*A7Jku3Zz8ge~|#Bl^#T!Se); z=-|n^Y|{Vp+Ct@gR(%_otik_!8+>sv`9(!P0ioO3B3W4p2_WD}Xv)0VA8oQ+ycoc? 
zwVErAWBtMO2TuR{?aB2(=7RC?WI7{y)pMSDk@X?=_4T0|SPRR~1FTh_d1X~J;qf_O z--@F~`n|asKH&pUJ_Li|)d2)FmP8_%<_&;(0JZONd3kyMaC_MQc}6mJFsX4G_Q^C( z;Xe;Y2H^)_rP}Vt1>JeE79u}Y+s+$TJ0Eu<>|TO&@sZ>p|7T>ew4h*dPX@aL|M~=< z$n9?Sh5})07CF|>gMF+1Pn~G(P~&X z{uR^#?XO4KA_Sg-$6d{|sEpI)VyW59@v701l!V26j?FbSmyHvUJN5rS@IZ9V?sj4X zRv|W&3lMQfd<8mweSju1 zK+RpFtF27;=WpHj=S2WQMz8)#Bl`Cogf<1nz5VLclWUtuscJKq&3{$d*RKq(gO@SM zbG?W1{K#ap)@*mNr+%CcR*SX5?s~N~3OHGmJNb$rvz>wgoiEQFJ%HrtuglG!-!65c z60-np5+KO19+Y6UR=BP5X$&DjR&KO9`GC7DMU7svybM7=4ILJWJjFKz>_J)RX|!D( z)*J0lfV6f2*h9D#15+(XtXBA6GdB>xY=)}6xwfZTB+w|2W!dfmdH&4ne)6m5(<JJ5yz@r=`>9N+wP;_jrG{1AY0YT@<+X_Lk2G+j9GdPzt~T+UaN zQ_~d7VPgI)jb^)?h13~!>XL<@K0X!oX0+2%+kff<-OSV^^M%ss6PcW=iYz&93lYgw zIt>P*KZ!=W)o>6|x@;~x2B-#jNa#aGt;?A*36uh@9w#vx^`_DShS^S>lq%&@=S&uJ zVoiDdOxBpr;xF1qCJ$%Jv$?{zfINw7QAn9u7Nh`Z&G<15Q;+-`Y6%ak#l;hvs#cDh zU8~oAm1dL7c!TYxj@kZbvg#!7(;kZP$xVL*#_6nyfPetN0kF0vLJGA4eS>`e`Tp_~ z7&_^r^}t-YyqnR6joc7WB!biL}#4P~aGDNb^SyjOQYpDR| zlDdAJ-4TKpw4mQ!$MEuEsMTVj^*-ZrrU*YDWL>+T*tnyTlY*ZBfSDb9#B#|;gZcS# z!5KgYG_?RM8bCUiF+74?v`o%CA+S(9t&X?33ZxRSCWyi1-7t~K;!>X;ShfAB%^tp7 z(n6tJDwV-Dt)x()!5jo-GLd0Ea$5drJe{=(xXWM$i**`%nQxx^;~(soz}%tU`XYnf zQA%Ta0N`F~`@G$ExZg9dA85~ZIAf4h1o~I}5o4C3Qh58Lij3x8Z<=_u5Nqlu$0xOa zpbe_Hz6Ue9;a1S!CxpAJ-vzXf1qa{JSYrbf%f3Ev~RYn2X*#Y$dUvceE-0II$x$*m11q-0?EpzcGRJYs?g0Q41SL$i?N`~-mx#0HYBpn3>UDl77AiO`~C?0{?7gsps;#fb$?Lia|R+C zEWk!!lT4sUJ_?0I$`yEcrkz@#{8k86!C7*(59n)^*ThPA0()TigE`U>(ED5paqu@0UnBjF*j42=l3Q9_{@Mo7{aV zaMmzPA96qW0QUwwb$oPyg9p$yRS5~lvm`4{m@oq=ZxWt?ew5(@uf4C^&-6Lc8SM|6t@e3Kf#nU-?L{|IKO|%y zlH~WFErHpd(U^1gI1_eCzVd)+wZ_&mFGR#@|h{gf*g zkup9Cx#CG-ijt?@QaMA?#Hmp&&6e6aPou6P288o^=2I8cP$g3<`t@i4)?-L}Ea%H_ zQB;(T1^b7jd{9D|2BRoDQsM;weJ9xC`ZkRpXN~G?-0hX`cFXE7(=4P z;qo8s?xE1dz#s>LU>u2SHi@-DFO}i^IkM0f-A&1E%2Q=JJiMf&WMr{f*m2>d@;V?b zxEv?y!`;CLSxd2O>g-^qqSZcA*GfZ!pO)i$e8@09q*C{3FbvW|3Uov|36TrPwZ-gi8) zF!!T1D;@~c(7Bk8mq7(wbqMWG|AcR^fQbCgB6fm5TPCwLtl^U*+xDnG!9Zf28@qgan{3FDN4NjSr^FYlJ>B3# z#?9BX&t+iI8&W~MV?)xy;I~w|B8lkps7?PXk>U^X7uGu(G>Buq_K%jI}~rHO{WX4{_-7szI6f7s%$WwKO6MpGXx zh82QaHs&Sq%vT4SfiOD8Xil0ZX&gFV>xyl++SZ z9HD7Wh4+RDaxBnEGX;q1_@75EbIAf?K6Aw>U3C5NQ5vq1~j zPKbF8Aie_-)>;SW;A)9-p2&UR340do(A)+CmUU5%i^=Ba@%$_!`M@P-9FWCu*q89&d=`4R8Ko`rU0x&U z1MB8zGPwJLmnfAOY<9j`&29u@%`b4nHXmtQTAn0qFpJDk8I=e!@^j+pomBWSfyGfE z?HnD5b$DIC`E7wccjvtA+>HurQZ>35{L(%6^5iystcAC)@%kYC zzi3n_2oSm=Um->OLci1Bz1r?&Z_H6-KQNt~DS`ZP|pQvE>ACKA73t0VpBdS6$#M_QKOl4Y>>}H7ji@hQCNq ziIjzG0b3%#5=New9kCd``2=uQdY%38JHNsKomvCvL?DFZ8_$gb+DV|vQvW~dzB($) z>AkPsyW>F$zJRHRe71(B3Skd%;6P(n&UO1fKG6a*EdyF*YK1nI83znKwre$1bD z-F0WNTt~L~&UJ-X zq`HlRw{Cn@&%Zg-)PI{iw2>Mamorf~)nnPEzL^y#`XGPn`dWEPvqB)33Fix=V2^R* z^OnF~9MvzhtUu26E2!PhPQ()0SQ^vo_CU0!9eiclj$ND!ZPVC4x!1LYlJaAlEu7c?Q!ABnq=>6H>aZ^3Vb`;jY zZGAS$AvC7o-H*4P2jBJJ?t?p(5AK5ig~01|)vC4|51;ogbS>=ilMYJZ6%#EvI|Vl| z8KEtb9JG3RC@c5|l~ zs^QLyXdct-XKhYDMs9uKde}j?PA&GtOe-xti1m&PRoClf3UKVr$u_9FQ$7c}H-{A4 z%EK25X5YM7h@PYZr;jNfBj39a_p+(%Zdp@m!I(alPke`>fqZ;NJMA7m&$=b^FD4J? 
z5LtOQ(okzdWliQ&wh-!q+;ZUq)Z9uX4!9y}tCguzG>iQK{jOp6`!39&=qimzL~V zV7YI#7u-s8eskLAd7sV3x@Ljf)(?`Xf;3S-i6@UFkAv+RJ>+M8>-}r1&NdtV$zMse z&YTw^Npvqs4sD8-r0#v0UljXxa%qsMCPOpSa*YiKg>L;yn8~}DzB7V+4@p`U*>=#y zqrBdKEv0eZq8Tn~CXM43j@Z}wum5(s;Luc}-) z-u$4qo9+o5R_(_t8#&_aD!H$&7bO1-+(0R~1eD3{kFZVy#QyfCA?Wxz2H`!(mkxLP ztNV)Zpe;?e(mm{p8ZSty-6ct_2@N>2NW6;~6RXde+=aUn2dq?!L*gj>p4rVXy^_6I z9En0lFxDg5?mTk;DZ@o3oJQjPQA7K>1O*V}u4i zldgvI744k49p_(u)R!tzD>XVX?^d$*nX>%7u8xN7?QPM_I2N4c3WQFrL^-y~e*sH; zUTu>pmE8u9&~+Na?zOCyeoH~|3b?_HC!Q_rq zM^pOcg;Prhuk~VoYEqc?74Gdm72h!o3vsG_6kZyYwW8MStaRtXL8okt!v4;wX8P+7 zY(COfZ@pBMmw))9Vy1*L`0|TiG}up=*%0#Ti>g_MQ(^tC?H2tc zCZ=v8(zHMMK3v+G{6X9`rV8Ajwm;Dfm4H%CMrNffY@%lY-%Yh_%+76hp0CWfsah{@ zv!-h0@_yI!1AM~Wbp;ofSFH#{=#wL_h5afAX#J!a! zOK8J$=5R~IDvv~Ys!1xcKfzAfl~a>{Dx$wqSvq4sOSQ56fVNDd^-bUzSI*0GU3uE# zn%NGkHAwcEcbOoQZ;wJ#tP}S5=lR~&hTTv=Nf~!cRRt&tfA|0qR2Aq`${J6<7%iT= zj=wh=6>3DK;wBnlp>#C+JS`oK9lye6?8c%-f~haTKSB(c@#V- z3^TuRAP&WB?tG|4P&KPkk$KW%uRAGR@h`1JR6RxLWIDQI#g0Yo!c-uy7niINo4oK7 zoOzGqYy%Bdy*r=Np%V}s`~&Lb%J&P|b*i57lj~MImuC%&k5{1T!V*1xir-m)EnzEU z6<9xq0&7#E+KTX|lGbMW6J2Q+@$*}K8Ml_&3!kW(OI7Z`b(qvRbEwqE`%nA{ypH5& z`$>-DfzS{rX(^F5SbcFFdhmImR=bms1*ODw{Obq&YtDmm*vTd4^oDqNkcy52^D1Be z`Ljm8`;eTqTflKh>*ZB|&|dGZ5Ngj{wjKg1@Udu4aKJyGKve|MKoP{Uqr=&P-%3^d z{YS~ty8CSg>~^?fA+IPCtC6hsX?IlY#f^w~5398cv3fZgc~McsZqmQDJe46zR1Ep0 z5bmSPP@wm+Get@V@N~q15h5pZnwZ(g3V3nD5ejN6tUAyq$y!G3mD(6854VtTO zo5;)WlDn$RSt?+e-Ld_E8WRWX4Xrrd&$H}@gOqL}1sV3Q*6RSM_<(X!KANTE(icXu zoUr?6o&ByO$wzAVDIKZ39&Si&jZK)a{;bPObx;S-P zFUN%LKX27M*fquQmc|l2or)k>8i%Dl?)4wU>PY`ZDb`{N+zfxbhJ8~Ouwl% z(-!x=FHa|1jk(l}N9OpUwb0{3t97ROc$~wQu^|B-|2sNbg;K|8B*ys zWA6vAIV^wuK**}Wrc)-DpZ1aD)>dpfp8tCWJge=HCKE-R(!hSJ0z`jiECi$GZh? zQokOrdO^=UsUSwlsew;j^0Bju!5LQ5jkHqFk1q8Atoh>DKerTv9H!0C6{~oB>=1{y zh@s!7u@!PBPh}J_(?~tezLZb6p=)1KYGLeGDBarbx;bb{=vQJGz@?F+VGW8rxFL>` z^zyyjec&JjH+ob@AM$L>jYIQ-JUtWZ{kkj|?9%>u=O9GN;Ls&ePjcS0 zs+IBBAOk)?zNC14MO^EPzvL%hbx+KJi$aIodP$!!30Yw26x(9FBQk0b9YLXow7Fkx z0RVMq2dttqHmh1b$obB+JzhOqt<#iHf_h^v4O)Uj^7kR|JA}3)aMK+S*QsPdgja39 zdSQ}XbCP3KJb;Vs=lk+vyM~%|P$3IQ_X#m=z*RsJx_Tn`8Z?4QY9f3yQfk@nOLQm7 zbc+exlmy+^`Xw9Gq>jt^B_+HM*LFbCC{{27k=WbG=Qjz0+05?`cla~SaUvaTDti*G z7nD+Z?sslpx86KI<+jyiJzdKFQt-anZi`_1e(R6OJmpXrSc%7(EYIt+V}x?8Z5)#4 zesXnRosm3x7q%F+=nkMYhPsVkiYnhD6<^TN-D%CxNQO3hK$#uP2pyTbr8D=E$fVMz zA;`-pIbSxuRKuOsDWK@9AbquwAF?GM!&|Y@r_-8oPCadQ65KsPZifXdN*)&B2R7Ze z4v;ufw^|vmYjG<1`1h(4JWvl8AFE$yZ_ecoGfmmubR4!Wzx0V$kqL8KLf8{)9fW6n z8#y0fJ%`Yy6v+kOP6WTp{^F@Pk{0PhA$WbclTk+TS=qZm^lymdUUzy>pMGb^UdQ*9 zK#BwL<4jKIslcV>yEsf-VA(M68O~nhrVC$r(%9HYaxp0k3kb9MF1*lDrf+W@zlRra zC709NXYx@-m}9Pg?{D{*R~m;)fr+m8#X^z<#iw1EOOB*hNUg98J* z+Yjm~+~lfVO9X`qubFRAN?O0$+%2hCvog8opVF*h^=_pvwLX4blG<%!exq6MP^G%L zr{pZd-5E)#8^5ihr1(%ezHL(NLk7k<(H50JPEA@_ z84r7JNoDB>&F$yH#!mEh?kxI;G)y>IRdc*S+JPdL zi)wD10bKX0=gPPjha=28E7czFwk15lHXNT&Oiz!OmJujq6tF&g^r`s0P+8j!U+Pv` z=`9ME$uJ)uA28B9%)_j|Ach|og45U(#&Wn8Aj&d&8M19SkVBPADA~xzE;von_dJ+i zYU0WIecg8^peAccx~P(>NDh+4S{)VhylVm$JxtgMJ#0(Bw9oOhgZ9#f{at!pKeDWy zA0R7&I4_1y0Z!rpL*CJuF}sHYQkG+7J=3 zDT%Mz8Yb?%Zmi+=?l)foL;`QdsFTYdm29VM_KdzZcpPnD&Fz@!qY&*3L$(WL*D2#Q z-N;`F$rishGi&7EsHR;2(VuJMbNZN4LHlXB#h_1#(yWr;2?Ml+$7~4CeKC0jE1%O}09ubR`DVC8+B|bW57%e)a;O^prHB zmbY1tzMVi*Lc6JBQ+0Q53;H6Mf0K3_DDuU>v%mc5ytRW0>rD?xN;L6?NnvLxxk6$a z(<~-O?JL6KB3LNq&;aRCm@AM{0a`s+mKQ1 zdFK*cvg)XDx8xg|a)Y6VFt6rybvZ%XP`-j+e$E!DoEFqtqQL0Lqe!lSZ+vW7w zd#+OvnrP?9%no;%iW({SsOOcBbq~u0!r|ogxC~o3;|i??UBlPk8JxkGzO~ABq2p=@ zLZ-Hc>&yFaw9=)RLsOdvhtE*?G%3hnk(Fc8Y z=3O6CN2-+s_14mujbYfXe5!|@*`2Ft7e`3uGZVh-e#>7XB~i^S>3gKMtb9b#@$SN< 
z4(25x3|T*l@?+*HTo`PE#9qhEo->X?k2_|tgDlZkZ;4VW)8#9YS5#OYoe6r~v7ftu zx^50#XJt+^J-Qu#z+wk>%@0MkXRDlsJ5?KnMoc(K(wCO+s>;z>t|MY4ZGixZdDFK) zl#jSNY%UO5tBI=^NF6Y7>b;3t*fBp6U^($v3 zFU|l&3MfOeIB8m$24PljZmu~mc{IOdhZ2N%xJX8X4MLDD$OLaeD+~l-(N_%hU;xn4 zGBVtFWR5A0-&Qf4*L&c()29&|CvVq{`5{3?Z9b-hJ{_BfdyD0Tg@tj~oHy71 zqx%$HPFyG~9K6*%e72H9H={5Ha>HJrV&&!K^ORF0W6P{t=s{(}fVq<)bdUvLwW)`z z5do*@`Q)J&Z69@?+@%wq>FtN>*GX0S`fW5mqk<123W%OH6UzHI2$dTU2$-nNn(B@W^WevAO_4LcbpO#So!G zEdluGgKtkMdGmB4&6w`?W~&<+8agz>U3U@PAU9A+lO4Rro}RZ@CrMKNg9dN6 zTiz|UB6xNa7-wIwx?E7e3nHo*LlW^U;Yn3R!L-aY1A6V zL0Q!j!*^2vk(AznI$ssn?d5MxBcNnD0`0z{eL0AO@Ah`TbGAXHDP2#+7SBclMmD8~ zJG=}H@3%dBe26`OR-TJwRL?z7t5jTsO+GW2EqTS_K}tXbhpzU-MSHe_xlK3$y(5dD zs_M^Dt=BPuM3%{J*~{m-dIg6mDHITO%t2>9SP8cwGfz_N5?)g4_Glg{qHMI{i^1i1 zztOnAM(~aEYX;%jPkKDuu9-=JpwS@aHg16UOl$cXoQ)m`2$MgAkZ@!MMW2P8Nom=}oW|N*)UTe=>1*J|@p1GBnNo6W>Skqi@PUI-xHJv4sJ7t@h zozwhfT)+ImZ^n)ik#Il+Xnv=p%sQu>1WtwD6a9@Q_GrP9|I&%^tA|l(<5RVE> zN2xQET02iQMYJwh@o?&3Nuh<$;QaQqpYL$IFDesA+_c^(H_SmJ!`-~VTX>KYWm#v=Milk-~*bQs5ogh9%IXRQcM{dK_WoB+{a zN}=~S^`aJMC!k2A^xLz~qG$_=i&a60yxCCGX8#wJuLZZZYZCMsVp`q;P2o}|s)OQ3fz!-rDyy@r*d3Eugp(W|PpkOuYrpvLGu~;rA8Sdpc@MvR#MIV=J zK_5lozAPef8cU@#Ibd@Y%4}cUfQTRJ9V6`d57s%hEt$%D=^`e>+#NYD9wPy9=SZ4HllS+NsWGo+WYLC*Lm^go^hA`{XcJ z1ldLF%(#gb%i^S# zPSjlQW<)fwOz?G2=fMpk*FWFPCr`Lfq26Fv;_Ba`;Qx684y#&)EZCz6oA5y_ zIq=^z{?`KhnFadIRR6WjYtA6f&vNK2&Ht&4o>a zw>MjS&;1vo(a#@ z3a>ILu*t5xez2SNbpqsjHbYmT3Qq?#W7ikEFi)6-np@ycTr>tg<#&Py%~1@Hkd%wd3ip9D!{Urws0F>Q&u4|FgTfVKh_f;=>+iz#lnzp=RD zvA-R|X9>q5JSJuc$S8j6A+>BZnVmo)&O7DzbhWi(y1@VYj>7p?dFNzJk0?qZdC7K5 zrOav&`feZy`X|ioC4{}{uE0SLLyrt_Pno3fjiQ{~0wldbUG32K{`rd+goM3n_ctm5 z1CU>JU{cQx7bH!+S$6g$KjM8IHvF`VS`p?X?FArR_0m&Qtp;9yIBTaCrRd#2D;0bl zmM`EG$$J6&X>lp35E4Gw@5(H9cEE_l>?5SY{@^tsfd~W4h!1lh$aG83%zXCvRn5!FqM&e)5%A*7-7mhph7qm{o^x^_*s=lE`<7m~H*| z4a;}dsCy8c=b=&-ycrJw5K&)OXHFZFr&DerJz?Xyr;~?#5g$KAIW$0M6Plj@B?72V z`IYje`b$Qi&D>srxZ6T*3Q^DwFhtj(Qv$lCcLP$$#8m$Tie-f2cYiEum}!D}4@jFYwHe*U4!E6pzT>$s%t4feNj-&6dFm`wTRN`E1DRWf?DX zOficgq0s2mgp7^R?D^X||6wA8qp-MWxLYVY2Iu0VapL0RDO}fvoQK<{HLfVyCqp#u zI`(jj86``WS5n1WqBW$iLt`wR@F7I5RMyg(^&F+Vjk&`{ji&-iYs2hIP<+ zZ7)~gY~jsK?Gr(z2zXAk56u1!nG%|@M!5|cR1=e*Z-le3qp!3$Yi zgqy0KY&wO|Mli-BpYR9oRh8!FF*_5Z#j-OgH<9>kPjXE9a7y70*7%OYPG3CVaHAIt z^3T%C+%dw`Bh6S!3_VR98l%g`7v?)x(2RYH58*hzS zf64!UJ6bqYU#kV9xdB}rRZqBXPp1@rZQuf%^frVwcF_3$UCrGDdWpFYj!poJTdLIPok(NrSPO};* zkP9#ovt|#=x;4Brt0H{xoA~0_L}-EWf0;bVP*Sz=`239lh+y||nanb=THoa*K^_E? zRw3@HI)46!iU*vydvE6$-VHyad06&Z1{zsl2dF_J-5IhUP0HTje$JEfx4deVVimYE z-rA$U2Th1;fGRCDZp7=(d=!hQQS9vcs*5=*^z@H|{tuK9K}qH{EJ_NhX zXP0KgpvCiVKKH74K5B+CZgvAj8X}|i06a6(6 z&M2UwPkTEZ*G@!hG@|fq@#~WJPKtm5Um^I>rgZtZ9ce zI@n*wEM59b5@4~Rc2Z2Blnm4Ta<-n*b`8?_`yEDMJSHty01txD19ZYUb(E}{`PZSh zaxY}BSHOX>X_qkZ@s$G61KQUtwJa6kz)h|P-+;h3zZM6DL6!o|fZqU?AqqP-wds4B zKzs)fB1IJv;{_UdFm)#+#fYqj9+|)e1 z<0k?5;J0Q}_sXDp3samzz;3dUF+t6^W8@HubO`oH`7FZwQd-X8RW3utTS;Va3>VQM zX!R2wZVrOiesYdH8&nkvl>j*clw*pqh&bSuWM@b*Iv#BF$G-ZfVkmV3L484nbJSyR zk^kvQn@#u*ulVx#rm-343U9r{QS`1ZW11K~LH z3`m0d6s|!1j|;E{P?0F8zhDmU=<~( z@$vA0j^wB+xLvV|sMmu&Si=Q+)qpL{2vYq;N5RHICHfB-Er7go&|g5qpxW9?Rf4~? 
zLBFPyO348>MqpcCJzpfVs$`X-qT=}jijt+WxYjCYn6((F3w=7^-WyH^2FVDsXNcAS zeBRpj;Y>h zAT22h7dl`>2;#3gW^+SF?rA7oV@l#U_;%j&0hG58WYOU;gt*0muL-n%;Hb=aqo!(1*v9`vo!X-2|3J$5g^ z7r>oF8f_mQXSr%VTLlo>zrGBEhJA}5P_aiM73S`K3?~^f*S-`D+$`w2gk0A%{mWzI z5b3u6IZy#=aB{;|cMI(DGPfbcHAF}(R_zTZe&{2hGd1mr3mZkz3`t*1knL94audHn z%MLwuY}iAMUY3pnz&V~!5tG&; zzdT*rvLg-c8p{e)n3=1R=&Y%0>N<8x5J7#Lb6r3L=EFmPb}We!EPP*;uT3(!yFfwc z&_{j)NwXob6M>@yi!fD@w9A3o4wNkEF)2%bDUO!Q^O=^Nk-@241~*IKt9E?_@|QXM zRhLScdx(|t_-p2d%M%XG2qqEbTR}xVi)MU{aNdL5dopE;9~u84^8B?q31WpE3QNVs zzOf~~j&q9S*w&J^55e4LPI_1Zy*MoFNf3tfGD+_PP}bP{ zJC$`R3~sJ!-f@|N(5c+MhEEL9-|=eBg^hgccN>R@Xa!icX*~7|fAvd(d@O(cc3|{TAmzN-M(Zjn8p;5toJ(doT`b2c*S}%6z z3EERU_==>PKgHm9$DkEVRt!!?-pK|h4B>eG8q;DPn2is>(#;gSgT%wzj>=&Yf-^m3 zJwsw9aeK!otT-cxnT(v=sOb!6?)xC}YZ}FQgtlrfg!xeT>$X&7$^inNcW6Qi5G?)} zsYvI?F02F@3*RlBr$c%=k16@4p!L}lkmwhEpPxq_KBEJ;}Vrh^aTD{6)intAiR3-38Lu+@p=6c_nmg8*{B{n3O#<*T z_cTMC@+2bxI%goz5`=``hWzn~|7>tCN(6WLjk#0mL`de&0R-C2`OqIHV#%*(0GxO{ zkX^_9AKQc&c>+Rav2!|g?~l3oIg0775mTF`U-Vi` z>x!~yT*CF8K$#b#!_`MM5P^)j%v56$9WJCCcB}vI{6k{C_GReaB>!Xdo*Ye^T0dL> z32M=B9?_UD-pfc=IIw^WLAs_?aB6EpHI;zN66Cvjl0Lb7>r>x77AXI+WZ|~3T=e@` zR1=%vS_;B{CJ2|YngAa4^nyEhbE`Ogeg~E4mFHJs3n&jg=<+-%8yrrCc+$+RnfnNFr%bH0#C*ZP5kb1N^Rhyl}uTmlQjzD7g02^swc+JH+8t98s z4QHGDn$t_pB_C8!IS}LH8(oBAcQBYkz)46+)zdNbfvij(EWmm4tLu;+W$DZt_AN`r zL$t@ulnk;)kWt@8tC+z*cZ!BW64e}(oK>!lDJgNj@_a|GkUfwES*7PjuLJ$=`dpRg zkq0|Y7B}=#Pzt^y6-;)k%C!|>LT>^9IgkNr5V5HG$|IfZeWk$ru3Xtmk8rQ0>Ce+S z+L_UVY;Ds>s$p;p{liwiPtct={}muk&$ZU_&BIc`t^FM=NB$e{R|5!H9n}G;u&QSp zW0DEFuGdq}Fns~5QDxa$KUiFSIit*Gtk7z3b*L2oDfT;A0n95FS5om96Gb^~;~SbV{#7()UKWhDZg2{=-FOB;Q>_P?~NlGWdm%R;^jvSIc`sy!aI3ssA$IjpA##me1QIk?2W3hr{}!t%scx%(I!(Z9HU zg4g z&<5~v7dn2omp53X>n=(wep>qSF4S7Wh8DD|_1C5l^0>VOD3e+D?fQquPkD8QNJ|IA2Doz&6+<_7rp5+K;EZ>7*vse`;xf|Srg ztw#B2g2z>XJ>Q}9bQt8|T9tr|jLcOzY3XH?p*p6j=-`74Y7>KUhQv z3WPqq(C&=(j#2K@XEys;Lc7x+rg4+-4JL{_^nJ+3=hhT%-d85}bdA6diVz}@!&o)0 zxV1Sf42UenOj$RrFARRVP=?WK(p^E{l0HoGWR6|mB+SkJS53@sGfWU7Y>Y<3M{;i^ zS%R6lZRAcB6cFTeL};+&;%F*QY=o!i4<iYuHNunG5vme~{TB?NI&Qai*2I1Z&sf`a{LbsH;@xahy#syD z%h*TmVq?jE&-qU1OD5#8A06!5!*0Qhh&<)Q$fbfS`=}87^T^weo`r>NHIh7y+8E~T zQ3BnsN|%PH_Mnr7Dt#019;L1Qql4f8pb-#rzn%~`1dTf+4m%K$XGH+3Cs2OIg0uhS z7)uIuXtqm@*CD2)rI=rMpQ1n%N?<#i=x z1$M!mCe{g{s9m;gW8%ZwEEE^7r-ZVkf3Yg@+}d06%nYi~b6XTj*A9BxtFJYTauR{( zZHVv3QT7rBCP59EuY1X%Ac9=%F1LwSmbQsh(~6uGy_=1vo`zY4N^>QTo*Mz_d9;bT zaxU3TI)a>_H(gkJUOD9)AC3yEP)x-D#Bc9jTWU@%jCtxqv`CVE)vSXalXj@0E1Qv@ zUn7Mmq9gSI&;3!g{JS&-RhuNfREn>zB>AN#hmsHhD_joMF0MLT8dLH)&$7w+7ZBw! 
literal 118160
[base85-encoded binary patch payload omitted]
zb?&B4pV(GTwb?c3vglkn)ha;@{4cMDR-Uw+l@f%}|)z*J}6>ZgcVx_hzMGib~_ft804%H`tBbxuoLyYB7UNJj_k zV;I;dbw7J~_9t@#+B-0Tu6Npe#QD-Fv8w_^@cRK1pA0(t07X8D&vlR5`x1PuIO<;0 z{-*4+2QsyABIZXtwlkn_V@}!=%Ek3s7PP#8-+$7r!$X#*wsr=AY`M&Luo-k<1O3%b zyd6)V-1%m@$}*N#p5JBX>Ab7PHBaH@dR00_`$RxgUjMA^>3QCPfB+qEAz(7_B9HcW zN`Gt6{lzMf`u|L>G5KfugHR5fzGp?RXFKwimygTPXzYleA`m}RUFr2o{{%fq^!7xJ+9WZbY1Bop5C47ioGfEi$diCvl1 zHX6cDU4(YtxJAc-jndlNvG>|*0PQnktwc=u(AU@+>^XQL%*@PHoB9n-AKy}RvY0@8 z*cK8YBz7hzKdJ+QUy_WBs&*^P9z^=lq<>K!^xbqIcXetIr6`gk<{KD4 z$GGB##a7xe@M5beO1mG8kL)b8hK7qz8cvY}m9_FMe?*1o$K6*mF!N0@c4!O1)mC1j z3C1SH+j@5Z2Q)1(y6^5M_S6WySc$gzHX5QBq0aRKeDcYqY8Nz;loKYFM(0OHWuA)a z&Uc!H{m)gbw;;<43)fEjA?@B1JX~?c5M>*?=N#&qYG;p$AM?MJ+EUr#bRA865%Bnd zay4e1J&+9o1$9&PuWnddRQ9}|;M7@4(eK!^XRH{cO3S0(ya586M6%T4m78CDZ|dGr z6S^8daRP_oQoV+fYXoo9mZ(HRZh4tWM8lWxE$P*gqFH_YX}+=jli*y94hgE`6WIh@ z&fB2b?Wdrd%Ci*Sy$`q{-FiS(xG&bJzq-O-S`4OLw3M~g($Eb#0uuB}idGUY@cIf2 zEAD^m`dTaXl!9NtEG~i|fE|!wT~9lh9_b{bf@IOf?TX6us;FRZ(_}p{L8Ny0tjFp4#iW!h zyY`IEE>$6_5`ovvB!*kpX0kH1>fc(T=Z3ld{aS-YmrV6p zi3!URQ~k{X{&#UH9(1V0E~N(_?5?3m_J;p=q2ZxAlBbhn%@tP8`$r_p5k7~DZh+P+ zeOh_?1+!HBSM*LtjZUq&bms@Ks|_P>rwh2Y;NW;9FGyzd>?M@Y=TP?&b}S4h569N` zwc<|}u#A)&0-WfVd#g0R(}vVkXF*LDwgkB;gyso*I_9V-GCGnAK3zpY&p|1tF2iAy z-6A>_^}nm7*9hgZvXyoo{9qYeQ)dN&iQ&V;VGV8VP|sLcoIQTdUCzSg0ej+B@ufoR zFanBp{BO=b!7`ejD%NBr%ee7SnqR{>=?9KdVg9bxPs#PA$qtT(@7Tv6!+tql);D#5 z56!o`__P*6k$BNz9ww8O=86epd9Y%@MU#o{Kf;ax3m3=nO>Dk zLE#+NT;-m+5U2-I;TTJKiv8FoXzH4;ZIpe_Vkxa8pe&0ug^vf99<;#Du5b7P1k$rP zxr>xzYlXP1F0(}qMLKe+M}NOFvWI4B`PNIf4AS z1J^RKiI|(4`{1+JcB}PGc=d3&t7f;)fID=P^)6mQ4Y5i=$;erKpX7OO7$`@mDn{_BU?8`pe*rL_eVTf&~qYy7%ripsUqTQ&4b zA1c=%1l9L9pWYoG=x@2tqJ5t&LE3P_BSXk%@!AO?)_+9^v~`HQq}~OV+y?XvkSJoqw#E zANyyZVaqxndFqv%_hvxwc*a`o+bG}4q#iI2{0SUI-tvFUw-)NGtE*dWSpxo5z)St> zSCPZ+mHpu=Wr&Owq;qO&3K%5;tG+*f+^vOPyckCS&%8`?I9whvoZoeNTFEMruL4(|+N$9rKRIeVuRAn8SoF*^RS7mw>ojL4 zC$*2o#j3)jumU~JKc}Y~r9nX$+`tn@>zg!fY@n#2o7<^g!(J!OQxM^TFQ2MiDk(_D zy}{{(-C4=p-h1}lXP>h_F$(PjJewXK9sqAGwTlBfdd&cU9Vtg= z@8VK`pr{rP!Vl>R3&T)rY-osKHt*JdgHUAdQzfXcr`L|CssP3Z*qMd9U0vb~v||#Z zI!RWA^t<>Wtg$q^tdkI+Pij7TFO!?X&CLzUKwW)`|DOw&Ch2EM0s+2Vjm4VzmOE)` zfW)xQZ&T<#?4^*!M2>iDEZlAD?FHSLL?j5~bl`5}w0L1sT6t&jalw;z13AIN5ys{0q#QfnbOxDOubm zIhot#<>mWxPR%+GCnpKmHoRvD{J#z*G1jfP7~IwR1Tn^V)G*6-(o_{$gcN=zX9hz# z4EMF;50IICooY=rBx@-R=AlaG&W!dGbLs$9k_W_Nmi^)}_<$wOUO=dTo8a(ijDEr`c zx)9D1OASs*N>cv1rD)-lNkP^C9UTgdV=ZK5c|%G@riNT#pl8{q*2N8jDB+MFND*|29VFfq_fO*~hw{ApR`nefPNiaUq%YWar2#_bKnZaiGz!0nZ(!;hg@0f%K zVD|?*ge1wEUcp#`<3N3fhh;@$i;HC{`3Fw9kQ)5P+}y5@#to&VpC{iUs}610Ij!yP znioPxZ)aE{&d$#I*h`aSa__ldG8`9fk9iSrX3t9;IAEM|AV&ErnL02YUf#mf(NU%C z(RooWeAHLPz+#Hl%_$4YkO7_jB`YgC)t<)y`<<7c|92=>J+#35L!#>bcDe$%xQgqK zl2V?8eE#^TB!5zEf)ve!Rg9GRO-*BwpOOmeLDUjP#B=TkQYK6acKtDzLQy2M)SG2v zXGhxO{TEI?T{<@#h`(~SHkXcv9r^6Zp69gmQ5?fN4etI3?AWaEdZJ zcS4JZzH28gp(eoUb+5tY*V)bd{fKduD{RsK!foMxWACZ_?;2JCq#T%B#L(g3)_68k z)>4IKcAOf!RDO7RmZ=xV=X7`JpPbl#9`9_e)n@tFGHX@q1G|0)e8>bGT!ej#Hp_|p zcS3*Vz9tKUaZaz>g$K{_(+j_fA6_EhAMgHYqnmFx41Fc^ta8R|t69G#|7)FCt3CY7 zWUj^*Y~cW)9KrUMbC=ZzqvQFD<%_Lud{uYnu)Pwlz5VEK2e4TA{Ah>ru$BjfN&+k5 zs+!rsRFJwF^=Xu9+THw?{M~ubWGcuBHe+T0*wN-gxED$+J-?l8%G~;sSV<{Z{RP}M zk}S%;GTZ}Rij8Tz;^-s&W*XD68oKojJ&g^w$xCJi>y+~*A_u9Te={C`yB&+Cd&$Ez zkS0dL58nUBHF3NfjkW3648D0=ob|SoUPuY2q~&hm2m|1s%no~Q@h|N}fud@gHkiXb z_6;Z=j|0#yeK3D4qkX&XP?r{Xx*7JmYFv5%ezm-8I*l!N;FV;`Pn+6KFbFF&&Fxy) zEw%P2uF}wPR9omFSv>Y%KaLFlvEKjf@f131F>=*%)eH|XL0_@Dh(vi#7VA3B+y0%5 zpL6JXZ#aHg=J#WbWU0q?J2OBCj7H|Vj+LEH^X}@|aPz3$*zXDQ=i!(V`D-1*XTgDE_B+>l#M&lYk8C zL5YUL_DxiLQ|HXu-O`gM^GWv^K&>aEbp!OuG#Eo-EHFFQ`sP^DQWj*2h*1R35{FBC 
z&7$YQs;q@L{!vxdY)bO+C3y`EAeF5YdwDNpdnYK*cE(R%IH&Rwat33yS$Wvf zJlT!a#L~SyN$m;eZH#&9On~6Np4KGjJ{8YcAq(gLO|kpl!e4f{-_0$hz5a0{`%PDre{YIrOJeaX z?JC1PE%Xnj=kZTG#E#{0r&9qZ%;+^g9Qguwg(1msDh0+?rKoJ6W9Z`C)k@{#HWvK& ztlMGzxM^+EK-#j3SN>*sN5P9vf$Y5yt>ItB)>M_+<^%9Ur5`yTP0ZkI!)2LsvqdsX zT3VZDDr%5G+}%`d%SF#idDfIIhBS}MS;^(8^kKn6Tg))7LPkb4d8MX?$QYw`?7Z{( zVUM9%+~DcvL&rYk5xOFTZl+>kkNY(W3R<_#M3d9hIVjBp; ze;#afC;YCam^GUfJ=z$0hH%~|sKCp`j~f*gmX(pUX>(R0k=nEY;!;NgH;?NU0THxI zcARpBmRCHfw%tUvbzRKr74H5XE?p_ZxykQi^NSoY{KjnRvduhUW(N6?*`h{A)r91e zao?057scN;bX$YzvHymuoJ~BxI<4dWkkT5vSp-yq;}Rel-k7)*1PV~7I73mf&V2g_ zpZdo@J;bL^7>vq4Mf4SSH>3=e z<~e0G@PP&PN~%0JO*|2$vaXtsJQ`m=WgrPkNloqpBgdK^X}1ElvQNg{6_iZ{;frbu z8Ri2~KV&vu zzE^N)rh)p3*j~u}LRm(66esyZw#w3*%$rsWLX9Zv-I??$o{JY0un>o`M`w4L30*9T zlI#R2{Gd`etKZc;9!(a|{4MZ^1C0#IePSC&0H3S={O}NeGnffs7LDl@H#3N3#nht% zPoOV@h+J_u0e|faoOQKObMvZWR`=O{iN|L_YvDwmEe+Q9x)0@{DdQObBYvC#cearc>l|BE5l$6Wq8MyaykaDf53n51iUSSx4h+cE zKy7aV*lc;8CQhNYp4ews3Re$v%?kH@x4BV=(2DVHaJIz3wMNS+9`mEbi@yur#LqLt z30Ohsv@=|r^Tc`AC%%L?{doqMuClcT!YPqC|%l1%5b@kS*s(^ff^Va2A|#(`VUxkVd|kxp~4k3s(Imfn;)q{-|4qn?`9sDwHsG4J3Z zKBAu+sP}eN(oe^=WG$3n3^u{_lI+nS|Ls9;OTCwjKT2QCnrH5cTTTV8*WI7?Yr$rw z;6mcYsO9bpCT!;47l=PP+d{){94@-XVz5hXv3Fvo=%eHkEn8l`wpqCBz*;4fec0U7_qQus5gwRjpA$~|6u z!Yh{YWzNR!1;ymc+Dj?A=qIm-X)i}sW^l>(`uNt{;?}4wn2@ZhA`D2tP3wI+b?qyu z=7(MT6ABL#3LBC^(uK$2t^)>R=L+jIkzf433>=2c8>EFJZL_hXMI&menGJ4~nr1J- z?CoQZTki6B!{+x7tepAt4F~Ss!W-V7{3`ig%Bh$1omJY~AcXka{VD;P&4iL?r!^)kvD!{)r~5OD%1Q1T%ZUKfS-xAN*RzXG2*%}8qOUwqv zi>xMh^cbr|1x<01d<*ZJ4F~s0RgG!#&m?>NeSsj&v58DRCk(J3?oAG#(aylPZ4?>UbbqB%Xu6pfing5Q$)49UWnPXO2T0pAs{$;J+C-a1XBXt!NYac1A;>tYv zO^ilE21s6{=zE2$x3hO!UXLd1Q10Rn&iH8K_-_+NzKlwoiv<6GM%l3##OSjaP?fuI zqL;mVF&Q9|f=s7Pn6Qo09|-D`wwfn+e}#R#Yv$+SM}kOJWbzVWGQ}}pBt0;Qt-bJ2 z{&U)TCBeLy{5x#d5uQzHbEJY#%wP4_xl4B>uuw34JUlDBdpG9LW<-S=>yd-;&`>^q^;}F zmmmJL{LB8a<#g5>cmv6E?{}5d(^5v*Hc7e~) z+4^ax0ef}g9Bb2Q_t?2Q^mW!xyvoXCX4X12I%=3ynKBAV#e|NZSc$wP5>01IiJ%c0 zLB8C&b{gW83O1(ekUkSr4Vf?rItB$_1VydsniL`^Q9=jGo)JNoMJW>z{k?9C#mvBn zF}%Xm01KQyhQqZT6%h)?2U`Z~9bljJ`Mh5AhmO{%qNrga<1ZKd7|emVo(OCu2_O76 z+>3Q8PkTpJieIW51E@kanwqYuqO(<{Smh_}?!p+0r}4n=8$bD5??TLA3B=g)sI>24 zUuxX@zT>;fO|E=&mk`<%_5<0)3ueoNMv|q)qMek(eq+2s8+reOF3tSn4X5z$eWF|z z;l{Pk?}^l~V(rCBOtU?)oE_i!D(XpEAwU{izQBP$1PzLOzn?6q=J|QwScnCd`)%&3 zQ}B6Zja-x@V5gGF!PL%eVF#Z4*gNH=mcff8l4%8d(+j!7pp-xw?`reuH-DTw9g2)T zPW_~~UoN#IS#TG0B3h&V)013dkE!1^ShX=i}q$5oQfqmIj1ewn})+IZm?8=ctp_Ak9*5#87TGOY<) z?ZtVrM>J_^MM3M(cuFCyS+>4eGaGZTz;E6nxDRhVr>N$^^7lVTTWW&DB>m=)xY2n7 zgisYQ#@=sy;y`Uc@{gghxviaxu8 zkZWV5r-(x7Nn%VIx{hzydnNaEEUzeEOgY>k`Z@NFsRqOnuU48%hdid#Rrrv-7-iF? zo@T1SP*f>z4GW>K9BwYeO7D52iP1-?S#h&o+l92|LTGQM^-5*c^Ggh!SA*!Ss@+3x z^G!3p)i+<=K-qD`sbh5y0{F?E(uAUu5+&$pjn?Ad=3{{;Xn22#+Acj_)(($YV1Or3 z+hV9r!qCmKBLrSGK0SpbHw7e1kf39!ysoXVqQ1qa(SVLtgNfe53Yyny_LW2sOnMOn zABSp^X^w|u+9c34STDv!AsB<3OfViD_yN{@4?0KS{+~^QC)~N6kog8Cbhyk%yLSz? 
z3-~9VH1Nb=Z-x3CO{sC!9D+s!O#+bHzWj-oy=aPvV)~(e;O?EO66;r#9f~9yTl@BR zPhf^Ab?{%yk&WCALm_+AZq=r1#^6XqT8S`6={$jAvk;K_?;?Q^cQ>A10saxb*uVOp z92N>$xz+aYt3%n;udVs$sR~oXzM9y(?}38B*I}`?MvjcG4ds#ot61{62giwQy@Lp0 z*rzhk0-*AcCy5tbl^h>yWnl$~8sDV) zpQ6;iUGcr(?ES&{$5C2s7GFTqAJlRHwmzc_$yP1AM%`6(PHD`UT`TnciHO~jbAOk> zd&)`Ea~k%VMI!$_brg?BxqSiuY29BzD?(xVj6tVgSF|d!0nhV!eK1HCyCD994AeC{ zn??Al7&2BR%)uaIPO6!LoBqoW^GwQ(L_=(jue;jvQcJ(S{mI_+c4kSzrF3!R*f1jlhP^2GF>QMi4^23so-_x>qQ;?veI^}yHGRVx_+`S~_fJe`10qe4V7Ocq1oODo3Z%x~Fr5Dp`!IwIhQEwj5`c&-YQ!#A_rS z6+Td1FjFgfB}hGt1Wp<1WxyUwhJnCMx4Xst^fy>q4Tgz*Iz{<@lf=e$14vQX}h)YQ` ziDv!xww_D6NLWga5mhNn9m;-c_*&{UfEOcmqZ5Tm-M+CBgX83o<9KoINm^OSyAh)@ zqs^8pR|6EoFg<8RsK=Nj4Mu>Apc@w_S>LTG@o8z4qMwCc`-WL|&rgx6sI@>)_fm7H zQ^;m3VjvwQ*Pjub>E*Af@9@e+xoTF%O;yasJAqgkKRJ30OL&1zfqxE6V!mCtquz>^ zXJ!VL8`p}X5(_P(LmV;%34>yD3^3&5o1jc?>pNi73ALRnnp#_5oJ?@Rh)!+kc9#joKp5;Gae{+8~MQV-DwlKaiBa} z)S4&ob_=dK-h8ToMyle@W*f(dohla9j}>xKEky)_R@00P z!>RP*6`B5^n(>M|sQq>!#-XjvE}lpfg%wbhI#>|JB~bZ;)Yq^RHwXHn4CtPR{xBCJ zA-sVz)E^<>iXk>7dNvZ891LP9BAzlz9C{w{`82LJPV;53P-;IZ9_SB3jOKwCToe7ktRw%<567tFfRP2LX5y2^qR@6;|Wg1!<(_}EgWsWKSf`@ExYRWy$y^K=JG4?i;Y&c3!oNxdaBWk84Qc?-TWp6cDy!qbvu3xv+kCE0}w$i;CyGYD^ld)@Yrji_eO=ZAVPA`WC zrV10JQxbJZ-{{;VE0)sijcOBF39j-*p`E~2LsJ7n!h%N=YWvSBGQJSld~$jRp13yG zj?00(+mdT6gR_jo6BUF2nsXUZksp8HKVAd-XT@~dW^CkAGC+VHL zno6SkGRhg=-4)tE0{OFJxTG*b7CNWo!oU6EcIp*iYr57LQ zbrAIw2XZTP3G;g}a#ysncb}8;rPd*VeX$thSF;Y8Dl*NE5GSdZgtGrMSNa9&`0{e5 zt4=4o$}}(Hb6IpTUXhGU!rK;0LhXWzK079(R?{^$#9W>`o>;%vf+gIuY?V5Yic3aR z+R#GTV$GEHVjejSk+6#O``z4Ji0-xaY8kz`cjAUaqIprVh2&T_Ow&*rCgs-u1K3ri zczCj`eTfkVHu#ck{aq-kBAkiXG$M*j>Fp=6P`xz-u&VW$5+>S!F|WJUbeujdSU=A= z|9X00DaPo*l~+%-;O33Ol;C<#bu}ZZSsH4%aeCspt18;O1OqjU;me_FLre7+{{*dN zO@|M|2t#KO5WlTK<(b)5cUh0D=gngJhI=87ta6-fDqn>SBD@%mCg}Sb;Lha{&M5cQBjhcJ(CHt zU!dF2DIpVRf_+tBL~kPNuUlSZEwd)QS8HdQ5UOX-2;{CUGo>nixJwa56)&c-9OYr~OJ6 z#y(;9z6`lh9+RQ{gTUG@7U*Gv`+_|Q`Q)`Nys(i0f+2g~=qKo}sJ zF&^dZk90kJJ+tQ#eRh5K87HCl$G)=QF2v&uxFN&i+fb#y27r6@S3N$kws0yaE*O;N zu^y{v@HoVKTa?nUi8P(+woDr`)}17&P3L3ejyzdC9?T`*Ki)kfALl@4-e7B*oz~6< z7gx&wXjcWaD90&memJ8^O+FZvprDa_v?RTDw`$CRbW6=n8RP!+ZyN80DCa|T{{3~y z8_Ilhi7Qh#xAh{F(8!b;$;)I;_B_mP^8TdB^Dd-p27?1LPwXx^M3&Sn8$Hdi^FIkx06rMQDvD8 zmAQo9wCk66A#)~ePOzgW=qUZ)O!^UZtwOc}vo&^-tXxQ%vMfn;7_Xt@k`)_Sj2VjI zA#mYiZI};L2aciMd1l$ptcq%AnkF7zGf>Cp349;GEj@o#ewC~n-Yha*(=ChxIWe;) z%p};Fj7#W0xTU{(#if73VBd28?IlFjWNg`<>h_kHudceuwo9{u#n z&0jA&A+TY|7TT8|B_^E!7zq~v7mkrcQ;0=@q<3}iYv6iQ-e|4FvItR3e4sUe&`(@f^SB;O9pBr7NAoA;m@srM- zk8$Avtq=Q&P^=#6{u9lwvnw`9ReAmX?@l;@cKpIWu-!= zlMEMUgZ~{^nK^7MGQF#%OZ&gi`DXXqnNwqfQ&Y=*E~|<3NVB|-&T}4e8T-rPxpWqu zd}h*dK?*ZH>d7)5fL{VOrl$=JEB_c1Ek#1Q;i^9weC2ER+e0WjNTZLD-9HZ8{zd6) zrD$yVKsElt7lWE)*l5?GO}{I;--1XCU34LkE4Cl?Lj-5wNLXAoQ&{ev7L36_0DO-< zW3EC!8xm_YDlIFHpuPD!l(Uy_MBpjDnSUg3KLnoV(dvnNhjEMq_icwCIi>J1rbRph z)UPy2dV!3{bhFHOB=XOG_{vP`L>!&`;FiH}|7MOWC){tX?uREQCu?9LSy?iS0Cp@Q zFgG0^f2#bwbO??rz(9WC%_rH6@Ryd7tJ^%i!%mrGvintHdVL6@hTD=sbT17c?!u}w zS3{sc_!`lE*fMBoM}Z8Ki~GX%@?taQiM)#~H|FV$BuV(+8*8Q8KI4#bYzn0o-9kv! 
z)ta|$-c9T(U${i}*J0*--u}D|Pp5)>px=Fd_fWtFKz{@f*T=oTOjgvCmzCiQP9mFU z14w5U`9$3xB?46&Yl+1W>o?SpDhRbnJ9~%eeaTuXVb*}65dcA{YizVg+T7nSnLF@& zdY>`9*HvVdM5lfYHGa1CHeUmPHR1HY$IzBTY_a*Ut4f#Qz2*VM>7CyAvNC z90&>s80x;w#f{%vUVhs;_?>MA1`9SP;#Mp#Ed^U!bAOH&c?P@)gG#qI+-iw!gM~xRu3ip~)10v@kI--Q7KHIoC8a5K~O- zIM{K)YMSfor5QJZ+i?Ln!bJD9GRfRG@L84XBNr(|$Oe$5Uud!l!f>xDXM6!G8WF+=uD=Za1}hIgamZ8>5)*m9BL71s zfA1y|tVh)J!g}3ZnNghtdw4$*&r6|HO+n#m;9O+6u%U>H5H*jpHHbJIuzmm-#p?&*s|^Xru}>t z0Fd9H(<%$Ja&Z;>=5ANKLV7xL&Je=trmYQ0{0Gp&ewdDiZtd-#A^ot^P8OxH2dmir77(q z>i+8lDCf2bX1gy<`OSz_l9pc5q2L9I<_vK9`1w&~Z4p+ujdY%8PS))MNOu(#m0<7y zpX+R;kv0Ha_74c~rR+dCaKwKyMaBHu>mDT9WV?o#7ZnLiL!mWTd2)#z*>Gp3#yAVT ze&5+lEnKO;bs$iYf>FM0+a3Y<>`S>$+9w*Ockd7}NPBt!{j&N{F{<^*^{DVn>m$7X zZ6N^Yo}Pl}h-v{IHoZha{Qz>@;6pyA^`ZbGZ`3|af;F0VXh6j5J-LSgz*C~z-?k@D zY-vb}2y=gr7BOydNM~7XO*?*g}KOE$MWMYJ6 zx!nK}T#5+rJ*DF1Wpgs`;>7I~!YUP{Z9MAq z>VyBRTor`;%8HfBH_G;BwVz2JXcMIpvZNNX-h!sMr>=uVU`A?bbu0OJQdH>JBS! zlOK&EF?3mvvK8LqmgWtrqR*wHq9IfxnO?b^rdkv801@j|2}e8IlawR-b;9 zkmsuQ7z6|{%U9ZD=dRUyn|-R)%*{RL8d(8vOmP$K=SPd`=~?yk+~?bzX>2N1AkkK1 z4f~!U24k|IB>QYg+5^5|+J`16q zAkKB~jb6HhQHE7>x>rDg!TUNYLTE4tI;9*`*I)656#p~8qJwH zCNA)ZU--;vPrRayY~jAPD}h;V^#x*LcFA_q}wMjD8(p^A&dm>A;A7t#8ym|Hy#K(#LB@=EE~n-u z=(Kn1sZkX@86q!UZ9!H(nY(w{lSXztu@DP+dXo(%dTq%z$ z4$jW1QTpeH60SJ#4bFy1R6|}BPMX&o&Jd2Nh7p053`uMi2D~JTt zK!DTU&2BUQ%5pp$-hipWJrq5j!JI@V!livC%tWmI>1*@bUl=x+amPHCgyw~X9$K#sJdf*yq79eZJ6+qw(%c{5gGA0ro(dZz1T34N_#hL zC5`$s(3kCZ4>rn!rW0k4lrjHYlj|8^T4G5BVppQChb%h?XL3Pqj@#;=TI)}rtjk*t zY7Y)s>X=6hg-MXV9&2^*RCY>hhDZBL<4_&G4pjXOIbM2+8#^=3yaGdc`rFprR~A0g zd1)}ffjddQuh|q(hC`2+n#cX-N^6~FA>a+;efnlVYV~@W&53=46@Crmg0nYIW}Gw3 zz~D7ga27*wmYm@PsILI5TKQK7x=c7u@j_9{ga~ z1g&g*Lp5-(q`QIMS;G3yheWWp5tv$<2;5Q<`C%Mc(x5SAeRe7I@YW(9ee)J`s-^Mj z^_DNv+*ewzp*NuG5%tAZpJI3$MAw(ZXKy6$F{7D8`w4Eg@^6l~3+;lMFy4BNo^9kv zxC!CtADbo5Fleu*8CLZL)Eu`DYLg(iClr6Sue2Raz!yZS3(6z_G`K+E*gqEKt&f0Z z&L;#Ml;^ls0xtH0ISHP@0EO4RUiyrw3Ne7!PIKEW{O0xGIKP{UCl02bsi#kKX;OVR zYm)CK-dBLyocTqMdEU?G5!+8tI^jj!v5jcw2UREp9HRf~?x&!|#jT+8()hxptVh{h*dh*xga<$Hmx*e?kCw zWF|pHU{&Dxrp|do&i7-cs2i|AkuCap+1jR{+sMsZB2J?rD+(_>hW+OA5cTSZIi7x4 z_aR#_!pR1Uuv5>%8c${UViE`p1K?D@Fl1{WQG1E8WG73e6Q;=il;?v{h$c zSNAkQ_xvWkrdz6R+hZG1Od_I&3HS6Q(Z!c!9tMvOi)&5D+H*-D>c zyj;U-o_2;_PzW-l!Ar?~lp>7sh+#cdP#FDat$YFa6;&7i^fy1epBHw;sq>r1(LAPM ziZ@;Od%uSUwZL1!{lbUdF5RMLGk-ALp8jU~oheClj&@mzc0Hh@lHPREO>$JXRs-;F zy#SROfD^KIBJ#hJT`-{fiXd7GC}CTWlrmPK(-6pDVxl97M6Y*BwEmLN1u*jb)B5E> zBRtAcdGXfm>lmg;NbfLcvUtJ(^1TPPpF3ls$oK)^ zT4OOl>}&))wQbi8&1ANS^_b;9-(k;oX~hvd_LMLuWf&?w9&@TfW;7S*&nA5{hVp2|`}BZ69pJ~^CVW$K z$Nrh2z>XB^#Vj4A*xX^eZAqLnn+!NM5b1&36^hSBj@}q4RR9Zf;#y>sPH#WBV zAvw5!<-*zInFJle70|{c=4eWf5s{(Gal%-iemew;B6lw61U9M)FhWw!T0ET8Qj1iF zcQvgYSq0sTstyIOW(2vI?@vA4O&0oKO|JG|c-(&dv4=#R2oRG!n-uLY)DEYgfR|dx zAS%%+pky$h&p9k`_PSgDEio{56mHv;a%a__lYyF_J)^7)!vlky$L4fh`i-zahk`b| ztlm3QOl~h{j?3@Wnh$^&40#8G9LWIDjXvHMF$IPDNp5_{pN^>#l&3@I*5t&*ovoay zO0j*m*Qp(UxP|K2Sy(U}UK^CIt(h)WZb*AeQ5?5X#J4G(*-JvnPjb5b2D4%$UIfsJ z3l7ul{*AtRdz1b+W{q;)m9_}byX$8PVe$?uWd{#TzDS?jcJkt#3!9p-XTz2%jnUl7rO6w(L&WR)Uk`zk~! 
z>*9aQTyf;4WnHm*8$jKxw4?Slo>3VSc_6|`Q8h16oW4OZpoVIIG5PeIn{!H>RoPCB zZ*yHZTTW1l<2!s`26NJ(RJEFtoixlO$3qviIr5qZsp)-d2ZXw^S+#kslGOYUuYo~E zMH1;x3}+3Lfv>KDG_Zu2M4#3|uiNo-d_25V9$QkcZ^Ofb8mcjCN8ijk`~U`^96(Vv z&CZ1QgOb?vR?pDaayBWx{9<1PK#UoKUpXhH1KihUP0}CQG1E5FB*REoh}tGby!EFZ zvp)Uib$CgUlAJ8+b!p0BD7MT9N5wKhlLdu_YT~WI=OHw$QBrtb`>37(YG`X9A)&?A zo&nx|Ue3eCs;zhvf852zmmmHNVp=1b+yfx;n<*`8@Zvoi{vTf<`s)6j;6(~ZP4(VX z^T{nqWMt&`NWd%|@VzKkg~K{((fi}y)TB*8X3vv))cmw4h=mnju6?4BsARP3yc84| zC?zX<#`#N_BoqHVVIo}{!98zuRq;B@qU%_F0g2GZboff*Aev5v6 zaP7P9TT@||({80isD`oUd%iv>jkV`X2{Q+{z)tLCBX3hGTFv#e3%){3Hq!V#|qFF|HZ!Rc%dc8Hy5Hwd%e{(!UxxD6h#;FMVG#`&wz*_Ar&auGS={zg9W!T>(V7_KEM= z&uUb^{l-8IHa%^+dgkdmIt%adNJ&X=$GRb=W|o$nuhV}-Azu^X5p5hLbdTR1HdEEJV-_w%=; zDfJ&L4V7F3ar>YxmF~M$m8{A3P6`Y!(c-qM%>Ghv(6OE_@tD}WwPpqT@nG@&H9^yy zU{)UpDLY7XVm^Ftc{TRuzNd1CzKZ1LQehR9>F<_5xXi0Xd(}b&FX>}7?kXQAJ(n*+ zW@>8cvd}lzZKi#*t}B<5Rq?$#wct0V2H)$kk>K?CMq(L0i@|dU*$CR66>M9?6zl5+ zvuPiGt;;mhpj|25R}q`?d;8PI1D}UDgIsXKnozTKE*}}1`_p`5g+!9W3^8EN#>b}w zjLSj&G6jQ^`2b!iwX8uYaF#tzF8D;3vKd34feJicM|3(Utbcitpb{~y5X=~ z!Y+R|F(7FbzB(U=5!gh{s2QOyBEOU8yf&pqq5Nuz97+As->QjvD2KtvzrnJSH~!d2 zC+^Wnf)4@6?X*eJW(HrWzUI%#nttEm@-cJY3zAaL7g>~I{Z%`}N5{v6*A5Mu`ji51 z?41_5*l>dJS=m(w=hMriyPc3UV$?#qi(eKR?R9`^^XUA1Xwt+8NJfRO`~S_QA+OhR z@RvP=-4z5cRDtOA4|;1WmEUtvWnVSF>cI0WUzBiEf|54@l=aiGgHIM3SwKbJ;XlwJ zp-UzALM#iCYt(|`iqcu8Q*J{py~3?$phE4utaDq1vpIRmp!YaWg5=EzC;t_c%R?+`Sl8}$zj5qW)nbPP3B-k*un7Q z!#OYSiaM{i;z%nvz%*}Mt?hDELxjGK|$&a|}hESqvahB3V-PZY6|d@F?s2Bz|ZT=77>6kf|x>y{h)xM~TpdH6zV?aTCo zoT+LtsbOT(SjSIcSL{600d^jsS)cabaq&Tg3jz{!7WlU!tMMdgi}YwLZ>WoV&qe43 zX!F_l{xtN%S?MXy^U|u|E}*^EiN4*m73OS^GJvn-Hnpavv21cwmPbdT=SEnfQG5GV zQ$&CvC*Q+dYEc07kj~xw(vZOHV`g+= z6vc!Sb_TsR>D3IIs9J;dPH*#3m_%&vXya-h_IIXc0#$+FAMqDqOJr zp^9q7J4j22j|2>S<9PT74htqBQIHXlFjAFxLcW^(z5M~gS=j-IQK;?sqlrgnW)NZ! 
zovWUx4{|Wjyv|4e`b|{qSS0r_pg;C3vxY(;ix@Cz^z*;4Ztz?!qy2Ve(yo$3B4%M5 zEpMUp)-}#kXGokq{($#BWFFs(lKdPyL2B{Ly|1f+F<&E?}A zxaOB6xd$#^XIvaU`A{L@C=L`$-aL7sMJ>0~&?ldj?M24QUU@4^4qWArhw5o=%fOz# zKC1PV-Db_R zo4%l1!Vz;A!+AQbp`~fVs+s)m_C(Tj|C7l-P*n6DYP^OT(54qOw zAvD80%-v4X?T*Bx(O4i)S;er?ZH$xd;{LsS?;-p6lu5^hq-;;+vzF_JKv;b8OYmkr zIz1CSaRjfIYTERfWuXJXYDI?54=wr%hst603qJi}r|_#@%P`Wxv*-xYPpZfT`OdGl zxEd%+^(296lJCr5R`;ASHpL{SCK5RRS4{*4D9tCTO(eOMhFc#pISX*fs@(9Ml;N{t zX@lDaf>N3z>0S(x9Ee4lzHkZV&Z_PSl2bpdNAdM|7_9p>z*gt|X5ll2{@V+MEKHVj zBz{(fDf*oRoVH&)(v}j#S*a6Q>%lDQRtJBL#U5Qr3tOX;7>k^g2{^!(8P(<=*sv3# z@;VxPO{wFe@ud5tJwk>u`97z^ljObtiZ}uX*(xl`!E8af^Jo?FtkE=LEGlq{W4-y_ zuG9VuR@tY*gdF*K+)|Bb64$Rt2=gS;kzFP{HBMZURFi=bVA; zqW|3-Gpy5b+*)PtNrCo_dqkIS>c4-?!#*NLgN(; zjds+h5Tb8@ja?D10U0KIQd2mZ0@B1=r}`8M3O>KAS~mXxDH2NT_awBw~?uhruwT~&ErinYNG;ktd$eETF?150eBaM%b_eVq;PJ?({uMC#( z|3tW`GR;PG%2B)bj`99;K%ByeR;7I3)%UfyCx~_gvHiOVt*cLa*D`JzNdRvW((a(* z=LEDE)Hm0m)8Fa2x}zAiN?0^Yp%KfWFA>m}->cb*4vTs=(z3(QjdC$BQF8eG9!EDa zF!pogb5}u0Z7+w_=xkH*D|MpL6BX$g?|wt>&qm%Uqpc^pNYH#yqx;|82RH6o>@#M zgw^0eu^Iw;hlGLM9J<8aba}h??O|8rL?Mk$M(l3Br>Efl{=b(Y9U~I|6n=Ugexo3B z-yT|RMH*N>%Q_MtbN1okuOJ-t9Y^KX^D$Y|t+q6u%NglGsO{EJ>iwJPrMW(+5*v8E z31a%xtKhl%&5pQ!vZF3EwD#bFdr`RdJzfzD5TOW_zqr5to?Wi*jytTW<-ihYGd!?W zZk%UGY2FB8-u8Ar@s4XFatW26-~ol9vMI7kh>N3IBa!#`p8)>FMEcDsDJhDn6Q9lp z-Jy_HM-XP1@c^REN~;c|&PGmLyZo;3EZJx`9DTJXSr#Pt_2cGj6T=t?)s%$Q^~LH( z?RSOgn#+5wp{z+Z^{*~}*ZnBSOs|GuN#lKmCqKQ`AP7;Kl3)PTOuGn_Kkts5f~bB| z8xD(oJ_cybD@>aXi9o0G6^ic!yBK}-9HnN+j5_Ie0|H-0N<5vNvbbZct-q1Bgvx(U zX~sCH45;G+>utr(C(UYV*rdObabUiXImMoMNzdh4to5KreT8&Hy~TAadcy;IfgD(o zRi#~}gFxV(SlhSxqte}=R&_16Q-u~WMc#~rtFN=$8)J6!wa7R5*Kxu~<&#NpRFi3l z+$lsVux^P}D;KV#;=PQapPg@%NV{Rh@ylpW(PBd~aO$6#s~5D)NJ4r&byIhfU%0>B z(O-1Uf~@BZQLBG(S8?xZx2a$%!yYd-H5f#9lk&rYVyR$(5wg0tJLJuA$3*&Mn>z=P zQ9vAOL_~|*F03b0b0$5tWxGFB(mpea=0YxGX%{Kff_QD8t&Y&q(G{MSk(O=_oyW&3 zj<*Q@S=m0v)<5&3X*RId4ULAG=px>}xGpRyiSb!s;w}B_k{iM%vsPA>4L?1Ae&ed8 zMRJ!k+Ac}rN(9YTmG!QPv``MfveVSoJRKdbUCQE{2%0Xt8KIYbTQPhvSG)2uh~Ovc zpTIT>Qokps!h~KI@8YXKH z2&%Dw)%EFH+Owuxk{gPzPmft}-7CwRD}3G^ugA=tlJXHJVCVSLc^6%LyGfRyGWnaO zXPY`kV8y|@*Xp~|O?Jwrf3NG}Sh@~_c~XibGg(5pXgT&zgQKIpxhnVC)6dbe9(z;y z%F4_!vAxL))UD6HG-CcKs@xH%%@A|I0e)@;YnRCvZv(Mho{^auG*K?YL}$zBzg&zpWb3r^2#jxPyXM%Hl^e^}`%{TLEsPRlzfVl)5f`b{1ydeXN)Li0 zl_e?R{O7zqb8&mzPqce?p8NEp20BWKE{};#RpkTMv$vP!a)}VMvJC@D_=iHnfF}O; zepu9ZjaD)sdJ;GAj)gh1$u~3LDK}r0?5bAcYHG<(g26Oi4P8r17Du2%sK|5M{*IkO z2Wx(~y78mpJjwWoi0;W(mh<;nGaTQ~8;E&YwA{A1D8>75P(+SrpaHn<(Mt|GIeO~2 zyUwxI2%`ZX`pYLizR815EFN^kL2m;BXnh3u_}ZgPBnKnZ-dpUS_CA|Jxg?5`!cs56 z|1>2=)|x(H;pjUi)(tWGPCkqpN(yik_c51E43BxxylBZ_>{^=KG0c%&W&2~r zW7UDO^c?myG!1qfRESw~K`B+ATi^DC_C5>VHsFmw4asUxfgYB z-!n1gQXIS9t(U9@@q@t+nZofB#RRnCUMS@mm~uPhkGN*$CKYPZF7M!(SulOL#H#T~M4gj>z0w_~z|G%}pSZE@ec!8%%*@LFaSV0>}Vyp&4wRDj;1rH&zpS0L?c zxoo%bqiRBEos{t8O4MLkuL0$E4pA?=jW?v@;pf<*%rjKC@26Qa&Zo-u=gT2)7~g|w zYU5SgKV^HF{f>zWhST9}lA>4?%F2N3VnWHpmSe2vOfj6AE@PNTaM6|B8Wz5v(fN8Q zqiD#c4C8LWBT~e2QK!rzHU=RfE-w5wTw%aDOU(P4fu7-G#{rN=rbO6X!vDWu7&;8k z!;{=`wwhk}=lOg722sS$9VXfb7TDX3Vbjr;iYVEVr0_AZRX=#H2I4LhEl;vb zuOSl?oxbr4DR-#6yy?f_s3@ZD5nn+;*>=}EG$9RVYS_9Bdpn# z4;2w)S<}Re`{6>zb?;FyMH9cPZZkW{SQV^ho8}`r-OhiXYPnouaZ$01*r(l-g&Zs! 
zxz^j_^Dtccd>(hgM;21hc6k3WE?kMsL7`Fgf=s2OjQV(w8@2g|#%8(g&}Hn~J>joW zLuN`v1@y*@kzRDf-*_WGWG2S7*gS!>y6-E$&oGvT>1=OfnJK(qEh#NE(l>l_w*0R3 z?ZPJ!YlRe#yAMel#TJQ#rd9N?m7t^E2S0=&s8xU@0qp^mg`^1} zBDQlfRnXIVKA>tBIzznP$*+m_xp9YYhPb|l|1M1uhHXr%Cbc1+B96}?T=M|_dRH@2 zJB50)iQbPI;d!sLt}{`#-Xyjie>m;rvoCq3_Y_)xOx6G$HpH9_#3 z`qvn4ts$P1_rX()xu zC|DW@E+>{^Zve(emqsc2ow?l7>05X1OniAW5GpWs8PhVOf=Y#L=k?*uq`)LA4D*z>U^yZ7~@pMJ;8U-b#xVMipyYf=ftw4IM>ZI%n^*wfN z+h@|!s@AuxjBn|fejvC7qjI7Woqjjx!z&}~bM;yEa&NbrY$2PT&Bz7>%xoztFmWK47<Xnz@#VSM$PzLM zDMs}SGF+cIP1NKy$9rugWU!U%$mBa88;Y>P(2IGBYwjqGx1OM>m6pw4?nftKAlV_l zS<6oKK9LM{fBH_gge{C(j$7@sWjs}D^PPJm1!-?!KS-!lkUm#tW%nXkDm>20b=xe8 zo^pv!&FMlS^ZAxck1Zng;&GAx#zR4lkQ9lk=A#Q`ATiN%5mS9L38@8pKB;N}9qrJ2Gh}kF$>6Lw`uyVQWcdqx;qJ@+ux(uUQsG#Dbjy_o<`3_2Ltp{3R#Le0zUI=x~sZZQJdB-Mq`Qn`H7*A$3yc~6jdP+Jo5a}p$x`uZKQZ4 z!97G07?K~Nc3@$-Kj#76KgCvr!T!3nz9Q&0{-avUfi@?7_A3*9K2g!7!D-kK)eAK9 zffzx@y9i2?Ir@?hMHk%D|Eo?E-70&Nl+Nca5NA1p}kj=T7+)ublRjzaB$d zxR-=Bt8iV58+A3hN;q*^<5oP@`Am#_-T;?Ph4p}8GwzOk2Uk`Q2VJ-7nR+|YyEOINh5>!>NL- zxJ7JkA9HaWDBSNkt7dFl)3}C_n;4^qEQy)(vv9o#KPA$IJb~<;lB>+0F4*Ipl^{wE z(dunHSGgNzlA=+VU17Qx(Z$2qkgH?!Yr>@|jAa&u4h&3udY*xH#l;DYVNn4D(tw89 zrL6@D>c;kfin9(9j6@wXPRLV`5oMGuyOc4UuL|vTBBw%BLwnYcEcBjL5p&573Z;19 zCtoU@%=-IU#rIV>)Jyz{2z-*Pjbrg(j}Un1UdalJ_!afVpZJFL*7DjBj`r8EAC#7A zsqK_ad_ai6kxP}a7N%g&@+@|I1I-iiq*ZDm36e>V?ffSAxb(FmO+S1-)qFIk2}Yn! zv0YyLA^(JeeA(s=bnN2$sU=xNy&VrZ6ZsDX&zAOCpSA?NMvmK%R^dYO{I9k}J5HN* z?Prhn&Jfb_EJ^KJWX2$8<|>cfKRoM$=Lo+#lTx}yNiZVMA)ley-fsk{VI;S%M@GKE zmt>N5I-Db&X;0h*-F1C^z3D1^5AX6tYj<~Wjd{tJQHS35(AM{hU$T>1?;=zp!D&Lv z^v8%|>%}sMa0H{GiWv6CpLqD@Y_y9pRi_sZN7ZR?2E5d@C`2DnU8n8d`)sDre@fxi zn3(txCgl+uMy5+9mITNcOUjbEX~K)ECx`U!TC==+v%Xr!c5776hsve!)p>fap2-N3y<)+Z=Ej4Fx#slAN-`d0XZfom?|)H}0uaZk$t zi#U@;$*~Bzywhu7ge*5PQFT^a0#_L*z;k?l9xYs_zYI!X`Ny2lHl%6hwn|D$er7Fn zL{cK009x!hz(j7# z2a7GCwyZ=z3?pHd;3t~18~INk*oD+& zOb>(1bIBfFY)jnt^hSI~J%WXR>9LyvG0>;$6H9nD<6U4@GF0q@?2Nh@@g8j@;!QyS z{pXjgCR&Rf?CWzE?FpZ1Ij8VK@N^|sAX2-Cm2YyHlAi~%{WQ`k{JXGdrFvlIcB+k z30b`6XGUl^zhR^6MI>5sjl2Klt?_%qWKy*GygD(oGbnU0Tj}d|rRB8XgqNi87N~(l zvf%(KaU?J-fg+VW&6OTzy9n3URuYhBbAC`b$(?t8hd?`O!d%9 zqMAL?H-mhLd2OiPEP`$U-3w_asTa>7ydxhq5UA(96-{C+Xh_gZZhw`y)s_U4piA?j zvc?s9Ul;7gFhpBJQ~a%n$a|4mG|KN-+9mi@a`(=CqPDPQRSKBC8+@8cabtR7`@3T(LLgEc3|MGMha`^MhtFEPzL^; zMo$T&t8yVi!_+{{m?^7R9c!yDfvvhqq|YqFUL3a8KS{9dTtnLI$|c=>A4NxwAO40f*gIQdRK=blJbTvxW7uI8+q5?oaong z+zjga?vV-nC_e~)ulWjMPm(m#DY*HCPy-)t%Jv7u*4cPfjn1_(ynX_jkA5Ae z3lL;-h0V3k*4HO`+QhzqEjHgJHBX5?^MO?@TA8)=)+GXl*vR!!iyQqWz;`|_Hfsxc zT#SgKLH#bvG}M=RUY1GpcDpbgO(@UfEY7w{=y)dQl*{}<1fv?o%eE-H);*;|PMn|& zz!>?D`vW!szPYzLUo|8Ii0N<9BTndYj7z+Zpi)qvy%j3!vCBK&D&BjKHMTH_`mr?D zPh2^EZ0@%EkJv+5^0r&P@Y1Sjd^G5x8G$-f!|Y-+yFlB;_()ZGxs3X++XEc^< zD;Zn1O)LHjC83Q-_HjSkp~=MTS(VSzm5C3`@~h!pO;CExP{Bd}ai1UM{+~SMNvIz^ z5KD(TS1vQ9<9TrW@KG6hf@WI|L&Ersw{^MSFNZsx>9PGoie+$23`S^{O7^k5Q-(Iq zO%LRHUcc?1e7H(S@pg*93A0ZB}cCDe~&=govV z+caE~4b#Ay4yYwr%p6wqyZt^wM*_3?v~9>sX6{Zi)z7d-f&daVn$q z>^uZW5@S9B%Iz|@J*~cA9Dcm|!TlhXa-8d>J&Fx52`oY)O zUZVG=JYP?K90JdiR$!vHvLKzB?98co)$Y*D~IikdVMr814oufA&yJfrS!rUKclc2US(Ck&Skt zYt@_XuZC=E*l(Szahvb+$9w#)weit`D?OSgbL>fD`9!_;+HztbsT0 zJ{U*}=;Bb4ML!;-;83s^enp9B)24g(Q9k{T5x71gu#@u@W;Vp7Ek+B|^nFf-6eHAU zD0ben4S>Szj;?asC5I7_2)C{3Blw-1yH7NW>xfh7**uN5g5?!`lK(-^S2E8j9{4$n zW`}WTL3MyQ{XkohZNZ>hlOi=L`pwVR-l1FL!WNjNr{s^$%dV-Ci`inwqGnvGhme$A zm7T|~wA)^UDBe?s4&GfFM%B0CIVMx_cvoL#Ao z{0};|P=0YI6BLK$tz8BKnV+RSXVs1}RYvc@AbXbA&ZxhndOaQ$%Lh+vID8#61q~`o zdm9epoHEY%zrOtcr{jBlYjl7&`PcLRrELCxyVzU`XnQTTuAK&gKtk1BhW#agz8Mhm z3yNOT`A<)QYULT8;Prb_>=Lu0XdJfm#u2+$WE@1nY|`xC7uYqEi5lXfMlm_sAGKB7 
zdCIQ${FNbc`Mk_wP5ali)fkC2KmAD4;oAIkO%8$TYL46p_}RvqhmI#G1B*v`0IZC| z#je;C)VEkbQYQ}RJoy}ajEpR`S!mDv)x%gAi8Mx7!SvExde4#3iR#EsGO>1~VXzIlj5uA&^W{!VdrQ)>m*h-LK`l=d z|MZT+DaXrTXCG)m$5Fwwin#K4+~QWZA?!jq<=B(Gbc1Ox=9ecD+nTF#dm z`M1p}DR3zBjajg3vEnL$8$)G0v>tYnSc&~$ggN?*PU)Z9`A*t@o^{`NQfw6bB_Ai! zu`-K)>f6d!X{pwovSS&MgKEPK8$OmsP##*vR#U|RP2W^a$ z>;S3R8EW}o*VBhU{LJTo)Xb2jD%nWQ+S4G*>H~uNS{FEiX|;fX6Lh2$kb3O*i7U?N zbpq8`KI@-N;GdWf;UIh#1K3H29)Ks>{pQa|wYm8qPA8 zgH_9MF=Syiwdl$2Yt-RFB52YqvzpCO`_2;^c@)~#wT=JO!S_AGJo9pRb~ZiIPs$K} z1vNEEUZM7P@7@80mvXR>%Yff=KHxAN790@~e0;2NaL^?3UkX@Y+kr z3RfbTQ+}-kZwl{S26{&bdsK+&B)vDRY9_@o3z5IC$mPw=to{h&*EF7l=Z=@S4@W&t zP0%Sc9i*2Y0*u(Ne1IVBPb&~4FLj!5-KwUkeFd1rNluM+3=qnpM%^Ts?!CbdXmoYt zTR(}(W7~nje(PW-q#N2D#nvd)d=UPrznXAu0Yy7DF_G+Bkm}Rgr$8|}-bxe?=sa_a z-7R4Fu#QD0O_8xagr2Mj8eN){Zcll|fP6j%-s1c`5-}cy{8EI)bWOEPEYuGM@e^;|`;m{~Q^VN|L&GqrFA;k{B=F_Ao|6Ny7f)`k3P`8L-J|6&h;SU`l%w?$ND;~l4x zsJOW!-D$4WGbs}{v~G*0vJ5|PU1vKiaLv0l6cz>=@s3MEk1)zDQv76f3~UH5rL3&- zZgIrZInR0#x{bH;8>_r+YyHG()J4NJzHz*gz!ruV{6K(D)Jv5VXB@hV<4M2|4Q~Kq z+vT8jC4BULg?gzxNm~i_?L<%^GH<=wTVEJNQQzQnzb;q3SeG?XkbM|_I69Xgv@+xw zcbK2_DRtUfV{A~xngFy|I{W76)Fy0IB;PbWKW&9VaYjMnu2fl32|fjdO-(qB6G?RH z!gD>ZF4w;sKRnA}BH95JQ7J6L$McqbUJU$<7NKprhL9{=L8-vIibbkVHKzKcKS*}s zKOo{mLw&msOA-1fQ?I~({ky*YjDz`V(P$kOzOKE5`FJbRcAZNM!ImUZinFSLA9jpn z7yE4>fA-4c9UzU56uR6A>F@8y?SZ1b?m6P<=6PT5*8R!Pe#u8oHP~5NkUogd?L_=z zC34lD6($W<*i6ZJ2fj}elw#nVlf4@%o4_CK{_gBnR}6w3O`agDyCK9Rx1Kkv4{#yw z^$69bJ(v)PX}FsyYy^ppC8;9adS15kUfH(~OdU!V^!Zh1z#)F_#U5QK;~&UcBJYc& z-5I?7+QY!N1m3D++r@=yay>~x%0GR=L*DZ`A%nN#*sjjJ9cXK9op}Ohk@ySkVie=5 zt}}I)?_Xyn>S48N>)O6;92cY5VuaZ>A<~JFk399E8S`J&MIqrE2dd*XQamsBnIzCj z;m|@7sx$pAOsev9tmRH{Zd3HBFeM`_h$%~ z#?10R!|+gfy8#~JirLr%5Hzt?%(_oO^5b@vm+vl}j}rz1n0?X(#Mw;iZhFiCb3wZJ z8uTm2Q6kde%qhW+x?PZn0{r4mT(A{!|R9I7(BYPWuG-XnTV6RhoSncNyllt)yU%z!C+DS)q98Ztsp4JG%3x`e8=y*8&b5k@R+#81X%La>WMd(DTW@>G>##ccI^ zP|;|mqvfg3XId@PAf9n!v8Y6?ROSy7$AQ($Q5X>Dgk8T(VUVBCOe{yj-3SaiVIUxQ zd=%L7z(((KAP(V^zJbAx(+_>qKl`q#K13x~TS-${eF>f=!J~TFDN1A!FmX4EM|Zhl zT#V2NrHS>&J0>L58chyVUjZsYG;2?X2;PPQJxG>`6m!^MOm8b|N$5ivGZI7|!%m`( zb#&=MOg+fk6SK>W7o%aF-(G2EqLK48vA6xht)yc<_q|8Z%sNJ*^FD{+Lo(jZwzh)r zK+cUs9W}g?4!dDVLUqJqZ?UT|2-Pn$D{HpL6^GL@!DgbMggPzth1FaT_pYVbkvaEb z8@@;zP^QlpW&4E>B?T*e(xxcv9hyY@8{?5}4IUG5&diGH?j_jU-Za!q)SS3Jy!+uj zSUWy!oFS;k^$iUTr|Py3=NNl&7JCxq+AfZ!x6Jh(BK;s`)+j+&THW*|h%-fUJ$eHx z>Xq_jF7LX8a-B5c%5BpH?{4GkI^JQ>E9^1Q$dCB^Sw84auP;gg)2Fh^S19sk2N(_V zD{>hRaS_SKX~$`ibhbcRC^*#3rf1W_JY(8}fjWY3ab0pw^F*@rJU(p8ean^LE*bux z=RggGq0jR^nT=*QW7Vi7cOaee8r5F^;6pY9?3szz$0}?qL`H-rw3z~;`i%@t>sZVo zf*~LNNmH1vRE48gwloR1LffHe^~)O|KumdZtaJ4vXyfkPJ%#+TxQ3MR;%fcNrM&aZ zp8l_UhK&kU}1?)4<_CzxJG z4R+{(nRc_wjytJdGXwYl~M~Ql#+nw{t7IWpZLxxzGs6M4Hn)CBK>1R`m?vfMgY6m5*x=R*U$1eqZM95a> z>XnHWNwKu%gstHj#rNw5sCC>ww%}JQfU3tP0q=X zfp5Cw2fQ@y(W?qg=9akv#*Qw-@Fiz?yt50z4_G3p0xUJIaT}NZ_qNA}8nL-boc>M@8P`3&d;pED{ie3Zi5jon5$8!`DYjU~vT;@LdY78svI5 zkEx}-6DuCPX9eoH1Od+&13MPjI|?T)RFss)OG=0lfJEE1KI~4Y$K@ujHgi@>w(9V~ ziX9qx(MG~WIQ%>GB6_OGtnzmjo( zbZ>7?ovtDT4QaVx=z7Y^B?z5P4;`cDOT0c75 zdF!0u$bxo!?SO+{Kex4#?pOS+atnTXhpnDX_P4Oy^);_>N#rxzOF=vja zT`DQa@hlLu=&8S`-zTJ1!ZJoK%r1IPH9nZ!ZmJ(mZ#uCGG2It0inxlLQzw>IwWeZO1%Lg`3;LBWbV zGqs5G$cJ~zwjf~Gu08S0paJ+5E6<{{ATO^P!+m06;O{St5w%!XYv+G%-CkT2^Q@?> zJS$16t-bpB^Qq&9*R+k6Ujbng@h|0Gz$=vsz4V`Pa&yb6{8mvxAB%#FjERXEMzzDA zPy0R{YN$r@mAwe2VVIYfr=F|4^W56x0GLQKGhaGYBkY}>=|ZhdOtK{Wn=MC-IsK)~ zKPlY1cMmK+_PF@?K>^I)w|V_5o9pWm-Q&~K!&0#=yy9PEM9#iH#{QLt~57QK&oWvMCdxebs|G3ot z8oHZ%+T1)mIIxc2k@E;z>I3)a&LqwmAxF3o{{s|NVy1KXazPaS6m6biw+>WrkYr$tn;%U|#|H^xyV51YUlXm8wdrVp 
zDqB+0TzhkKbK`#mRIk+wuVWtykfXyxY18$o@o|k4zQ}h9+1mNBV`Vip%st?YiY9xl zXiYV>P@@&^LCaF0yX8JD*RQAoh7~`u)2a&l_sDvP!Ok0};>*NB7coe#G^DVpK0HMB zd;gQoM_;olL_cnvM~(VxPK9%L+0>Ey!vv}lxOvqU?is=~nO5quKJMe#&esh~jab>) ziJSB!-7GyXdy3erU+OU{i-?~Ez|#9ay7eq4L$#}{*n(4pz{DW1bnQIK4fYzbCO)g4 zNAzzVV0*ar8N8lQQV21z?xmhNVK_bTI8vb=!M`;U6(Le-KR&$F1In?=%4|m6am#^V zayz3jn(7@dMC$A&l;u!hiXpWFZMhr@!61Rxy9gbTEImgV!DCUi^K9voaJhvv_L{KY zTGL>CPH0xUaLF`)z#aj!W|WdB+62(^{M5t)=+tEJ<7aii`2X@9UARx@oLxrgmP7!rlPEwNhlp*GLu}INGvYzJ-T!DEtt}a2f}gaPD*z z5Inx(yBXX_93ixB%i`pr!l*bjB$zY))2!q@?rN zA?z1SrV9YfLl+Pb2uBUpD7EPFo+!6{q-1Aj*BQgb04W6e)8JIHK`J$C}zyPfDr-2xlp(s1Rm=5M_Amerl(wR_o!O{bOi|fDkBM&hZ%Xz*|9SRAk4(@%Y#nu`PK|q7d1% z4E|%s%|HZ7W$l(TvN_#zncrk|nNZo<86GeV&C8+1<0r zW9V(ayM6b8av+yE2qB(4V1EWfKN;dInBhwo5iTENxe z(p2J=>EHb-N|DH9>pB2LqD!oY9<1hXg1)nb)la0?3Gx#kpDMv7fR{jQb2W8QB>_)u zJ$_!3XkQS;SCa47uNoZxcZtNQXITS!4m#-yXhg>XFjE)9$pVJi35u9%k>Dj^N2cX;P5c`=z{GgXGpGo;N6NK?x3EsZBt7}84cqiOqvf^`oN%v87^qs%I zIS2;?NI5^35eU{2iKKK=mJ8+~ZM+|Kqbi zh;{wW@BknId*WYI`fZv0_oM$@?mwsgQQpl2fOfOMu8q2p;o(Rc)c=lngaP2yUFoq0 zD;UJmujym}S=X;4qL9HVV%y)}KX0pNL(_w%R#rYnxZ$tte-Ds$v3{oq-4jTGT33P& zlv?LKn%`ggfiDT_*)G$CwB6m^C)R*jZG!Xf0#L5E?nw~XIc*00ccg-DWoc@gU~`{1V{O902SES%l`#KA^INZINndaIpDv?bXE=n_vz8 zbWlHdUKd?QI#WI0=#V}PFh14BeaSpQwOK#gKQpK`3js;S<9ZIG?)e);Jl^jQL2~HbPQN3XK2EyZIPW#J!ir6x&vR?_Gx2yo?=*LHi zk0+I+DW@{)%+4^)avK9;8$jpgcBz-`8}gHrz&L|Elx|MUdOE63buaX{)h-Xqi6;&~ z=Dr+L%@JgdcqD#YsYXrnU~w%*JU3`(sF`OHfL#~;b&I5CU@5ii^`);NGaJX2K48*h z*Dgx#17qOTzn44zB;teF6~#s=7c`e2^sKf6HdVz7X)HA85Ca!{05?Z3=ycX~Y%`k! znHF*PF)S2pvi@aLA)LS16)qRVWvX}nv?irv_6jHeIY}`T-hs8plzs0i(kbQVhfRf) zYY=uri7L3{9(1GnyDr2Dv2!JY!gGQmnYG#N&}8E1c(mmhNnFep4E80G zmVzbeY@3~iX52@zRUk3VG|`V4U(46+?G?R{v)M)o`Lts1PA_mXP^x4*)>RZ zk_vh7fAu|R1q+akhQ*Q1d|*tqsop8{Cn$N+-HIgoSo_wx!7)n>q8G9fHKjUXH{0N) z_RPr0sO#Sa5WEV?V@;AlVNFUsem~+}!1QI& zA{Q5z%g&iH@IK2XYp<#wA@t{5F=BBfwPt!OLgol=rsl0ZKnYd-LNpwuxaGl&c2YP<2Cbp-&i z;~lpMUFFaoPkvAWNFtsScm|Schrl|Jr=M#FY001cQMaR&Ncf!qLi!XCI)FU|o8hfc z)t_+{iw&T1oNo=?8HTTxe?&w?v`z!^>8Coc4ps-PuPG-eP4&9!xh*Dqy?(H0S4oHf zIi}j?j7PF@zH`CB!8}u7$r1k;9(Fw4Ujar-i?!g!&PahCWdaNo_x{=muWf74uNUlj zsG|+ptB4FBWPyLC_2&LM&p`kTTd@J~J_1y@;dk%r?dnxcu}w{>x(Vdz;|pEvzm)bM z*|D7J?ZqN%MsJB#GVF@MC!DTw4u2z*_vZ=V8kk0%QSzy9@CZ&vy2ORfdyD4ifNTnI zaGQam%X!Hv6E+<(E!vs~y1IG?8L?29|Fiy+wTIseo;gVWww`&sr27X{Q=o8``7=1l zB@0`Zl_)HS_ZTE;eGztQnn%tI0R89=v~9$-;@E|GYMUs-TA(L*hdUudlX)q_M|=+q zInw!GqGq;RpxgQpfDeuc{)g#oc*O;aGpb+WhZ})Mrd&H%aC~}ahe#@tIY>^ABbrC6 z%<45s3o`qk@ye($i6J%{fRFT1e=T#G)dd>>D29bY`<;=1ciC0XW>VmSvWmrG6nL!a z<_?IPcWmGzVl)^gFc_0Vt{=A$do9XX=#w+4^+Kmak7)&OV zpyURs@ItS4*|4$nMtsLqZLjV8pGA3j%niC=?vW4{v*}#KD~WyyyvUkU3ORYx`e|nc z@HYYLB9W!Z!oqY#ms}nw#(euS0Wt#y$67VKEcFz7D{V(ptdH?grUb*$hl>gVZlV$j2 zKK@_K!oMHA8ROwA{=dNN|Nhbc2)Do!1p&UlEZ+I~EHXGZrs*}bffoW|lNA*|@bK={ z+AeJ#9_Gp=@RJe|9UlyL0f+AM+rJ=x#SZ-0mux(j(^vNSn&o^~Ti2#bcYNi~SCbVO zf1e*-%YWm`W>Gfi&NqKm$^YE3z!NkxGITgT`hDrwFJBwQi6X#rHKBJpKekSVM?`G> z`^{C)looAi=!P?i4z1?YPo^>MvLn<8$7$Uakm}sS!1Nw1$r%Z~e~W-emg=Q`gt>US zgdtG}rnJtn(xp;{Vg$A8C-RPIIeVGn3cx!~euH3r$VU3fI!F+3i4&UXb@> z8`vuj)!+9lU>g{NaNI}QKb%OxaH*-P_O@+re{w<>ws^4-Bj}J*cu@D?#~X z!QTX@rhV$!LM0wNc6d75LwXu5YqXxnr?<&;mX}PNT#+PDTv4&AU0hsjpa<&G4>*W} zHk_14*&H$KHsaI z``2lch@XJr7evW&E%yq2VH7qbV z*v_%#U43wRx|nCNoR2IY-{x|%g~>m~eW+efuN8HV5{V5Q95&ZS_bftp*YXpGCd3>a zt*iivx0FK%X&!W{X7csdZ}txY?ANLa3M7|S{rFhyXdiE?K?d*J=Lx)Cj}U<|-u`8O zgZNMXik%UarkeG}*dW6*OpIPujC5$kM`BX^y7Y8{GsWcKhzO|Gz%b56#-fme9a3eb zYtd^v3)nyod?_HSXH`?9Y+2J2n{Rku%=JacJQ-S(>k#}E>--qd;h7wOmm=RR_v`SF zdeQjAz`ToYm%V38{CL5#ds^OrEnlCU)&LW?bjWhOhz0~GI7kJt=sGzyG5H>zWZO>- 
z3{=gH2Pi{=HXz|``sTbPm3dpdtNt!&D#ANsnbaePPczus9acxe%&54zf zo(Zv1rsBuzv0(#Ze^>S<2%8MwW+a@db?oUfnh+95M%ln{`K;~av29J|7G1G)n*LwlA#geTS!IaoN;D;hyIUBWO2wBASH2+*1fdPP!v9g z=wRUNJQ|^=Q>#6P@OQFR#>7YWX^LtH#|x#gxm;;myxvzivnxw<0aVS8J;PG84IB?_ zOy>~n&m4Sii{~bk$cc?LiFJM22P@(xpg#cD0_A>VC9EgYrt4d4{Y)CK1vq)iDhhRn z?UNI0vY%JR*4``DHi(5UF91N08XwTCaZ}2YCkq(xU>{p@v9;|-{JX{+50?8V8NBzL zAHP`Z2#kp{W=K~VxW9M3uHV%3mz9-8)k+mtt6iaiILYtdU(kUxCVhPO%1p}kl1Zy* zu(v&UQ#XlXbZSa@8RJw19QFGx(C&?8lKOBl$vJQV+yp_5@#e}y2QcwMw{jS z%wdvQ>+b5_ zgo@iplgQ#sq0NXqA1 zRr=3;)Iw!t()`^9<$V?(5x6u8^IPcMsvilpI@OU4ao=w0P4ui3_mSP-JHtq6=~4OY z45I8eAu8Z@HTc1~G38{g{MtuZ;86V3h03t?C;v&YKkIvo!d zWqz))9Y>+fqn60_YYX+pL;^3U%C+=mGb_ zvivoBJ@5kx3NpbQ(jJ8;pr@+vf$i2Mus5W(WK6!G5I4NK=?<;U7i|`jVfVq_uq|3-N#wM7+$m!H0^Xl@l z#dTPW)Ibd)btjnYY@|hSH#EeF7l8D>cjD!K#-XjTv4oyobDx~MQU9i_oZQ;ZMCElU zT=96>B#0X3H*uP9iIRj+fIkh zdbw6rU9D9rn<$uT>v;4%@gj$rUbJEi#2e-B{hpiK7RgY5btOl~&MvE*yAwe9zM`y0 zOIH)dA6MBlpDuQWHy1E0Rdi;IazRK*;J2* z}-RyOPpJbOp)o0!3NRL-)$x?kB>y=?wAKj-Nd?qVA~ zD0aIBMQAcGZK+t}>xC01)nNngU8>?tQwRhj|SCHq|S zw5zjWO&fs7Wy!9fv~<`t5n&tU1~oH-PB=)8%2U-FIgeK~=1GbDy;tm9V6fbIgpM9( z6r?voH-`zsCJrFMS97rwwi;0rmR#V?%-p;A<^?2xvt`=*$CaDuK4Obc4(qC@kUd*1 z$ITI*`ooxO<5F0N~9 z36a^Y=I%KuxbO?e7LYpoKb`si?V~?cIM=xzWy)WB_IoZ?0-xld4>Wne#)XWWGnR(g&CNs8T}m#I9??8S{&Y{ne#X_z5c_cqI$Z zx{GdViNHNz)6iFCJi){E)mO0TBgPIzB`)+7#*RJy=@)p<^47G6mGHds){<#m5*$fd zWd0XXR~;8sw{}HD5d@SH7(!Z5X@>5S?vm~ty3+tbU_iQa=#cJ`8oGNxy1Q$L@A$s= z-tYXuZ)TV|`<#7tJh9e#h_iebku4ELS*zTxdj6%*xf)t&s{V>xE6=pn-yHHa5L8h2 zt3cruPrkfC|2S6jft~dXnUB`=9`inM+?8yQg}w7YNliJ1g3sc!BPD*oXcrxN z=F7(!>NHQDR6%Qgf?wW0dL8h<62|&@vu%~{qx(h@pEF93@!L>?X^9kS50#Ox0}=&= zg_a4gmx^{YgG=ZB0~rL6do{`rkX!FG99wF(P>UzY8{RKHH)>LT?atGmD41%;)n!_d znC5E=tLENlN!ZOmRypK-7rnvvG_MxBaeokdTh3i@hu5X=d8XO1Xy?AS8?89y$7?I{ zs)>oRrG9O!I)6EcYtxInxN|q+uQz>+J^&kTgysNz_50!OR z_#EZ6Uwp5QJzX1Xy^|qe{)o^+zDt)Op3SG^hu-?5;1+W6e~-C*$gS69tPL@RPw=+) zcSt}^4lUK;OSjIc;}99yfYrf4tTAX@Ny!IO(or`xiw#$6>%t_qH*AqFMcdC7nmjvp zFD~3T-A(Ut>!hAFm00|mjXCrH0XFK&mtNWs6T$(eEE0-pC~I~Kks&!XhlwEw z3&4K$`vFr1U^b1j&4V9{B)#aVcuqH%Q2cW-1_FoI%t6fS8yZ4(*HBJc8M7exul_k8 zt3|)PLG<+Wtaet*DA6BR0jnd*$5UThyFj|{_(yP0LPf6gN2P_cGZ+2UF6%NH&u(-n zT_anQDiXhv?W5z>?l9uU*P0&w{^|C|vDeZ1rPD98e5vXevSqpWvUH||E8I*#o12>y zgKwqKH72=xz!mqBdu+<040yUf0>cOgYFTS~rsyFi7dq}|KsY`ZpuGR$JvnP_tyQjq zQvG`X6xUS%N0h$lkM*GId(9RCq1h<|pCH!Q=qDoIy%r)Ui{_v@H||^6-jNTLeilw0 z`ixYc_WM#Nf@obz&}mPfP?f#W%4q#uzb8Nu{2?+dfDo7QBev^fRBcmRlZQCs4=UA# zaFLpFxbk-z5g_iwMSgT~QRCLbid_qR60MN~6659L>*v86wu~hj1&0LxbJQGnuQWq_ z(<_App~j%(;CkT&Cg|G3Nf^hi)gwm>Rx|Y=T1@VmS18@5^YimsoCP@8*yJ;kjOA)` zS|;>j)leEK`q~B+h{=HA1J4tU@I8R&vFo*ykRo<6VpXifvnf5NHOzuXyG~lMSXs#q zfmwni{x>RpULCahA~zX&1j)O=PUl9DwOF_*FMUx`d$-VKB*|b0gnEUUxJ_sPj7yo& zWNuwpw%5@iM)Rf|3Y(Ff>UDFrYhGcIOwPo+-&R3rPLl+S z!JPVO7NF-v1I~Y#>(_?Mpq%PH&d$+-n3$NZQdOt#|9|wsYL30P_1*lDcWa|K22eB5 zvCZ42D#Q7#xw0<>b-0n;SYg}3B?P+U(k*Jieq-RrDBp0h(156?-Vua9+Y5- zk}e4DenJCKmmav?f4j8`ihvLM``@o`O-l*+aK=HzZMzmD1;{4rfQk(2pU#NCxLUS} zPbzzQ#>_}&tjOF4Xdl^aN+^0~EdEdbOc6KVJaKO; zC-n6nLTciU>hkhYm!8;QcX#)1wkCZ;9OQTa826HA${SSv^RDJqKm2rb^wseD&zqlh ziyGKRzP9(Xzw+UNj+;5`3-XwoBoA1;ie`uv&yPc>71n@UVH0AvifUmnJXuQ4(J z=rRBnHhA>!XXyX<8o;05#0CGLlK3(HlYv-uxEit6_eDY_J5K2TwK?*kT&Y9D2qKez zJt1Mp?Vx$(?7rLj?{4c@&+@ruUoX>nIiECH{yy4HX;Qf7tu#+&uc^wvM$r%e5-MP2UvVwcRv*5OT$NQ{2Z*;P zgZpmDzz-BjR@b?Ek7L>#c~ik8P`5_C@XEzQq1ndQarrF**sILH7J6Qah)WQuy_pn= z!Jx{j_^j5j1ep*SG@nRZ)r_7L`(xA za3?hljU5eN-{xQA+f{=piHVMBKsL*!EH@Y40_NaQTdM?e0RO#`wk+^XK{*$po?_mI zH=j&x?W`vP7bds(VbmQY6D0#t@bC*Tq^n>j{XUiZIFf(BKq{I=IyLdmuZ7T>V5g(}{xpg7< zC3UwD$b{lmIiuS{cQfsOypjAT4x)ZLRE1Z~`KkLCMPwMRwJ-ftB{Y 
z*HN>)8v|1O!dTDi)ew~`)l_hojL>w;8Z1g-5W%uTW}%f%^l zonMdp7+^_H)o4D`0=c?Dp#Z={S)exN5^Nor%ZL5nCq5zRFI2rBCirAObE=+ofqdr? zAmx$D@2OHfM37TU`Y9VkLE*kq@38zer}>f1>eg2Ny^&i8_~QmiI|hE?w8?GC@%`!GvhkkJ`9ew{|*eq zQQ+TJlW~W9CC_=bQ|?vA$A}QnGq#QawD+`o#$_a1$0cCOpu@eriRsi-GNjk#fu8^K zJgljyskmxz;l4F}HI#H$)%bs10T-uQAjB@hxuMWWxw=ET3vuq4h1ZWy;8lj0X2S&B zll(-5*81e6J_5n_$;tXChBu$OiV6yVTpUpCd2H+>C~|UAJ=yL0#0;byJdpz6t^gUQ zK}q1iRs$iC$W4B)PsfP20HY<)8b;Fvc=4NeEaU1t73k3z!7}i6l_jgV5 zBR?ihVAQ4b^g2n&ww)T>H}(6219DWqf@hP?9W)hoJ_ERf{L)hQXPID4ZEfN{3u|je zc8HS`Tg)(!KCXUSafV2+F*MBZfN%hb)Ie(!LQd8H4If~{BVo~@p+IjTIvxUSK|pSemaJiOH*c0sUpspWeY%dqR=PH}A;n8qm?FXs&)!nfG$okwk~Yb4}% ztK>F;ZPQ=z4D~;eY9N7@90*AQEAb8Y9F;-eNj=p9nIZHkp8x0n!b|zk)JSZI=M>yR zQ`myW*xt2DRbJep$5I`(-_8`paDLytkWd><^5|$92(L-F>Ok+G&nz5Pe|3KSU8)QxLph`{cd%}$#Z-3xwY=E>-jg`^+Q}YaMU{EVf4$m zcHRfTEL>9nvPox)hWR#pigaAYJ3+cUe`BqYAF~S!cMKlt0AM2knSTh!{rh_*5c@N^ z()Y~6a*8%aCg!6nxAQ4@!?(9S?wWajCM6C6To{22#!CSioCyz;%bvlmDMd_li3=^!<`;h z$eL3Z5~acQ*}MgZnyP9W=PV-0EB(Qh_AP@j17`0B=*T(Z#xzVgEBKwpODX zM4A)W@&7`YEP#fSCM_+fy_`X)RFNNxza{^bIa*_d?N5Acj@2O)=JN(3p|P&JQ=B=I z1*)3>UTo_55|c3G{WBHVD))uL*v+pwokiaua;yk$T}1=G>vaIc6Q}l1tR>?g*s_Ua zL`5_nCdSDh4DDv8*!@_qcg>lYodR-AfWo}w&b0k}xmakjo#k|?d(na3CGY83!(g%v zHzlPJP&pfDN&E#556`f`GL z{Q;sMwiqvRZ|M`yCxXTNKYCs4S%r$PXAbd~SOq zf-m3+0qhFnRaa7mEBR3Y}Rw|7{G_CLiYK|s>edePl}iRsp7#z7jV;-0<|!O+2<^l!j81wv*Wyl1@oF`Dxys0d zu;$uT=1L7AulCR2|MRs&Es{m+ve;VAprQ+#^!ZKc2@8EHcIhLREaShKBHL@ z_Y?WGKz6t)2mus&vE$M{ZTRhHZ_~*JL0+7|=lES(C5L!--^T_5g!YC;l;Ui2;_6lI zyc8$kBJTygxprh@b#70-T|4lEpHD?i*z+m`R)Sqi*<|%gucFxBD=++xh z0+4&*dOvaXGiJ1v4BvV6oP__d=@h@Zc?L@fq%B>-*9%nR)%V)Y31w>7N^u^t0>DRG z|3++;K+Q{*c^qEfcs&s2;`@`9QgKXr)>1CLe>xPLXgH}_R#sMkn!vYrhyD>!Bp|MZ zL#U=dAD8)oA{I7})+#`3D3htx{2Twx>$D6tB2xkb>;oW(0HnkkfI#lQSw$mpNL3L0 zNySjZit!ww6qFR0HSS(v^3vth4fxy(y!YiSpWO@xhKCj9<$W(^Es$s1&g-$7K-Kwa zqVc8Qs5m4y(1)KvZm2gTP%g`i*8t8iBzA&a2~_Tjbc6Jl0hsUN;`f~00B%I}pPmcA zt!Dk6DfT`8!&7n+moGwoGGXDANdRF30AB*=(%s1-i?G?ID=0W`@ z11Kyn3VBhvw`cpBGh|A9bA26Hat4UTYc@OXhdgGo&jM5x*cv%)XUYHw9U@Ym;fp*- z1QHfV91h?&|8p_d`OxK`$;sYuVvD~*O-$Cbg}CJy{)L=0ye4JZd+uK3rw$h(;m^aZNK{u!%! 
ztF;=Z@L{B#`+BF8ess^aXmy-^nbXQYA>MQHM_Ypv!k|7$Nk?tQUGAwcd)kOlG=qG z++=+vEiFqSa!%`*G;4uUcUNHM{u+)q@Y~iK^F$*Vwd&W)8kLhfw63-LGyi!e-DM<0 z% zj9Zq1yeI`6h`A*#SHJ|Dv!AYYGm!8EAlc_?*UZ+C`A?r{ZC3xdFP8XHu1>(0@-TCN z(KkbF+;X6<0#zxGlF7?-pg3euFQ zJ_gp)saoCyiEq67o!Hj9aJEZ`8db3aWMah**#^krX$<*-41nryTrn;y^48WnEyx3# zzB~ZKdb17;g?wnR%s-eCp6_}gqDlF?zX9u^AF#3r z?G{(smK22`wwm=Vx_p58hd09GplU_q*eS^pyop}Mo8JmW#iP}ujwwK8(7PwkUJxBQ zXuCMo4w0sF+QgBpA6`NLkh7BX#GuG{x+`pCpVgJiH_-fKt+!#)lI^?(GAHx5xeT^^ zXf_;2@G4_RL&YZW#CcdC59dtV_BO_uztki@dS!B2ga83q8Ao=6QEO+ZCwl?`l0Ya1qQ7h43f z3;uU{PX`(3C%Ziv0>I7u@?ilS-aukQ>ndUu`LWZVvy$UZh{q37B{Zm2t*j$Wc(qm1 zPD&b!%7NkxcyPPM71Td@w9R&?#rf29`!g zOY1qu4gUod0eCHrzkcb~R}3KDe(EMG0ZT4_pPM>y{5NCt7i?#(0R1Zir9)}$bFUpj z4ndlA$KIZ~3?#7LBeu8|oSZ5YXv&x`hQg*?_zhmZ=uIZvOfQNHNXkWdI9VeHRpZ2ulI=C-lN|v2&pahiJJX#AZQCb=o$zVGKYa(nmSo7 z;G8?_eK#$6M%zpC`7fpdFv~3gC8z2~52XOiKzO|GuLJXU&EgOT^w4DgLHt-g2G-NR zODgdB1bpD{yGPlQe_fEj^APeEKl?^g{alsxL60&%@D3l%yhiUrhm>%4H@Oz;5%46c zBm0t)UO95S#Tc|@=VA*lw6?Zphn=rEdWVr2nY;C6J3{5<2OrsfLjYm5o-X#>IwJF$ z9JloWtLoX88k)?=UjqY9uC9ZROc^-Azi=MS<$=KhEzywnF@n8SwuITZIgwS#+}X0X z;jjH=y(7pRyUrNQHPYtn_jx{uta4E<(U0o6HU8p$g&`Bf(>QNOWWD;1xw+~=&|Bbm#)P8f z!7r!=@+sBL`sEWV?Zx60JrY5|)*3pDuWZJO{D*005ZiN?oAqSkXfcn~@4d z;|c;E4|ff&2QJH=n{h)WaYOWbm2-AEe8N80U7K@Th!`}?2%eSTvDbP&_fLAbQys@K zR*fI-PZfvl^ez#u(@d$nDgHy+uMHgh^4uV@%x>0;cAdema@-%)ovnl_@LgvUUJQM0 zgpy(Y?td^$OblfJjGuoF)r9C1(RwW4HIoS`H!`(Z;PiISlvjb@&vC~)Nu2u6sHL#x zSl&C=@$cs(4|f-@TaO%^#~am34VpU*pf*qvLEHPiS?VCHHa9E^es1!)Nw}rSpGJZ2 z{%S-p$qpgLh5GLxc#b3*{}t3bBK_Dkc3{D!rgW* zu<8wFN=hp#c1uO678Vt{e#P{Vh0Il4k9IsmC^~+$a|6rqFqcY8RqhqP$G?JD z4QVGPrl8NZwaOuL$czb=3r=hcT);3Wi}?PHsH@9|v2l7u!_aqeA7o{v)v56CaLpBx zrwa5M0me=>0b*-*S}8VDSn^sk898s<4QDrC#eXtM)bIbG3UZfK&JX3*`36wU4^2|| zriBt09rQovGCf$|U%&3lcn3)lFu7TKzItSxt=!~weNyUofw%3>*s+?4`m_o;SxOfU zpwh^41fyQ3wF}b%wDSaWS)eAYeDM4NJ{ zg8KTr>%&tiYppg&H=w&>;^I!8UN5=kK*P%U+%2D3n~n@!4pE477M7!GG2_1kxkLf|Sr~Pu)B;#_1GQWESehf^x&2*P=!Uizo|v zU9mvaRj-_8S0u9Nz#%PlGOSyDR-MWACK?1jQID4K_|T?&VcYW^Z1zW)zO)2FE3>q?6r@z;-YTH7}7R_uefMZttJPQN$ouMAno6Gs!Z9s3LErEe>|6Dl{WI%w#nvO(^<0>03~&5%`YK^MLeC9 z*ogdR7UF4l_c&pbv%@mj>HJTN|O2W(#Kq)Oac!z8p6uPz7lD+dFOA+RdX+j~gFHy4gwjs+g)RvSumr{Slfb#TxfT2vX&=)UOeb+y zHw_+cEgIeT(9bT@xx6)|!}p0r^IwRroyFPji^F6!JbYeGDe4RQ52-syUuA4G*4iI^ z%xYu~Q%T_?tr}I3Sm09?xEYnKrJMOSeIM;^Q-i?&BZ_7~u~hGz@+pht|NVS26Gp?=pg3 zKeJRCe!10=17NezO)}6a1U-I>fYj&Ld!C%xM^q`3u8)Wt&?C_CNM}lM04+{yxkpN2 zd5YvkwiK);JNPbB#k$KodrgKzqy@eR*{|is`!!Ydmj?^mN67}N% z3pidggi7R=X0ueivbB|ZzW!1U*2LYWUiM*LVzyKj`-djJmJ)MhhS=)Bb}lZD%}o^_ zz|C#)(_4B?=7AMola}DenBI)umWRwt1y@9T@r-K1tdZeRT=DD!@%ZiK=QSc{$Me>T z8huycQWBuc{ROOccsOTLQhK@+iUPTi-DOod(?QMgnAMc3zxDaqIYrXm+6=x!u{=(( z$o)B&hKfI~|K@b(%MSs{*VA#9)S{?0M*E-8(dAPe`I(ga~H=MB}`EhrtX zgyMbC8{(hbbSK~csSnb#y-AHKY0AkKB^7`sx5>+ zUvKN^c~TGzq6-x$5d{&9egvJKp42h%2bO&U`@N`&jQd1~dzJy^*9nUfP0g3`S%zb9 zV=j5UH{uUOUT#muG*bCMohrU+{V;N^`Jt?GIb(J+-rpfMyA+5N*}IsS#r#AB+RktGJ|#OU@N(avgv;M18e6zP9F55vurL6%=^!phG&6)e0q+? 
zY;Ix3PR-`k>EMN4+}F89DIy|*dUKJ+=EyI76?nTsFO-}3lg7CXDyusq=W;OL@_{@& z-~#~~Rja)S<%B+1s;ijJA|b4E*x^lm(zqnGfZ-{2-{2VzJnT_}6j zRE;D}#5D`+8(jH8p#B1@vzLC=$Gr{h7oeT&FA!?m$(b~(GDe5>%Lzb;G8AY>$;qU| zf)30mYZ#uTXPM2fiB|u4G7y$!62s^@@GhZ7I^t2WEYUDm)hLTtJx!5(qT+V;Qp+{8 zl-@k=g)9Cs2djmtrJYrnb~XkzODBm_I)1~qL0dho!2AS!oA;{oUF}917|})8#9Q`{ zFZ;QadnakccoXN%kG9zjzXwyflnlNk6af{e`e(Voe_yry49$hU`UXkU=Xnv&fAz9i zy_}7nbW@R)Et`#T8NZ);GLuPDJ_cfl~!b$ELd*4&gV-`k=aY@OoUlo}_8q<~8Y}Q)172{b$pNNrIh6b}Y zoWX=SuDA(pnxk;P^UItN3q$$X)V!CI3E;=p-yO*80^=pDnllvyx3i7j<;9JznTA`M zxKH(7Z-abfnMXtTlfGu?b=XwC5oTHRv4X9P%jSZXjm`NjuZyTu{d5RP%NAQYLK3T> zv}jVaBr66@EzB)*LfeiJ$__FzX3Ia*KW^OsBuVd*Wo;Ap=c9mthk(?mSi+gOrw_xT z-Dc)1V!`53+7pq%6Q%Y@oN9>hq@)48uVz!Q1*wrssxz9b|}^ zN6Z=Z%R_)uyp3PG;8>*`)s#eRh#~fDjr}})<85eNkXhujhz-L6j8{TAbATwj#x28K zjp5-MljnYsfjkb+dsmVZrp70*<@(M5%TKJ#h_@%l)H*OFgYcSpH_KH6m&{i0iLbaY z=0f|6Z-@TrjpH0868ncNRgo}$VwM^{I5)iQJ2-4jNUt$Gc~Vp&Kpx?r9(Gd(=@ zBS@ZyV-5YVzvtp=HC)H>9`-z}p?&%Bd}3CDwA?MGkubSgXN(<}pK^bdRRNY$QvT=q zu!rQptQT+kkLaJ!^cR)eJ4xr8<#MA%I#9Upm5cF(FF(S8%8-PuI~k3=(Gm#mo}lKE z8GvSEv0(h}`#GjJ;MW((rh2tpMxz*WoHsFb(W%eK^Nu1LJgp!GLVNk1`a#@=&FanT z{1_>RsTFO%=(#urB1GVN)cgcxD(8kYn+l<$B3|VXF($rXZ%9fkSl;uySS(IRLGQD8 zap|z&us$b^v^B{gy6Wfi%!?s?>y$m%BnL6tX0}HmU5EGh)#foQEQ^U z(c6$Zps3&^mlRdHf~my$QAXDoHUF*F`=-b0s}80Qzw|?Sz0LNxK9ia4nyO8amvqv% zbM)NpD4Kv?q&@|Aj+;__5qnz1LsDcsME6NCl_Zi%;a&F)r{~H+@7uuqsY_?Sq^AZo zYb~F8K6mjF99#PM6eaUPOp6k%b+#67&6+rg55_rAJC;KNa?j;I(Nv9S{TS#KJ0Jg< zVzC`9KM?b2q3LA4cG%L^;N+yxMN;J}O2H{^x?*QyX*SumJ2~7y7`dG?vAkU*-$Jrc zSlG&@`il_2V`tmZVWsxc%4+vTfqx~j-{(bU_}%c3mU?W<%EP8~o{E#7WIVGnROaz- zW{`tzT>VBaUJm351e!>A?j|n=_-frpG*Z9R;YPqHuF=m0oP~t0zTgxSaidG0atc-- zG@Yh+4cE|`Ida*ly6qIVQB$NpuUBqzNPkV?k`c7-*XEq=eRuBfVjYf);67_BJ8?P4 zvA9ptY+>v582$3BZ(%@CfelDk4mk9dNuc9gd9U~DlTcs>&AhP18wmM{@y^1Rl<*sE zO+8t~^kR!g`cto+7oE=rPA<<&eW8?Pt$xitkLz!%LmFgpRc>en4{p&8%RVID*3>o; zcZoIfPn^}J#k;6UoNSd<$~>4c`#HWzU3ojma~y!jc(f^mBo^fDm`B+Pzq^?2u4WK4 zs8FrDXOua>b$_m#JM8k_UCnI~dEoA894->FimwtOoi{N^7f5UGISxwAdJ(VZmDtKB zhdu8vWP{e(JicBb%Vf24uN5wElh>{baCgSVJ&+mD)L2qvmE$Rc{mj#Qg|s;EFOF#5 zsz_Vj2T*Om1A<8;M5onH$e-)}nEwD8PobG?U=PUZLWko_a8BIE6VvB}@8_^|gf84k zuIef+Vc)G5oA1A3>{pa{??g|Wji*;ujoTR9F*0u`C4?2f$Yu{=;eUrJjAiCA!d=MF z4S&p1P*k~>Yz?;DlRv8rI?XNFP%?=M!$HeWWhK%&CLV_$o92_)_BXTH_YTO^c+U$y zqI0TIz9&>Yy1Gs_`nFv(s8yv1qWm8AvQOa|1p||6aQCdKA>p3pTM1B*2X3b24pYbFetk8HKh1>#3&>+INS&5S%&MpZ}qvv|Z? zSeK=jEECxW`cMeVL@hWkz-J$Yw}*O;Wqc9C5%l;7%1e<>Wg|ms-oGdUnJ4v@+!4B7 z^v&2mYU~4xG`;?sT|JEDGx6rKsP(nHZ8sxWTyE)@PRL4BFCU?RATIv&ajH()WL0tI zZNyK};@ixsyOi|Yw%R6a4#KIpSQOC5?hy82K}7@+$rZ&X9SMrpgO=umwY#TjHe{?9 zVI7q!Igb^R4g>_(+X`Mv8E~YPvzc4I}1)QSU_d@N>&W}7A<0x zMFzuk`6xb<@RSKVPWQ2M@!K}mHUTov)m?r11m3jPcAhn-p+4}Hl0sMbFrV*e)o~px;`!{_h1a^dbdbvWo{hmdqo9_`DBO|Gl+N|8`XYse zKE=U@Qq*T>iPsWb?pETGlv z*#De)bv6%r^`@9>-Vb4G>72Mbvv{hfr;J)9oND_0aBRlpef3K-=*Wi?4560*yoP7mIH)>{s}bNKXE! 
z7nU$dOwO>`gmRmJWIQs5MwhohM_F`oD}Wrs!V7quMQ87t!(vA zpO>5JBa#3N+{Kbww&Uq9BjNu}D(}AU6CA_@cXC8Y?C%fQz=xU-GRX`6Oma+)#Xas$ z-I8IUq83SmsT^i8>?J7{_E(>l6ofXBSobudsTFT|i7R*v5JajrZmdv$>)qz*dFQ6+ zlg5R6H&8vG3{QSI9fOhlb&FXy9}cymMiKBG+7=3KD%`Vf5nG%4-DG+T&wfPYLY8Js zhsdwC8X7AP`5@IY8)o3(Tm7nTOvwh_l*D7<5KHHU4)X`SK9#tB&R-;)H9s9`fb0u0 zdEw}3(<)%~J={$#NX37fbHc#JhQgU_4ox&LE5*J)iog-7gaCg z?k>=?C>luqxdU?ZY9l`UhcUorSyea|TWim5l}(?+que8`^CkhAPmR5Kzhe}H#r))n zXCI-|$nue`rOCXDp-+9aZ&nuc=mmr{9Rm{cWmT2oaZR+?L@4S`-$K*J=RiG|t=29u z;Y)+#5v0>36|Tv8&1Atn%mO+KKuGI1hV5jm5)n7+)#2Y0bD$zLoo6nMvYZ&qdD z|D1FnvVK@~3|@Ss)g8C0?-D8ivzk~FGH@@_p>V3jTHENaXGini?Y@_+Oj^Uvg+5gX zV>|07gI_=D6C-zcAi5hA2c51j+LIA8kzh;(Fl>nOjueU*{ygplRymE_@>iAz`b}>P z^Xa^&Hfjb-!)w!Tr}W>#wZj9wVY00T(!fPnA+z?i@NHK@Gr*8#EPduweEzyF1Isr zek*l7U;JJ&5tFQM5*uP07FBWlHOpPyrGj|;!BLboidB!`TZ%FfT_ z5b{s^VN6E{_k_|SK`X>#>gTm2Z&w55WI0YK#^DO79}qK!kCh8*iZY->W4FLbi=(4> zDr@tb`|AEYErZ$W^U6z>yT5e_K7bC6bM(xW+zWEQmJio+CVZKoKe2)x;nrAwuB-O- z>$$UEBbML5GA&W63w6loqcgcj7G=QX%m@Fd30t+MRdN}&W=twLW_nlZF)N$?kYb^T zz?8_0;8}9p54Y0G*bk|C_2PWGJIwFJs=JchUgh*f6Af1go#uY-)9@x%&OKn4X4#$C zT8a2kM#4+#qbq>4R{bEST5+ABQdK_ismNDJT{D#juxoKSFDOl*zkGSyBdTa__%%@1 z%4l2$B3;rGUh!|CTykYkGdwoK<~tinrbpKpd$CPiA;J@G`2Gs6jcsV@v78gH+ZV09 zUT8&Ko>mq@#ihv4Y^-6E-rY>K1Vz1Pu<%efwRy_ zZ5b)6SrOWX=p1q*whn6QBCRSo;*v9=IXt6r%>K)m^F0i;v^}ygnF`(C#CfSTya=L1 zYDidqHeFfwC-8C4QQ(;5&T$c7@eyk!{k9mR-CFx<3482hBXFuf^sOZZK^6$Uzdutb zBZs(<`ov?cwch&q^nGWUQ@Atwgz)%dRhtDBT5tw+-(F8KKk#Aqb1pERFyDrLVrHu> zw6C^$G5l~R z5IFa}aqncptjf^-+W~wkPu4YU$6U`1OCl5YwfAQ~=r1(CWDlqAw>R@eUzsDf$hQ+sy3l^d(+N(|GBn^EJcEKcD zPmMLcnX9l*SPGaiOt4z+MQ-Q4zKqeREwnE47MfE%h_7vFv8WHdcEo&na6b^cpgpku0u>#t159-j;{IT9mMwf(o?DK z_eqTllS89kVVh~*;fu&Wn+t7(I1P(T_tv;~okI~DhE@yaN2zaA3@8~tN&>vgdYsW0_>1RlKJv zjHA^!RbBM4q#~bxT3$v?MFr*8!|J)umGcYcwQbhbp}_D4md88CYQl5P=cG9>#@Bit znW`cKe?wHWdUTuYo^L};!8ElJYEd{jJiPoJ`CA~xLycKngjSxDXDkq^+ddD93cEx7 zYKGv5%un+i8}~=fB@@#1J_#EBt=u|4y*VQt-?T%(ECqTUnNaF`USzZGsfnLYHzaJB zgl^wab({tJ`U;g)9%k1u!@7|x%Q&!+-U>NMn;2O(`Bd2*ZP4w%nj@4L0ybanGCCQ$ zdE6?W=j#LFy-nX*KH7^*-_v`?V^_CKbx>lUH}`djOagSy42iONAcUF8KP}KdWVAGy z8XeBNl4MIB;7-JtYS$q{B(#(+>#~p8!Qram9jHiM6+AumnbT#nnK`#>xvJ5s0y-2i zZ`xWSoUX}tM3yh^*+dUY=eBIM6~j!=tID0kj&sgV(E;ODXG$ZNfZ>9Zz-)=9_ansZ z_bBrsnIdSepblro4C2yA?e!CdFmxfhp;9t|B%a~g%e)8%HK|ZC>z;AeK)hwiqSS$# zO(&2SeP3zAhsQqlB<&`ZNf-v*T&AY9wRD$2nP+CFV1fj4ypP?G93n+CKeZpZ5LB^Z zz8a7LAwTRC$CfWXa*Hi#owJ`a_nN8taJNH&vRc7V#?krNhOT$s0E%J64F&d+m12^-wXoXUFNfYsOg4#h>!p#NmhH`l)pUZ0 zVPvUZf5Ibz;U{OI|dPzxK1^J^y{Z*{jgm^}vve z8P{pLM-=yfl5@H^NP6!{Y28YQqoaZMIp>|uAqxViG3B)+?-&3kG;KO+$HdTzw)eT) zz4zPHStOA(+wj&1;%M)?n3CTpP6EqxaMQz3ZwcI2hkq&g!+nzA4+PA<(o>$gDTTWq zKO<)og5_!Nj%yV`2G=L?nNUp8^kFZr9X&-YYa2Yf#j00ww(hAdM?ED5x6`vs=Hs#5 zTo#_cq1c1}Z7AsGCdVtHD6zn_1EMlk9ualR@`1Gcy_^`x%v-)QROZ_@L#nnFY&aPUs$ zKG{|4;EYqrlXio3)v=Fi^}&Mn=#paJ&I=$D{)s^fxgReLKE@}AB@|@tt{<0-%d8%5 zTrM|HY4-2fwJD|PZ?M6m+YekHPtErMw4E`F6rf!hqR|N_A4CY$f8{D9+8ce?cHBkm zZQ^nE^W`%hlA=A2MO`IGC-D6+K6}U?M*9wI8XLPDA6u0#ypxTsXaLDF%y0`-oE+h1 z{&!!RN;NWPv)U7eQ~pF)910vZn`5oI`yXmoISvZk-L+k|Ty9=Ab>AcRI{rlVGdI4F zbcx+?-@?hlGK#bN*1C8LpFW*dAfx0yV0%vKc?VTYt%W#B7G1_gAWzEktftq>U2RU|iz+|xX+c{4Ir63VNdUCk3gz+V zhRB`U4X);6z-0{WwFm-}L^DwlEBdwLEu7?XLAVl_f}U^G+9S5ga9)JXkeUoxb`zZ@ zc~_SB`%gVJe!UH1{{EgA%C6~zrcogXEV;*MLeP^TIdb%% zcb}o9WLI|_1=S>NU2a`$`ZDj5xSwY~n|0%VKaJIy%Mnjz64*^+K3X-(n73d`_A;ZyhS4f)wO0Vxys&|7gd?j_1- zC`0j`NE>6rI0mod{OQw17G!iUaK}_-IiK1Qh&&T*OvMklv1lx?LR*RyKtJ476WPfI zE9kZg$uS)H=ktlGm;F6*A%02^V2l=d05Y4wm~#Jft#kxz2;6EbN97>ey?P5 zM^q?e5W(*lDh4mg9G{wKd&6hPOCs2C2ElCnO=g#GYDcp`q0O~A#_2LYZ$_6VKb1~C 
z%gtBxcB;3)oR+StHMw2cb@n?CWATlo7JH@J8ezJZoYXQ;dcpP8CW#F0r?Gk0t01!d z&Pf}WvXn)j#tvt^X?w!(5XpMllb^)B@3uAj8~_0th^pvW{oxP4-)vUrN$&Y_D z#jWQieN_pDmQFb@0a1VPy07VX-tcOi1x@j|Edsf%?_ulU;yp;ym@#Qw12ukOT-ot7(MlmPdpC5+E^nip9~! z?7JN{ZR!cuoCBHl4MQ-!cYpWXZMQ|x#`w6m7+@PYzOrX8xzE>)Oq~`4Hnr!4^szpYYhn>d?o008uhc zDGMjfD!Ta1cB`ls0Fj<<2H160-N+h$+1SJ^JPxf%^#hDd_ynJ2fEN?09Qc~HbvQQG z&mL3MSOho%*SyYyKgN3+goO!X!0blqXINgwT}ezXCzj+31j$V^x@H^{-VxTOK^zXf z^cCu?zP_<{Gp4Guo{QZ|ZQI%r{T}!-Ly?Sd3NRUoCF`2K1#p>IfsL29Q##4d3_rq; zl3Jt6;yC@7iCog|*X5!$I2>}B!k2Dm(yON!7h(}20U9$&n$s`eaa3i0SJp0~jarIx z5wwwHyO@VO=z$D%y-M{7eDtH|TKph*556GUXpGu7BZ|n4WI|)!HO?PZwV{n3s%T@t zO-9k7a)!GZ+fSn~V-#C;AxeT)lnHibMC#Bd{n<{W4%MO2mQ*6sPPtb+I`iDCW(E00 zjXSh4lQ;V!Ck^s74xXaM|E1FGLx=QG8x?k6voafzq)D?=#W_}Qxu-gF^jg%}D`UR_ z>?>ObKOu?LjQErl`$3P~t^Vz560u<)%UGSpqCk~hFW^f8Wk$7;gs_H!pw5i#zdz2u@XR{nx?09Hy-0wV$CEc*7R<)?=cxmal zdiK?(=%i>lO6$@ED%$AlPD2}oSu&}mENSC{P~(j&$!~xCYimb*`jiRuk9gQaa_yrY@$eu2=!ZAXTf~eub^=%;?3J&0IUba3 zdEo&E=!@o*=@dM5Ae&2M{_Zb$L6YsI|HnW4Q5e@&)&V^UsRO|OagTf?#Ixevzy7ta z<-#0t0}mPo5Y;dMcOq{@`dvHpx%ARY_S$V%m$cd2zx7T>T*KA-*cr-pI55CZl@x$p(gd+yOc{z;vsw7DvWPM0$Eq!r+^pZT=ERkE@rckgJ3 z2#0wEG^;E*JB|UMcZ;UfKJoF7@lOEvXTyy67zt)Kk+tR3OViI*IDk4X{Yj}qKmyCB+_6}5azsa~e_ ziNlrVB4KT8f>s>SsV!LvYtptGCPFq=SKh=kttRPBkM+dNs*IkD`I~4N^bLnPk|a#C z> z0#CPq>ffkSZ-vuIjTPn8_O9-;LBGMzb19cG=?j-!5N!;EMW&c8BD{@}ql$*=-s3L+ z!8W5!#q8vsiBVzY9f9CQOGBnS%*F`d?te|uEaVloXivL+M?)JIa4dv2dV|^-#+1*p=*i}x+;>Rv`j4edGZKzP z0J5O#pnuQOCl)%h4Yu#NvXnk;><=t0vdQ|-sN0}4o;#5?2E)PYnIL@Bkw+{G_x3B9 z^lhEQot1!o1GrSS`F^)U<=BREERHsYL7U}mEr`sw^|hZNj16QmSw7z;=PO!B-#BMF zrccIFiZ(7~DND{cNfwP7S0G;##Eg);{F{H{IP&nrZ@5FtQkJrQ{W@kDwB1SqbBuN= zJ~jZ3GWN-{jsEt#B_QD`b7sFXpOUnJmCIj zc*R)b4k;I2cp+1N=x4=EoO9ItZpH;m+PIW1ZCrpXo;KDZdhC2-2(y&iub4P*-so+M zHZGNI0b+ZidU-9BHkQ?}?~cAizTtJRwW6XqLAmvvO_sEADN9-ILp>8}ylEtpAmjBb z$ysNedC>m*`^;mi@AY?FS;|tby85bs^+y?TuRDLfkuT+pGtRJi(T=<$gHW@}P4WKH z7r$V7-zs8Q(#GkgRDb6YTfg*gZeO%9TWW%gP@~0|vd8P4P_RX7*;-!2>JLU)>dEv| z&lvCAULJEPOWJ7qr!6T={rkV|-NkiPQ2>Tfx?zZW8O$gHYk@IyNGgb@khr_Md#*%J z#^<^1z5duJZr{36di{a$1JD;+^~Bdkm>3_AKO-9gwXxLH7~dJ!q4lgRue7)Q@9FNE zo}Rva5!A*yUVLqA8K5=-Y9jz;Jg{*uh)j)}fx-x&HmZ$)+6Xj5Z3Li<2R0Uw8bw9` zwNY&Z)J8yU1aK?rr~?~g4ryEmKy6eT0ksimj@k%7)edZoKcsOj0N`2Pn0Ts<*IAYq z1vpKUkoE@i`f!pwzDX~~2J==0e3=-2_))NVb(J?1003x#+Nd@HYGVvQZ3F-SwNY*S zwY>0qrI?x7kL3lmF#yy?002-M)yC3hX7yMb!<%%NT({=T?9+I4Ya;*vsEvG`oh(;p zF|%)TQ*~7v&CIG=8vy`7ZR8-i?myV;+}ka2X4ZeOca$V~eYi~1=L$=hnH?n8IRF5t zjQ|ww@8;Tv@u4?kEj=^)Fh2BWeI*9~fJSozQFJ`N2s?0D00000NkvXXu0mjfsklyV From 1bb04bbdfa27a31cf081b64fb91d5be9fdb8ce0a Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 10 May 2021 23:59:28 +0200 Subject: [PATCH 1118/1270] MAINT: Remove the PEP 604 pipe operator --- numpy/__init__.pyi | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8824497f08ee..01a08f24a749 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1063,7 +1063,7 @@ class dtype(Generic[_DTypeScalar_co]): @overload def __getitem__(self: dtype[void], key: List[str]) -> dtype[void]: ... @overload - def __getitem__(self: dtype[void], key: str | int) -> dtype[Any]: ... + def __getitem__(self: dtype[void], key: Union[str, int]) -> dtype[Any]: ... 
# NOTE: In the future 1-based multiplications will also yield `void` dtypes @overload @@ -2566,7 +2566,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # `np.generic` does not support inplace operations @overload # type: ignore[misc] - def __iadd__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __iadd__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __iadd__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... @overload @@ -2587,7 +2587,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __iadd__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __isub__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __isub__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __isub__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... @overload @@ -2606,7 +2606,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __isub__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __imul__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __imul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __imul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... @overload @@ -2625,7 +2625,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __imul__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __itruediv__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __itruediv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __itruediv__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... @overload @@ -2640,7 +2640,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __itruediv__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __ifloordiv__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __ifloordiv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ifloordiv__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... @overload @@ -2659,7 +2659,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ifloordiv__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __ipow__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __ipow__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ipow__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... @overload @@ -2674,7 +2674,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ipow__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... 
@overload # type: ignore[misc] - def __imod__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __imod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __imod__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... @overload @@ -2689,7 +2689,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __imod__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __ilshift__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __ilshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ilshift__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... @overload @@ -2700,7 +2700,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ilshift__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __irshift__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __irshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __irshift__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... @overload @@ -2711,7 +2711,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __irshift__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __iand__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __iand__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __iand__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... @overload @@ -2724,7 +2724,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __iand__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __ixor__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __ixor__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ixor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... @overload @@ -2737,7 +2737,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ixor__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... @overload # type: ignore[misc] - def __ior__(self: _ArrayND[Any], other: _NestedSequence[str | bytes]) -> NoReturn: ... + def __ior__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ior__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... 
@overload From 16f7824b4d935b6aee98298ca4123d57174a6f2e Mon Sep 17 00:00:00 2001 From: yetanothercheer Date: Tue, 11 May 2021 14:28:37 +0800 Subject: [PATCH 1119/1270] BUG: fix potential buffer overflow(#18939) --- numpy/core/src/multiarray/ctors.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 57cfa1e368a8..7907fb93046a 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -668,6 +668,14 @@ PyArray_NewFromDescr_int( int i; npy_intp nbytes; + if ((unsigned int)nd > (unsigned int)NPY_MAXDIMS) { + PyErr_Format(PyExc_ValueError, + "number of dimensions must be within [0, %d]", + NPY_MAXDIMS); + Py_DECREF(descr); + return NULL; + } + if (descr->subarray) { PyObject *ret; npy_intp newdims[2*NPY_MAXDIMS]; @@ -687,14 +695,6 @@ PyArray_NewFromDescr_int( return ret; } - if ((unsigned int)nd > (unsigned int)NPY_MAXDIMS) { - PyErr_Format(PyExc_ValueError, - "number of dimensions must be within [0, %d]", - NPY_MAXDIMS); - Py_DECREF(descr); - return NULL; - } - /* Check datatype element size */ nbytes = descr->elsize; if (PyDataType_ISUNSIZED(descr)) { From b4d52f4fc9b11c7b176fbb33028f0a3fc9cd7edd Mon Sep 17 00:00:00 2001 From: Kasia <39829548+katleszek@users.noreply.github.com> Date: Tue, 11 May 2021 09:22:31 +0200 Subject: [PATCH 1120/1270] Update numpy/core/_add_newdocs_scalars.py Co-authored-by: Matti Picus --- numpy/core/_add_newdocs_scalars.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py index b1038f1d05ea..a02a36ec0520 100644 --- a/numpy/core/_add_newdocs_scalars.py +++ b/numpy/core/_add_newdocs_scalars.py @@ -218,8 +218,8 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): add_newdoc_for_scalar_type('datetime64', [], """ - A datetime is stored as a 64-bit integer. - If created from integer, counting from ``1970-01-01T00:00:00``. + If created from a 64-bit integer, it represents an offset from + ``1970-01-01T00:00:00``. If created from string, shown as ISO 8601 date or datetime format. >>> np.datetime64(10, 'Y') From afddff9c543302db53bf8572aa4ec80fd6bb470f Mon Sep 17 00:00:00 2001 From: Kasia <39829548+katleszek@users.noreply.github.com> Date: Tue, 11 May 2021 09:22:42 +0200 Subject: [PATCH 1121/1270] Update numpy/core/_add_newdocs_scalars.py Co-authored-by: Matti Picus --- numpy/core/_add_newdocs_scalars.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py index a02a36ec0520..9c6a5bd3311a 100644 --- a/numpy/core/_add_newdocs_scalars.py +++ b/numpy/core/_add_newdocs_scalars.py @@ -220,7 +220,8 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): """ If created from a 64-bit integer, it represents an offset from ``1970-01-01T00:00:00``. - If created from string, shown as ISO 8601 date or datetime format. + If created from string, the string must be in ISO 8601 date + or datetime format. 
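    For instance, both construction paths can be exercised as sketched
    below (an illustrative aside; the unit code ``'ns'`` and the dates
    are arbitrary example values):

    >>> np.datetime64(123, 'ns')
    numpy.datetime64('1970-01-01T00:00:00.000000123')
    >>> np.datetime64('2021-05-11')
    numpy.datetime64('2021-05-11')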
>>> np.datetime64(10, 'Y') numpy.datetime64('1980') From 67db8f707a711c0d9bad49792fdb31674bb85d3b Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Fri, 7 May 2021 17:03:45 +0100 Subject: [PATCH 1122/1270] DOC: Update performance for new PRNG Add performance benchmarks for PCG64DXSM --- doc/source/reference/random/performance.rst | 129 ++++++++++---------- 1 file changed, 64 insertions(+), 65 deletions(-) diff --git a/doc/source/reference/random/performance.rst b/doc/source/reference/random/performance.rst index 812c719f8750..85855be59363 100644 --- a/doc/source/reference/random/performance.rst +++ b/doc/source/reference/random/performance.rst @@ -42,49 +42,48 @@ Integer performance has a similar ordering. The pattern is similar for other, more complex generators. The normal performance of the legacy `RandomState` generator is much -lower than the other since it uses the Box-Muller transformation rather -than the Ziggurat generator. The performance gap for Exponentials is also +lower than the other since it uses the Box-Muller transform rather +than the Ziggurat method. The performance gap for Exponentials is also large due to the cost of computing the log function to invert the CDF. -The column labeled MT19973 is used the same 32-bit generator as -`RandomState` but produces random values using -`Generator`. +The column labeled MT19973 uses the same 32-bit generator as +`RandomState` but produces random variates using `Generator`. .. csv-table:: - :header: ,MT19937,PCG64,Philox,SFC64,RandomState - :widths: 14,14,14,14,14,14 - - 32-bit Unsigned Ints,3.2,2.7,4.9,2.7,3.2 - 64-bit Unsigned Ints,5.6,3.7,6.3,2.9,5.7 - Uniforms,7.3,4.1,8.1,3.1,7.3 - Normals,13.1,10.2,13.5,7.8,34.6 - Exponentials,7.9,5.4,8.5,4.1,40.3 - Gammas,34.8,28.0,34.7,25.1,58.1 - Binomials,25.0,21.4,26.1,19.5,25.2 - Laplaces,45.1,40.7,45.5,38.1,45.6 - Poissons,67.6,52.4,69.2,46.4,78.1 + :header: ,MT19937,PCG64,PCG64DXSM,Philox,SFC64,RandomState + :widths: 14,14,14,14,14,14,14 + + 32-bit Unsigned Ints,3.3,1.9,2.0,3.3,1.8,3.1 + 64-bit Unsigned Ints,5.6,3.2,2.9,4.9,2.5,5.5 + Uniforms,5.9,3.1,2.9,5.0,2.6,6.0 + Normals,13.9,10.8,10.5,12.0,8.3,56.8 + Exponentials,9.1,6.0,5.8,8.1,5.4,63.9 + Gammas,37.2,30.8,28.9,34.0,27.5,77.0 + Binomials,21.3,17.4,17.6,19.3,15.6,21.4 + Laplaces,73.2,72.3,76.1,73.0,72.3,82.5 + Poissons,111.7,103.4,100.5,109.4,90.7,115.2 The next table presents the performance in percentage relative to values generated by the legacy generator, ``RandomState(MT19937())``. The overall performance was computed using a geometric mean. .. csv-table:: - :header: ,MT19937,PCG64,Philox,SFC64 - :widths: 14,14,14,14,14 - - 32-bit Unsigned Ints,101,121,67,121 - 64-bit Unsigned Ints,102,156,91,199 - Uniforms,100,179,90,235 - Normals,263,338,257,443 - Exponentials,507,752,474,985 - Gammas,167,207,167,231 - Binomials,101,118,96,129 - Laplaces,101,112,100,120 - Poissons,116,149,113,168 - Overall,144,192,132,225 + :header: ,MT19937,PCG64,PCG64DXSM,Philox,SFC64 + :widths: 14,14,14,14,14,14 + + 32-bit Unsigned Ints,96,162,160,96,175 + 64-bit Unsigned Ints,97,171,188,113,218 + Uniforms,102,192,206,121,233 + Normals,409,526,541,471,684 + Exponentials,701,1071,1101,784,1179 + Gammas,207,250,266,227,281 + Binomials,100,123,122,111,138 + Laplaces,113,114,108,113,114 + Poissons,103,111,115,105,127 + Overall,159,219,225,174,251 .. note:: - All timings were taken using Linux on an i5-3570 processor. + All timings were taken using Linux on an AMD Ryzen 9 3900X processor. 
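As a rough way to reproduce a single cell of the tables above, the bit
generators can be timed directly through `Generator` (a minimal sketch; the
sample size, seed and number of repeats are arbitrary choices, not the exact
script used to produce the published figures):

.. code-block:: python

    from timeit import timeit
    import numpy as np

    n = 1_000_000  # number of standard normals drawn per timing run
    gen = np.random.Generator(np.random.PCG64(1234))
    legacy = np.random.RandomState(1234)

    # average wall-clock time per call, converted to milliseconds
    t_gen = 1e3 * timeit(lambda: gen.standard_normal(n), number=10) / 10
    t_legacy = 1e3 * timeit(lambda: legacy.standard_normal(n), number=10) / 10
    print(f"Generator(PCG64): {t_gen:.1f} ms, RandomState: {t_legacy:.1f} ms")

Absolute numbers depend on the hardware and compiler used; only the relative
ordering is meaningful.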
Performance on different Operating Systems ****************************************** @@ -101,33 +100,33 @@ across tables. 64-bit Linux ~~~~~~~~~~~~ -=================== ========= ======= ======== ======= -Distribution MT19937 PCG64 Philox SFC64 -=================== ========= ======= ======== ======= -32-bit Unsigned Int 100 119.8 67.7 120.2 -64-bit Unsigned Int 100 152.9 90.8 213.3 -Uniforms 100 179.0 87.0 232.0 -Normals 100 128.5 99.2 167.8 -Exponentials 100 148.3 93.0 189.3 -**Overall** 100 144.3 86.8 180.0 -=================== ========= ======= ======== ======= +===================== ========= ======= =========== ======== ======= +Distribution MT19937 PCG64 PCG64DXSM Philox SFC64 +===================== ========= ======= =========== ======== ======= +32-bit Unsigned Ints 100 168 166 100 182 +64-bit Unsigned Ints 100 176 193 116 224 +Uniforms 100 188 202 118 228 +Normals 100 128 132 115 167 +Exponentials 100 152 157 111 168 +Overall 100 161 168 112 192 +===================== ========= ======= =========== ======== ======= 64-bit Windows ~~~~~~~~~~~~~~ -The relative performance on 64-bit Linux and 64-bit Windows is broadly similar. - +The relative performance on 64-bit Linux and 64-bit Windows is broadly similar +with the notable exception of the Philox generator. -=================== ========= ======= ======== ======= -Distribution MT19937 PCG64 Philox SFC64 -=================== ========= ======= ======== ======= -32-bit Unsigned Int 100 129.1 35.0 135.0 -64-bit Unsigned Int 100 146.9 35.7 176.5 -Uniforms 100 165.0 37.0 192.0 -Normals 100 128.5 48.5 158.0 -Exponentials 100 151.6 39.0 172.8 -**Overall** 100 143.6 38.7 165.7 -=================== ========= ======= ======== ======= +===================== ========= ======= =========== ======== ======= +Distribution MT19937 PCG64 PCG64DXSM Philox SFC64 +===================== ========= ======= =========== ======== ======= +32-bit Unsigned Ints 100 155 131 29 150 +64-bit Unsigned Ints 100 157 143 25 154 +Uniforms 100 151 144 24 155 +Normals 100 129 128 37 150 +Exponentials 100 150 145 28 159 +**Overall** 100 148 138 28 154 +===================== ========= ======= =========== ======== ======= 32-bit Windows @@ -137,20 +136,20 @@ The performance of 64-bit generators on 32-bit Windows is much lower than on 64- operating systems due to register width. MT19937, the generator that has been in NumPy since 2005, operates on 32-bit integers. -=================== ========= ======= ======== ======= -Distribution MT19937 PCG64 Philox SFC64 -=================== ========= ======= ======== ======= -32-bit Unsigned Int 100 30.5 21.1 77.9 -64-bit Unsigned Int 100 26.3 19.2 97.0 -Uniforms 100 28.0 23.0 106.0 -Normals 100 40.1 31.3 112.6 -Exponentials 100 33.7 26.3 109.8 -**Overall** 100 31.4 23.8 99.8 -=================== ========= ======= ======== ======= +===================== ========= ======= =========== ======== ======= +Distribution MT19937 PCG64 PCG64DXSM Philox SFC64 +===================== ========= ======= =========== ======== ======= +32-bit Unsigned Ints 100 24 34 14 57 +64-bit Unsigned Ints 100 21 32 14 74 +Uniforms 100 21 34 16 73 +Normals 100 36 57 28 101 +Exponentials 100 28 44 20 88 +**Overall** 100 25 39 18 77 +===================== ========= ======= =========== ======== ======= .. note:: - Linux timings used Ubuntu 18.04 and GCC 7.4. Windows timings were made on + Linux timings used Ubuntu 20.04 and GCC 9.3.0. Windows timings were made on Windows 10 using Microsoft C/C++ Optimizing Compiler Version 19 (Visual - Studio 2015). 
All timings were produced on an i5-3570 processor. + Studio 2019). All timings were produced on an AMD Ryzen 9 3900X processor. From 46ede3e25bc3682bf55e3651169e5800cd9332ae Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 11 May 2021 11:43:23 +0200 Subject: [PATCH 1123/1270] ENH: Add annotation for `np.lib.NumpyVersion` --- numpy/lib/_version.pyi | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/lib/_version.pyi b/numpy/lib/_version.pyi index 20b049757f2f..3581d639bcdd 100644 --- a/numpy/lib/_version.pyi +++ b/numpy/lib/_version.pyi @@ -1,19 +1,19 @@ -from typing import Any, List +from typing import Union, List __all__: List[str] class NumpyVersion: - vstring: Any - version: Any - major: Any - minor: Any - bugfix: Any - pre_release: Any - is_devversion: Any - def __init__(self, vstring): ... - def __lt__(self, other): ... - def __le__(self, other): ... - def __eq__(self, other): ... - def __ne__(self, other): ... - def __gt__(self, other): ... - def __ge__(self, other): ... + vstring: str + version: str + major: int + minor: int + bugfix: int + pre_release: str + is_devversion: bool + def __init__(self, vstring: str) -> None: ... + def __lt__(self, other: Union[str, NumpyVersion]) -> bool: ... + def __le__(self, other: Union[str, NumpyVersion]) -> bool: ... + def __eq__(self, other: Union[str, NumpyVersion]) -> bool: ... # type: ignore[override] + def __ne__(self, other: Union[str, NumpyVersion]) -> bool: ... # type: ignore[override] + def __gt__(self, other: Union[str, NumpyVersion]) -> bool: ... + def __ge__(self, other: Union[str, NumpyVersion]) -> bool: ... From 85606c6f7a01b027107e0997057e57629f536ccb Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 11 May 2021 11:44:10 +0200 Subject: [PATCH 1124/1270] BUG: Fixed a broken `NumpyVersion.__repr__` method --- numpy/lib/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index 0f26d6503c5d..00e00e9a78ad 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -151,5 +151,5 @@ def __gt__(self, other): def __ge__(self, other): return self._compare(other) >= 0 - def __repr(self): + def __repr__(self): return "NumpyVersion(%s)" % self.vstring From 7e6e1f1a5e1d333a6a17826d5f56f7e23598625f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 11 May 2021 11:51:20 +0200 Subject: [PATCH 1125/1270] TST: Add typing tests for `np.lib.NumpyVersion` --- numpy/typing/tests/data/fail/lib_version.py | 6 ++++++ numpy/typing/tests/data/pass/lib_version.py | 18 ++++++++++++++++++ numpy/typing/tests/data/reveal/lib_version.py | 18 ++++++++++++++++++ 3 files changed, 42 insertions(+) create mode 100644 numpy/typing/tests/data/fail/lib_version.py create mode 100644 numpy/typing/tests/data/pass/lib_version.py create mode 100644 numpy/typing/tests/data/reveal/lib_version.py diff --git a/numpy/typing/tests/data/fail/lib_version.py b/numpy/typing/tests/data/fail/lib_version.py new file mode 100644 index 000000000000..0769068d4f78 --- /dev/null +++ b/numpy/typing/tests/data/fail/lib_version.py @@ -0,0 +1,6 @@ +from numpy.lib import NumpyVersion + +version: NumpyVersion + +NumpyVersion(b"1.8.0") # E: incompatible type +version >= b"1.8.0" # E: incompatible type diff --git a/numpy/typing/tests/data/pass/lib_version.py b/numpy/typing/tests/data/pass/lib_version.py new file mode 100644 index 000000000000..f3825eca5247 --- /dev/null +++ b/numpy/typing/tests/data/pass/lib_version.py @@ -0,0 +1,18 @@ +from numpy.lib import 
NumpyVersion + +version = NumpyVersion("1.8.0") + +version.vstring +version.version +version.major +version.minor +version.bugfix +version.pre_release +version.is_devversion + +version == version +version != version +version < "1.8.0" +version <= version +version > version +version >= "1.8.0" diff --git a/numpy/typing/tests/data/reveal/lib_version.py b/numpy/typing/tests/data/reveal/lib_version.py new file mode 100644 index 000000000000..e6f695558a40 --- /dev/null +++ b/numpy/typing/tests/data/reveal/lib_version.py @@ -0,0 +1,18 @@ +from numpy.lib import NumpyVersion + +version = NumpyVersion("1.8.0") + +reveal_type(version.vstring) # E: str +reveal_type(version.version) # E: str +reveal_type(version.major) # E: int +reveal_type(version.minor) # E: int +reveal_type(version.bugfix) # E: int +reveal_type(version.pre_release) # E: str +reveal_type(version.is_devversion) # E: bool + +reveal_type(version == version) # E: bool +reveal_type(version != version) # E: bool +reveal_type(version < "1.8.0") # E: bool +reveal_type(version <= version) # E: bool +reveal_type(version > version) # E: bool +reveal_type(version >= "1.8.0") # E: bool From 620ee78465b3570d1d7cfc9c1cb663516121e076 Mon Sep 17 00:00:00 2001 From: Kasia <39829548+katleszek@users.noreply.github.com> Date: Tue, 11 May 2021 12:12:13 +0200 Subject: [PATCH 1126/1270] Update numpy/core/_add_newdocs_scalars.py Co-authored-by: Matti Picus --- numpy/core/_add_newdocs_scalars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py index 9c6a5bd3311a..602b1db6e64a 100644 --- a/numpy/core/_add_newdocs_scalars.py +++ b/numpy/core/_add_newdocs_scalars.py @@ -220,7 +220,7 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): """ If created from a 64-bit integer, it represents an offset from ``1970-01-01T00:00:00``. - If created from string, the string must be in ISO 8601 date + If created from string, the string can be in ISO 8601 date or datetime format. >>> np.datetime64(10, 'Y') From f49fc543292103df783d40f18465ccd6f6de3509 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 11 May 2021 12:44:34 +0200 Subject: [PATCH 1127/1270] TST: Fixed an incorrect error message --- numpy/typing/tests/data/fail/lib_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/fail/lib_version.py b/numpy/typing/tests/data/fail/lib_version.py index 0769068d4f78..2758cfe40438 100644 --- a/numpy/typing/tests/data/fail/lib_version.py +++ b/numpy/typing/tests/data/fail/lib_version.py @@ -3,4 +3,4 @@ version: NumpyVersion NumpyVersion(b"1.8.0") # E: incompatible type -version >= b"1.8.0" # E: incompatible type +version >= b"1.8.0" # E: Unsupported operand types From 2adb6bc36cc61e7bcb1e2244c7be899faaf97f06 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Tue, 11 May 2021 11:57:53 -0700 Subject: [PATCH 1128/1270] DOC: touchups to release note. --- doc/release/upcoming_changes/18963.expired.rst | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/18963.expired.rst b/doc/release/upcoming_changes/18963.expired.rst index b792fe0c8b5a..d93b8a06a24b 100644 --- a/doc/release/upcoming_changes/18963.expired.rst +++ b/doc/release/upcoming_changes/18963.expired.rst @@ -1 +1,8 @@ -* The class ``PolyBase`` has been removed (deprecated in numpy 1.9.0). Please use use the abstract ``ABCPolyBase`` class instead. 
\ No newline at end of file +Remove deprecated ``PolyBase`` and unused ``PolyError`` and ``PolyDomainError`` +--------------------------------------------------------------------------------- + +The class ``PolyBase`` has been removed (deprecated in numpy 1.9.0). Please use +the abstract ``ABCPolyBase`` class instead. + +Furthermore, the unused ``PolyError`` and ``PolyDomainError`` exceptions are +removed from `numpy.polynomial`. From d067a475ba8b5335583fa8e33859367ca6810432 Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 12 May 2021 09:43:55 +0300 Subject: [PATCH 1129/1270] remove warning on MSVC --- numpy/distutils/checks/extra_avx512f_reduce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/distutils/checks/extra_avx512f_reduce.c index f979d504ee60..db01aaeef405 100644 --- a/numpy/distutils/checks/extra_avx512f_reduce.c +++ b/numpy/distutils/checks/extra_avx512f_reduce.c @@ -8,7 +8,7 @@ int main(void) { __m512 one_ps = _mm512_set1_ps(1.0f); __m512d one_pd = _mm512_set1_pd(1.0); - __m512i one_i64 = _mm512_set1_epi64(1.0); + __m512i one_i64 = _mm512_set1_epi64(1); // add float sum_ps = _mm512_reduce_add_ps(one_ps); double sum_pd = _mm512_reduce_add_pd(one_pd); From b2a18e865daaf212c9df6707c99334add0483fdf Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 11 Jan 2021 16:16:08 +0100 Subject: [PATCH 1130/1270] ENH: Added a (python-based) backport of `types.GenericAlias` --- numpy/typing/__init__.py | 3 + numpy/typing/_generic_alias.py | 194 +++++++++++++++++++++++++++++++++ 2 files changed, 197 insertions(+) create mode 100644 numpy/typing/_generic_alias.py diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index a2d4c60fb274..8ad07d098c79 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -354,6 +354,9 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _ArrayLikeStr_co, _ArrayLikeBytes_co, ) +from ._generic_alias import ( + _GenericAlias, +) if __doc__ is not None: from ._add_docstring import _docstrings diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py new file mode 100644 index 000000000000..1a15cbb83129 --- /dev/null +++ b/numpy/typing/_generic_alias.py @@ -0,0 +1,194 @@ +from __future__ import annotations + +import sys +import types +from typing import ( + Any, + Callable, + ClassVar, + FrozenSet, + Generator, + Iterable, + Iterator, + List, + NoReturn, + Tuple, + Type, + TypeVar, +) + +__all__ = ["_GenericAlias"] + +_T = TypeVar("_T", bound="_GenericAlias") + + +def _to_str(obj: object) -> str: + """Helper function for `_GenericAlias.__repr__`.""" + if obj is Ellipsis: + return '...' + elif isinstance(obj, type) and not isinstance(obj, _GENERIC_ALIAS_TYPE): + if obj.__module__ == 'builtins': + return obj.__qualname__ + else: + return f'{obj.__module__}.{obj.__qualname__}' + else: + return repr(obj) + + +def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]: + """Search for all typevars and typevar-containing objects in `args`. + + Helper function for `_GenericAlias.__init__`. + + """ + for i in args: + if hasattr(i, "__parameters__"): + yield from i.__parameters__ + elif isinstance(i, TypeVar): + yield i + + +def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T: + """Recursively replace all typevars with those from `parameters`. + + Helper function for `_GenericAlias.__getitem__`.
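    A rough illustration of the substitution performed here, using a plain
    ``dict``-based alias (a hypothetical example for clarity, not one of the
    aliases defined in this module):

    >>> T1, T2 = TypeVar("T1"), TypeVar("T2")
    >>> alias = _GenericAlias(dict, (T1, T2))
    >>> alias[str, int]
    dict[str, int]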
+ + """ + args = [] + for i in alias.__args__: + if isinstance(i, TypeVar): + value: Any = next(parameters) + elif isinstance(i, _GenericAlias): + value = _reconstruct_alias(i, parameters) + elif hasattr(i, "__parameters__"): + value = i[next(parameters)] + else: + value = i + args.append(value) + + cls = type(alias) + return cls(alias.__origin__, tuple(args)) + + +class _GenericAlias: + """A python-based backport of the `types.GenericAlias` class. + + E.g. for ``t = list[int]``, ``t.__origin__`` is ``list`` and + ``t.__args__`` is ``(int,)``. + + See Also + -------- + :pep:`585` + The PEP responsible for introducing `types.GenericAlias`. + + """ + + __slots__ = ("__weakref__", "_origin", "_args", "_parameters", "_hash") + + @property + def __origin__(self) -> type: + return super().__getattribute__("_origin") + + @property + def __args__(self) -> Tuple[Any, ...]: + return super().__getattribute__("_args") + + @property + def __parameters__(self) -> Tuple[TypeVar, ...]: + """Type variables in the ``GenericAlias``.""" + return super().__getattribute__("_parameters") + + def __init__(self, origin: type, args: Any) -> None: + self._origin = origin + self._args = args if isinstance(args, tuple) else (args,) + self._parameters = tuple(_parse_parameters(args)) + + @property + def __call__(self) -> type: + return self.__origin__ + + def __reduce__(self: _T) -> Tuple[Type[_T], Tuple[type, Tuple[Any, ...]]]: + cls = type(self) + return cls, (self.__origin__, self.__args__) + + def __mro_entries__(self, bases: Iterable[object]) -> Tuple[type]: + return (self.__origin__,) + + def __dir__(self) -> List[str]: + """Implement ``dir(self)``.""" + cls = type(self) + dir_origin = set(dir(self.__origin__)) + return sorted(cls._ATTR_EXCEPTIONS | dir_origin) + + def __hash__(self) -> int: + """Return ``hash(self)``.""" + # Attempt to use the cached hash + try: + return super().__getattribute__("_hash") + except AttributeError: + self._hash: int = hash(self.__origin__) ^ hash(self.__args__) + return super().__getattribute__("_hash") + + def __instancecheck__(self, obj: object) -> NoReturn: + """Check if an `obj` is an instance.""" + raise TypeError("isinstance() argument 2 cannot be a " + "parameterized generic") + + def __subclasscheck__(self, cls: type) -> NoReturn: + """Check if a `cls` is a subclass.""" + raise TypeError("issubclass() argument 2 cannot be a " + "parameterized generic") + + def __repr__(self) -> str: + """Return ``repr(self)``.""" + args = ", ".join(_to_str(i) for i in self.__args__) + origin = _to_str(self.__origin__) + return f"{origin}[{args}]" + + def __getitem__(self: _T, key: Any) -> _T: + """Return ``self[key]``.""" + key_tup = key if isinstance(key, tuple) else (key,) + + if len(self.__parameters__) == 0: + raise TypeError(f"There are no type variables left in {self}") + elif len(key_tup) > len(self.__parameters__): + raise TypeError(f"Too many arguments for {self}") + elif len(key_tup) < len(self.__parameters__): + raise TypeError(f"Too few arguments for {self}") + + key_iter = iter(key_tup) + return _reconstruct_alias(self, key_iter) + + def __eq__(self, value: object) -> bool: + """Return ``self == value``.""" + if not isinstance(value, _GENERIC_ALIAS_TYPE): + return NotImplemented + return ( + self.__origin__ == value.__origin__ and + self.__args__ == value.__args__ + ) + + _ATTR_EXCEPTIONS: ClassVar[FrozenSet[str]] = frozenset({ + "__origin__", + "__args__", + "__parameters__", + "__mro_entries__", + "__reduce__", + "__reduce_ex__", + }) + + def __getattribute__(self, name: str) 
-> Any: + """Return ``getattr(self, name)``.""" + # Pull the attribute from `__origin__` unless its + # name is in `_ATTR_EXCEPTIONS` + cls = type(self) + if name in cls._ATTR_EXCEPTIONS: + return super().__getattribute__(name) + return getattr(self.__origin__, name) + + +# See `_GenericAlias.__eq__` +if sys.version_info >= (3, 9): + _GENERIC_ALIAS_TYPE = (_GenericAlias, types.GenericAlias) +else: + _GENERIC_ALIAS_TYPE = (_GenericAlias,) From 610edf23678a81a9a81d71d238a8f8c1eedcc78a Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 11 Jan 2021 17:13:56 +0100 Subject: [PATCH 1131/1270] TST: Add tests for `GenericAlias` --- numpy/typing/tests/test_generic_alias.py | 113 +++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 numpy/typing/tests/test_generic_alias.py diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py new file mode 100644 index 000000000000..ec9b53154ff3 --- /dev/null +++ b/numpy/typing/tests/test_generic_alias.py @@ -0,0 +1,113 @@ +from __future__ import annotations + +import sys +import types +import pickle +import weakref +from typing import TypeVar, Any, Callable, Tuple, Type, Union + +import pytest +import numpy as np +from numpy.typing._generic_alias import _GenericAlias + +ScalarType = TypeVar("ScalarType", bound=np.generic) +DType = _GenericAlias(np.dtype, (ScalarType,)) +NDArray = _GenericAlias(np.ndarray, (Any, DType)) + +if sys.version_info >= (3, 9): + DType_ref = types.GenericAlias(np.dtype, (ScalarType,)) + NDArray_ref = types.GenericAlias(np.ndarray, (Any, DType_ref)) + FuncType = Callable[[Union[_GenericAlias, types.GenericAlias]], Any] +else: + DType_ref = NotImplemented + NDArray_ref = NotImplemented + FuncType = Callable[[_GenericAlias], Any] + +GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS) + + +def _get_subclass_mro(base: type) -> Tuple[type, ...]: + class Subclass(base): # type: ignore[misc,valid-type] + pass + return Subclass.__mro__[1:] + + +class TestGenericAlias: + """Tests for `numpy.typing._generic_alias._GenericAlias`.""" + + @pytest.mark.parametrize( + "name,func", + [ + ("__init__", lambda n: n), + ("__origin__", lambda n: n.__origin__), + ("__args__", lambda n: n.__args__), + ("__parameters__", lambda n: n.__parameters__), + ("__reduce__", lambda n: n.__reduce__()[1:]), + ("__reduce_ex__", lambda n: n.__reduce_ex__(1)[1:]), + ("__mro_entries__", lambda n: n.__mro_entries__([object])), + ("__hash__", lambda n: hash(n)), + ("__repr__", lambda n: repr(n)), + ("__getitem__", lambda n: n[np.float64]), + ("__getitem__", lambda n: n[ScalarType][np.float64]), + ("__getitem__", lambda n: n[Union[np.int64, ScalarType]][np.float64]), + ("__eq__", lambda n: n == n), + ("__ne__", lambda n: n != np.ndarray), + ("__dir__", lambda n: dir(n)), + ("__call__", lambda n: n((5,), np.int64)), + ("__call__", lambda n: n(shape=(5,), dtype=np.int64)), + ("subclassing", lambda n: _get_subclass_mro(n)), + ("pickle", lambda n: n == pickle.loads(pickle.dumps(n))), + ("__weakref__", lambda n: n == weakref.ref(n)()), + ] + ) + def test_pass(self, name: str, func: FuncType) -> None: + """Compare `types.GenericAlias` with its numpy-based backport. + + Checker whether ``func`` runs as intended and that both `GenericAlias` + and `_GenericAlias` return the same result. 
+ + """ + value = func(NDArray) + + if sys.version_info >= (3, 9): + value_ref = func(NDArray_ref) + assert value == value_ref + + @pytest.mark.parametrize("name", GETATTR_NAMES) + def test_getattr(self, name: str) -> None: + """Test that `getattr` wraps around the underlying type (``__origin__``).""" + value = getattr(NDArray, name) + value_ref1 = getattr(np.ndarray, name) + + if sys.version_info >= (3, 9): + value_ref2 = getattr(NDArray_ref, name) + assert value == value_ref1 == value_ref2 + else: + assert value == value_ref1 + + @pytest.mark.parametrize( + "name,exc_type,func", + [ + ("__getitem__", TypeError, lambda n: n[()]), + ("__getitem__", TypeError, lambda n: n[Any, Any]), + ("__getitem__", TypeError, lambda n: n[Any][Any]), + ("__instancecheck__", TypeError, lambda n: isinstance(np.array(1), n)), + ("__subclasscheck__", TypeError, lambda n: issubclass(np.ndarray, n)), + ("__setattr__", AttributeError, lambda n: setattr(n, "__origin__", int)), + ("__setattr__", AttributeError, lambda n: setattr(n, "test", int)), + ("__getattribute__", AttributeError, lambda n: getattr(n, "test")), + ] + ) + def test_raise( + self, + name: str, + exc_type: Type[BaseException], + func: FuncType, + ) -> None: + """Test operations that are supposed to raise.""" + with pytest.raises(exc_type): + func(NDArray) + + if sys.version_info >= (3, 9): + with pytest.raises(exc_type): + func(NDArray_ref) From 2f74022192dfeb89d2a7b7acf5b7428e795c74e4 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 11 Jan 2021 17:13:56 +0100 Subject: [PATCH 1132/1270] API: Add a runtime-subscriptable alias for `ndarray` --- numpy/core/_add_newdocs.py | 4 +++- numpy/typing/__init__.py | 1 + numpy/typing/_add_docstring.py | 31 +++++++++++++++++++++++++++++++ numpy/typing/_generic_alias.py | 16 +++++++++++++++- 4 files changed, 50 insertions(+), 2 deletions(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index f870dc8ad7fd..538123149933 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -2176,6 +2176,8 @@ empty : Create an array, but leave its allocated memory unchanged (i.e., it contains "garbage"). dtype : Create a data-type. + numpy.typing.NDArray : A :term:`generic ` version + of ndarray. Notes ----- @@ -5814,7 +5816,7 @@ >>> (arr + arr).dtype.metadata mappingproxy({'key': 'value'}) - But if the arrays have different dtype metadata, the metadata may be + But if the arrays have different dtype metadata, the metadata may be dropped: >>> dt2 = np.dtype(float, metadata={"key2": "value2"}) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 8ad07d098c79..f54108f60adb 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -355,6 +355,7 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _ArrayLikeBytes_co, ) from ._generic_alias import ( + NDArray as NDArray, _GenericAlias, ) diff --git a/numpy/typing/_add_docstring.py b/numpy/typing/_add_docstring.py index 8e39fe2c6175..1450ce97906e 100644 --- a/numpy/typing/_add_docstring.py +++ b/numpy/typing/_add_docstring.py @@ -3,6 +3,8 @@ import re import textwrap +from ._generic_alias import NDArray + _docstrings_list = [] @@ -93,4 +95,33 @@ def _parse_docstrings(): """) +add_newdoc('NDArray', repr(NDArray), + """ + A :term:`generic ` version of + `np.ndarray[Any, np.dtype[~ScalarType]] `. + + Can be used during runtime for typing arrays with a given dtype + and unspecified shape. + + Examples + -------- + .. 
code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> print(npt.NDArray) + numpy.ndarray[typing.Any, numpy.dtype[~ScalarType]] + + >>> print(npt.NDArray[np.float64]) + numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] + + >>> NDArrayInt = npt.NDArray[np.int_] + >>> a: NDArrayInt = np.arange(10) + + >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]: + ... return np.array(a) + + """) + _docstrings = _parse_docstrings() diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py index 1a15cbb83129..fd719c6110b2 100644 --- a/numpy/typing/_generic_alias.py +++ b/numpy/typing/_generic_alias.py @@ -15,9 +15,12 @@ Tuple, Type, TypeVar, + TYPE_CHECKING, ) -__all__ = ["_GenericAlias"] +import numpy as np + +__all__ = ["_GenericAlias", "NDArray"] _T = TypeVar("_T", bound="_GenericAlias") @@ -192,3 +195,14 @@ def __getattribute__(self, name: str) -> Any: _GENERIC_ALIAS_TYPE = (_GenericAlias, types.GenericAlias) else: _GENERIC_ALIAS_TYPE = (_GenericAlias,) + +ScalarType = TypeVar("ScalarType", bound=np.generic) + +if TYPE_CHECKING: + NDArray = np.ndarray[Any, np.dtype[ScalarType]] +elif sys.version_info >= (3, 9): + _DType = types.GenericAlias(np.dtype, (ScalarType,)) + NDArray = types.GenericAlias(np.ndarray, (Any, _DType)) +else: + _DType = _GenericAlias(np.dtype, (ScalarType,)) + NDArray = _GenericAlias(np.ndarray, (Any, _DType)) From ba760724b099ac30e85aa583c8cdbd5795927672 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 7 May 2021 14:30:53 +0200 Subject: [PATCH 1133/1270] ENH: Add docstrings and annotations to 2 misc private functions --- numpy/typing/_add_docstring.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/numpy/typing/_add_docstring.py b/numpy/typing/_add_docstring.py index 1450ce97906e..34dbdb0c6e77 100644 --- a/numpy/typing/_add_docstring.py +++ b/numpy/typing/_add_docstring.py @@ -8,11 +8,27 @@ _docstrings_list = [] -def add_newdoc(name, value, doc): +def add_newdoc(name: str, value: str, doc: str) -> None: + """Append ``_docstrings_list`` with a docstring for `name`. + + Parameters + ---------- + name : str + The name of the object. + value : str + A string-representation of the object. + doc : str + The docstring of the object. + + """ _docstrings_list.append((name, value, doc)) -def _parse_docstrings(): +def _parse_docstrings() -> str: + """Convert all docstrings in ``_docstrings_list`` into a single + sphinx-legible text block. 
+ + """ type_list_ret = [] for name, value, doc in _docstrings_list: s = textwrap.dedent(doc).replace("\n", "\n ") From 7c20796390be054dd294b80bae02be94ecfaa969 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 7 May 2021 15:00:03 +0200 Subject: [PATCH 1134/1270] MAINT: Replace `_ArrayND` with `NDArray` --- numpy/__init__.pyi | 780 ++++++++++++++++++------------------ numpy/lib/index_tricks.pyi | 30 +- numpy/lib/ufunclike.pyi | 10 +- numpy/typing/__init__.py | 1 - numpy/typing/_array_like.py | 5 - numpy/typing/_callable.py | 5 +- 6 files changed, 413 insertions(+), 418 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 01a08f24a749..318c39fc3581 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -10,7 +10,7 @@ from numpy.core._internal import _ctypes from numpy.typing import ( # Arrays ArrayLike, - _ArrayND, + NDArray, _SupportsArray, _NestedSequence, _RecursiveSequence, @@ -1687,12 +1687,12 @@ _T = TypeVar("_T") _2Tuple = Tuple[_T, _T] _Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"] -_ArrayUInt_co = _ArrayND[Union[bool_, unsignedinteger[Any]]] -_ArrayInt_co = _ArrayND[Union[bool_, integer[Any]]] -_ArrayFloat_co = _ArrayND[Union[bool_, integer[Any], floating[Any]]] -_ArrayComplex_co = _ArrayND[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]] -_ArrayNumber_co = _ArrayND[Union[bool_, number[Any]]] -_ArrayTD64_co = _ArrayND[Union[bool_, integer[Any], timedelta64]] +_ArrayUInt_co = NDArray[Union[bool_, unsignedinteger[Any]]] +_ArrayInt_co = NDArray[Union[bool_, integer[Any]]] +_ArrayFloat_co = NDArray[Union[bool_, integer[Any], floating[Any]]] +_ArrayComplex_co = NDArray[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]] +_ArrayNumber_co = NDArray[Union[bool_, number[Any]]] +_ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]] class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property @@ -1842,128 +1842,128 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # strings, it will pass through the final overload otherwise @overload - def __lt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __lt__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayND[bool_]: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[bool_]: ... + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[bool_]: ... + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayND[object_], other: Any) -> _ArrayND[bool_]: ... + def __lt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayND[bool_]: ... + def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... @overload def __lt__( - self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, - ) -> _ArrayND[bool_]: ... + ) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... 
+ def __le__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayND[bool_]: ... + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[bool_]: ... + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[bool_]: ... + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayND[object_], other: Any) -> _ArrayND[bool_]: ... + def __le__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayND[bool_]: ... + def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... @overload def __le__( - self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, - ) -> _ArrayND[bool_]: ... + ) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __gt__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayND[bool_]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[bool_]: ... + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[bool_]: ... + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayND[object_], other: Any) -> _ArrayND[bool_]: ... + def __gt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayND[bool_]: ... + def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... @overload def __gt__( - self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, - ) -> _ArrayND[bool_]: ... + ) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __ge__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> _ArrayND[bool_]: ... + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[bool_]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[bool_]: ... + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayND[object_], other: Any) -> _ArrayND[bool_]: ... + def __ge__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> _ArrayND[bool_]: ... 
+ def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... @overload def __ge__( - self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]], other: _RecursiveSequence, - ) -> _ArrayND[bool_]: ... + ) -> NDArray[bool_]: ... # Unary ops @overload - def __abs__(self: _ArrayND[bool_]) -> _ArrayND[bool_]: ... + def __abs__(self: NDArray[bool_]) -> NDArray[bool_]: ... @overload - def __abs__(self: _ArrayND[complexfloating[_NBit1, _NBit1]]) -> _ArrayND[floating[_NBit1]]: ... + def __abs__(self: NDArray[complexfloating[_NBit1, _NBit1]]) -> NDArray[floating[_NBit1]]: ... @overload - def __abs__(self: _ArrayND[_NumberType]) -> _ArrayND[_NumberType]: ... + def __abs__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... @overload - def __abs__(self: _ArrayND[timedelta64]) -> _ArrayND[timedelta64]: ... + def __abs__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... @overload - def __abs__(self: _ArrayND[object_]) -> Any: ... + def __abs__(self: NDArray[object_]) -> Any: ... @overload - def __invert__(self: _ArrayND[bool_]) -> _ArrayND[bool_]: ... + def __invert__(self: NDArray[bool_]) -> NDArray[bool_]: ... @overload - def __invert__(self: _ArrayND[_IntType]) -> _ArrayND[_IntType]: ... + def __invert__(self: NDArray[_IntType]) -> NDArray[_IntType]: ... @overload - def __invert__(self: _ArrayND[object_]) -> Any: ... + def __invert__(self: NDArray[object_]) -> Any: ... @overload - def __pos__(self: _ArrayND[_NumberType]) -> _ArrayND[_NumberType]: ... + def __pos__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... @overload - def __pos__(self: _ArrayND[timedelta64]) -> _ArrayND[timedelta64]: ... + def __pos__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... @overload - def __pos__(self: _ArrayND[object_]) -> Any: ... + def __pos__(self: NDArray[object_]) -> Any: ... @overload - def __neg__(self: _ArrayND[_NumberType]) -> _ArrayND[_NumberType]: ... + def __neg__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... @overload - def __neg__(self: _ArrayND[timedelta64]) -> _ArrayND[timedelta64]: ... + def __neg__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... @overload - def __neg__(self: _ArrayND[object_]) -> Any: ... + def __neg__(self: NDArray[object_]) -> Any: ... # Binary ops # NOTE: `ndarray` does not implement `__imatmul__` @overload - def __matmul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __matmul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __matmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... 
# type: ignore[misc] + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __matmul__(self: _ArrayND[object_], other: Any) -> Any: ... + def __matmul__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __matmul__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __matmul__( self: _ArrayNumber_co, @@ -1971,21 +1971,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): ) -> Any: ... @overload - def __rmatmul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rmatmul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rmatmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __rmatmul__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rmatmul__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rmatmul__( self: _ArrayNumber_co, @@ -1993,761 +1993,761 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): ) -> Any: ... @overload - def __mod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __mod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __mod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] + def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... 
# type: ignore[misc] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[timedelta64]: ... + def __mod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... @overload - def __mod__(self: _ArrayND[object_], other: Any) -> Any: ... + def __mod__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __mod__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __mod__( - self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], + self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rmod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rmod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] + def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[timedelta64]: ... + def __rmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... @overload - def __rmod__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rmod__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rmod__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rmod__( - self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], + self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __divmod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __divmod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __divmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _2Tuple[_ArrayND[int8]]: ... # type: ignore[misc] + def __divmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... 
# type: ignore[misc] @overload - def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[_ArrayND[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[_ArrayND[signedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayND[floating[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[_ArrayND[int64], _ArrayND[timedelta64]]: ... + def __divmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload def __divmod__( - self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], + self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], other: _RecursiveSequence, ) -> _2Tuple[Any]: ... @overload - def __rdivmod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rdivmod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rdivmod__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _2Tuple[_ArrayND[int8]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[_ArrayND[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[_ArrayND[signedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[_ArrayND[floating[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[_ArrayND[int64], _ArrayND[timedelta64]]: ... + def __rdivmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload def __rdivmod__( - self: _ArrayND[Union[bool_, integer[Any], floating[Any], timedelta64]], + self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], other: _RecursiveSequence, ) -> _2Tuple[Any]: ... @overload - def __add__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __add__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __add__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... 
# type: ignore[misc] + def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... # type: ignore[misc] + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayND[datetime64]: ... + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... @overload - def __add__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... + def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... @overload - def __add__(self: _ArrayND[object_], other: Any) -> Any: ... + def __add__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __add__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __add__( - self: _ArrayND[Union[bool_, number[Any], timedelta64, datetime64]], + self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __radd__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __radd__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __radd__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... 
# type: ignore[misc] + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... # type: ignore[misc] + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayND[datetime64]: ... + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... @overload - def __radd__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... + def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... @overload - def __radd__(self: _ArrayND[object_], other: Any) -> Any: ... + def __radd__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __radd__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __radd__( - self: _ArrayND[Union[bool_, number[Any], timedelta64, datetime64]], + self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __sub__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __sub__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __sub__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... + def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... # type: ignore[misc] + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... + def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... @overload - def __sub__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[timedelta64]: ... + def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... @overload - def __sub__(self: _ArrayND[object_], other: Any) -> Any: ... + def __sub__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __sub__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... 
+ def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __sub__( - self: _ArrayND[Union[bool_, number[Any], timedelta64, datetime64]], + self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rsub__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rsub__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rsub__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... + def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> _ArrayND[datetime64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayND[timedelta64]: ... + def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... @overload - def __rsub__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rsub__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rsub__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rsub__( - self: _ArrayND[Union[bool_, number[Any], timedelta64, datetime64]], + self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __mul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __mul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __mul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] @overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... + def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... @overload - def __mul__(self: _ArrayND[object_], other: Any) -> Any: ... + def __mul__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __mul__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __mul__( - self: _ArrayND[Union[bool_, number[Any], timedelta64]], + self: NDArray[Union[bool_, number[Any], timedelta64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rmul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rmul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rmul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... + def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... @overload - def __rmul__(self: _ArrayND[object_], other: Any) -> Any: ... 
+ def __rmul__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rmul__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rmul__( - self: _ArrayND[Union[bool_, number[Any], timedelta64]], + self: NDArray[Union[bool_, number[Any], timedelta64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __floordiv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __floordiv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __floordiv__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[int64]: ... + def __floordiv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[int64]: ... @overload - def __floordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __floordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... @overload - def __floordiv__(self: _ArrayND[object_], other: Any) -> Any: ... + def __floordiv__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __floordiv__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __floordiv__( - self: _ArrayND[Union[bool_, number[Any], timedelta64]], + self: NDArray[Union[bool_, number[Any], timedelta64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rfloordiv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rfloordiv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rfloordiv__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... 
# type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[int64]: ... + def __rfloordiv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[int64]: ... @overload - def __rfloordiv__(self: _ArrayND[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... + def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... @overload - def __rfloordiv__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rfloordiv__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rfloordiv__( - self: _ArrayND[Union[bool_, number[Any], timedelta64]], + self: NDArray[Union[bool_, number[Any], timedelta64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __pow__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __pow__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __pow__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] + def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... 
+ def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __pow__(self: _ArrayND[object_], other: Any) -> Any: ... + def __pow__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __pow__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __pow__( - self: _ArrayND[Union[bool_, number[Any]]], + self: NDArray[Union[bool_, number[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rpow__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rpow__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rpow__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] + def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... # type: ignore[misc] + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __rpow__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rpow__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rpow__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rpow__( - self: _ArrayND[Union[bool_, number[Any]]], + self: NDArray[Union[bool_, number[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __truediv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __truediv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> _ArrayND[float64]: ... # type: ignore[misc] + def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] @overload - def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __truediv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[float64]: ... 
+ def __truediv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[float64]: ... @overload - def __truediv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __truediv__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... @overload - def __truediv__(self: _ArrayND[object_], other: Any) -> Any: ... + def __truediv__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __truediv__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __truediv__( - self: _ArrayND[Union[bool_, number[Any], timedelta64]], + self: NDArray[Union[bool_, number[Any], timedelta64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rtruediv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rtruediv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> _ArrayND[float64]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _ArrayND[floating[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rtruediv__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[float64]: ... + def __rtruediv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[float64]: ... @overload - def __rtruediv__(self: _ArrayND[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... + def __rtruediv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... @overload - def __rtruediv__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rtruediv__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rtruediv__( - self: _ArrayND[Union[bool_, number[Any], timedelta64]], + self: NDArray[Union[bool_, number[Any], timedelta64]], other: _RecursiveSequence, ) -> Any: ... @overload - def __lshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __lshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __lshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... 
# type: ignore[misc] + def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __lshift__(self: _ArrayND[object_], other: Any) -> Any: ... + def __lshift__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __lshift__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __lshift__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rlshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rlshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rlshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] + def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __rlshift__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rlshift__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rlshift__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rlshift__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] + def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __rshift__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rshift__(self: NDArray[object_], other: Any) -> Any: ... 
@overload - def __rshift__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rshift__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rrshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rrshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rrshift__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[int8]: ... # type: ignore[misc] + def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __rrshift__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rrshift__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rrshift__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rrshift__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __and__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __and__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __and__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __and__(self: _ArrayND[object_], other: Any) -> Any: ... + def __and__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __and__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __and__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rand__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rand__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rand__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... 
# type: ignore[misc] @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __rand__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rand__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rand__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __rand__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __xor__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __xor__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __xor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __xor__(self: _ArrayND[object_], other: Any) -> Any: ... + def __xor__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __xor__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __xor__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __rxor__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __rxor__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __rxor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __rxor__(self: _ArrayND[object_], other: Any) -> Any: ... + def __rxor__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __rxor__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... 
@overload def __rxor__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __or__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __or__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __or__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __or__(self: _ArrayND[object_], other: Any) -> Any: ... + def __or__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __or__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __or__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... @overload - def __ror__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __ror__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ror__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... # type: ignore[misc] + def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[Any]]: ... # type: ignore[misc] + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[Any]]: ... + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... @overload - def __ror__(self: _ArrayND[object_], other: Any) -> Any: ... + def __ror__(self: NDArray[object_], other: Any) -> Any: ... @overload - def __ror__(self: _ArrayND[Any], other: _ArrayLikeObject_co) -> Any: ... + def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... @overload def __ror__( - self: _ArrayND[Union[bool_, integer[Any]]], + self: NDArray[Union[bool_, integer[Any]]], other: _RecursiveSequence, ) -> Any: ... # `np.generic` does not support inplace operations @overload # type: ignore[misc] - def __iadd__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __iadd__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __iadd__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + def __iadd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload - def __iadd__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... 
@overload - def __iadd__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __iadd__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... @overload - def __iadd__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __iadd__(self: _ArrayND[timedelta64], other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... + def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... @overload - def __iadd__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... + def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... @overload - def __iadd__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __iadd__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __iadd__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __isub__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __isub__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __isub__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __isub__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __isub__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... @overload - def __isub__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __isub__(self: _ArrayND[timedelta64], other: _ArrayLikeTD64_co) -> _ArrayND[timedelta64]: ... + def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... @overload - def __isub__(self: _ArrayND[datetime64], other: _ArrayLikeTD64_co) -> _ArrayND[datetime64]: ... + def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... @overload - def __isub__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __isub__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... 
+ def __isub__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __imul__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __imul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __imul__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + def __imul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload - def __imul__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __imul__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __imul__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... @overload - def __imul__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __imul__(self: _ArrayND[timedelta64], other: _ArrayLikeFloat_co) -> _ArrayND[timedelta64]: ... + def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... @overload - def __imul__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __imul__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __imul__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __itruediv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __itruediv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __itruediv__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... @overload - def __itruediv__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __itruediv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __itruediv__(self: _ArrayND[timedelta64], other: _ArrayLikeInt_co) -> _ArrayND[timedelta64]: ... + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... @overload - def __itruediv__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... 
@overload - def __itruediv__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __itruediv__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __ifloordiv__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __ifloordiv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ifloordiv__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __ifloordiv__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ifloordiv__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... @overload - def __ifloordiv__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... + def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __ifloordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... @overload - def __ifloordiv__(self: _ArrayND[timedelta64], other: _ArrayLikeInt_co) -> _ArrayND[timedelta64]: ... + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... @overload - def __ifloordiv__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __ifloordiv__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __ifloordiv__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __ipow__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __ipow__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ipow__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __ipow__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ipow__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... @overload - def __ipow__(self: _ArrayND[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> _ArrayND[complexfloating[_NBit1, _NBit1]]: ... 
+ def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __ipow__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __ipow__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __ipow__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __imod__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __imod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __imod__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __imod__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __imod__(self: _ArrayND[floating[_NBit1]], other: _ArrayLikeFloat_co) -> _ArrayND[floating[_NBit1]]: ... + def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... @overload - def __imod__(self: _ArrayND[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> _ArrayND[timedelta64]: ... + def __imod__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... @overload - def __imod__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __imod__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __imod__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __ilshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __ilshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ilshift__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __ilshift__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ilshift__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __ilshift__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __ilshift__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __irshift__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __irshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... 
@overload - def __irshift__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __irshift__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __irshift__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __irshift__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __irshift__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __iand__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __iand__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __iand__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + def __iand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload - def __iand__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __iand__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __iand__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __iand__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __iand__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... @overload # type: ignore[misc] - def __ixor__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __ixor__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ixor__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + def __ixor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload - def __ixor__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __ixor__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ixor__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __ixor__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __ixor__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... 
@overload # type: ignore[misc] - def __ior__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + def __ior__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload - def __ior__(self: _ArrayND[bool_], other: _ArrayLikeBool_co) -> _ArrayND[bool_]: ... + def __ior__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload - def __ior__(self: _ArrayND[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> _ArrayND[unsignedinteger[_NBit1]]: ... + def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __ior__(self: _ArrayND[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> _ArrayND[signedinteger[_NBit1]]: ... + def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ior__(self: _ArrayND[object_], other: Any) -> _ArrayND[object_]: ... + def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... @overload - def __ior__(self: _ArrayND[_ScalarType], other: _RecursiveSequence) -> _ArrayND[_ScalarType]: ... + def __ior__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property @@ -3453,7 +3453,7 @@ class errstate(Generic[_CallType], ContextDecorator): ) -> None: ... class ndenumerate(Generic[_ScalarType]): - iter: flatiter[_ArrayND[_ScalarType]] + iter: flatiter[NDArray[_ScalarType]] @overload def __new__( cls, arr: _NestedSequence[_SupportsArray[dtype[_ScalarType]]], diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi index e486fe8f2af8..a3bfef6b6406 100644 --- a/numpy/lib/index_tricks.pyi +++ b/numpy/lib/index_tricks.pyi @@ -33,7 +33,7 @@ from numpy.typing import ( ArrayLike, _NestedSequence, _RecursiveSequence, - _ArrayND, + NDArray, _ArrayLikeInt, # DTypes @@ -68,7 +68,7 @@ def unravel_index( indices: _ArrayLikeInt, shape: _ShapeLike, order: _OrderCF = ... -) -> Tuple[_ArrayND[intp], ...]: ... +) -> Tuple[NDArray[intp], ...]: ... @overload def ravel_multi_index( # type: ignore[misc] @@ -83,24 +83,24 @@ def ravel_multi_index( dims: _ShapeLike, mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., order: _OrderCF = ... -) -> _ArrayND[intp]: ... +) -> NDArray[intp]: ... @overload def ix_(*args: _NestedSequence[_SupportsDType[_DType]]) -> Tuple[ndarray[Any, _DType], ...]: ... @overload -def ix_(*args: _NestedSequence[str]) -> Tuple[_ArrayND[str_], ...]: ... +def ix_(*args: _NestedSequence[str]) -> Tuple[NDArray[str_], ...]: ... @overload -def ix_(*args: _NestedSequence[bytes]) -> Tuple[_ArrayND[bytes_], ...]: ... +def ix_(*args: _NestedSequence[bytes]) -> Tuple[NDArray[bytes_], ...]: ... @overload -def ix_(*args: _NestedSequence[bool]) -> Tuple[_ArrayND[bool_], ...]: ... +def ix_(*args: _NestedSequence[bool]) -> Tuple[NDArray[bool_], ...]: ... @overload -def ix_(*args: _NestedSequence[int]) -> Tuple[_ArrayND[int_], ...]: ... +def ix_(*args: _NestedSequence[int]) -> Tuple[NDArray[int_], ...]: ... @overload -def ix_(*args: _NestedSequence[float]) -> Tuple[_ArrayND[float_], ...]: ... +def ix_(*args: _NestedSequence[float]) -> Tuple[NDArray[float_], ...]: ... @overload -def ix_(*args: _NestedSequence[complex]) -> Tuple[_ArrayND[complex_], ...]: ... +def ix_(*args: _NestedSequence[complex]) -> Tuple[NDArray[complex_], ...]: ... @overload -def ix_(*args: _RecursiveSequence) -> Tuple[_ArrayND[Any], ...]: ... 
+def ix_(*args: _RecursiveSequence) -> Tuple[NDArray[Any], ...]: ... class nd_grid(Generic[_BoolType]): sparse: _BoolType @@ -109,12 +109,12 @@ class nd_grid(Generic[_BoolType]): def __getitem__( self: nd_grid[Literal[False]], key: Union[slice, Sequence[slice]], - ) -> _ArrayND[Any]: ... + ) -> NDArray[Any]: ... @overload def __getitem__( self: nd_grid[Literal[True]], key: Union[slice, Sequence[slice]], - ) -> List[_ArrayND[Any]]: ... + ) -> List[NDArray[Any]]: ... class MGridClass(nd_grid[Literal[False]]): def __init__(self) -> None: ... @@ -142,7 +142,7 @@ class AxisConcatenator: @overload def concatenate( # type: ignore[misc] *a: ArrayLike, axis: SupportsIndex = ..., out: None = ... - ) -> _ArrayND[Any]: ... + ) -> NDArray[Any]: ... @staticmethod @overload def concatenate( @@ -188,7 +188,7 @@ index_exp: IndexExpression[Literal[True]] s_: IndexExpression[Literal[False]] def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ... -def diag_indices(n: int, ndim: int = ...) -> Tuple[_ArrayND[int_], ...]: ... -def diag_indices_from(arr: ArrayLike) -> Tuple[_ArrayND[int_], ...]: ... +def diag_indices(n: int, ndim: int = ...) -> Tuple[NDArray[int_], ...]: ... +def diag_indices_from(arr: ArrayLike) -> Tuple[NDArray[int_], ...]: ... # NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex` diff --git a/numpy/lib/ufunclike.pyi b/numpy/lib/ufunclike.pyi index e97383af8a7e..03f08ebffea3 100644 --- a/numpy/lib/ufunclike.pyi +++ b/numpy/lib/ufunclike.pyi @@ -2,7 +2,7 @@ from typing import Any, overload, TypeVar, List, Union from numpy import floating, bool_, object_, ndarray from numpy.typing import ( - _ArrayND, + NDArray, _FloatLike_co, _ArrayLikeFloat_co, _ArrayLikeObject_co, @@ -21,12 +21,12 @@ def fix( # type: ignore[misc] def fix( x: _ArrayLikeFloat_co, out: None = ..., -) -> _ArrayND[floating[Any]]: ... +) -> NDArray[floating[Any]]: ... @overload def fix( x: _ArrayLikeObject_co, out: None = ..., -) -> _ArrayND[object_]: ... +) -> NDArray[object_]: ... @overload def fix( x: Union[_ArrayLikeFloat_co, _ArrayLikeObject_co], @@ -42,7 +42,7 @@ def isposinf( # type: ignore[misc] def isposinf( x: _ArrayLikeFloat_co, out: None = ..., -) -> _ArrayND[bool_]: ... +) -> NDArray[bool_]: ... @overload def isposinf( x: _ArrayLikeFloat_co, @@ -58,7 +58,7 @@ def isneginf( # type: ignore[misc] def isneginf( x: _ArrayLikeFloat_co, out: None = ..., -) -> _ArrayND[bool_]: ... +) -> NDArray[bool_]: ... @overload def isneginf( x: _ArrayLikeFloat_co, diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index f54108f60adb..4f0dc0cf13b2 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -339,7 +339,6 @@ class _8Bit(_16Bit): ... # type: ignore[misc] _NestedSequence, _RecursiveSequence, _SupportsArray, - _ArrayND, _ArrayLikeInt, _ArrayLikeBool_co, _ArrayLikeUInt_co, diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index 582d3db9afcc..9f57b22956cc 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -129,8 +129,3 @@ def __array__(self) -> ndarray[Any, _DType_co]: ... "dtype[integer[Any]]", int, ] - -if TYPE_CHECKING: - _ArrayND = ndarray[Any, dtype[_ScalarType]] -else: - _ArrayND = Any diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index 30c210b62c33..d9cb0f1571a5 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -47,7 +47,8 @@ _NumberLike_co, ) from . 
import NBitBase -from ._array_like import ArrayLike, _ArrayND +from ._array_like import ArrayLike +from ._generic_alias import NDArray  if sys.version_info >= (3, 8): from typing import Protocol
@@ -336,7 +337,7 @@ class _ComparisonOp(Protocol[_T1, _T2]): @overload def __call__(self, __other: _T1) -> bool_: ... @overload
- def __call__(self, __other: _T2) -> _ArrayND[bool_]: ...
+ def __call__(self, __other: _T2) -> NDArray[bool_]: ...
else: _BoolOp = Any
From 2e8c461fe7870f817f9ee84272623b82557e3f05 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 7 May 2021 17:36:39 +0200 Subject: [PATCH 1135/1270] REL: Add a release note for `npt.NDArray`
--- .../upcoming_changes/18935.new_feature.rst | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 doc/release/upcoming_changes/18935.new_feature.rst
diff --git a/doc/release/upcoming_changes/18935.new_feature.rst b/doc/release/upcoming_changes/18935.new_feature.rst new file mode 100644 index 000000000000..8c6e25da6800 --- /dev/null +++ b/doc/release/upcoming_changes/18935.new_feature.rst
@@ -0,0 +1,28 @@
+A runtime-subscriptable alias has been added for `ndarray`
+----------------------------------------------------------
+`numpy.typing.NDArray` has been added, a runtime-subscriptable alias for
+``np.ndarray[Any, np.dtype[~ScalarType]]``. The new type alias can be used
+for annotating arrays with a given dtype and unspecified shape. :sup:`1`
+
+:sup:`1` NumPy does not support annotating array shapes as of 1.21,
+but this is expected to change in the future (see :pep:`646`).
+
+Examples
+~~~~~~~~
+
+.. code-block:: python
+
+ >>> import numpy as np
+ >>> import numpy.typing as npt
+
+ >>> print(npt.NDArray)
+ numpy.ndarray[typing.Any, numpy.dtype[~ScalarType]]
+
+ >>> print(npt.NDArray[np.float64])
+ numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
+
+ >>> NDArrayInt = npt.NDArray[np.int_]
+ >>> a: NDArrayInt = np.arange(10)
+
+ >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]:
+ ...
return np.array(a) From 035e9b2ca33491e0ce07b5197aa03b5dd0c910dc Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 7 May 2021 17:56:45 +0200 Subject: [PATCH 1136/1270] STY: Stay under the 79 characters limit --- numpy/typing/tests/test_generic_alias.py | 75 ++++++++++++------------ 1 file changed, 36 insertions(+), 39 deletions(-) diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py index ec9b53154ff3..8b91089456eb 100644 --- a/numpy/typing/tests/test_generic_alias.py +++ b/numpy/typing/tests/test_generic_alias.py @@ -35,31 +35,28 @@ class Subclass(base): # type: ignore[misc,valid-type] class TestGenericAlias: """Tests for `numpy.typing._generic_alias._GenericAlias`.""" - @pytest.mark.parametrize( - "name,func", - [ - ("__init__", lambda n: n), - ("__origin__", lambda n: n.__origin__), - ("__args__", lambda n: n.__args__), - ("__parameters__", lambda n: n.__parameters__), - ("__reduce__", lambda n: n.__reduce__()[1:]), - ("__reduce_ex__", lambda n: n.__reduce_ex__(1)[1:]), - ("__mro_entries__", lambda n: n.__mro_entries__([object])), - ("__hash__", lambda n: hash(n)), - ("__repr__", lambda n: repr(n)), - ("__getitem__", lambda n: n[np.float64]), - ("__getitem__", lambda n: n[ScalarType][np.float64]), - ("__getitem__", lambda n: n[Union[np.int64, ScalarType]][np.float64]), - ("__eq__", lambda n: n == n), - ("__ne__", lambda n: n != np.ndarray), - ("__dir__", lambda n: dir(n)), - ("__call__", lambda n: n((5,), np.int64)), - ("__call__", lambda n: n(shape=(5,), dtype=np.int64)), - ("subclassing", lambda n: _get_subclass_mro(n)), - ("pickle", lambda n: n == pickle.loads(pickle.dumps(n))), - ("__weakref__", lambda n: n == weakref.ref(n)()), - ] - ) + @pytest.mark.parametrize("name,func", [ + ("__init__", lambda n: n), + ("__origin__", lambda n: n.__origin__), + ("__args__", lambda n: n.__args__), + ("__parameters__", lambda n: n.__parameters__), + ("__reduce__", lambda n: n.__reduce__()[1:]), + ("__reduce_ex__", lambda n: n.__reduce_ex__(1)[1:]), + ("__mro_entries__", lambda n: n.__mro_entries__([object])), + ("__hash__", lambda n: hash(n)), + ("__repr__", lambda n: repr(n)), + ("__getitem__", lambda n: n[np.float64]), + ("__getitem__", lambda n: n[ScalarType][np.float64]), + ("__getitem__", lambda n: n[Union[np.int64, ScalarType]][np.float64]), + ("__eq__", lambda n: n == n), + ("__ne__", lambda n: n != np.ndarray), + ("__dir__", lambda n: dir(n)), + ("__call__", lambda n: n((5,), np.int64)), + ("__call__", lambda n: n(shape=(5,), dtype=np.int64)), + ("subclassing", lambda n: _get_subclass_mro(n)), + ("pickle", lambda n: n == pickle.loads(pickle.dumps(n))), + ("__weakref__", lambda n: n == weakref.ref(n)()), + ]) def test_pass(self, name: str, func: FuncType) -> None: """Compare `types.GenericAlias` with its numpy-based backport. @@ -75,7 +72,10 @@ def test_pass(self, name: str, func: FuncType) -> None: @pytest.mark.parametrize("name", GETATTR_NAMES) def test_getattr(self, name: str) -> None: - """Test that `getattr` wraps around the underlying type (``__origin__``).""" + """Test that `getattr` wraps around the underlying type, + aka ``__origin__``. 
+ + """ value = getattr(NDArray, name) value_ref1 = getattr(np.ndarray, name) @@ -85,19 +85,16 @@ def test_getattr(self, name: str) -> None: else: assert value == value_ref1 - @pytest.mark.parametrize( - "name,exc_type,func", - [ - ("__getitem__", TypeError, lambda n: n[()]), - ("__getitem__", TypeError, lambda n: n[Any, Any]), - ("__getitem__", TypeError, lambda n: n[Any][Any]), - ("__instancecheck__", TypeError, lambda n: isinstance(np.array(1), n)), - ("__subclasscheck__", TypeError, lambda n: issubclass(np.ndarray, n)), - ("__setattr__", AttributeError, lambda n: setattr(n, "__origin__", int)), - ("__setattr__", AttributeError, lambda n: setattr(n, "test", int)), - ("__getattribute__", AttributeError, lambda n: getattr(n, "test")), - ] - ) + @pytest.mark.parametrize("name,exc_type,func", [ + ("__getitem__", TypeError, lambda n: n[()]), + ("__getitem__", TypeError, lambda n: n[Any, Any]), + ("__getitem__", TypeError, lambda n: n[Any][Any]), + ("isinstance", TypeError, lambda n: isinstance(np.array(1), n)), + ("issublass", TypeError, lambda n: issubclass(np.ndarray, n)), + ("setattr", AttributeError, lambda n: setattr(n, "__origin__", int)), + ("setattr", AttributeError, lambda n: setattr(n, "test", int)), + ("getattr", AttributeError, lambda n: getattr(n, "test")), + ]) def test_raise( self, name: str, From 9803b72393f274afaa99d4d8226b5f79d2885cef Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 7 May 2021 18:01:48 +0200 Subject: [PATCH 1137/1270] MAINT: Removed an unused variable --- numpy/typing/_generic_alias.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py index fd719c6110b2..f98fca62edf3 100644 --- a/numpy/typing/_generic_alias.py +++ b/numpy/typing/_generic_alias.py @@ -4,7 +4,6 @@ import types from typing import ( Any, - Callable, ClassVar, FrozenSet, Generator, From c96f6255da38116590fa573e8a45973121c6b390 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 7 May 2021 18:02:10 +0200 Subject: [PATCH 1138/1270] TST: Fix a python >= 3.9 test --- numpy/typing/tests/test_generic_alias.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py index 8b91089456eb..13072051a83f 100644 --- a/numpy/typing/tests/test_generic_alias.py +++ b/numpy/typing/tests/test_generic_alias.py @@ -25,6 +25,8 @@ GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS) +BUFFER = np.array([1], dtype=np.int64) +BUFFER.setflags(write=False) def _get_subclass_mro(base: type) -> Tuple[type, ...]: class Subclass(base): # type: ignore[misc,valid-type] @@ -51,8 +53,8 @@ class TestGenericAlias: ("__eq__", lambda n: n == n), ("__ne__", lambda n: n != np.ndarray), ("__dir__", lambda n: dir(n)), - ("__call__", lambda n: n((5,), np.int64)), - ("__call__", lambda n: n(shape=(5,), dtype=np.int64)), + ("__call__", lambda n: n((1,), np.int64, BUFFER)), + ("__call__", lambda n: n(shape=(1,), dtype=np.int64, buffer=BUFFER)), ("subclassing", lambda n: _get_subclass_mro(n)), ("pickle", lambda n: n == pickle.loads(pickle.dumps(n))), ("__weakref__", lambda n: n == weakref.ref(n)()), From bb745921ac6c27d6caf9b76663668e5b9f28abac Mon Sep 17 00:00:00 2001 From: Laura Martens Date: Wed, 12 May 2021 12:35:14 +0200 Subject: [PATCH 1139/1270] BUG: fixed ma.average ignoring masked weights (#18960) Closes #10438 Co-authored-by: mecopur --- numpy/ma/extras.py | 2 +- numpy/ma/tests/test_extras.py | 17 +++++++++++++++++ 2 files changed, 18 
insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 8c123bc3bcc4..bd5fc2ca3bb2 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -614,7 +614,7 @@ def average(a, axis=None, weights=None, returned=False): "Length of weights not compatible with specified axis.") # setup wgt to broadcast along axis - wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) + wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape, subok=True) wgt = wgt.swapaxes(-1, axis) if m is not nomask: diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index d237829cb71f..e735b9bc77fa 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -292,6 +292,23 @@ def test_complex(self): assert_almost_equal(wav1.real, expected1.real) assert_almost_equal(wav1.imag, expected1.imag) + def test_masked_weights(self): + # Test with masked weights. + # (Regression test for https://github.com/numpy/numpy/issues/10438) + a = np.ma.array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]]) + weights_unmasked = masked_array([5, 28, 31], mask=False) + weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0]) + + avg_unmasked = average(a, axis=0, + weights=weights_unmasked, returned=False) + expected_unmasked = np.array([6.0, 5.21875, 6.21875]) + assert_almost_equal(avg_unmasked, expected_unmasked) + + avg_masked = average(a, axis=0, weights=weights_masked, returned=False) + expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678]) + assert_almost_equal(avg_masked, expected_masked) + class TestConcatenator: # Tests for mr_, the equivalent of r_ for masked arrays. From 888ffb0a97ecedd73d07ee2935434412f76c277e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 14:48:43 +0200 Subject: [PATCH 1140/1270] MAINT: Set a number of `None`-based return types --- numpy/__init__.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 01a08f24a749..46b320158f56 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1727,11 +1727,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property def shape(self) -> _Shape: ... @shape.setter - def shape(self, value: _ShapeLike): ... + def shape(self, value: _ShapeLike) -> None: ... @property def strides(self) -> _Shape: ... @strides.setter - def strides(self, value: _ShapeLike): ... + def strides(self, value: _ShapeLike) -> None: ... def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ... def fill(self, value: Any) -> None: ... @property @@ -3132,7 +3132,7 @@ longcomplex = complexfloating[_NBitLongDouble, _NBitLongDouble] class flexible(generic): ... # type: ignore class void(flexible): - def __init__(self, __value: Union[_IntLike_co, bytes]): ... + def __init__(self, __value: Union[_IntLike_co, bytes]) -> None: ... @property def real(self: _ArraySelf) -> _ArraySelf: ... 
@property From a307f98aa724c4d0aead7dd2629e696fe670cc35 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 15:16:11 +0200 Subject: [PATCH 1141/1270] MAINT: Remove two duplicate (redundant) type aliases --- numpy/__init__.pyi | 38 +++++++++++++++----------------------- numpy/core/fromnumeric.pyi | 28 ++++++++++++++-------------- 2 files changed, 29 insertions(+), 37 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 46b320158f56..f0f0e2ba1081 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1207,14 +1207,6 @@ _PartitionKind = Literal["introselect"] _SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"] _SortSide = Literal["left", "right"] -_ArrayLikeBool = Union[_BoolLike_co, Sequence[_BoolLike_co], ndarray] -_ArrayLikeIntOrBool = Union[ - _IntLike_co, - ndarray, - Sequence[_IntLike_co], - Sequence[Sequence[Any]], # TODO: wait for support for recursive types -] - _ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon) class _ArrayOrScalarCommon: @@ -1482,7 +1474,7 @@ class _ArrayOrScalarCommon: out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> Any: ... @overload def max( @@ -1491,7 +1483,7 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> _NdArraySubClass: ... @overload @@ -1518,7 +1510,7 @@ class _ArrayOrScalarCommon: out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> Any: ... @overload def min( @@ -1527,7 +1519,7 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> _NdArraySubClass: ... def newbyteorder( @@ -1543,7 +1535,7 @@ class _ArrayOrScalarCommon: out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> Any: ... @overload def prod( @@ -1553,7 +1545,7 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> _NdArraySubClass: ... @overload @@ -1573,7 +1565,7 @@ class _ArrayOrScalarCommon: def repeat( self, - repeats: _ArrayLikeIntOrBool, + repeats: _ArrayLikeInt_co, axis: Optional[SupportsIndex] = ..., ) -> ndarray: ... @@ -1617,7 +1609,7 @@ class _ArrayOrScalarCommon: out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> Any: ... @overload def sum( @@ -1627,7 +1619,7 @@ class _ArrayOrScalarCommon: out: _NdArraySubClass = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> _NdArraySubClass: ... 
@overload @@ -1641,7 +1633,7 @@ class _ArrayOrScalarCommon: @overload def take( self, - indices: _ArrayLikeIntOrBool, + indices: _ArrayLikeInt_co, axis: Optional[SupportsIndex] = ..., out: None = ..., mode: _ModeKind = ..., @@ -1649,7 +1641,7 @@ class _ArrayOrScalarCommon: @overload def take( self, - indices: _ArrayLikeIntOrBool, + indices: _ArrayLikeInt_co, axis: Optional[SupportsIndex] = ..., out: _NdArraySubClass = ..., mode: _ModeKind = ..., @@ -1761,7 +1753,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ... def argpartition( self, - kth: _ArrayLikeIntOrBool, + kth: _ArrayLikeInt_co, axis: Optional[SupportsIndex] = ..., kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., @@ -1780,7 +1772,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def nonzero(self) -> Tuple[ndarray, ...]: ... def partition( self, - kth: _ArrayLikeIntOrBool, + kth: _ArrayLikeInt_co, axis: SupportsIndex = ..., kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., @@ -1788,13 +1780,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # `put` is technically available to `generic`, # but is pointless as `generic`s are immutable def put( - self, ind: _ArrayLikeIntOrBool, v: ArrayLike, mode: _ModeKind = ... + self, ind: _ArrayLikeInt_co, v: ArrayLike, mode: _ModeKind = ... ) -> None: ... def searchsorted( self, # >= 1D array v: ArrayLike, side: _SortSide = ..., - sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array + sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array ) -> ndarray: ... def setfield( self, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = ... diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi index 26a43c1a049e..3342ec3ac47b 100644 --- a/numpy/core/fromnumeric.pyi +++ b/numpy/core/fromnumeric.pyi @@ -11,8 +11,6 @@ from numpy import ( generic, _OrderKACF, _OrderACF, - _ArrayLikeBool, - _ArrayLikeIntOrBool, _ModeKind, _PartitionKind, _SortKind, @@ -23,6 +21,8 @@ from numpy.typing import ( ArrayLike, _ShapeLike, _Shape, + _ArrayLikeBool_co, + _ArrayLikeInt_co, _NumberLike_co, ) @@ -52,7 +52,7 @@ _Number = TypeVar("_Number", bound=number) # 4. An array-like object comes in; an ndarray or generic comes out def take( a: ArrayLike, - indices: _ArrayLikeIntOrBool, + indices: _ArrayLikeInt_co, axis: Optional[int] = ..., out: Optional[ndarray] = ..., mode: _ModeKind = ..., @@ -65,7 +65,7 @@ def reshape( ) -> ndarray: ... def choose( - a: _ArrayLikeIntOrBool, + a: _ArrayLikeInt_co, choices: ArrayLike, out: Optional[ndarray] = ..., mode: _ModeKind = ..., @@ -73,13 +73,13 @@ def choose( def repeat( a: ArrayLike, - repeats: _ArrayLikeIntOrBool, + repeats: _ArrayLikeInt_co, axis: Optional[int] = ..., ) -> ndarray: ... def put( a: ndarray, - ind: _ArrayLikeIntOrBool, + ind: _ArrayLikeInt_co, v: ArrayLike, mode: _ModeKind = ..., ) -> None: ... 
@@ -97,7 +97,7 @@ def transpose( def partition( a: ArrayLike, - kth: _ArrayLikeIntOrBool, + kth: _ArrayLikeInt_co, axis: Optional[int] = ..., kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., @@ -105,7 +105,7 @@ def partition( def argpartition( a: ArrayLike, - kth: _ArrayLikeIntOrBool, + kth: _ArrayLikeInt_co, axis: Optional[int] = ..., kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., @@ -156,14 +156,14 @@ def searchsorted( a: ArrayLike, v: _Scalar, side: _SortSide = ..., - sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array + sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array ) -> intp: ... @overload def searchsorted( a: ArrayLike, v: ArrayLike, side: _SortSide = ..., - sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array + sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array ) -> ndarray: ... def resize( @@ -235,7 +235,7 @@ def sum( out: Optional[ndarray] = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> Any: ... @overload @@ -288,7 +288,7 @@ def amax( out: Optional[ndarray] = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> Any: ... def amin( @@ -297,7 +297,7 @@ def amin( out: Optional[ndarray] = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> Any: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily @@ -314,7 +314,7 @@ def prod( out: Optional[ndarray] = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., - where: _ArrayLikeBool = ..., + where: _ArrayLikeBool_co = ..., ) -> Any: ... def cumprod( From ae6960de23afc8b33e0dbfc3d5d361ab126d8734 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 14:48:02 +0200 Subject: [PATCH 1142/1270] ENH: Add annotations for `generic.item` and `generic.tolist` --- numpy/__init__.pyi | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index f0f0e2ba1081..466af9110eff 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2778,7 +2778,7 @@ class generic(_ArrayOrScalarCommon): @property def flat(self: _ScalarType) -> flatiter[ndarray[Any, dtype[_ScalarType]]]: ... def item( - self: _ScalarType, + self, __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., ) -> Any: ... def squeeze( @@ -2820,6 +2820,11 @@ class number(generic, Generic[_NBit1]): # type: ignore class bool_(generic): def __init__(self, __value: object = ...) -> None: ... + def item( + self, + __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + ) -> bool: ... + def tolist(self) -> bool: ... @property def real(self: _ArraySelf) -> _ArraySelf: ... @property @@ -2881,7 +2886,8 @@ class _DatetimeScalar(Protocol): @property def year(self) -> int: ... - +# TODO: `item`/`tolist` returns either `dt.date`, `dt.datetime` or `int` +# depending on the unit class datetime64(generic): @overload def __init__( @@ -2920,6 +2926,11 @@ else: class integer(number[_NBit1]): # type: ignore # NOTE: `__index__` is technically defined in the bottom-most # sub-classes (`int64`, `uint32`, etc) + def item( + self, + __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + ) -> int: ... + def tolist(self) -> int: ... def __index__(self) -> int: ... 
__truediv__: _IntTrueDiv[_NBit1] __rtruediv__: _IntTrueDiv[_NBit1] @@ -2978,6 +2989,8 @@ int0 = signedinteger[_NBitIntP] int_ = signedinteger[_NBitInt] longlong = signedinteger[_NBitLongLong] +# TODO: `item`/`tolist` returns either `dt.timedelta` or `int` +# depending on the unit class timedelta64(generic): def __init__( self, @@ -3057,6 +3070,11 @@ _FloatType = TypeVar('_FloatType', bound=floating) class floating(inexact[_NBit1]): def __init__(self, __value: _FloatValue = ...) -> None: ... + def item( + self, + __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + ) -> float: ... + def tolist(self) -> float: ... __add__: _FloatOp[_NBit1] __radd__: _FloatOp[_NBit1] __sub__: _FloatOp[_NBit1] @@ -3091,6 +3109,11 @@ longfloat = floating[_NBitLongDouble] class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): def __init__(self, __value: _ComplexValue = ...) -> None: ... + def item( + self, + __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + ) -> complex: ... + def tolist(self) -> complex: ... @property def real(self) -> floating[_NBit1]: ... # type: ignore[override] @property @@ -3123,6 +3146,9 @@ longcomplex = complexfloating[_NBitLongDouble, _NBitLongDouble] class flexible(generic): ... # type: ignore +# TODO: `item`/`tolist` returns either `bytes` or `tuple` +# depending on whether or not it's used as an opaque bytes sequence +# or a structure class void(flexible): def __init__(self, __value: Union[_IntLike_co, bytes]) -> None: ... @property @@ -3151,6 +3177,11 @@ class bytes_(character, bytes): def __init__( self, __value: str, encoding: str = ..., errors: str = ... ) -> None: ... + def item( + self, + __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + ) -> bytes: ... + def tolist(self) -> bytes: ... string_ = bytes_ bytes0 = bytes_ @@ -3162,6 +3193,11 @@ class str_(character, str): def __init__( self, __value: bytes, encoding: str = ..., errors: str = ... ) -> None: ... + def item( + self, + __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + ) -> str: ... + def tolist(self) -> str: ... unicode_ = str_ str0 = str_ From 4bd5fd2abf8f681d2af900696eca1e8a99860f83 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 14:48:27 +0200 Subject: [PATCH 1143/1270] ENH: Add annotations for `ndarray.item` --- numpy/__init__.pyi | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 466af9110eff..d14b6de46999 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1676,6 +1676,7 @@ _NumberType = TypeVar("_NumberType", bound=number[Any]) _BufferType = Union[ndarray, bytes, bytearray, memoryview] _T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) _2Tuple = Tuple[_T, _T] _Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"] @@ -1686,6 +1687,9 @@ _ArrayComplex_co = _ArrayND[Union[bool_, integer[Any], floating[Any], complexflo _ArrayNumber_co = _ArrayND[Union[bool_, number[Any]]] _ArrayTD64_co = _ArrayND[Union[bool_, integer[Any], timedelta64]] +class _SupportsItem(Protocol[_T_co]): + def item(self, __args: Any) -> _T_co: ... + class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property def base(self) -> Optional[ndarray]: ... @@ -1728,10 +1732,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def fill(self, value: Any) -> None: ... @property def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ... 
+ + # Use the same output type as that of the underlying `generic` @overload - def item(self, *args: SupportsIndex) -> Any: ... + def item( + self: ndarray[Any, dtype[_SupportsItem[_T]]], # type: ignore[type-var] + *args: SupportsIndex, + ) -> _T: ... @overload - def item(self, __args: Tuple[SupportsIndex, ...]) -> Any: ... + def item( + self: ndarray[Any, dtype[_SupportsItem[_T]]], # type: ignore[type-var] + __args: Tuple[SupportsIndex, ...], + ) -> _T: ... + @overload def itemset(self, __value: Any) -> None: ... @overload From 9f86a84be8610b1ac90464f20e30ea250b80fbe3 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 15:17:59 +0200 Subject: [PATCH 1144/1270] ENH: Add annotations for `generic.take` and `ndarray.take` --- numpy/__init__.pyi | 77 +++++++++++++++++++++++++++++++--------------- 1 file changed, 52 insertions(+), 25 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d14b6de46999..6c40f75521b6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1622,31 +1622,6 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co = ..., ) -> _NdArraySubClass: ... - @overload - def take( - self, - indices: _IntLike_co, - axis: Optional[SupportsIndex] = ..., - out: None = ..., - mode: _ModeKind = ..., - ) -> Any: ... - @overload - def take( - self, - indices: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., - out: None = ..., - mode: _ModeKind = ..., - ) -> ndarray: ... - @overload - def take( - self, - indices: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., - out: _NdArraySubClass = ..., - mode: _ModeKind = ..., - ) -> _NdArraySubClass: ... - @overload def var( self, @@ -1828,6 +1803,32 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... + + @overload + def take( # type: ignore[misc] + self: ndarray[Any, dtype[_ScalarType]], + indices: _IntLike_co, + axis: Optional[SupportsIndex] = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> _ScalarType: ... + @overload + def take( # type: ignore[misc] + self, + indices: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray[Any, _DType_co]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + # Many of these special methods are irrelevant currently, since protocols # aren't supported yet. That said, I'm adding them for completeness. # https://docs.python.org/3/reference/datamodel.html @@ -2794,6 +2795,32 @@ class generic(_ArrayOrScalarCommon): self, __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., ) -> Any: ... + + @overload + def take( # type: ignore[misc] + self: _ScalarType, + indices: _IntLike_co, + axis: Optional[SupportsIndex] = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> _ScalarType: ... + @overload + def take( # type: ignore[misc] + self: _ScalarType, + indices: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray[Any, dtype[_ScalarType]]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + def squeeze( self: _ScalarType, axis: Union[Literal[0], Tuple[()]] = ... ) -> _ScalarType: ... 
From 5f59e7729f0b1708f58ec0e5c72a90b023f45458 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 15:45:09 +0200 Subject: [PATCH 1145/1270] ENH: Add annotations for `generic.repeat` and `ndarray.repeat` --- numpy/__init__.pyi | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6c40f75521b6..b708b99ab7a7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1563,12 +1563,6 @@ class _ArrayOrScalarCommon: keepdims: bool = ..., ) -> _NdArraySubClass: ... - def repeat( - self, - repeats: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., - ) -> ndarray: ... - @overload def round( self: _ArraySelf, @@ -1829,6 +1823,12 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): mode: _ModeKind = ..., ) -> _NdArraySubClass: ... + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + ) -> ndarray[Any, _DType_co]: ... + # Many of these special methods are irrelevant currently, since protocols # aren't supported yet. That said, I'm adding them for completeness. # https://docs.python.org/3/reference/datamodel.html @@ -2821,6 +2821,12 @@ class generic(_ArrayOrScalarCommon): mode: _ModeKind = ..., ) -> _NdArraySubClass: ... + def repeat( + self: _ScalarType, + repeats: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + ) -> ndarray[Any, dtype[_ScalarType]]: ... + def squeeze( self: _ScalarType, axis: Union[Literal[0], Tuple[()]] = ... ) -> _ScalarType: ... From eb28af241185586467f145d4c986e8f315b3d940 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 14:49:06 +0200 Subject: [PATCH 1146/1270] TST: Add typing tests for the `item`, `take`, `tolist` and `repeat` methods --- numpy/typing/tests/data/fail/fromnumeric.py | 4 +- numpy/typing/tests/data/pass/ndarray_misc.py | 11 +- numpy/typing/tests/data/pass/scalars.py | 16 ++ .../typing/tests/data/reveal/ndarray_misc.py | 165 +++++++++--------- numpy/typing/tests/data/reveal/scalars.py | 16 ++ 5 files changed, 127 insertions(+), 85 deletions(-) diff --git a/numpy/typing/tests/data/fail/fromnumeric.py b/numpy/typing/tests/data/fail/fromnumeric.py index d8f7a5d69eba..8fafed1b7705 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.py +++ b/numpy/typing/tests/data/fail/fromnumeric.py @@ -117,13 +117,13 @@ np.amax(a, keepdims=1.0) # E: incompatible type np.amax(a, out=1.0) # E: incompatible type np.amax(a, initial=[1.0]) # E: incompatible type -np.amax(a, where=[1.0]) # E: List item 0 has incompatible type +np.amax(a, where=[1.0]) # E: incompatible type np.amin(a, axis=1.0) # E: incompatible type np.amin(a, keepdims=1.0) # E: incompatible type np.amin(a, out=1.0) # E: incompatible type np.amin(a, initial=[1.0]) # E: incompatible type -np.amin(a, where=[1.0]) # E: List item 0 has incompatible type +np.amin(a, where=[1.0]) # E: incompatible type np.prod(a, axis=1.0) # E: incompatible type np.prod(a, out=False) # E: incompatible type diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 6c6f5d50b986..ba10ce974506 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -6,17 +6,19 @@ """ -from typing import cast +from __future__ import annotations + +from typing import cast, Any import numpy as np class SubClass(np.ndarray): ... 
i4 = np.int32(1) -A = np.array([[1]], dtype=np.int32) +A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) B0 = np.empty((), dtype=np.int32).view(SubClass) B1 = np.empty((1,), dtype=np.int32).view(SubClass) B2 = np.empty((1, 1), dtype=np.int32).view(SubClass) -C = np.array([0, 1, 2], dtype=np.int32) +C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) D = np.empty(3).view(SubClass) i4.all() @@ -157,3 +159,6 @@ class SubClass(np.ndarray): ... void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0)) void.setfield(10, np.float64) + +A.item(0) +C.item(0) diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 815566b6889f..f7965e1c54f6 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -204,3 +204,19 @@ def __float__(self) -> float: np.clongdouble() np.clongfloat() np.longcomplex() + +np.bool_().item() +np.int_().item() +np.uint64().item() +np.float32().item() +np.complex128().item() +np.str_().item() +np.bytes_().item() + +np.bool_().tolist() +np.int_().tolist() +np.uint64().tolist() +np.float32().tolist() +np.complex128().tolist() +np.str_().tolist() +np.bytes_().tolist() diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py index e0f44bcbc40f..8c45eb6f3cda 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.py +++ b/numpy/typing/tests/data/reveal/ndarray_misc.py @@ -6,145 +6,150 @@ """ +from typing import Any import numpy as np class SubClass(np.ndarray): ... f8: np.float64 -A: np.ndarray +AR_f8: np.ndarray[Any, np.dtype[np.float64]] B: SubClass +AR_U: np.ndarray[Any, np.dtype[np.str_]] reveal_type(f8.all()) # E: numpy.bool_ -reveal_type(A.all()) # E: numpy.bool_ -reveal_type(A.all(axis=0)) # E: Any -reveal_type(A.all(keepdims=True)) # E: Any -reveal_type(A.all(out=B)) # E: SubClass +reveal_type(AR_f8.all()) # E: numpy.bool_ +reveal_type(AR_f8.all(axis=0)) # E: Any +reveal_type(AR_f8.all(keepdims=True)) # E: Any +reveal_type(AR_f8.all(out=B)) # E: SubClass reveal_type(f8.any()) # E: numpy.bool_ -reveal_type(A.any()) # E: numpy.bool_ -reveal_type(A.any(axis=0)) # E: Any -reveal_type(A.any(keepdims=True)) # E: Any -reveal_type(A.any(out=B)) # E: SubClass +reveal_type(AR_f8.any()) # E: numpy.bool_ +reveal_type(AR_f8.any(axis=0)) # E: Any +reveal_type(AR_f8.any(keepdims=True)) # E: Any +reveal_type(AR_f8.any(out=B)) # E: SubClass reveal_type(f8.argmax()) # E: {intp} -reveal_type(A.argmax()) # E: {intp} -reveal_type(A.argmax(axis=0)) # E: Any -reveal_type(A.argmax(out=B)) # E: SubClass +reveal_type(AR_f8.argmax()) # E: {intp} +reveal_type(AR_f8.argmax(axis=0)) # E: Any +reveal_type(AR_f8.argmax(out=B)) # E: SubClass reveal_type(f8.argmin()) # E: {intp} -reveal_type(A.argmin()) # E: {intp} -reveal_type(A.argmin(axis=0)) # E: Any -reveal_type(A.argmin(out=B)) # E: SubClass +reveal_type(AR_f8.argmin()) # E: {intp} +reveal_type(AR_f8.argmin(axis=0)) # E: Any +reveal_type(AR_f8.argmin(out=B)) # E: SubClass reveal_type(f8.argsort()) # E: numpy.ndarray[Any, Any] -reveal_type(A.argsort()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.argsort()) # E: numpy.ndarray[Any, Any] reveal_type(f8.astype(np.int64).choose([()])) # E: numpy.ndarray[Any, Any] -reveal_type(A.choose([0])) # E: numpy.ndarray[Any, Any] -reveal_type(A.choose([0], out=B)) # E: SubClass +reveal_type(AR_f8.choose([0])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.choose([0], out=B)) # E: SubClass reveal_type(f8.clip(1)) # E: 
Any -reveal_type(A.clip(1)) # E: Any -reveal_type(A.clip(None, 1)) # E: Any -reveal_type(A.clip(1, out=B)) # E: SubClass -reveal_type(A.clip(None, 1, out=B)) # E: SubClass +reveal_type(AR_f8.clip(1)) # E: Any +reveal_type(AR_f8.clip(None, 1)) # E: Any +reveal_type(AR_f8.clip(1, out=B)) # E: SubClass +reveal_type(AR_f8.clip(None, 1, out=B)) # E: SubClass reveal_type(f8.compress([0])) # E: numpy.ndarray[Any, Any] -reveal_type(A.compress([0])) # E: numpy.ndarray[Any, Any] -reveal_type(A.compress([0], out=B)) # E: SubClass +reveal_type(AR_f8.compress([0])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.compress([0], out=B)) # E: SubClass reveal_type(f8.conj()) # E: {float64} -reveal_type(A.conj()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.conj()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] reveal_type(B.conj()) # E: SubClass reveal_type(f8.conjugate()) # E: {float64} -reveal_type(A.conjugate()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.conjugate()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] reveal_type(B.conjugate()) # E: SubClass reveal_type(f8.cumprod()) # E: numpy.ndarray[Any, Any] -reveal_type(A.cumprod()) # E: numpy.ndarray[Any, Any] -reveal_type(A.cumprod(out=B)) # E: SubClass +reveal_type(AR_f8.cumprod()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.cumprod(out=B)) # E: SubClass reveal_type(f8.cumsum()) # E: numpy.ndarray[Any, Any] -reveal_type(A.cumsum()) # E: numpy.ndarray[Any, Any] -reveal_type(A.cumsum(out=B)) # E: SubClass +reveal_type(AR_f8.cumsum()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.cumsum(out=B)) # E: SubClass reveal_type(f8.max()) # E: Any -reveal_type(A.max()) # E: Any -reveal_type(A.max(axis=0)) # E: Any -reveal_type(A.max(keepdims=True)) # E: Any -reveal_type(A.max(out=B)) # E: SubClass +reveal_type(AR_f8.max()) # E: Any +reveal_type(AR_f8.max(axis=0)) # E: Any +reveal_type(AR_f8.max(keepdims=True)) # E: Any +reveal_type(AR_f8.max(out=B)) # E: SubClass reveal_type(f8.mean()) # E: Any -reveal_type(A.mean()) # E: Any -reveal_type(A.mean(axis=0)) # E: Any -reveal_type(A.mean(keepdims=True)) # E: Any -reveal_type(A.mean(out=B)) # E: SubClass +reveal_type(AR_f8.mean()) # E: Any +reveal_type(AR_f8.mean(axis=0)) # E: Any +reveal_type(AR_f8.mean(keepdims=True)) # E: Any +reveal_type(AR_f8.mean(out=B)) # E: SubClass reveal_type(f8.min()) # E: Any -reveal_type(A.min()) # E: Any -reveal_type(A.min(axis=0)) # E: Any -reveal_type(A.min(keepdims=True)) # E: Any -reveal_type(A.min(out=B)) # E: SubClass +reveal_type(AR_f8.min()) # E: Any +reveal_type(AR_f8.min(axis=0)) # E: Any +reveal_type(AR_f8.min(keepdims=True)) # E: Any +reveal_type(AR_f8.min(out=B)) # E: SubClass reveal_type(f8.newbyteorder()) # E: {float64} -reveal_type(A.newbyteorder()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.newbyteorder()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] reveal_type(B.newbyteorder('|')) # E: SubClass reveal_type(f8.prod()) # E: Any -reveal_type(A.prod()) # E: Any -reveal_type(A.prod(axis=0)) # E: Any -reveal_type(A.prod(keepdims=True)) # E: Any -reveal_type(A.prod(out=B)) # E: SubClass +reveal_type(AR_f8.prod()) # E: Any +reveal_type(AR_f8.prod(axis=0)) # E: Any +reveal_type(AR_f8.prod(keepdims=True)) # E: Any +reveal_type(AR_f8.prod(out=B)) # E: SubClass reveal_type(f8.ptp()) # E: Any -reveal_type(A.ptp()) # E: Any -reveal_type(A.ptp(axis=0)) # E: Any -reveal_type(A.ptp(keepdims=True)) # E: Any -reveal_type(A.ptp(out=B)) # E: SubClass +reveal_type(AR_f8.ptp()) # E: Any +reveal_type(AR_f8.ptp(axis=0)) # E: Any +reveal_type(AR_f8.ptp(keepdims=True)) # 
E: Any +reveal_type(AR_f8.ptp(out=B)) # E: SubClass reveal_type(f8.round()) # E: {float64} -reveal_type(A.round()) # E: numpy.ndarray[Any, Any] -reveal_type(A.round(out=B)) # E: SubClass +reveal_type(AR_f8.round()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_f8.round(out=B)) # E: SubClass -reveal_type(f8.repeat(1)) # E: numpy.ndarray[Any, Any] -reveal_type(A.repeat(1)) # E: numpy.ndarray[Any, Any] +reveal_type(f8.repeat(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_f8.repeat(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] reveal_type(B.repeat(1)) # E: numpy.ndarray[Any, Any] reveal_type(f8.std()) # E: Any -reveal_type(A.std()) # E: Any -reveal_type(A.std(axis=0)) # E: Any -reveal_type(A.std(keepdims=True)) # E: Any -reveal_type(A.std(out=B)) # E: SubClass +reveal_type(AR_f8.std()) # E: Any +reveal_type(AR_f8.std(axis=0)) # E: Any +reveal_type(AR_f8.std(keepdims=True)) # E: Any +reveal_type(AR_f8.std(out=B)) # E: SubClass reveal_type(f8.sum()) # E: Any -reveal_type(A.sum()) # E: Any -reveal_type(A.sum(axis=0)) # E: Any -reveal_type(A.sum(keepdims=True)) # E: Any -reveal_type(A.sum(out=B)) # E: SubClass +reveal_type(AR_f8.sum()) # E: Any +reveal_type(AR_f8.sum(axis=0)) # E: Any +reveal_type(AR_f8.sum(keepdims=True)) # E: Any +reveal_type(AR_f8.sum(out=B)) # E: SubClass -reveal_type(f8.take(0)) # E: Any -reveal_type(A.take(0)) # E: Any -reveal_type(A.take([0])) # E: numpy.ndarray[Any, Any] -reveal_type(A.take(0, out=B)) # E: SubClass -reveal_type(A.take([0], out=B)) # E: SubClass +reveal_type(f8.take(0)) # E: {float64} +reveal_type(AR_f8.take(0)) # E: {float64} +reveal_type(AR_f8.take([0])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_f8.take(0, out=B)) # E: SubClass +reveal_type(AR_f8.take([0], out=B)) # E: SubClass reveal_type(f8.var()) # E: Any -reveal_type(A.var()) # E: Any -reveal_type(A.var(axis=0)) # E: Any -reveal_type(A.var(keepdims=True)) # E: Any -reveal_type(A.var(out=B)) # E: SubClass +reveal_type(AR_f8.var()) # E: Any +reveal_type(AR_f8.var(axis=0)) # E: Any +reveal_type(AR_f8.var(keepdims=True)) # E: Any +reveal_type(AR_f8.var(out=B)) # E: SubClass -reveal_type(A.argpartition([0])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.argpartition([0])) # E: numpy.ndarray[Any, Any] -reveal_type(A.diagonal()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.diagonal()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] -reveal_type(A.dot(1)) # E: Any -reveal_type(A.dot(1, out=B)) # E: SubClass +reveal_type(AR_f8.dot(1)) # E: Any +reveal_type(AR_f8.dot(1, out=B)) # E: SubClass -reveal_type(A.nonzero()) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(AR_f8.nonzero()) # E: tuple[numpy.ndarray[Any, Any]] -reveal_type(A.searchsorted([1])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.searchsorted([1])) # E: numpy.ndarray[Any, Any] -reveal_type(A.trace()) # E: Any -reveal_type(A.trace(out=B)) # E: SubClass +reveal_type(AR_f8.trace()) # E: Any +reveal_type(AR_f8.trace(out=B)) # E: SubClass + +reveal_type(AR_f8.item()) # E: float +reveal_type(AR_U.item()) # E: str diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py index fa94aa49b974..f50c46c43d50 100644 --- a/numpy/typing/tests/data/reveal/scalars.py +++ b/numpy/typing/tests/data/reveal/scalars.py @@ -67,3 +67,19 @@ reveal_type(np.clongdouble()) # E: {clongdouble} reveal_type(np.clongfloat()) # E: {clongdouble} reveal_type(np.longcomplex()) # E: {clongdouble} + +reveal_type(np.bool_().item()) # E: bool +reveal_type(np.int_().item()) 
# E: int +reveal_type(np.uint64().item()) # E: int +reveal_type(np.float32().item()) # E: float +reveal_type(np.complex128().item()) # E: complex +reveal_type(np.str_().item()) # E: str +reveal_type(np.bytes_().item()) # E: bytes + +reveal_type(np.bool_().tolist()) # E: bool +reveal_type(np.int_().tolist()) # E: int +reveal_type(np.uint64().tolist()) # E: int +reveal_type(np.float32().tolist()) # E: float +reveal_type(np.complex128().tolist()) # E: complex +reveal_type(np.str_().tolist()) # E: str +reveal_type(np.bytes_().tolist()) # E: bytes From 12d300b416e4529c9d23de954aa1d35112693f46 Mon Sep 17 00:00:00 2001 From: Lisa <34400837+lyzlisa@users.noreply.github.com> Date: Wed, 12 May 2021 12:42:23 -0400 Subject: [PATCH 1147/1270] MAINT: Rm deprecated ``mktemp()`` from io test suite (#18958) Refactor io tests to remove deprecated tempfile.mktemp instances. Replaces usage with pytest tmp_path built-in fixtures. Convert some other setup/teardown methods to pytest fixtures as well. Co-authored-by: alize-papp <68250865+alize-papp@users.noreply.github.com> --- numpy/core/tests/test_memmap.py | 19 +- numpy/core/tests/test_multiarray.py | 388 +++++++++++++++------------- 2 files changed, 220 insertions(+), 187 deletions(-) diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py index a1e0c8f8feab..e4f0a6b3f665 100644 --- a/numpy/core/tests/test_memmap.py +++ b/numpy/core/tests/test_memmap.py @@ -1,10 +1,9 @@ import sys import os -import shutil import mmap import pytest from pathlib import Path -from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp +from tempfile import NamedTemporaryFile, TemporaryFile from numpy import ( memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply) @@ -18,7 +17,6 @@ class TestMemmap: def setup(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') - self.tempdir = mkdtemp() self.shape = (3, 4) self.dtype = 'float32' self.data = arange(12, dtype=self.dtype) @@ -30,7 +28,6 @@ def teardown(self): if IS_PYPY: break_cycles() break_cycles() - shutil.rmtree(self.tempdir) def test_roundtrip(self): # Write data to file @@ -46,8 +43,8 @@ def test_roundtrip(self): assert_array_equal(self.data, newfp) assert_equal(newfp.flags.writeable, False) - def test_open_with_filename(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) + def test_open_with_filename(self, tmp_path): + tmpname = tmp_path / 'mmap' fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] @@ -67,11 +64,11 @@ def test_attributes(self): assert_equal(mode, fp.mode) del fp - def test_filename(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) + def test_filename(self, tmp_path): + tmpname = tmp_path / "mmap" fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) - abspath = os.path.abspath(tmpname) + abspath = Path(os.path.abspath(tmpname)) fp[:] = self.data[:] assert_equal(abspath, fp.filename) b = fp[:1] @@ -79,8 +76,8 @@ def test_filename(self): del b del fp - def test_path(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) + def test_path(self, tmp_path): + tmpname = tmp_path / "mmap" fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', shape=self.shape) # os.path.realpath does not resolve symlinks on Windows diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index b355c46187bd..5c91cb9ea534 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -1,7 +1,6 @@ import collections.abc import tempfile import sys -import shutil 
import warnings import operator import io @@ -4811,17 +4810,23 @@ def test_invalid_axis(self): # gh-7528 class TestIO: """Test tofile, fromfile, tobytes, and fromstring""" - def setup(self): + @pytest.fixture() + def x(self): shape = (2, 4, 3) rand = np.random.random - self.x = rand(shape) + rand(shape).astype(complex)*1j - self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan] - self.dtype = self.x.dtype - self.tempdir = tempfile.mkdtemp() - self.filename = tempfile.mktemp(dir=self.tempdir) + x = rand(shape) + rand(shape).astype(complex) * 1j + x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan] + return x - def teardown(self): - shutil.rmtree(self.tempdir) + @pytest.fixture(params=["string", "path_obj"]) + def tmp_filename(self, tmp_path, request): + # This fixture covers two cases: + # one where the filename is a string and + # another where it is a pathlib object + filename = tmp_path / "file" + if request.param == "string": + filename = str(filename) + yield filename def test_nofile(self): # this should probably be supported as a file @@ -4852,54 +4857,48 @@ def test_fromstring_count0(self): d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0) assert d.shape == (0,) - def test_empty_files_binary(self): - with open(self.filename, 'w') as f: + def test_empty_files_text(self, tmp_filename): + with open(tmp_filename, 'w') as f: pass - y = np.fromfile(self.filename) + y = np.fromfile(tmp_filename) assert_(y.size == 0, "Array not empty") - def test_empty_files_text(self): - with open(self.filename, 'wb') as f: + def test_empty_files_binary(self, tmp_filename): + with open(tmp_filename, 'wb') as f: pass - y = np.fromfile(self.filename, sep=" ") + y = np.fromfile(tmp_filename, sep=" ") assert_(y.size == 0, "Array not empty") - def test_roundtrip_file(self): - with open(self.filename, 'wb') as f: - self.x.tofile(f) + def test_roundtrip_file(self, x, tmp_filename): + with open(tmp_filename, 'wb') as f: + x.tofile(f) # NB. 
doesn't work with flush+seek, due to use of C stdio - with open(self.filename, 'rb') as f: - y = np.fromfile(f, dtype=self.dtype) - assert_array_equal(y, self.x.flat) - - def test_roundtrip_filename(self): - self.x.tofile(self.filename) - y = np.fromfile(self.filename, dtype=self.dtype) - assert_array_equal(y, self.x.flat) - - def test_roundtrip_pathlib(self): - p = pathlib.Path(self.filename) - self.x.tofile(p) - y = np.fromfile(p, dtype=self.dtype) - assert_array_equal(y, self.x.flat) - - def test_roundtrip_dump_pathlib(self): - p = pathlib.Path(self.filename) - self.x.dump(p) + with open(tmp_filename, 'rb') as f: + y = np.fromfile(f, dtype=x.dtype) + assert_array_equal(y, x.flat) + + def test_roundtrip(self, x, tmp_filename): + x.tofile(tmp_filename) + y = np.fromfile(tmp_filename, dtype=x.dtype) + assert_array_equal(y, x.flat) + + def test_roundtrip_dump_pathlib(self, x, tmp_filename): + p = pathlib.Path(tmp_filename) + x.dump(p) y = np.load(p, allow_pickle=True) - assert_array_equal(y, self.x) + assert_array_equal(y, x) - def test_roundtrip_binary_str(self): - s = self.x.tobytes() - y = np.frombuffer(s, dtype=self.dtype) - assert_array_equal(y, self.x.flat) + def test_roundtrip_binary_str(self, x): + s = x.tobytes() + y = np.frombuffer(s, dtype=x.dtype) + assert_array_equal(y, x.flat) - s = self.x.tobytes('F') - y = np.frombuffer(s, dtype=self.dtype) - assert_array_equal(y, self.x.flatten('F')) + s = x.tobytes('F') + y = np.frombuffer(s, dtype=x.dtype) + assert_array_equal(y, x.flatten('F')) - def test_roundtrip_str(self): - x = self.x.real.ravel() + def test_roundtrip_str(self, x): + x = x.real.ravel() s = "@".join(map(str, x)) y = np.fromstring(s, sep="@") # NB. str imbues less precision @@ -4907,79 +4906,79 @@ def test_roundtrip_str(self): assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5) - def test_roundtrip_repr(self): - x = self.x.real.ravel() + def test_roundtrip_repr(self, x): + x = x.real.ravel() s = "@".join(map(repr, x)) y = np.fromstring(s, sep="@") assert_array_equal(x, y) - def test_unseekable_fromfile(self): + def test_unseekable_fromfile(self, x, tmp_filename): # gh-6246 - self.x.tofile(self.filename) + x.tofile(tmp_filename) def fail(*args, **kwargs): raise IOError('Can not tell or seek') - with io.open(self.filename, 'rb', buffering=0) as f: + with io.open(tmp_filename, 'rb', buffering=0) as f: f.seek = fail f.tell = fail - assert_raises(IOError, np.fromfile, f, dtype=self.dtype) + assert_raises(IOError, np.fromfile, f, dtype=x.dtype) - def test_io_open_unbuffered_fromfile(self): + def test_io_open_unbuffered_fromfile(self, x, tmp_filename): # gh-6632 - self.x.tofile(self.filename) - with io.open(self.filename, 'rb', buffering=0) as f: - y = np.fromfile(f, dtype=self.dtype) - assert_array_equal(y, self.x.flat) + x.tofile(tmp_filename) + with io.open(tmp_filename, 'rb', buffering=0) as f: + y = np.fromfile(f, dtype=x.dtype) + assert_array_equal(y, x.flat) - def test_largish_file(self): + def test_largish_file(self, tmp_filename): # check the fallocate path on files > 16MB d = np.zeros(4 * 1024 ** 2) - d.tofile(self.filename) - assert_equal(os.path.getsize(self.filename), d.nbytes) - assert_array_equal(d, np.fromfile(self.filename)) + d.tofile(tmp_filename) + assert_equal(os.path.getsize(tmp_filename), d.nbytes) + assert_array_equal(d, np.fromfile(tmp_filename)) # check offset - with open(self.filename, "r+b") as f: + with open(tmp_filename, "r+b") as f: f.seek(d.nbytes) d.tofile(f) - 
assert_equal(os.path.getsize(self.filename), d.nbytes * 2) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) # check append mode (gh-8329) - open(self.filename, "w").close() # delete file contents - with open(self.filename, "ab") as f: + open(tmp_filename, "w").close() # delete file contents + with open(tmp_filename, "ab") as f: d.tofile(f) - assert_array_equal(d, np.fromfile(self.filename)) - with open(self.filename, "ab") as f: + assert_array_equal(d, np.fromfile(tmp_filename)) + with open(tmp_filename, "ab") as f: d.tofile(f) - assert_equal(os.path.getsize(self.filename), d.nbytes * 2) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) - def test_io_open_buffered_fromfile(self): + def test_io_open_buffered_fromfile(self, x, tmp_filename): # gh-6632 - self.x.tofile(self.filename) - with io.open(self.filename, 'rb', buffering=-1) as f: - y = np.fromfile(f, dtype=self.dtype) - assert_array_equal(y, self.x.flat) + x.tofile(tmp_filename) + with io.open(tmp_filename, 'rb', buffering=-1) as f: + y = np.fromfile(f, dtype=x.dtype) + assert_array_equal(y, x.flat) - def test_file_position_after_fromfile(self): + def test_file_position_after_fromfile(self, tmp_filename): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: - with open(self.filename, 'wb') as f: + with open(tmp_filename, 'wb') as f: f.seek(size-1) f.write(b'\0') for mode in ['rb', 'r+b']: err_msg = "%d %s" % (size, mode) - with open(self.filename, mode) as f: + with open(tmp_filename, mode) as f: f.read(2) np.fromfile(f, dtype=np.float64, count=1) pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_file_position_after_tofile(self): + def test_file_position_after_tofile(self, tmp_filename): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, @@ -4988,7 +4987,7 @@ def test_file_position_after_tofile(self): for size in sizes: err_msg = "%d" % (size,) - with open(self.filename, 'wb') as f: + with open(tmp_filename, 'wb') as f: f.seek(size-1) f.write(b'\0') f.seek(10) @@ -4997,58 +4996,62 @@ def test_file_position_after_tofile(self): pos = f.tell() assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) - with open(self.filename, 'r+b') as f: + with open(tmp_filename, 'r+b') as f: f.read(2) f.seek(0, 1) # seek between read&write required by ANSI C np.array([0], dtype=np.float64).tofile(f) pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_load_object_array_fromfile(self): + def test_load_object_array_fromfile(self, tmp_filename): # gh-12300 - with open(self.filename, 'w') as f: + with open(tmp_filename, 'w') as f: # Ensure we have a file with consistent contents pass - with open(self.filename, 'rb') as f: + with open(tmp_filename, 'rb') as f: assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, f, dtype=object) assert_raises_regex(ValueError, "Cannot read into object array", - np.fromfile, self.filename, dtype=object) - - def test_fromfile_offset(self): - with open(self.filename, 'wb') as f: - self.x.tofile(f) - - with open(self.filename, 'rb') as f: - y = np.fromfile(f, dtype=self.dtype, offset=0) - assert_array_equal(y, self.x.flat) - - with open(self.filename, 'rb') as f: - count_items = len(self.x.flat) // 8 - offset_items = len(self.x.flat) // 4 - offset_bytes = self.dtype.itemsize * offset_items - y = np.fromfile(f, dtype=self.dtype, count=count_items, offset=offset_bytes) - assert_array_equal(y, self.x.flat[offset_items:offset_items+count_items]) + np.fromfile, tmp_filename, dtype=object) + 
+ def test_fromfile_offset(self, x, tmp_filename): + with open(tmp_filename, 'wb') as f: + x.tofile(f) + + with open(tmp_filename, 'rb') as f: + y = np.fromfile(f, dtype=x.dtype, offset=0) + assert_array_equal(y, x.flat) + + with open(tmp_filename, 'rb') as f: + count_items = len(x.flat) // 8 + offset_items = len(x.flat) // 4 + offset_bytes = x.dtype.itemsize * offset_items + y = np.fromfile( + f, dtype=x.dtype, count=count_items, offset=offset_bytes + ) + assert_array_equal( + y, x.flat[offset_items:offset_items+count_items] + ) # subsequent seeks should stack - offset_bytes = self.dtype.itemsize - z = np.fromfile(f, dtype=self.dtype, offset=offset_bytes) - assert_array_equal(z, self.x.flat[offset_items+count_items+1:]) + offset_bytes = x.dtype.itemsize + z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes) + assert_array_equal(z, x.flat[offset_items+count_items+1:]) - with open(self.filename, 'wb') as f: - self.x.tofile(f, sep=",") + with open(tmp_filename, 'wb') as f: + x.tofile(f, sep=",") - with open(self.filename, 'rb') as f: + with open(tmp_filename, 'rb') as f: assert_raises_regex( TypeError, "'offset' argument only permitted for binary files", - np.fromfile, self.filename, dtype=self.dtype, + np.fromfile, tmp_filename, dtype=x.dtype, sep=",", offset=1) @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") - def test_fromfile_bad_dup(self): + def test_fromfile_bad_dup(self, x, tmp_filename): def dup_str(fd): return 'abc' @@ -5057,46 +5060,81 @@ def dup_bigint(fd): old_dup = os.dup try: - with open(self.filename, 'wb') as f: - self.x.tofile(f) + with open(tmp_filename, 'wb') as f: + x.tofile(f) for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): os.dup = dup assert_raises(exc, np.fromfile, f) finally: os.dup = old_dup - def _check_from(self, s, value, **kw): + def _check_from(self, s, value, filename, **kw): if 'sep' not in kw: y = np.frombuffer(s, **kw) else: y = np.fromstring(s, **kw) assert_array_equal(y, value) - with open(self.filename, 'wb') as f: + with open(filename, 'wb') as f: f.write(s) - y = np.fromfile(self.filename, **kw) + y = np.fromfile(filename, **kw) assert_array_equal(y, value) - def test_nan(self): + @pytest.fixture(params=["period", "comma"]) + def decimal_sep_localization(self, request): + """ + Including this fixture in a test will automatically + execute it with both types of decimal separator. 
+ + So:: + + def test_decimal(decimal_sep_localization): + pass + + is equivalent to the following two tests:: + + def test_decimal_period_separator(): + pass + + def test_decimal_comma_separator(): + with CommaDecimalPointLocale(): + pass + """ + if request.param == "period": + yield + elif request.param == "comma": + with CommaDecimalPointLocale(): + yield + else: + assert False, request.param + + def test_nan(self, tmp_filename, decimal_sep_localization): self._check_from( b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + tmp_filename, sep=' ') - def test_inf(self): + def test_inf(self, tmp_filename, decimal_sep_localization): self._check_from( b"inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], + tmp_filename, sep=' ') - def test_numbers(self): - self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133", - [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') + def test_numbers(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"1.234 -1.234 .3 .3e55 -123133.1231e+133", + [1.234, -1.234, .3, .3e55, -123133.1231e+133], + tmp_filename, + sep=' ') - def test_binary(self): - self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', - np.array([1, 2, 3, 4]), - dtype=' 1 minute on mechanical hard drive def test_big_binary(self): @@ -5123,91 +5161,89 @@ def test_big_binary(self): except (MemoryError, ValueError): pass - def test_string(self): - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',') + def test_string(self, tmp_filename): + self._check_from(b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, sep=',') - def test_counted_string(self): - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') - self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',') - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') + def test_counted_string(self, tmp_filename, decimal_sep_localization): + self._check_from( + b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=4, sep=',') + self._check_from( + b'1,2,3,4', [1., 2., 3.], tmp_filename, count=3, sep=',') + self._check_from( + b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=-1, sep=',') - def test_string_with_ws(self): - self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') + def test_string_with_ws(self, tmp_filename): + self._check_from( + b'1 2 3 4 ', [1, 2, 3, 4], tmp_filename, dtype=int, sep=' ') - def test_counted_string_with_ws(self): - self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int, - sep=' ') + def test_counted_string_with_ws(self, tmp_filename): + self._check_from( + b'1 2 3 4 ', [1, 2, 3], tmp_filename, count=3, dtype=int, + sep=' ') - def test_ascii(self): - self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') + def test_ascii(self, tmp_filename, decimal_sep_localization): + self._check_from( + b'1 , 2 , 3 , 4', [1., 2., 3., 4.], tmp_filename, sep=',') + self._check_from( + b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, dtype=float, sep=',') - def test_malformed(self): + def test_malformed(self, tmp_filename, decimal_sep_localization): with assert_warns(DeprecationWarning): - self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ') + self._check_from( + b'1.234 1,234', [1.234, 1.], tmp_filename, sep=' ') - def test_long_sep(self): - self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') + def test_long_sep(self, tmp_filename): + self._check_from( + 
b'1_x_3_x_4_x_5', [1, 3, 4, 5], tmp_filename, sep='_x_') - def test_dtype(self): + def test_dtype(self, tmp_filename): v = np.array([1, 2, 3, 4], dtype=np.int_) - self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_) + self._check_from(b'1,2,3,4', v, tmp_filename, sep=',', dtype=np.int_) - def test_dtype_bool(self): + def test_dtype_bool(self, tmp_filename): # can't use _check_from because fromstring can't handle True/False v = np.array([True, False, True, False], dtype=np.bool_) s = b'1,0,-2.3,0' - with open(self.filename, 'wb') as f: + with open(tmp_filename, 'wb') as f: f.write(s) - y = np.fromfile(self.filename, sep=',', dtype=np.bool_) + y = np.fromfile(tmp_filename, sep=',', dtype=np.bool_) assert_(y.dtype == '?') assert_array_equal(y, v) - def test_tofile_sep(self): + def test_tofile_sep(self, tmp_filename, decimal_sep_localization): x = np.array([1.51, 2, 3.51, 4], dtype=float) - with open(self.filename, 'w') as f: + with open(tmp_filename, 'w') as f: x.tofile(f, sep=',') - with open(self.filename, 'r') as f: + with open(tmp_filename, 'r') as f: s = f.read() #assert_equal(s, '1.51,2.0,3.51,4.0') y = np.array([float(p) for p in s.split(',')]) assert_array_equal(x,y) - def test_tofile_format(self): + def test_tofile_format(self, tmp_filename, decimal_sep_localization): x = np.array([1.51, 2, 3.51, 4], dtype=float) - with open(self.filename, 'w') as f: + with open(tmp_filename, 'w') as f: x.tofile(f, sep=',', format='%.2f') - with open(self.filename, 'r') as f: + with open(tmp_filename, 'r') as f: s = f.read() assert_equal(s, '1.51,2.00,3.51,4.00') - def test_tofile_cleanup(self): + def test_tofile_cleanup(self, tmp_filename): x = np.zeros((10), dtype=object) - with open(self.filename, 'wb') as f: + with open(tmp_filename, 'wb') as f: assert_raises(IOError, lambda: x.tofile(f, sep='')) # Dup-ed file handle should be closed or remove will fail on Windows OS - os.remove(self.filename) + os.remove(tmp_filename) # Also make sure that we close the Python handle - assert_raises(IOError, lambda: x.tofile(self.filename)) - os.remove(self.filename) - - def test_locale(self): - with CommaDecimalPointLocale(): - self.test_numbers() - self.test_nan() - self.test_inf() - self.test_counted_string() - self.test_ascii() - self.test_malformed() - self.test_tofile_sep() - self.test_tofile_format() - - def test_fromfile_subarray_binary(self): + assert_raises(IOError, lambda: x.tofile(tmp_filename)) + os.remove(tmp_filename) + + def test_fromfile_subarray_binary(self, tmp_filename): # Test subarray dtypes which are absorbed into the shape x = np.arange(24, dtype="i4").reshape(2, 3, 4) - x.tofile(self.filename) - res = np.fromfile(self.filename, dtype="(3,4)i4") + x.tofile(tmp_filename) + res = np.fromfile(tmp_filename, dtype="(3,4)i4") assert_array_equal(x, res) x_str = x.tobytes() @@ -5216,21 +5252,21 @@ def test_fromfile_subarray_binary(self): res = np.fromstring(x_str, dtype="(3,4)i4") assert_array_equal(x, res) - def test_parsing_subarray_unsupported(self): + def test_parsing_subarray_unsupported(self, tmp_filename): # We currently do not support parsing subarray dtypes data = "12,42,13," * 50 with pytest.raises(ValueError): expected = np.fromstring(data, dtype="(3,)i", sep=",") - with open(self.filename, "w") as f: + with open(tmp_filename, "w") as f: f.write(data) with pytest.raises(ValueError): - np.fromfile(self.filename, dtype="(3,)i", sep=",") + np.fromfile(tmp_filename, dtype="(3,)i", sep=",") - def test_read_shorter_than_count_subarray(self): + def 
test_read_shorter_than_count_subarray(self, tmp_filename): # Test that requesting more values does not cause any problems - # in conjuction with subarray dimensions being absored into the + # in conjunction with subarray dimensions being absorbed into the # array dimension. expected = np.arange(511 * 10, dtype="i").reshape(-1, 10) @@ -5239,8 +5275,8 @@ def test_read_shorter_than_count_subarray(self): with pytest.warns(DeprecationWarning): np.fromstring(binary, dtype="(10,)i", count=10000) - expected.tofile(self.filename) - res = np.fromfile(self.filename, dtype="(10,)i", count=10000) + expected.tofile(tmp_filename) + res = np.fromfile(tmp_filename, dtype="(10,)i", count=10000) assert_array_equal(res, expected) From afe51888ff107edbc71f54412c134bc9c2d67f1d Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 21:56:08 +0200 Subject: [PATCH 1148/1270] ENH: Add annotations for `np.DataSource` --- numpy/__init__.pyi | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b708b99ab7a7..ef127123778d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -558,13 +558,6 @@ __git_version__: str # # Placeholders for classes # TODO: Remove `__getattr__` once the classes are stubbed out -class DataSource: - def __init__(self, destpath: Any = ...) -> None: ... - def __del__(self): ... - def abspath(self, path): ... - def exists(self, path): ... - def open(self, path, mode=..., encoding=..., newline=...): ... - class MachAr: def __init__( self, @@ -3553,3 +3546,22 @@ class ndindex: def __init__(self, *shape: SupportsIndex) -> None: ... def __iter__(self: _T) -> _T: ... def __next__(self) -> _Shape: ... + +class DataSource: + def __init__( + self, + destpath: Union[None, str, os.PathLike[str]] = ..., + ) -> None: ... + def __del__(self) -> None: ... + def abspath(self, path: str) -> str: ... + def exists(self, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open( + self, + path: str, + mode: str = ..., + encoding: Optional[str] = ..., + newline: Optional[str] = ..., + ) -> IO[Any]: ... From 97031c1b82c6d86cfad9c3ecded3971402e6e8c9 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 9 Dec 2020 13:15:22 -0600 Subject: [PATCH 1149/1270] MAINT: Implement new style promotion for `np.result_type`, etc. This implements a new style promotion scheme for mutiple inputs as are passed to `np.result_type`. It should be noted that result-type heavily relies on "value-based casting" logic/promotion logic. This fact is inherited to many places. When user DTypes are introduced some of these places should potentially use "weak DType" logic instead if the input is a Python scalar. This is *not* yet implemented. It would be necessary to mark these the same way as is currently done for `np.result_type`. This type of "marking" will generally be necessary in a few places, even if may get away with just switching the logic everywhere. 
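For orientation, the behaviour this commit reworks can be sketched from the Python side as follows. The results shown are what NumPy releases of this era produce; the value-based cases are exactly the ones the "weak DType" transition described above may eventually change, so treat them as illustrative rather than contractual.

import numpy as np

# Promotion of plain dtypes is purely type based.
np.result_type(np.int8, np.uint8)       # -> dtype('int16')
np.result_type(np.float32, np.int64)    # -> dtype('float64')

# Python scalars and 0-D arrays participate via value-based promotion:
# 1 fits into uint8, so the array dtype is kept; -1 does not, so the
# result widens to the smallest signed type that can hold both.
np.result_type(np.array([1], dtype=np.uint8), 1)     # -> dtype('uint8')
np.result_type(np.array([1], dtype=np.uint8), -1)    # -> dtype('int16')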
--- numpy/core/code_generators/genapi.py | 1 + numpy/core/include/numpy/ndarraytypes.h | 14 + numpy/core/setup.py | 2 + .../src/multiarray/_multiarray_tests.c.src | 14 - numpy/core/src/multiarray/abstractdtypes.c | 168 ++++++- numpy/core/src/multiarray/abstractdtypes.h | 2 +- numpy/core/src/multiarray/array_coercion.c | 7 +- numpy/core/src/multiarray/array_coercion.h | 2 + numpy/core/src/multiarray/array_method.c | 1 + numpy/core/src/multiarray/common_dtype.c | 318 +++++++++++++ numpy/core/src/multiarray/common_dtype.h | 17 + numpy/core/src/multiarray/convert_datatype.c | 438 +++++++++++++----- numpy/core/src/multiarray/convert_datatype.h | 8 +- numpy/core/src/multiarray/dtypemeta.c | 9 + .../multiarray/legacy_dtype_implementation.c | 183 +------- .../multiarray/legacy_dtype_implementation.h | 32 -- numpy/core/src/multiarray/methods.c | 66 +-- numpy/core/src/multiarray/multiarraymodule.c | 13 +- numpy/core/src/multiarray/number.c | 3 + numpy/core/tests/test_casting_unittests.py | 23 +- 20 files changed, 902 insertions(+), 419 deletions(-) create mode 100644 numpy/core/src/multiarray/common_dtype.c create mode 100644 numpy/core/src/multiarray/common_dtype.h diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 4a4f3b6e7649..c2458c2b5d80 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -31,6 +31,7 @@ join('multiarray', 'arraytypes.c.src'), join('multiarray', 'buffer.c'), join('multiarray', 'calculation.c'), + join('multiarray', 'common_dtype.c'), join('multiarray', 'conversion_utils.c'), join('multiarray', 'convert.c'), join('multiarray', 'convert_datatype.c'), diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index dacb720226bb..9ff8c9831952 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -856,6 +856,17 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); */ #define NPY_ARRAY_ENSUREARRAY 0x0040 +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* + * Dual use the ENSUREARRAY flag, to indicate that this was a converted + * python float, int, or complex. + * An array using this flag must be a temporary array that can never + * leave the C internals of NumPy. Even if it does, ENSUREARRAY is + * absolutely safe to abuse, since it alraedy is be a base class array :). + */ + #define _NPY_ARRAY_WAS_PYSCALAR 0x0040 +#endif /* NPY_INTERNAL_BUILD */ + /* * Make sure that the strides are in units of the element size Needed * for some operations with record-arrays. 
@@ -1867,6 +1878,8 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls); typedef PyArray_DTypeMeta *(common_dtype_function)( PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtyep2); + typedef PyArray_DTypeMeta *(common_dtype_with_value_function)( + PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtyep2, PyObject *value); typedef PyArray_Descr *(common_instance_function)( PyArray_Descr *dtype1, PyArray_Descr *dtyep2); @@ -1925,6 +1938,7 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, is_known_scalar_type_function *is_known_scalar_type; default_descr_function *default_descr; common_dtype_function *common_dtype; + common_dtype_with_value_function *common_dtype_with_value; common_instance_function *common_instance; /* * The casting implementation (ArrayMethod) to convert between two diff --git a/numpy/core/setup.py b/numpy/core/setup.py index d1229ee8f143..2af2426dd245 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -787,6 +787,7 @@ def get_mathlib_info(*args): join('src', 'multiarray', 'npy_buffer.h'), join('src', 'multiarray', 'calculation.h'), join('src', 'multiarray', 'common.h'), + join('src', 'multiarray', 'common_dtype.h'), join('src', 'multiarray', 'convert_datatype.h'), join('src', 'multiarray', 'convert.h'), join('src', 'multiarray', 'conversion_utils.h'), @@ -849,6 +850,7 @@ def get_mathlib_info(*args): join('src', 'multiarray', 'calculation.c'), join('src', 'multiarray', 'compiled_base.c'), join('src', 'multiarray', 'common.c'), + join('src', 'multiarray', 'common_dtype.c'), join('src', 'multiarray', 'convert.c'), join('src', 'multiarray', 'convert_datatype.c'), join('src', 'multiarray', 'conversion_utils.c'), diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src index febcc8512750..859d20b516e1 100644 --- a/numpy/core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/core/src/multiarray/_multiarray_tests.c.src @@ -2140,17 +2140,6 @@ getset_numericops(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args)) } -static PyObject * -uses_new_casts(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args)) -{ -#if NPY_USE_NEW_CASTINGIMPL - Py_RETURN_TRUE; -#else - Py_RETURN_FALSE; -#endif -} - - static PyObject * run_byteorder_converter(PyObject* NPY_UNUSED(self), PyObject *args) { @@ -2407,9 +2396,6 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"getset_numericops", getset_numericops, METH_NOARGS, NULL}, - {"uses_new_casts", - uses_new_casts, - METH_NOARGS, NULL}, /**begin repeat * #name = cabs, carg# */ diff --git a/numpy/core/src/multiarray/abstractdtypes.c b/numpy/core/src/multiarray/abstractdtypes.c index 02c0eac53d7b..5fff1e6e442c 100644 --- a/numpy/core/src/multiarray/abstractdtypes.c +++ b/numpy/core/src/multiarray/abstractdtypes.c @@ -13,6 +13,12 @@ #include "common.h" +static NPY_INLINE PyArray_Descr * +int_default_descriptor(PyArray_DTypeMeta* NPY_UNUSED(cls)) +{ + return PyArray_DescrFromType(NPY_LONG); +} + static PyArray_Descr * discover_descriptor_from_pyint( PyArray_DTypeMeta *NPY_UNUSED(cls), PyObject *obj) @@ -45,6 +51,13 @@ discover_descriptor_from_pyint( } +static NPY_INLINE PyArray_Descr * +float_default_descriptor(PyArray_DTypeMeta* NPY_UNUSED(cls)) +{ + return PyArray_DescrFromType(NPY_DOUBLE); +} + + static PyArray_Descr* discover_descriptor_from_pyfloat( PyArray_DTypeMeta* NPY_UNUSED(cls), PyObject *obj) @@ -53,6 +66,11 @@ discover_descriptor_from_pyfloat( return 
PyArray_DescrFromType(NPY_DOUBLE); } +static NPY_INLINE PyArray_Descr * +complex_default_descriptor(PyArray_DTypeMeta* NPY_UNUSED(cls)) +{ + return PyArray_DescrFromType(NPY_CDOUBLE); +} static PyArray_Descr* discover_descriptor_from_pycomplex( @@ -66,21 +84,17 @@ discover_descriptor_from_pycomplex( NPY_NO_EXPORT int initialize_and_map_pytypes_to_dtypes() { - PyArrayAbstractObjDTypeMeta_Type.tp_base = &PyArrayDTypeMeta_Type; - if (PyType_Ready(&PyArrayAbstractObjDTypeMeta_Type) < 0) { - return -1; - } - ((PyTypeObject *)&PyArray_PyIntAbstractDType)->tp_base = &PyArrayDTypeMeta_Type; + ((PyTypeObject *)&PyArray_PyIntAbstractDType)->tp_base = &PyArrayDescr_Type; PyArray_PyIntAbstractDType.scalar_type = &PyLong_Type; if (PyType_Ready((PyTypeObject *)&PyArray_PyIntAbstractDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyFloatAbstractDType)->tp_base = &PyArrayDTypeMeta_Type; + ((PyTypeObject *)&PyArray_PyFloatAbstractDType)->tp_base = &PyArrayDescr_Type; PyArray_PyFloatAbstractDType.scalar_type = &PyFloat_Type; if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatAbstractDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyComplexAbstractDType)->tp_base = &PyArrayDTypeMeta_Type; + ((PyTypeObject *)&PyArray_PyComplexAbstractDType)->tp_base = &PyArrayDescr_Type; PyArray_PyComplexAbstractDType.scalar_type = &PyComplex_Type; if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexAbstractDType) < 0) { return -1; @@ -126,43 +140,147 @@ initialize_and_map_pytypes_to_dtypes() } +/* + * The following functions define the "common DType" for the abstract dtypes. + * + * Note that the logic with resupt to the "higher" dtypes such as floats + * could likely be more logically defined for them, but since NumPy dtypes + * largely "know" each other, that is not necessary. + */ +static PyArray_DTypeMeta * +int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other) +{ + if (other->legacy && other->type_num < NPY_NTYPES) { + if (other->type_num == NPY_BOOL) { + /* Use the default integer for bools: */ + return PyArray_DTypeFromTypeNum(NPY_LONG); + } + else if (PyTypeNum_ISNUMBER(other->type_num) || + other->type_num == NPY_TIMEDELTA) { + /* All other numeric types (ant timdelta) are preserved: */ + Py_INCREF(other); + return other; + } + } + else if (other->legacy) { + /* This is a back-compat fallback to usually do the right thing... */ + return PyArray_DTypeFromTypeNum(NPY_UINT8); + } + Py_INCREF(Py_NotImplemented); + return (PyArray_DTypeMeta *)Py_NotImplemented; +} -/* Note: This is currently largely not used, but will be required eventually. 
*/ -NPY_NO_EXPORT PyTypeObject PyArrayAbstractObjDTypeMeta_Type = { - PyVarObject_HEAD_INIT(NULL, 0) - .tp_name = "numpy._AbstractObjDTypeMeta", - .tp_basicsize = sizeof(PyArray_DTypeMeta), - .tp_flags = Py_TPFLAGS_DEFAULT, - .tp_doc = "Helper MetaClass for value based casting AbstractDTypes.", -}; +static PyArray_DTypeMeta * +float_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) +{ + if (other->legacy && other->type_num < NPY_NTYPES) { + if (other->type_num == NPY_BOOL || PyTypeNum_ISINTEGER(other->type_num)) { + /* Use the default integer for bools and ints: */ + return PyArray_DTypeFromTypeNum(NPY_DOUBLE); + } + else if (PyTypeNum_ISNUMBER(other->type_num)) { + /* All other numeric types (float+complex) are preserved: */ + Py_INCREF(other); + return other; + } + } + else if (other == &PyArray_PyIntAbstractDType) { + Py_INCREF(cls); + return cls; + } + else if (other->legacy) { + /* This is a back-compat fallback to usually do the right thing... */ + return PyArray_DTypeFromTypeNum(NPY_HALF); + } + Py_INCREF(Py_NotImplemented); + return (PyArray_DTypeMeta *)Py_NotImplemented; +} + + +static PyArray_DTypeMeta * +complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) +{ + if (other->legacy && other->type_num < NPY_NTYPES) { + if (other->type_num == NPY_BOOL || + PyTypeNum_ISINTEGER(other->type_num)) { + /* Use the default integer for bools and ints: */ + return PyArray_DTypeFromTypeNum(NPY_CDOUBLE); + } + else if (PyTypeNum_ISFLOAT(other->type_num)) { + /* + * For floats we choose the equivalent precision complex, although + * there is no CHALF, so half also goes to CFLOAT. + */ + if (other->type_num == NPY_HALF || other->type_num == NPY_FLOAT) { + return PyArray_DTypeFromTypeNum(NPY_CFLOAT); + } + if (other->type_num == NPY_DOUBLE) { + return PyArray_DTypeFromTypeNum(NPY_CDOUBLE); + } + assert(other->type_num == NPY_LONGDOUBLE); + return PyArray_DTypeFromTypeNum(NPY_CLONGDOUBLE); + } + else if (PyTypeNum_ISCOMPLEX(other->type_num)) { + /* All other numeric types are preserved: */ + Py_INCREF(other); + return other; + } + } + else if (other->legacy) { + /* This is a back-compat fallback to usually do the right thing... */ + return PyArray_DTypeFromTypeNum(NPY_CFLOAT); + } + else if (other == &PyArray_PyIntAbstractDType || + other == &PyArray_PyFloatAbstractDType) { + Py_INCREF(cls); + return cls; + } + Py_INCREF(Py_NotImplemented); + return (PyArray_DTypeMeta *)Py_NotImplemented; +} + + +/* + * TODO: These abstract DTypes also carry the dual role of representing + * `Floating`, `Complex`, and `Integer` (both signed and unsigned). + * They will have to be renamed and exposed in that capacity. 
+ */ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyIntAbstractDType = {{{ - PyVarObject_HEAD_INIT(&PyArrayAbstractObjDTypeMeta_Type, 0) - .tp_basicsize = sizeof(PyArray_DTypeMeta), - .tp_name = "numpy._PyIntBaseAbstractDType", + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_name = "numpy._IntegerAbstractDType", },}, .abstract = 1, + .default_descr = int_default_descriptor, .discover_descr_from_pyobject = discover_descriptor_from_pyint, + .common_dtype = int_common_dtype, .kind = 'i', }; NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatAbstractDType = {{{ - PyVarObject_HEAD_INIT(&PyArrayAbstractObjDTypeMeta_Type, 0) - .tp_basicsize = sizeof(PyArray_DTypeMeta), - .tp_name = "numpy._PyFloatBaseAbstractDType", + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_name = "numpy._FloatAbstractDType", },}, .abstract = 1, + .default_descr = float_default_descriptor, .discover_descr_from_pyobject = discover_descriptor_from_pyfloat, + .common_dtype = float_common_dtype, .kind = 'f', }; NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexAbstractDType = {{{ - PyVarObject_HEAD_INIT(&PyArrayAbstractObjDTypeMeta_Type, 0) - .tp_basicsize = sizeof(PyArray_DTypeMeta), - .tp_name = "numpy._PyComplexBaseAbstractDType", + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_name = "numpy._ComplexAbstractDType", },}, .abstract = 1, + .default_descr = complex_default_descriptor, .discover_descr_from_pyobject = discover_descriptor_from_pycomplex, + .common_dtype = complex_common_dtype, .kind = 'c', }; - diff --git a/numpy/core/src/multiarray/abstractdtypes.h b/numpy/core/src/multiarray/abstractdtypes.h index 3a982cd38a0e..a6c526717032 100644 --- a/numpy/core/src/multiarray/abstractdtypes.h +++ b/numpy/core/src/multiarray/abstractdtypes.h @@ -3,12 +3,12 @@ #include "dtypemeta.h" + /* * These are mainly needed for value based promotion in ufuncs. It * may be necessary to make them (partially) public, to allow user-defined * dtypes to perform value based casting. */ -NPY_NO_EXPORT extern PyTypeObject PyArrayAbstractObjDTypeMeta_Type; NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyIntAbstractDType; NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyFloatAbstractDType; NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyComplexAbstractDType; diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index ef99ae479bcf..0c4195d51f4b 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -11,6 +11,7 @@ #include "descriptor.h" #include "convert_datatype.h" +#include "common_dtype.h" #include "dtypemeta.h" #include "array_coercion.h" @@ -204,8 +205,8 @@ _PyArray_MapPyTypeToDType( * @param pytype Python Type to look up * @return DType, None if it a known non-scalar, or NULL if an unknown object. 
*/ -static NPY_INLINE PyArray_DTypeMeta * -discover_dtype_from_pytype(PyTypeObject *pytype) +NPY_NO_EXPORT NPY_INLINE PyArray_DTypeMeta * +npy_discover_dtype_from_pytype(PyTypeObject *pytype) { PyObject *DType; @@ -263,7 +264,7 @@ discover_dtype_from_pyobject( } } - PyArray_DTypeMeta *DType = discover_dtype_from_pytype(Py_TYPE(obj)); + PyArray_DTypeMeta *DType = npy_discover_dtype_from_pytype(Py_TYPE(obj)); if (DType != NULL) { return DType; } diff --git a/numpy/core/src/multiarray/array_coercion.h b/numpy/core/src/multiarray/array_coercion.h index 90ce0355a11c..d4ebeaf14ef9 100644 --- a/numpy/core/src/multiarray/array_coercion.h +++ b/numpy/core/src/multiarray/array_coercion.h @@ -15,6 +15,8 @@ typedef struct coercion_cache_obj { int depth; /* the dimension at which this object was found. */ } coercion_cache_obj; +NPY_NO_EXPORT PyArray_DTypeMeta * +npy_discover_dtype_from_pytype(PyTypeObject *pytype); NPY_NO_EXPORT int _PyArray_MapPyTypeToDType( diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c index 2cc075141c1c..e13da12de5f9 100644 --- a/numpy/core/src/multiarray/array_method.c +++ b/numpy/core/src/multiarray/array_method.c @@ -34,6 +34,7 @@ #include "arrayobject.h" #include "array_method.h" #include "dtypemeta.h" +#include "common_dtype.h" #include "convert_datatype.h" diff --git a/numpy/core/src/multiarray/common_dtype.c b/numpy/core/src/multiarray/common_dtype.c new file mode 100644 index 000000000000..0db368df1e1f --- /dev/null +++ b/numpy/core/src/multiarray/common_dtype.c @@ -0,0 +1,318 @@ +#define PY_SSIZE_T_CLEAN +#include + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#include +#include "numpy/arrayobject.h" + +#include "common_dtype.h" +#include "dtypemeta.h" +#include "abstractdtypes.h" + + +/* + * This file defines all logic necessary for generic "common dtype" + * operations. This is unfortunately surprisingly complicated to get right + * due to the value based logic NumPy uses and the fact that NumPy has + * no clear (non-transitive) type promotion hierarchy. + * Unlike most languages `int32 + float2 -> float64` instead of `float2`. + * The other complicated thing is value-based-promotion, which means that + * in many cases a Python 1, may end up as an `int8` or `uint8`. + * + * This file implements the necessary logic so that `np.result_type(...)` + * can give the correct result for any order of inputs and can further + * generalize to user DTypes. + */ + + +/** + * This function defines the common DType operator. + * + * Note that the common DType will not be "object" (unless one of the dtypes + * is object), even though object can technically represent all values + * correctly. + * + * TODO: Before exposure, we should review the return value (e.g. no error + * when no common DType is found). + * + * @param dtype1 DType class to find the common type for. + * @param dtype2 Second DType class. 
+ * @return The common DType or NULL with an error set + */ +NPY_NO_EXPORT NPY_INLINE PyArray_DTypeMeta * +PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) +{ + if (dtype1 == dtype2) { + Py_INCREF(dtype1); + return dtype1; + } + + PyArray_DTypeMeta *common_dtype; + + common_dtype = dtype1->common_dtype(dtype1, dtype2); + if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { + Py_DECREF(common_dtype); + common_dtype = dtype2->common_dtype(dtype2, dtype1); + } + if (common_dtype == NULL) { + return NULL; + } + if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { + Py_DECREF(Py_NotImplemented); + PyErr_Format(PyExc_TypeError, + "The DTypes %S and %S do not have a common DType. " + "For example they cannot be stored in a single array unless " + "the dtype is `object`.", dtype1, dtype2); + return NULL; + } + return common_dtype; +} + + +/** + * This function takes a list of dtypes and "reduces" them (in a sense, + * it finds the maximal dtype). Note that "maximum" here is defined by + * knowledge (or category or domain). A user DType must always "know" + * about all NumPy dtypes, floats "know" about integers, integers "know" + * about unsigned integers. + * + * c + * / \ + * a \ <-- The actual promote(a, b) may be c or unknown. + * / \ \ + * a b c + * + * The reduction is done "pairwise". In the above `a.__common_dtype__(b)` + * has a result (so `a` knows more) and `a.__common_dtype__(c)` returns + * NotImplemented (so `c` knows more). You may notice that the result + * `res = a.__common_dtype__(b)` is not important. We could try to use it + * to remove the whole branch if `res is c` or by checking if + * `c.__common_dtype(res) is c`. + * Right now, we only clear initial elements in the most simple case where + * `a.__common_dtype(b) is a` (and thus `b` cannot alter the end-result). + * Clearing means, we do not have to worry about them later. + * + * There is one further subtlety. If we have an abstract DType and a + * non-abstract one, we "prioritize" the non-abstract DType here. + * In this sense "prioritizing" means that we use: + * abstract.__common_dtype__(other) + * If both return NotImplemented (which is acceptable and even expected in + * this case, see later) then `other` will be considered to know more. + * + * The reason why this may be acceptable for abstract DTypes, is that + * the value-dependent abstract DTypes may provide default fall-backs. + * The priority inversion effectively means that abstract DTypes are ordered + * just below their concrete counterparts. + * (This fall-back is convenient but not perfect, it can lead to + * non-minimal promotions: e.g. `np.uint24 + 2**20 -> int32`. And such + * cases may also be possible in some mixed type scenarios; they can be + * avoided by defining the promotion explicitly in the user DType.) + * + * @param length Number of DTypes + * @param dtypes + */ +static PyArray_DTypeMeta * +reduce_dtypes_to_most_knowledgeable( + npy_intp length, PyArray_DTypeMeta **dtypes) +{ + assert(length >= 2); + npy_intp half = length / 2; + + PyArray_DTypeMeta *res = NULL; + + for (npy_intp low = 0; low < half; low++) { + npy_intp high = length - 1 - low; + if (dtypes[high] == dtypes[low]) { + Py_INCREF(dtypes[low]); + Py_XSETREF(res, dtypes[low]); + } + else { + if (dtypes[high]->abstract) { + /* + * Priority inversion, start with abstract, because if it + * returns `other`, we can let other pass instead. 
+ */ + PyArray_DTypeMeta *tmp = dtypes[low]; + dtypes[low] = dtypes[high]; + dtypes[high] = tmp; + } + + Py_XSETREF(res, dtypes[low]->common_dtype(dtypes[low], dtypes[high])); + if (res == NULL) { + return NULL; + } + } + + if (res == (PyArray_DTypeMeta *)Py_NotImplemented) { + PyArray_DTypeMeta *tmp = dtypes[low]; + dtypes[low] = dtypes[high]; + dtypes[high] = tmp; + } + if (res == dtypes[low]) { + /* `dtypes[high]` cannot influence the final result, so clear: */ + dtypes[high] = NULL; + } + } + + if (length == 2) { + return res; + } + Py_DECREF(res); + return reduce_dtypes_to_most_knowledgeable(length - half, dtypes); +} + + +/** + * Promotes a list of DTypes with each other in a way that should guarantee + * stable results even when changing the order. + * + * In general this approach always works as long as the most generic dtype + * is either strictly larger, or compatible with all other dtypes. + * For example promoting float16 with any other float, integer, or unsigned + * integer again gives a floating point number. And any floating point number + * promotes in the "same way" as `float16`. + * If a user inserts more than one type into the NumPy type hierarchy, this + * can break. Given: + * uint24 + int32 -> int48 # Promotes to a *new* dtype! + * + * The following becomes problematic (order does not matter): + * uint24 + int16 + uint32 -> int64 + * <== (uint24 + int16) + (uint24 + uint32) -> int64 + * <== int32 + uint32 -> int64 + * + * It is impossible to achieve an `int48` result in the above. + * + * This is probably only resolvable by asking `uint24` to take over the + * whole reduction step; which we currently do not do. + * (It may be possible to notice the last up-cast and implement use something + * like: `uint24.nextafter(int32).__common_dtype__(uint32)`, but that seems + * even harder to grasp.) + * + * Note that a case where two dtypes are mixed (and know nothing about each + * other) will always generate an error: + * uint24 + int48 + int64 -> Error + * + * Even though `int64` is a safe solution, since `uint24 + int64 -> int64` and + * `int48 + int64 -> int64` and `int64` and there cannot be a smaller solution. + * + * //TODO: Maybe this function should allow not setting an error? + * + * @param length Number of dtypes (and values) must be at least 1 + * @param dtypes The concrete or abstract DTypes to promote + * @return NULL or the promoted DType. + */ +NPY_NO_EXPORT PyArray_DTypeMeta * +PyArray_PromoteDTypeSequence( + npy_intp length, PyArray_DTypeMeta **dtypes_in) +{ + if (length == 1) { + Py_INCREF(dtypes_in[0]); + return dtypes_in[0]; + } + PyArray_DTypeMeta *result = NULL; + + /* Copy dtypes so that we can reorder them (only allocate when many) */ + PyObject *_scratch_stack[NPY_MAXARGS]; + PyObject **_scratch_heap = NULL; + PyArray_DTypeMeta **dtypes = (PyArray_DTypeMeta **)_scratch_stack; + + if (length > NPY_MAXARGS) { + _scratch_heap = PyMem_Malloc(length * sizeof(PyObject *)); + if (_scratch_heap == NULL) { + PyErr_NoMemory(); + return NULL; + } + dtypes = (PyArray_DTypeMeta **)_scratch_heap; + } + + memcpy(dtypes, dtypes_in, length * sizeof(PyObject *)); + + /* + * `result` is the last promotion result, which can usually be reused if + * it is not NotImplemneted. + * The passed in dtypes are partially sorted (and cleared, when clearly + * not relevant anymore). + * `dtypes[0]` will be the most knowledgeable (highest category) which + * we consider the "main_dtype" here. 
+ */ + result = reduce_dtypes_to_most_knowledgeable(length, dtypes); + if (result == NULL) { + goto finish; + } + PyArray_DTypeMeta *main_dtype = dtypes[0]; + + npy_intp reduce_start = 1; + if (result == (PyArray_DTypeMeta *)Py_NotImplemented) { + Py_SETREF(result, NULL); + } + else { + /* (new) first value is already taken care of in `result` */ + reduce_start = 2; + } + /* + * At this point, we have only looked at every DType at most once. + * The `main_dtype` must know all others (or it will be a failure) and + * all dtypes returned by its `common_dtype` must be guaranteed to succeed + * promotion with one another. + * It is the job of the "main DType" to ensure that at this point order + * is irrelevant. + * If this turns out to be a limitation, this "reduction" will have to + * become a default version and we have to allow DTypes to override it. + */ + PyArray_DTypeMeta *prev = NULL; + for (npy_intp i = reduce_start; i < length; i++) { + if (dtypes[i] == NULL || dtypes[i] == prev) { + continue; + } + /* + * "Promote" the current dtype with the main one (which should be + * a higher category). We assume that the result is not in a lower + * category. + */ + PyArray_DTypeMeta *promotion = main_dtype->common_dtype( + main_dtype, dtypes[i]); + if (promotion == NULL) { + Py_XSETREF(result, NULL); + goto finish; + } + else if ((PyObject *)promotion == Py_NotImplemented) { + Py_DECREF(Py_NotImplemented); + Py_XSETREF(result, NULL); + PyObject *dtypes_in_tuple = PyTuple_New(length); + if (dtypes_in_tuple == NULL) { + goto finish; + } + for (npy_intp l=0; l < length; l++) { + Py_INCREF(dtypes_in[l]); + PyTuple_SET_ITEM(dtypes_in_tuple, l, (PyObject *)dtypes_in[l]); + } + PyErr_Format(PyExc_TypeError, + "The DType %S could not be promoted by %S. This means that " + "no common DType exists for the given inputs. " + "For example they cannot be stored in a single array unless " + "the dtype is `object`. The full list of DTypes is: %S", + dtypes[i], main_dtype, dtypes_in_tuple); + Py_DECREF(dtypes_in_tuple); + goto finish; + } + if (result == NULL) { + result = promotion; + continue; + } + + /* + * The above promoted, now "reduce" with the current result; note that + * in the typical cases we expect this step to be a no-op. 
+ */ + Py_SETREF(result, PyArray_CommonDType(result, promotion)); + Py_DECREF(promotion); + if (result == NULL) { + goto finish; + } + } + + finish: + PyMem_Free(_scratch_heap); + return result; +} diff --git a/numpy/core/src/multiarray/common_dtype.h b/numpy/core/src/multiarray/common_dtype.h new file mode 100644 index 000000000000..b3666531a532 --- /dev/null +++ b/numpy/core/src/multiarray/common_dtype.h @@ -0,0 +1,17 @@ +#ifndef _NPY_COMMON_DTYPE_H_ +#define _NPY_COMMON_DTYPE_H_ + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include +#include "dtypemeta.h" + +NPY_NO_EXPORT PyArray_DTypeMeta * +PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2); + +NPY_NO_EXPORT PyArray_DTypeMeta * +PyArray_PromoteDTypeSequence( + npy_intp length, PyArray_DTypeMeta **dtypes_in); + +#endif /* _NPY_COMMON_DTYPE_H_ */ diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 8dca184cb52e..01ee56d1681d 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -17,10 +17,12 @@ #include "common.h" #include "ctors.h" #include "dtypemeta.h" +#include "common_dtype.h" #include "scalartypes.h" #include "mapping.h" #include "legacy_dtype_implementation.h" +#include "abstractdtypes.h" #include "convert_datatype.h" #include "_datetime.h" #include "datetime_strings.h" @@ -98,7 +100,7 @@ PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) else if (from->type_num < NPY_NTYPES && to->type_num < NPY_NTYPES) { /* All builtin dtypes have their casts explicitly defined. */ PyErr_Format(PyExc_RuntimeError, - "builtin cast from %S to %s not found, this should not " + "builtin cast from %S to %S not found, this should not " "be possible.", from, to); return NULL; } @@ -431,7 +433,24 @@ PyArray_GetCastSafety( NPY_NO_EXPORT int PyArray_CanCastSafely(int fromtype, int totype) { -#if NPY_USE_NEW_CASTINGIMPL + /* Identity */ + if (fromtype == totype) { + return 1; + } + /* + * As a micro-optimization, keep the cast table around. This can probably + * be removed as soon as the ufunc loop lookup is modified (presumably + * before the 1.21 release). It does no harm, but the main user of this + * function is the ufunc-loop lookup calling it until a loop matches! + * + * (The table extends further, but is not strictly correct for void). + * TODO: Check this! 
+ */ + if ((unsigned int)fromtype <= NPY_CLONGDOUBLE && + (unsigned int)totype <= NPY_CLONGDOUBLE) { + return _npy_can_cast_safely_table[fromtype][totype]; + } + PyArray_DTypeMeta *from = PyArray_DTypeFromTypeNum(fromtype); if (from == NULL) { PyErr_WriteUnraisable(NULL); @@ -458,9 +477,6 @@ PyArray_CanCastSafely(int fromtype, int totype) int res = PyArray_MinCastSafety(safety, NPY_SAFE_CASTING) == NPY_SAFE_CASTING; Py_DECREF(castingimpl); return res; -#else - return PyArray_LegacyCanCastSafely(fromtype, totype); -#endif } @@ -474,11 +490,7 @@ PyArray_CanCastSafely(int fromtype, int totype) NPY_NO_EXPORT npy_bool PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) { -#if NPY_USE_NEW_CASTINGIMPL return PyArray_CanCastTypeTo(from, to, NPY_SAFE_CASTING); -#else - return PyArray_LegacyCanCastTo(from, to); -#endif } @@ -553,7 +565,6 @@ NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, NPY_CASTING casting) { -#if NPY_USE_NEW_CASTINGIMPL /* * NOTE: This code supports U and S, this is identical to the code * in `ctors.c` which does not allow these dtypes to be attached @@ -580,9 +591,6 @@ PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, } /* If casting is the smaller (or equal) safety we match */ return PyArray_MinCastSafety(safety, casting) == casting; -#else - return PyArray_LegacyCanCastTypeTo(from, to, casting); -#endif } @@ -590,18 +598,15 @@ PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, static int min_scalar_type_num(char *valueptr, int type_num, int *is_small_unsigned); + +/* + * NOTE: This function uses value based casting logic for scalars. It will + * require updates when we phase out value-based-casting. + */ NPY_NO_EXPORT npy_bool can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, PyArray_Descr *to, NPY_CASTING casting) { - int swap; - int is_small_unsigned = 0, type_num; - npy_bool ret; - PyArray_Descr *dtype; - - /* An aligned memory buffer large enough to hold any type */ - npy_longlong value[4]; - /* * If the two dtypes are actually references to the same object * or if casting type is forced unsafe then always OK. @@ -610,16 +615,42 @@ can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, return 1; } + /* NOTE: This is roughly the same code as `PyArray_CanCastTypeTo`: */ + NPY_CASTING safety; + if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) { + safety = PyArray_GetCastSafety(scal_type, NULL, NPY_DTYPE(to)); + } + else { + safety = PyArray_GetCastSafety(scal_type, to, NPY_DTYPE(to)); + } + if (safety < 0) { + PyErr_Clear(); + return 0; + } + safety = PyArray_MinCastSafety(safety, casting); + if (safety == casting) { + /* This is definitely a valid cast. */ + return 1; + } + /* - * If the scalar isn't a number, or the rule is stricter than - * NPY_SAFE_CASTING, use the straight type-based rules + * If the scalar isn't a number, value-based casting cannot kick in and + * we must not attempt it. + * (Additional fast-checks would be possible, but probably unnecessary.) */ - if (!PyTypeNum_ISNUMBER(scal_type->type_num) || - casting < NPY_SAFE_CASTING) { - return PyArray_CanCastTypeTo(scal_type, to, casting); + if (!PyTypeNum_ISNUMBER(scal_type->type_num)) { + return 0; } - swap = !PyArray_ISNBO(scal_type->byteorder); + /* + * At this point we have to check value-based casting. 
+ */ + PyArray_Descr *dtype; + int is_small_unsigned = 0, type_num; + /* An aligned memory buffer large enough to hold any builtin numeric type */ + npy_longlong value[4]; + + int swap = !PyArray_ISNBO(scal_type->byteorder); scal_type->f->copyswap(&value, scal_data, swap, NULL); type_num = min_scalar_type_num((char *)&value, scal_type->type_num, @@ -645,7 +676,7 @@ can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, PyObject_Print(to, stdout, 0); printf("\n"); #endif - ret = PyArray_CanCastTypeTo(dtype, to, casting); + npy_bool ret = PyArray_CanCastTypeTo(dtype, to, casting); Py_DECREF(dtype); return ret; } @@ -842,7 +873,6 @@ PyArray_CastDescrToDType(PyArray_Descr *descr, PyArray_DTypeMeta *given_DType) return descr; } -#if NPY_USE_NEW_CASTINGIMPL PyObject *tmp = PyArray_GetCastingImpl(NPY_DTYPE(descr), given_DType); if (tmp == NULL || tmp == Py_None) { Py_XDECREF(tmp); @@ -865,23 +895,10 @@ PyArray_CastDescrToDType(PyArray_Descr *descr, PyArray_DTypeMeta *given_DType) error:; /* (; due to compiler limitations) */ PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; PyErr_Fetch(&err_type, &err_value, &err_traceback); - PyErr_Format(PyExc_ValueError, + PyErr_Format(PyExc_TypeError, "cannot cast dtype %S to %S.", descr, given_DType); - npy_PyErr_ChainExceptions(err_type, err_value, err_traceback); + npy_PyErr_ChainExceptionsCause(err_type, err_value, err_traceback); return NULL; - -#else /* NPY_USE_NEW_CASTS */ - if (!given_DType->legacy) { - PyErr_SetString(PyExc_NotImplementedError, - "Must use casting to find the correct DType for a parametric " - "user DType. This is not yet implemented (this error should be " - "unreachable)."); - return NULL; - } - - PyArray_Descr *flex_dtype = PyArray_DescrNew(given_DType->singleton); - return PyArray_AdaptFlexibleDType(descr, flex_dtype); -#endif /* NPY_USE_NEW_CASTS */ } @@ -901,7 +918,7 @@ PyArray_FindConcatenationDescriptor( npy_intp n, PyArrayObject **arrays, PyObject *requested_dtype) { if (requested_dtype == NULL) { - return PyArray_ResultType(n, arrays, 0, NULL); + return PyArray_LegacyResultType(n, arrays, 0, NULL); } PyArray_DTypeMeta *common_dtype; @@ -921,16 +938,16 @@ PyArray_FindConcatenationDescriptor( goto finish; } assert(n > 0); /* concatenate requires at least one array input. */ + + /* + * NOTE: This code duplicates `PyArray_CastToDTypeAndPromoteDescriptors` + * to use arrays, copying the descriptors seems not better. + */ PyArray_Descr *descr = PyArray_DESCR(arrays[0]); result = PyArray_CastDescrToDType(descr, common_dtype); if (result == NULL || n == 1) { goto finish; } - /* - * This could short-cut a bit, calling `common_instance` directly and/or - * returning the `default_descr()` directly. Avoiding that (for now) as - * it would duplicate code from `PyArray_PromoteTypes`. - */ for (npy_intp i = 1; i < n; i++) { descr = PyArray_DESCR(arrays[i]); PyArray_Descr *curr = PyArray_CastDescrToDType(descr, common_dtype); @@ -938,7 +955,7 @@ PyArray_FindConcatenationDescriptor( Py_SETREF(result, NULL); goto finish; } - Py_SETREF(result, PyArray_PromoteTypes(result, curr)); + Py_SETREF(result, common_dtype->common_instance(result, curr)); Py_DECREF(curr); if (result == NULL) { goto finish; @@ -951,50 +968,6 @@ PyArray_FindConcatenationDescriptor( } -/** - * This function defines the common DType operator. - * - * Note that the common DType will not be "object" (unless one of the dtypes - * is object), even though object can technically represent all values - * correctly. 
- * - * TODO: Before exposure, we should review the return value (e.g. no error - * when no common DType is found). - * - * @param dtype1 DType class to find the common type for. - * @param dtype2 Second DType class. - * @return The common DType or NULL with an error set - */ -NPY_NO_EXPORT PyArray_DTypeMeta * -PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) -{ - if (dtype1 == dtype2) { - Py_INCREF(dtype1); - return dtype1; - } - - PyArray_DTypeMeta *common_dtype; - - common_dtype = dtype1->common_dtype(dtype1, dtype2); - if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { - Py_DECREF(common_dtype); - common_dtype = dtype2->common_dtype(dtype2, dtype1); - } - if (common_dtype == NULL) { - return NULL; - } - if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { - Py_DECREF(Py_NotImplemented); - PyErr_Format(PyExc_TypeError, - "The DTypes %S and %S do not have a common DType. " - "For example they cannot be stored in a single array unless " - "the dtype is `object`.", dtype1, dtype2); - return NULL; - } - return common_dtype; -} - - /*NUMPY_API * Produces the smallest size and lowest kind type to which both * input types can be cast. @@ -1017,6 +990,7 @@ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) } if (!common_dtype->parametric) { + /* Note that this path loses all metadata */ res = common_dtype->default_descr(common_dtype); Py_DECREF(common_dtype); return res; @@ -1050,28 +1024,17 @@ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) * Produces the smallest size and lowest kind type to which all * input types can be cast. * - * Equivalent to functools.reduce(PyArray_PromoteTypes, types) + * Roughly equivalent to functools.reduce(PyArray_PromoteTypes, types) + * but uses a more complex pairwise approach. */ NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypeSequence(PyArray_Descr **types, npy_intp ntypes) { - npy_intp i; - PyArray_Descr *ret = NULL; if (ntypes == 0) { PyErr_SetString(PyExc_TypeError, "at least one type needed to promote"); return NULL; } - ret = types[0]; - Py_INCREF(ret); - for (i = 1; i < ntypes; ++i) { - PyArray_Descr *tmp = PyArray_PromoteTypes(types[i], ret); - Py_DECREF(ret); - ret = tmp; - if (ret == NULL) { - return NULL; - } - } - return ret; + return PyArray_ResultType(0, NULL, ntypes, types); } /* @@ -1437,6 +1400,8 @@ dtype_kind_to_simplified_ordering(char kind) * If the scalars are of a lower or same category as the arrays, they may be * demoted to a lower type within their category (the lowest type they can * be cast to safely according to scalar casting rules). + * + * If any new style dtype is involved (non-legacy), always returns 0. 
*/ NPY_NO_EXPORT int should_use_min_scalar(npy_intp narrs, PyArrayObject **arr, @@ -1453,6 +1418,9 @@ should_use_min_scalar(npy_intp narrs, PyArrayObject **arr, /* Compute the maximum "kinds" and whether everything is scalar */ for (npy_intp i = 0; i < narrs; ++i) { + if (!NPY_DTYPE(PyArray_DESCR(arr[i]))->legacy) { + return 0; + } if (PyArray_NDIM(arr[i]) == 0) { int kind = dtype_kind_to_simplified_ordering( PyArray_DESCR(arr[i])->kind); @@ -1474,6 +1442,9 @@ should_use_min_scalar(npy_intp narrs, PyArrayObject **arr, * finish computing the max array kind */ for (npy_intp i = 0; i < ndtypes; ++i) { + if (!NPY_DTYPE(dtypes[i])->legacy) { + return 0; + } int kind = dtype_kind_to_simplified_ordering(dtypes[i]->kind); if (kind > max_array_kind) { max_array_kind = kind; @@ -1490,6 +1461,206 @@ should_use_min_scalar(npy_intp narrs, PyArrayObject **arr, /*NUMPY_API + * + * Produces the result type of a bunch of inputs, using the same rules + * as `np.result_type`. + * + * NOTE: This function is expected to through a transitional period or + * change behaviour. DTypes should always be strictly enforced for + * 0-D arrays, while "weak DTypes" will be used to represent Python + * integers, floats, and complex in all cases. + * (Within this function, these are currently flagged on the array + * object to work through `np.result_type`, this may change.) + * + * Until a time where this transition is complete, we probably cannot + * add new "weak DTypes" or allow users to create their own. + */ +NPY_NO_EXPORT PyArray_Descr * +PyArray_ResultType( + npy_intp narrs, PyArrayObject *arrs[], + npy_intp ndtypes, PyArray_Descr *descrs[]) +{ + PyArray_Descr *result = NULL; + + if (narrs + ndtypes <= 1) { + /* If the input is a single value, skip promotion. */ + if (narrs == 1) { + result = PyArray_DTYPE(arrs[0]); + } + else if (ndtypes == 1) { + result = descrs[0]; + } + else { + PyErr_SetString(PyExc_TypeError, + "no arrays or types available to calculate result type"); + return NULL; + } + return ensure_dtype_nbo(result); + } + + void **info_on_heap = NULL; + void *_info_on_stack[NPY_MAXARGS * 2]; + PyArray_DTypeMeta **all_DTypes; + PyArray_Descr **all_descriptors; + + if (narrs + ndtypes > NPY_MAXARGS) { + info_on_heap = PyMem_Malloc(2 * (narrs+ndtypes) * sizeof(PyObject *)); + if (info_on_heap == NULL) { + PyErr_NoMemory(); + return NULL; + } + all_DTypes = (PyArray_DTypeMeta **)info_on_heap; + all_descriptors = (PyArray_Descr **)(info_on_heap + narrs + ndtypes); + } + else { + all_DTypes = (PyArray_DTypeMeta **)_info_on_stack; + all_descriptors = (PyArray_Descr **)(_info_on_stack + narrs + ndtypes); + } + + /* Copy all dtypes into a single array defining non-value-based behaviour */ + for (npy_intp i=0; i < ndtypes; i++) { + all_DTypes[i] = NPY_DTYPE(descrs[i]); + Py_INCREF(all_DTypes[i]); + all_descriptors[i] = descrs[i]; + } + + int at_least_one_scalar = 0; + int all_pyscalar = ndtypes == 0; + for (npy_intp i=0, i_all=ndtypes; i < narrs; i++, i_all++) { + /* Array descr is also the correct "default" for scalars: */ + if (PyArray_NDIM(arrs[i]) == 0) { + at_least_one_scalar = 1; + } + + if (!(PyArray_FLAGS(arrs[i]) & _NPY_ARRAY_WAS_PYSCALAR)) { + /* This was not a scalar with an abstract DType */ + all_descriptors[i_all] = PyArray_DTYPE(arrs[i]); + all_DTypes[i_all] = NPY_DTYPE(all_descriptors[i_all]); + Py_INCREF(all_DTypes[i_all]); + all_pyscalar = 0; + continue; + } + + /* + * The original was a Python scalar with an abstract DType. 
+ * In a future world, this type of code may need to work on the + * DType level first and discover those from the original value. + * But, right now we limit the logic to int, float, and complex + * and do it here to allow for a transition without losing all of + * our remaining sanity. + */ + if (PyArray_ISFLOAT(arrs[i])) { + all_DTypes[i_all] = &PyArray_PyFloatAbstractDType; + } + else if (PyArray_ISCOMPLEX(arrs[i])) { + all_DTypes[i_all] = &PyArray_PyComplexAbstractDType; + } + else { + /* N.B.: Could even be an object dtype here for large ints */ + all_DTypes[i_all] = &PyArray_PyIntAbstractDType; + } + Py_INCREF(all_DTypes[i_all]); + /* + * Leave the decriptor empty, if we need it, we will have to go + * to more extreme lengths unfortunately. + */ + all_descriptors[i_all] = NULL; + } + + PyArray_DTypeMeta *common_dtype = PyArray_PromoteDTypeSequence( + narrs+ndtypes, all_DTypes); + for (npy_intp i=0; i < narrs+ndtypes; i++) { + Py_DECREF(all_DTypes[i]); + } + if (common_dtype == NULL) { + goto finish; + } + + if (common_dtype->abstract) { + /* (ab)use default descriptor to define a default */ + PyArray_Descr *tmp_descr = common_dtype->default_descr(common_dtype); + if (tmp_descr == NULL) { + goto finish; + } + Py_INCREF(NPY_DTYPE(tmp_descr)); + Py_SETREF(common_dtype, NPY_DTYPE(tmp_descr)); + Py_DECREF(tmp_descr); + } + + /* + * NOTE: Code duplicates `PyArray_CastToDTypeAndPromoteDescriptors`, but + * supports special handling of the abstract values. + */ + if (!common_dtype->parametric) { + /* Note that this "fast" path loses all metadata */ + result = common_dtype->default_descr(common_dtype); + } + else { + result = PyArray_CastDescrToDType(all_descriptors[0], common_dtype); + + for (npy_intp i = 1; i < ndtypes+narrs; i++) { + PyArray_Descr *curr; + if (NPY_LIKELY(i < ndtypes || + !(PyArray_FLAGS(arrs[i-ndtypes]) & _NPY_ARRAY_WAS_PYSCALAR))) { + curr = PyArray_CastDescrToDType(all_descriptors[i], common_dtype); + } + else { + /* + * Unlike `PyArray_CastToDTypeAndPromoteDescriptors` deal with + * plain Python values "graciously". This recovers the original + * value the long route, but it should almost never happen... + */ + PyObject *tmp = PyArray_GETITEM( + arrs[i-ndtypes], PyArray_BYTES(arrs[i-ndtypes])); + if (tmp == NULL) { + Py_SETREF(result, NULL); + goto finish; + } + curr = common_dtype->discover_descr_from_pyobject(common_dtype, tmp); + Py_DECREF(tmp); + } + if (curr == NULL) { + Py_SETREF(result, NULL); + goto finish; + } + Py_SETREF(result, common_dtype->common_instance(result, curr)); + Py_DECREF(curr); + if (result == NULL) { + goto finish; + } + } + } + + /* + * Unfortunately, when 0-D "scalar" arrays are involved and mixed, we + * have to use the value-based logic. The intention is to move away from + * the complex logic arising from it. We thus fall back to the legacy + * version here. + * It may be possible to micro-optimize this to skip some of the above + * logic when this path is necessary. + */ + if (at_least_one_scalar && !all_pyscalar && result->type_num < NPY_NTYPES) { + PyArray_Descr *legacy_result = PyArray_LegacyResultType( + narrs, arrs, ndtypes, descrs); + if (legacy_result == NULL) { + /* + * Going from error to success should not really happen, but is + * probably OK if it does. 
+ */ + Py_SETREF(result, NULL); + goto finish; + } + /* Return the old "legacy" result (could warn here if different) */ + Py_SETREF(result, legacy_result); + } + + finish: + PyMem_Free(info_on_heap); + return result; +} + + +/* * Produces the result type of a bunch of inputs, using the UFunc * type promotion rules. Use this function when you have a set of * input arrays, and need to determine an output array dtype. @@ -1501,11 +1672,11 @@ should_use_min_scalar(npy_intp narrs, PyArrayObject **arr, * Otherwise, does a type promotion on the MinScalarType * of all the inputs. Data types passed directly are treated as array * types. - * */ NPY_NO_EXPORT PyArray_Descr * -PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, - npy_intp ndtypes, PyArray_Descr **dtypes) +PyArray_LegacyResultType( + npy_intp narrs, PyArrayObject **arr, + npy_intp ndtypes, PyArray_Descr **dtypes) { npy_intp i; @@ -1604,6 +1775,49 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, } } +/** + * Promotion of descriptors (of arbitrary DType) to their correctly + * promoted instances of the given DType. + * I.e. the given DType could be a string, which then finds the correct + * string length, given all `descrs`. + * + * @param ndescrs number of descriptors to cast and find the common instance. + * At least one must be passed in. + * @param descrs The descriptors to work with. + * @param DType The DType of the desired output descriptor. + */ +NPY_NO_EXPORT PyArray_Descr * +PyArray_CastToDTypeAndPromoteDescriptors( + npy_intp ndescr, PyArray_Descr *descrs[], PyArray_DTypeMeta *DType) +{ + assert(ndescr > 0); + + PyArray_Descr *result = PyArray_CastDescrToDType(descrs[0], DType); + if (result == NULL || ndescr == 1) { + return result; + } + if (!DType->parametric) { + /* Note that this "fast" path loses all metadata */ + Py_DECREF(result); + return DType->default_descr(DType); + } + + for (npy_intp i = 1; i < ndescr; i++) { + PyArray_Descr *curr = PyArray_CastDescrToDType(descrs[i], DType); + if (curr == NULL) { + Py_DECREF(result); + return NULL; + } + Py_SETREF(result, DType->common_instance(result, curr)); + Py_DECREF(curr); + if (result == NULL) { + return NULL; + } + } + return result; +} + + /*NUMPY_API * Is the typenum valid? 
*/ diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h index 33517b8ca220..ba16d4d1bd5a 100644 --- a/numpy/core/src/multiarray/convert_datatype.h +++ b/numpy/core/src/multiarray/convert_datatype.h @@ -20,8 +20,10 @@ PyArray_ObjectType(PyObject *op, int minimum_type); NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType(PyObject *op, int *retn); -NPY_NO_EXPORT PyArray_DTypeMeta * -PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2); +NPY_NO_EXPORT PyArray_Descr * +PyArray_LegacyResultType( + npy_intp narrs, PyArrayObject **arr, + npy_intp ndtypes, PyArray_Descr **dtypes); NPY_NO_EXPORT int PyArray_ValidType(int type); @@ -29,7 +31,7 @@ PyArray_ValidType(int type); NPY_NO_EXPORT int dtype_kind_to_ordering(char kind); -/* Like PyArray_CanCastArrayTo */ +/* Used by PyArray_CanCastArrayTo and in the legacy ufunc type resolution */ NPY_NO_EXPORT npy_bool can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, PyArray_Descr *to, NPY_CASTING casting); diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index 26b16b15b1b5..40ca9ee2a396 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -31,6 +31,14 @@ dtypemeta_dealloc(PyArray_DTypeMeta *self) { PyType_Type.tp_dealloc((PyObject *) self); } +static PyObject * +dtypemeta_alloc(PyTypeObject *NPY_UNUSED(type), Py_ssize_t NPY_UNUSED(items)) +{ + PyErr_SetString(PyExc_TypeError, + "DTypes can only be created using the NumPy API."); + return NULL; +} + static PyObject * dtypemeta_new(PyTypeObject *NPY_UNUSED(type), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) @@ -690,6 +698,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayDTypeMeta_Type = { .tp_doc = "Preliminary NumPy API: The Type of NumPy DTypes (metaclass)", .tp_members = dtypemeta_members, .tp_base = NULL, /* set to PyType_Type at import time */ + .tp_alloc = dtypemeta_alloc, .tp_init = (initproc)dtypemeta_init, .tp_new = dtypemeta_new, .tp_is_gc = dtypemeta_is_gc, diff --git a/numpy/core/src/multiarray/legacy_dtype_implementation.c b/numpy/core/src/multiarray/legacy_dtype_implementation.c index d2e95348dd70..9b4946da3c7c 100644 --- a/numpy/core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/core/src/multiarray/legacy_dtype_implementation.c @@ -1,10 +1,10 @@ /* - * This file hosts legacy implementations of certain functions for - * which alternatives exists, but the old functions are still required - * in certain code paths, or until the code transition is finalized. + * The only function exported here is `PyArray_LegacyCanCastTypeTo`, which + * is currently still in use when first registering a userdtype. * - * This code should typically not require modification, and if modified - * similar changes may be necessary in the new version. + * The extremely limited use means that it can probably remain unmaintained + * until such a time where legay user dtypes are deprecated and removed + * entirely. 
*/ #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -78,7 +78,7 @@ _equivalent_subarrays(PyArray_ArrayDescr *sub1, PyArray_ArrayDescr *sub2) } -NPY_NO_EXPORT unsigned char +static unsigned char PyArray_LegacyEquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) { int type_num1, type_num2, size1, size2; @@ -116,7 +116,7 @@ PyArray_LegacyEquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) } -NPY_NO_EXPORT unsigned char +static unsigned char PyArray_LegacyEquivTypenums(int typenum1, int typenum2) { PyArray_Descr *d1, *d2; @@ -135,7 +135,7 @@ PyArray_LegacyEquivTypenums(int typenum1, int typenum2) } -NPY_NO_EXPORT int +static int PyArray_LegacyCanCastSafely(int fromtype, int totype) { PyArray_Descr *from; @@ -171,7 +171,7 @@ PyArray_LegacyCanCastSafely(int fromtype, int totype) } -NPY_NO_EXPORT npy_bool +static npy_bool PyArray_LegacyCanCastTo(PyArray_Descr *from, PyArray_Descr *to) { int from_type_num = from->type_num; @@ -551,168 +551,3 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, } } - -/* - * Legacy function to find the correct dtype when casting from any built-in - * dtype to NPY_STRING, NPY_UNICODE, NPY_VOID, and NPY_DATETIME with generic - * units. - * - * This function returns a dtype based on flex_dtype and the values in - * data_dtype. It also calls Py_DECREF on the flex_dtype. If the - * flex_dtype is not flexible, it returns it as-is. - * - * Usually, if data_obj is not an array, dtype should be the result - * given by the PyArray_GetArrayParamsFromObject function. - * - * If *flex_dtype is NULL, returns immediately, without setting an - * exception, leaving any previous error handling intact. - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_AdaptFlexibleDType(PyArray_Descr *data_dtype, PyArray_Descr *flex_dtype) -{ - PyArray_DatetimeMetaData *meta; - PyArray_Descr *retval = NULL; - int flex_type_num; - - if (flex_dtype == NULL) { - return retval; - } - - flex_type_num = flex_dtype->type_num; - - /* Flexible types with expandable size */ - if (PyDataType_ISUNSIZED(flex_dtype)) { - /* First replace the flex_dtype */ - retval = PyArray_DescrNew(flex_dtype); - Py_DECREF(flex_dtype); - if (retval == NULL) { - return retval; - } - - if (data_dtype->type_num == flex_type_num || - flex_type_num == NPY_VOID) { - (retval)->elsize = data_dtype->elsize; - } - else if (flex_type_num == NPY_STRING || flex_type_num == NPY_UNICODE) { - npy_intp size = 8; - - /* - * Get a string-size estimate of the input. These - * are generallly the size needed, rounded up to - * a multiple of eight. - */ - switch (data_dtype->type_num) { - case NPY_BOOL: - case NPY_UBYTE: - case NPY_BYTE: - case NPY_USHORT: - case NPY_SHORT: - case NPY_UINT: - case NPY_INT: - case NPY_ULONG: - case NPY_LONG: - case NPY_ULONGLONG: - case NPY_LONGLONG: - if (data_dtype->kind == 'b') { - /* 5 chars needed for cast to 'True' or 'False' */ - size = 5; - } - else if (data_dtype->elsize > 8 || - data_dtype->elsize < 0) { - /* - * Element size should never be greater than 8 or - * less than 0 for integer type, but just in case... 
- */ - break; - } - else if (data_dtype->kind == 'u') { - size = REQUIRED_STR_LEN[data_dtype->elsize]; - } - else if (data_dtype->kind == 'i') { - /* Add character for sign symbol */ - size = REQUIRED_STR_LEN[data_dtype->elsize] + 1; - } - break; - case NPY_HALF: - case NPY_FLOAT: - case NPY_DOUBLE: - size = 32; - break; - case NPY_LONGDOUBLE: - size = 48; - break; - case NPY_CFLOAT: - case NPY_CDOUBLE: - size = 2 * 32; - break; - case NPY_CLONGDOUBLE: - size = 2 * 48; - break; - case NPY_OBJECT: - size = 64; - break; - case NPY_STRING: - case NPY_VOID: - size = data_dtype->elsize; - break; - case NPY_UNICODE: - size = data_dtype->elsize / 4; - break; - case NPY_DATETIME: - meta = get_datetime_metadata_from_dtype(data_dtype); - if (meta == NULL) { - Py_DECREF(retval); - return NULL; - } - size = get_datetime_iso_8601_strlen(0, meta->base); - break; - case NPY_TIMEDELTA: - size = 21; - break; - } - - if (flex_type_num == NPY_STRING) { - retval->elsize = size; - } - else if (flex_type_num == NPY_UNICODE) { - retval->elsize = size * 4; - } - } - else { - /* - * We should never get here, but just in case someone adds - * a new flex dtype... - */ - PyErr_SetString(PyExc_TypeError, - "don't know how to adapt flex dtype"); - Py_DECREF(retval); - return NULL; - } - } - /* Flexible type with generic time unit that adapts */ - else if (flex_type_num == NPY_DATETIME || - flex_type_num == NPY_TIMEDELTA) { - meta = get_datetime_metadata_from_dtype(flex_dtype); - retval = flex_dtype; - if (meta == NULL) { - return NULL; - } - - if (meta->base == NPY_FR_GENERIC) { - if (data_dtype->type_num == NPY_DATETIME || - data_dtype->type_num == NPY_TIMEDELTA) { - meta = get_datetime_metadata_from_dtype(data_dtype); - if (meta == NULL) { - return NULL; - } - - retval = create_datetime_dtype(flex_type_num, meta); - Py_DECREF(flex_dtype); - } - } - } - else { - retval = flex_dtype; - } - return retval; -} diff --git a/numpy/core/src/multiarray/legacy_dtype_implementation.h b/numpy/core/src/multiarray/legacy_dtype_implementation.h index ca171d7733e6..b36eb019a452 100644 --- a/numpy/core/src/multiarray/legacy_dtype_implementation.h +++ b/numpy/core/src/multiarray/legacy_dtype_implementation.h @@ -1,40 +1,8 @@ #ifndef _NPY_LEGACY_DTYPE_IMPLEMENTATION_H #define _NPY_LEGACY_DTYPE_IMPLEMENTATION_H - -NPY_NO_EXPORT unsigned char -PyArray_LegacyEquivTypes(PyArray_Descr *type1, PyArray_Descr *type2); - -NPY_NO_EXPORT unsigned char -PyArray_LegacyEquivTypenums(int typenum1, int typenum2); - -NPY_NO_EXPORT int -PyArray_LegacyCanCastSafely(int fromtype, int totype); - -NPY_NO_EXPORT npy_bool -PyArray_LegacyCanCastTo(PyArray_Descr *from, PyArray_Descr *to); - NPY_NO_EXPORT npy_bool PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, NPY_CASTING casting); -/* - * This function calls Py_DECREF on flex_dtype, and replaces it with - * a new dtype that has been adapted based on the values in data_dtype - * and data_obj. If the flex_dtype is not flexible, it returns it as-is. - * - * Usually, if data_obj is not an array, dtype should be the result - * given by the PyArray_GetArrayParamsFromObject function. - * - * The data_obj may be NULL if just a dtype is known for the source. - * - * If *flex_dtype is NULL, returns immediately, without setting an - * exception, leaving any previous error handling intact. - * - * The current flexible dtypes include NPY_STRING, NPY_UNICODE, NPY_VOID, - * and NPY_DATETIME with generic units. 
- */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_AdaptFlexibleDType(PyArray_Descr *data_dtype, PyArray_Descr *flex_dtype); - #endif /*_NPY_LEGACY_DTYPE_IMPLEMENTATION_H*/ diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index ff5a5d8bcab9..588648ca8d3f 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -867,44 +867,44 @@ array_astype(PyArrayObject *self, Py_INCREF(self); return (PyObject *)self; } - else if (PyArray_CanCastArrayTo(self, dtype, casting)) { - PyArrayObject *ret; - - /* This steals the reference to dtype, so no DECREF of dtype */ - ret = (PyArrayObject *)PyArray_NewLikeArray( - self, order, dtype, subok); - if (ret == NULL) { - return NULL; - } - /* NumPy 1.20, 2020-10-01 */ - if ((PyArray_NDIM(self) != PyArray_NDIM(ret)) && - DEPRECATE_FUTUREWARNING( - "casting an array to a subarray dtype " - "will not using broadcasting in the future, but cast each " - "element to the new dtype and then append the dtype's shape " - "to the new array. You can opt-in to the new behaviour, by " - "additional field to the cast: " - "`arr.astype(np.dtype([('f', dtype)]))['f']`.\n" - "This may lead to a different result or to current failures " - "succeeding. " - "(FutureWarning since NumPy 1.20)") < 0) { - Py_DECREF(ret); - return NULL; - } - - if (PyArray_CopyInto(ret, self) < 0) { - Py_DECREF(ret); - return NULL; - } - - return (PyObject *)ret; - } - else { + if (!PyArray_CanCastArrayTo(self, dtype, casting)) { + PyErr_Clear(); npy_set_invalid_cast_error( PyArray_DESCR(self), dtype, casting, PyArray_NDIM(self) == 0); Py_DECREF(dtype); return NULL; } + + PyArrayObject *ret; + + /* This steals the reference to dtype, so no DECREF of dtype */ + ret = (PyArrayObject *)PyArray_NewLikeArray( + self, order, dtype, subok); + if (ret == NULL) { + return NULL; + } + /* NumPy 1.20, 2020-10-01 */ + if ((PyArray_NDIM(self) != PyArray_NDIM(ret)) && + DEPRECATE_FUTUREWARNING( + "casting an array to a subarray dtype " + "will not using broadcasting in the future, but cast each " + "element to the new dtype and then append the dtype's shape " + "to the new array. You can opt-in to the new behaviour, by " + "additional field to the cast: " + "`arr.astype(np.dtype([('f', dtype)]))['f']`.\n" + "This may lead to a different result or to current failures " + "succeeding. " + "(FutureWarning since NumPy 1.20)") < 0) { + Py_DECREF(ret); + return NULL; + } + + if (PyArray_CopyInto(ret, self) < 0) { + Py_DECREF(ret); + return NULL; + } + + return (PyObject *)ret; } /* default sub-type implementation */ diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index f3c1b7f981e6..f7c3ea093a29 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1470,7 +1470,9 @@ array_putmask(PyObject *NPY_UNUSED(module), PyObject *args, PyObject *kwds) NPY_NO_EXPORT unsigned char PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) { -#if NPY_USE_NEW_CASTINGIMPL + if (type1 == type2) { + return 1; + } /* * Do not use PyArray_CanCastTypeTo because it supports legacy flexible * dtypes as input. @@ -1482,9 +1484,6 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) } /* If casting is "no casting" this dtypes are considered equivalent. 
*/ return PyArray_MinCastSafety(safety, NPY_NO_CASTING) == NPY_NO_CASTING; -#else - return PyArray_LegacyEquivTypes(type1, type2); -#endif } @@ -3357,7 +3356,7 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *from_obj = NULL; PyArray_Descr *d1 = NULL; PyArray_Descr *d2 = NULL; - npy_bool ret; + int ret; PyObject *retobj = NULL; NPY_CASTING casting = NPY_SAFE_CASTING; static char *kwlist[] = {"from_", "to", "casting", NULL}; @@ -3487,6 +3486,10 @@ array_result_type(PyObject *NPY_UNUSED(dummy), PyObject *args) if (arr[narr] == NULL) { goto finish; } + if (PyLong_CheckExact(obj) || PyFloat_CheckExact(obj) || + PyComplex_CheckExact(obj)) { + ((PyArrayObject_fields *)arr[narr])->flags |= _NPY_ARRAY_WAS_PYSCALAR; + } ++narr; } else { diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index 7e9b937827d7..a62776748775 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -16,6 +16,9 @@ #include "binop_override.h" #include "ufunc_override.h" +#include "abstractdtypes.h" +#include "common_dtype.h" +#include "convert_datatype.h" /************************************************************************* **************** Implement Number Protocol **************************** diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py index c8fcd4b42522..2cec1acd3490 100644 --- a/numpy/core/tests/test_casting_unittests.py +++ b/numpy/core/tests/test_casting_unittests.py @@ -17,7 +17,6 @@ from numpy.testing import assert_array_equal from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl -from numpy.core._multiarray_tests import uses_new_casts # Simple skips object, parametric and long double (unsupported by struct) @@ -135,11 +134,7 @@ class TestChanges: def test_float_to_string(self, floating, string): assert np.can_cast(floating, string) # 100 is long enough to hold any formatted floating - if uses_new_casts(): - assert np.can_cast(floating, f"{string}100") - else: - assert not np.can_cast(floating, f"{string}100") - assert np.can_cast(floating, f"{string}100", casting="same_kind") + assert np.can_cast(floating, f"{string}100") def test_to_void(self): # But in general, we do consider these safe: @@ -147,17 +142,11 @@ def test_to_void(self): assert np.can_cast("S20", "V") # Do not consider it a safe cast if the void is too smaller: - if uses_new_casts(): - assert not np.can_cast("d", "V1") - assert not np.can_cast("S20", "V1") - assert not np.can_cast("U1", "V1") - # Structured to unstructured is just like any other: - assert np.can_cast("d,i", "V", casting="same_kind") - else: - assert np.can_cast("d", "V1") - assert np.can_cast("S20", "V1") - assert np.can_cast("U1", "V1") - assert not np.can_cast("d,i", "V", casting="same_kind") + assert not np.can_cast("d", "V1") + assert not np.can_cast("S20", "V1") + assert not np.can_cast("U1", "V1") + # Structured to unstructured is just like any other: + assert np.can_cast("d,i", "V", casting="same_kind") class TestCasting: From 6b79a8ee7a9db83fbd693cc6d9097aeb3ca4a8d6 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 12 Apr 2021 12:48:05 -0500 Subject: [PATCH 1150/1270] DOC: Fix mistakes found by Tyler --- numpy/core/include/numpy/ndarraytypes.h | 2 +- numpy/core/src/multiarray/abstractdtypes.c | 2 +- numpy/core/src/multiarray/methods.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 
9ff8c9831952..14e3adfdbb8f 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -862,7 +862,7 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); * python float, int, or complex. * An array using this flag must be a temporary array that can never * leave the C internals of NumPy. Even if it does, ENSUREARRAY is - * absolutely safe to abuse, since it alraedy is be a base class array :). + * absolutely safe to abuse, since it already is a base class array :). */ #define _NPY_ARRAY_WAS_PYSCALAR 0x0040 #endif /* NPY_INTERNAL_BUILD */ diff --git a/numpy/core/src/multiarray/abstractdtypes.c b/numpy/core/src/multiarray/abstractdtypes.c index 5fff1e6e442c..587d91c49cda 100644 --- a/numpy/core/src/multiarray/abstractdtypes.c +++ b/numpy/core/src/multiarray/abstractdtypes.c @@ -143,7 +143,7 @@ initialize_and_map_pytypes_to_dtypes() /* * The following functions define the "common DType" for the abstract dtypes. * - * Note that the logic with resupt to the "higher" dtypes such as floats + * Note that the logic with respect to the "higher" dtypes such as floats * could likely be more logically defined for them, but since NumPy dtypes * largely "know" each other, that is not necessary. */ diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 588648ca8d3f..251e527a6b96 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -887,7 +887,7 @@ array_astype(PyArrayObject *self, if ((PyArray_NDIM(self) != PyArray_NDIM(ret)) && DEPRECATE_FUTUREWARNING( "casting an array to a subarray dtype " - "will not using broadcasting in the future, but cast each " + "will not use broadcasting in the future, but cast each " "element to the new dtype and then append the dtype's shape " "to the new array. You can opt-in to the new behaviour, by " "additional field to the cast: " From aaaa9753277ff9d387beecaf0f43519eacdea4eb Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 19 Apr 2021 15:16:51 -0500 Subject: [PATCH 1151/1270] DOC,TST: Fix documentation and add tests --- doc/source/reference/ufuncs.rst | 2 +- numpy/core/tests/test_dtype.py | 35 +++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index c919ec9b8e65..151ac3370411 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -266,7 +266,7 @@ can generate this table for your system with the code given in the Figure. S - - - - - - - - - - - - - - - - - - - - Y Y Y Y - - U - - - - - - - - - - - - - - - - - - - - - Y Y Y - - V - - - - - - - - - - - - - - - - - - - - - - Y Y - - - O - - - - - - - - - - - - - - - - - - - - - - Y Y - - + O - - - - - - - - - - - - - - - - - - - - - - - Y - - M - - - - - - - - - - - - - - - - - - - - - - Y Y Y - m - - - - - - - - - - - - - - - - - - - - - - Y Y - Y diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 53e4821aed67..2246a4802d9b 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -1088,6 +1088,41 @@ def test_pickle_types(self, DType): assert roundtrip_DType is DType +class TestPromotion: + """Test cases related to more complex DType promotions. 
Further promotion + tests are defined in `test_numeric.py` + """ + @pytest.mark.parametrize(["other", "expected"], + [(2**16-1, np.complex64), + (2**32-1, np.complex128), + (np.float16(2), np.complex64), + (np.float32(2), np.complex64), + (np.float128(2), np.complex64), + (np.nextafter(np.longdouble(1.7e308), 0.), np.complex128), + (np.longdouble(1.7e308), np.clongdouble), + ]) + def test_complex_value_based(self, other, expected): + # This would change if we modfiy the value based promotion + min_complex = np.dtype(np.complex64) + + res = np.result_type(other, min_complex) + assert res == expected + # Check the same for a simple ufunc call that uses the same logic: + res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype + assert res == expected + + @pytest.mark.parametrize(["dtypes", "expected"], + [([np.uint16, np.int16, np.float16], np.float32), + ([np.uint16, np.int8, np.float16], np.float32), + ([np.uint8, np.int16, np.float16], np.float32)]) + def test_permutations_do_not_influence_result(self, dtypes, expected): + # Tests that most permutations do not influence the result. In the + # above some uint and int combintations promote to a larger integer + # type, which would then promote to a larger than necessary float. + for perm in permutations(dtypes): + assert np.result_type(*perm) == expected + + def test_rational_dtype(): # test for bug gh-5719 a = np.array([1111], dtype=rational).astype From 834b1184c1f609a4e9ba06c55ea64ba882e4cdc1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 19 Apr 2021 15:19:24 -0500 Subject: [PATCH 1152/1270] MAINT: Remove `NPY_USE_NEW_CASTINGIMPL` This doesn't serve any purpose anymore. The new code is now always used. (In very few cases this may lead to small slowdowns, this should only matter in ufuncs where it doesn't seem to matter enough to worry about it.) --- azure-pipelines.yml | 1 - doc/source/reference/global_state.rst | 14 -------------- numpy/core/setup.py | 11 ----------- numpy/core/tests/test_shape_base.py | 8 ++------ 4 files changed, 2 insertions(+), 32 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 9b8373954e00..dd189b8e9131 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -197,7 +197,6 @@ stages: - script: python setup.py build -j 4 build_ext --inplace install displayName: 'Build NumPy without OpenBLAS and new casting' env: - NPY_USE_NEW_CASTINGIMPL: 1 BLAS: None LAPACK: None ATLAS: None diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst index b59467210254..f184812354d3 100644 --- a/doc/source/reference/global_state.rst +++ b/doc/source/reference/global_state.rst @@ -84,17 +84,3 @@ contiguous in memory. Most users will have no reason to change these; for details see the :ref:`memory layout ` documentation. -Using the new casting implementation ------------------------------------- - -Within NumPy 1.20 it is possible to enable the new experimental casting -implementation for testing purposes. To do this set:: - - NPY_USE_NEW_CASTINGIMPL=1 - -Setting the flag is only useful to aid with NumPy developement to ensure the -new version is bug free and should be avoided for production code. -It is a helpful test for projects that either create custom datatypes or -use for example complicated structured dtypes. The flag is expected to be -removed in 1.21 with the new version being always in use. 
- diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 2af2426dd245..b03e9f99005e 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -23,11 +23,6 @@ NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0") NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING -# Set to True to use the new casting implementation as much as implemented. -# Allows running the full test suit to exercise the new machinery until -# it is used as default and the old version is eventually deleted. -NPY_USE_NEW_CASTINGIMPL = os.environ.get('NPY_USE_NEW_CASTINGIMPL', "0") != "0" - # XXX: ugly, we use a class to avoid calling twice some expensive functions in # config.h/numpyconfig.h. I don't see a better way because distutils force # config.h generation inside an Extension class, and as such sharing @@ -472,12 +467,6 @@ def generate_config_h(ext, build_dir): else: moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 0)) - # Use the new experimental casting implementation in NumPy 1.20: - if NPY_USE_NEW_CASTINGIMPL: - moredefs.append(('NPY_USE_NEW_CASTINGIMPL', 1)) - else: - moredefs.append(('NPY_USE_NEW_CASTINGIMPL', 0)) - # Get long double representation rep = check_long_double_representation(config_cmd) moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py index a0c72f9d0fcb..679e3c036351 100644 --- a/numpy/core/tests/test_shape_base.py +++ b/numpy/core/tests/test_shape_base.py @@ -382,13 +382,9 @@ def test_dtype_with_promotion(self, arrs, string_dt, axis): @pytest.mark.parametrize("axis", [None, 0]) def test_string_dtype_does_not_inspect(self, axis): - # The error here currently depends on NPY_USE_NEW_CASTINGIMPL as - # the new version rejects using the "default string length" of 64. - # The new behaviour is better, `np.array()` and `arr.astype()` would - # have to be used instead. 
(currently only raises due to unsafe cast) - with pytest.raises((ValueError, TypeError)): + with pytest.raises(TypeError): np.concatenate(([None], [1]), dtype="S", axis=axis) - with pytest.raises((ValueError, TypeError)): + with pytest.raises(TypeError): np.concatenate(([None], [1]), dtype="U", axis=axis) @pytest.mark.parametrize("axis", [None, 0]) From 6fd0f97b436c162ba4b613c7dff665d56d351f84 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 19 Apr 2021 21:51:47 -0500 Subject: [PATCH 1153/1270] TST: Fixup tests and make sure complex is also the value --- numpy/core/tests/test_dtype.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 2246a4802d9b..34d418926112 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -1097,12 +1097,12 @@ class TestPromotion: (2**32-1, np.complex128), (np.float16(2), np.complex64), (np.float32(2), np.complex64), - (np.float128(2), np.complex64), + (np.longdouble(2), np.complex64), (np.nextafter(np.longdouble(1.7e308), 0.), np.complex128), (np.longdouble(1.7e308), np.clongdouble), ]) - def test_complex_value_based(self, other, expected): - # This would change if we modfiy the value based promotion + def test_complex_other_value_based(self, other, expected): + # This would change if we modify the value based promotion min_complex = np.dtype(np.complex64) res = np.result_type(other, min_complex) @@ -1111,6 +1111,24 @@ def test_complex_value_based(self, other, expected): res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype assert res == expected + @pytest.mark.parametrize(["other", "expected"], + [(np.bool_, np.complex128), + (np.int64, np.complex128), + (np.float16, np.complex64), + (np.float32, np.complex64), + (np.float64, np.complex128), + (np.longdouble, np.clongdouble), + ]) + def test_complex_scalar_value_based(self, other, expected): + # This would change if we modify the value based promotion + complex_scalar = 1j + + res = np.result_type(other, complex_scalar) + assert res == expected + # Check the same for a simple ufunc call that uses the same logic: + res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype + assert res == expected + @pytest.mark.parametrize(["dtypes", "expected"], [([np.uint16, np.int16, np.float16], np.float32), ([np.uint16, np.int8, np.float16], np.float32), From 1a828f525d2737bfe93e91034603bb2f4f3414db Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 16 Feb 2021 17:00:08 -0600 Subject: [PATCH 1154/1270] DEP: Ensure the string promotion FutureWarning is raised Promotion errors are currently ignored and instead we use `object` as dtype. This means that the FutureWarning is ignored when raised. It also means that there is no way to opt into "future" behaviour, although arguably, we may want to force `dtype=object` in any case if a promotion error occurred (or some signal at least). 
Closes gh-18721 Addresses gh-18425 (Does not actually fix it, but chances are we won't) --- doc/release/upcoming_changes/18116.future.rst | 8 ++++---- numpy/core/src/multiarray/array_coercion.c | 8 ++++++-- numpy/core/tests/test_deprecations.py | 5 +---- numpy/core/tests/test_regression.py | 16 ++++++++-------- numpy/ma/tests/test_mrecords.py | 2 +- 5 files changed, 20 insertions(+), 19 deletions(-) diff --git a/doc/release/upcoming_changes/18116.future.rst b/doc/release/upcoming_changes/18116.future.rst index 1341d022f377..1eb14d5f785c 100644 --- a/doc/release/upcoming_changes/18116.future.rst +++ b/doc/release/upcoming_changes/18116.future.rst @@ -11,9 +11,8 @@ are: a string result. * `numpy.array` and related functions will start returning ``object`` arrays because these functions use ``object`` as a fallback when - no common dtype can be found. (In this case setting the - ``FutureWarning`` to be raised will unfortunately lead to the new - behaviour) + no common dtype can be found. However, it may happen that future + releases of NumPy will generally error in these cases. This will mainly affect code such as:: @@ -24,6 +23,7 @@ and:: np.concatenate((['string'], [0])) in both cases adding ``dtype="U"`` or ``dtype="S"`` will give the -previous (string) result. +previous (string) result, while ``dtype=object`` will ensure an array with +object dtype is returned. Comparisons, universal functions, and casting are not affected by this. diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index ef99ae479bcf..6b7c3888d952 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -622,8 +622,12 @@ handle_promotion(PyArray_Descr **out_descr, PyArray_Descr *descr, } PyArray_Descr *new_descr = PyArray_PromoteTypes(descr, *out_descr); if (NPY_UNLIKELY(new_descr == NULL)) { - if (fixed_DType != NULL) { - /* If a DType is fixed, promotion must not fail. */ + if (fixed_DType != NULL || PyErr_ExceptionMatches(PyExc_FutureWarning)) { + /* + * If a DType is fixed, promotion must not fail. Do not catch + * FutureWarning (raised for string+numeric promotions). We could + * only catch TypeError here or even always raise the error. 
+ */ return -1; } PyErr_Clear(); diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index ec4112e69587..ed168837463f 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -1121,10 +1121,7 @@ def test_deprecated(self, dtype, string_dt): self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=0)) self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=None)) - # coercing to an array is similar, but will fall-back to `object` - # (when raising the FutureWarning, this already happens) - self.assert_deprecated(lambda: np.array([arr1[0], arr2[0]]), - exceptions=()) + self.assert_deprecated(lambda: np.array([arr1[0], arr2[0]])) @pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG") @pytest.mark.parametrize("string_dt", ["S", "U"]) diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index d1af7f1d8667..25198bba9ea1 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -503,8 +503,8 @@ def test_swap_real(self): assert_equal(np.arange(4, dtype=' Date: Wed, 12 May 2021 16:36:59 -0700 Subject: [PATCH 1155/1270] BUG: Fortify against longdouble cast loss Base the "magic" value of the double, and use nextafter even for the equal condition to avoid rounding during the cast to longdouble. This might further be necessary because the code uses double constants and not longdouble ones (not sure about this). Using nextafter on the double value should put us firmly on the right side and nobody should care about the few ulps in between those values. --- numpy/core/tests/test_dtype.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 34d418926112..3625c2f62961 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -1098,8 +1098,10 @@ class TestPromotion: (np.float16(2), np.complex64), (np.float32(2), np.complex64), (np.longdouble(2), np.complex64), - (np.nextafter(np.longdouble(1.7e308), 0.), np.complex128), - (np.longdouble(1.7e308), np.clongdouble), + # Base of the double value to sidestep any rounding issues: + (np.longdouble(np.nextafter(1.7e308, 0.)), np.complex128), + # Additionally use "nextafter" so the cast can't round down: + (np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble), ]) def test_complex_other_value_based(self, other, expected): # This would change if we modify the value based promotion From 17577799fcf88f53f583af3df678796ab948abd1 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 13 May 2021 01:35:39 +0200 Subject: [PATCH 1156/1270] ENH: Add annotations for `np.broadcast` --- numpy/__init__.pyi | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ef127123778d..fce5fb02b6fe 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -569,10 +569,6 @@ class MachAr: ) -> None: ... def __getattr__(self, key: str) -> Any: ... -class broadcast: - def __new__(cls: Any, *args: Any) -> Any: ... - def __getattr__(self, key: str) -> Any: ... - class busdaycalendar: def __new__(cls, weekmask: Any = ..., holidays: Any = ...) -> Any: ... def __getattr__(self, key: str) -> Any: ... @@ -3565,3 +3561,26 @@ class DataSource: encoding: Optional[str] = ..., newline: Optional[str] = ..., ) -> IO[Any]: ... 
+ +# TODO: The type of each `__next__` and `iters` return-type depends +# on the length and dtype of `args`; we can't describe this behavior yet +# as we lack variadics (PEP 646). +class broadcast: + def __new__(cls, *args: ArrayLike) -> broadcast: ... + @property + def index(self) -> int: ... + @property + def iters(self) -> Tuple[flatiter[Any], ...]: ... + @property + def nd(self) -> int: ... + @property + def ndim(self) -> int: ... + @property + def numiter(self) -> int: ... + @property + def shape(self) -> _Shape: ... + @property + def size(self) -> int: ... + def __next__(self) -> Tuple[Any, ...]: ... + def __iter__(self: _T) -> _T: ... + def reset(self) -> None: ... From 8f05c61cdbed9b5bc832e727293951945303113a Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 13 May 2021 12:24:09 +0200 Subject: [PATCH 1157/1270] TST: Add typing tests for `np.broadcast` and `np.DataSource` --- numpy/typing/tests/data/fail/datasource.py | 15 ++++++++ numpy/typing/tests/data/pass/multiarray.py | 37 ++++++++++++++++++++ numpy/typing/tests/data/reveal/datasource.py | 21 +++++++++++ numpy/typing/tests/data/reveal/multiarray.py | 35 ++++++++++++++++++ 4 files changed, 108 insertions(+) create mode 100644 numpy/typing/tests/data/fail/datasource.py create mode 100644 numpy/typing/tests/data/pass/multiarray.py create mode 100644 numpy/typing/tests/data/reveal/datasource.py create mode 100644 numpy/typing/tests/data/reveal/multiarray.py diff --git a/numpy/typing/tests/data/fail/datasource.py b/numpy/typing/tests/data/fail/datasource.py new file mode 100644 index 000000000000..345277d45370 --- /dev/null +++ b/numpy/typing/tests/data/fail/datasource.py @@ -0,0 +1,15 @@ +from pathlib import Path +import numpy as np + +path: Path +d1: np.DataSource + +d1.abspath(path) # E: incompatible type +d1.abspath(b"...") # E: incompatible type + +d1.exists(path) # E: incompatible type +d1.exists(b"...") # E: incompatible type + +d1.open(path, "r") # E: incompatible type +d1.open(b"...", encoding="utf8") # E: incompatible type +d1.open(None, newline="/n") # E: incompatible type diff --git a/numpy/typing/tests/data/pass/multiarray.py b/numpy/typing/tests/data/pass/multiarray.py new file mode 100644 index 000000000000..e2b5d16a04a2 --- /dev/null +++ b/numpy/typing/tests/data/pass/multiarray.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from typing import Any +import numpy as np + +AR_f8: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) +AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) + +b_f8 = np.broadcast(AR_f8) +b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8) + +next(b_f8) +next(b_i8_f8_f8) + +b_f8.reset() +b_i8_f8_f8.reset() + +b_f8.index +b_i8_f8_f8.index + +b_f8.iters +b_i8_f8_f8.iters + +b_f8.nd +b_i8_f8_f8.nd + +b_f8.ndim +b_i8_f8_f8.ndim + +b_f8.numiter +b_i8_f8_f8.numiter + +b_f8.shape +b_i8_f8_f8.shape + +b_f8.size +b_i8_f8_f8.size diff --git a/numpy/typing/tests/data/reveal/datasource.py b/numpy/typing/tests/data/reveal/datasource.py new file mode 100644 index 000000000000..245ac7649e96 --- /dev/null +++ b/numpy/typing/tests/data/reveal/datasource.py @@ -0,0 +1,21 @@ +from pathlib import Path +import numpy as np + +path1: Path +path2: str + +d1 = np.DataSource(path1) +d2 = np.DataSource(path2) +d3 = np.DataSource(None) + +reveal_type(d1.abspath("...")) # E: str +reveal_type(d2.abspath("...")) # E: str +reveal_type(d3.abspath("...")) # E: str + +reveal_type(d1.exists("...")) # E: bool +reveal_type(d2.exists("...")) # E: bool +reveal_type(d3.exists("...")) # E: bool + 
+reveal_type(d1.open("...", "r")) # E: IO[Any] +reveal_type(d2.open("...", encoding="utf8")) # E: IO[Any] +reveal_type(d3.open("...", newline="/n")) # E: IO[Any] diff --git a/numpy/typing/tests/data/reveal/multiarray.py b/numpy/typing/tests/data/reveal/multiarray.py new file mode 100644 index 000000000000..33e9ede7cc54 --- /dev/null +++ b/numpy/typing/tests/data/reveal/multiarray.py @@ -0,0 +1,35 @@ +from typing import Any +import numpy as np + +AR_f8: np.ndarray[Any, np.dtype[np.float64]] +AR_i8: np.ndarray[Any, np.dtype[np.int64]] + +b_f8 = np.broadcast(AR_f8) +b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8) + +reveal_type(next(b_f8)) # E: tuple[Any] +reveal_type(next(b_i8_f8_f8)) # E: tuple[Any] + +reveal_type(b_f8.reset()) # E: None +reveal_type(b_i8_f8_f8.reset()) # E: None + +reveal_type(b_f8.index) # E: int +reveal_type(b_i8_f8_f8.index) # E: int + +reveal_type(b_f8.iters) # E: tuple[numpy.flatiter[Any]] +reveal_type(b_i8_f8_f8.iters) # E: tuple[numpy.flatiter[Any]] + +reveal_type(b_f8.nd) # E: int +reveal_type(b_i8_f8_f8.nd) # E: int + +reveal_type(b_f8.ndim) # E: int +reveal_type(b_i8_f8_f8.ndim) # E: int + +reveal_type(b_f8.numiter) # E: int +reveal_type(b_i8_f8_f8.numiter) # E: int + +reveal_type(b_f8.shape) # E: tuple[builtins.int] +reveal_type(b_i8_f8_f8.shape) # E: tuple[builtins.int] + +reveal_type(b_f8.size) # E: int +reveal_type(b_i8_f8_f8.size) # E: int From 237b47d9bc78873d4f17399e730a593895f35424 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 17:01:33 +0200 Subject: [PATCH 1158/1270] STY: Various misc style changes --- numpy/__init__.pyi | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b708b99ab7a7..837db4cd2e23 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1718,21 +1718,32 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def itemset(self, __value: Any) -> None: ... @overload def itemset(self, __item: _ShapeLike, __value: Any) -> None: ... + @overload def resize(self, __new_shape: _ShapeLike, *, refcheck: bool = ...) -> None: ... @overload def resize(self, *new_shape: SupportsIndex, refcheck: bool = ...) -> None: ... + def setflags( self, write: bool = ..., align: bool = ..., uic: bool = ... ) -> None: ... + def squeeze( - self: _ArraySelf, axis: Union[SupportsIndex, Tuple[SupportsIndex, ...]] = ... + self: _ArraySelf, + axis: Union[SupportsIndex, Tuple[SupportsIndex, ...]] = ..., ) -> _ArraySelf: ... - def swapaxes(self: _ArraySelf, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArraySelf: ... + + def swapaxes( + self: _ArraySelf, + axis1: SupportsIndex, + axis2: SupportsIndex, + ) -> _ArraySelf: ... + @overload def transpose(self: _ArraySelf, __axes: _ShapeLike) -> _ArraySelf: ... @overload def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ... + def argpartition( self, kth: _ArrayLikeInt_co, @@ -1740,18 +1751,22 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., ) -> ndarray: ... + def diagonal( self: _ArraySelf, offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., ) -> _ArraySelf: ... + @overload def dot(self, b: ArrayLike, out: None = ...) -> ndarray: ... @overload def dot(self, b: ArrayLike, out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + # `nonzero()` is deprecated for 0d arrays/generics def nonzero(self) -> Tuple[ndarray, ...]: ... 
+ def partition( self, kth: _ArrayLikeInt_co, @@ -1759,26 +1774,37 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., ) -> None: ... + # `put` is technically available to `generic`, # but is pointless as `generic`s are immutable def put( - self, ind: _ArrayLikeInt_co, v: ArrayLike, mode: _ModeKind = ... + self, + ind: _ArrayLikeInt_co, + v: ArrayLike, + mode: _ModeKind = ..., ) -> None: ... + def searchsorted( self, # >= 1D array v: ArrayLike, side: _SortSide = ..., sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array ) -> ndarray: ... + def setfield( - self, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = ... + self, + val: ArrayLike, + dtype: DTypeLike, + offset: SupportsIndex = ..., ) -> None: ... + def sort( self, axis: SupportsIndex = ..., kind: Optional[_SortKind] = ..., order: Union[None, str, Sequence[str]] = ..., ) -> None: ... + @overload def trace( self, # >= 2D array @@ -1829,17 +1855,18 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): axis: Optional[SupportsIndex] = ..., ) -> ndarray[Any, _DType_co]: ... - # Many of these special methods are irrelevant currently, since protocols - # aren't supported yet. That said, I'm adding them for completeness. - # https://docs.python.org/3/reference/datamodel.html def __int__(self) -> int: ... + def __float__(self) -> float: ... + def __complex__(self) -> complex: ... + + def __index__(self) -> int: ... + def __len__(self) -> int: ... def __setitem__(self, key, value): ... def __iter__(self) -> Any: ... def __contains__(self, key) -> bool: ... - def __index__(self) -> int: ... # The last overload is for catching recursive objects whose # nesting is too deep. From f0226a94cd2f017d6bd5d7a9b56b965ed6d7aee6 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 13 May 2021 14:32:12 +0200 Subject: [PATCH 1159/1270] MAINT: Remove `ndarray`-based typevars Typevars imply that both the `ndarray` class as well as its shape and dtype are preserved between the input and output. This is not the case for the relevant methods, as they modify the arrays shape. --- numpy/__init__.pyi | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 837db4cd2e23..fadda4aab23f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1729,15 +1729,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): ) -> None: ... def squeeze( - self: _ArraySelf, + self, axis: Union[SupportsIndex, Tuple[SupportsIndex, ...]] = ..., - ) -> _ArraySelf: ... + ) -> ndarray[Any, _DType_co]: ... def swapaxes( - self: _ArraySelf, + self, axis1: SupportsIndex, axis2: SupportsIndex, - ) -> _ArraySelf: ... + ) -> ndarray[Any, _DType_co]: ... @overload def transpose(self: _ArraySelf, __axes: _ShapeLike) -> _ArraySelf: ... @@ -1753,11 +1753,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): ) -> ndarray: ... def diagonal( - self: _ArraySelf, + self, offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - ) -> _ArraySelf: ... + ) -> ndarray[Any, _DType_co]: ... @overload def dot(self, b: ArrayLike, out: None = ...) -> ndarray: ... 
From d18083bbf97e9b8cf353a046517d08686b8aaf03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marco=20Aur=C3=A9lio=20da=20Costa?= Date: Thu, 13 May 2021 14:43:33 -0300 Subject: [PATCH 1160/1270] BUG: Update coordinates in PyArray_ITER_GOTO1D (#18987) * Update coordinates on PyArray_ITER_GOTO1D * BUG: update base iterator coordinates on PyArrayNeighborhoodIterObject creation * TST: Implemented test for PyArray_ITER_GOTO1D * Clarify reason for calling PyArray_ITER_GOTO1D in comment line Closes gh-18971 --- numpy/core/include/numpy/ndarraytypes.h | 2 ++ numpy/core/src/multiarray/_multiarray_tests.c.src | 15 ++++++++++++--- numpy/core/src/multiarray/iterators.c | 3 +++ numpy/core/tests/test_multiarray.py | 7 +++++++ 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index dacb720226bb..0f24d814f0fe 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -1225,6 +1225,8 @@ struct PyArrayIterObject_tag { _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ __npy_i++) { \ + _PyAIT(it)->coordinates[__npy_i] = \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]); \ _PyAIT(it)->dataptr += \ (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ * _PyAIT(it)->strides[__npy_i]; \ diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src index febcc8512750..b2c6d1461b71 100644 --- a/numpy/core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/core/src/multiarray/_multiarray_tests.c.src @@ -87,7 +87,7 @@ static int copy_@name@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni * For each point in itx, copy the current neighborhood into an array which * is appended at the output list */ - for (i = 0; i < itx->size; ++i) { + for (i = itx->index; i < itx->size; ++i) { PyArrayNeighborhoodIter_Reset(niterx); for (j = 0; j < PyArray_NDIM(itx->ao); ++j) { @@ -130,7 +130,7 @@ static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni * For each point in itx, copy the current neighborhood into an array which * is appended at the output list */ - for (i = 0; i < itx->size; ++i) { + for (i = itx->index; i < itx->size; ++i) { PyArrayNeighborhoodIter_Reset(niterx); for (j = 0; j < PyArray_NDIM(itx->ao); ++j) { @@ -161,10 +161,11 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args) PyArrayObject *ax, *afill; PyArrayIterObject *itx; int i, typenum, mode, st; + Py_ssize_t idxstart = 0; npy_intp bounds[NPY_MAXDIMS*2]; PyArrayNeighborhoodIterObject *niterx; - if (!PyArg_ParseTuple(args, "OOOi", &x, &b, &fill, &mode)) { + if (!PyArg_ParseTuple(args, "OOOi|n", &x, &b, &fill, &mode, &idxstart)) { return NULL; } @@ -224,6 +225,14 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args) } } + if (idxstart >= itx->size || idxstart < 0) { + PyErr_SetString(PyExc_ValueError, + "start index not compatible with x input"); + goto clean_itx; + } + + PyArray_ITER_GOTO1D((PyArrayIterObject*)itx, idxstart); + niterx = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( (PyArrayIterObject*)itx, bounds, mode, afill); if (niterx == NULL) { diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 3ebd4c858974..4a42d742e821 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -1772,6 +1772,9 @@ PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp 
*bounds, */ x->contiguous = 0; + /* update the coordinates if x was contiguous before */ + PyArray_ITER_GOTO1D(x, x->index); + PyArrayNeighborhoodIter_Reset(ret); return (PyObject*)ret; diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 5c91cb9ea534..6df5e8467a10 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -6946,6 +6946,13 @@ def test_simple2d(self, dt): x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) assert_array_equal(l, r) + # Test with start in the middle + r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2) + assert_array_equal(l, r) + def test_mirror2d(self, dt): x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), From fb4ea911607a81ef6633b857122e34229075705e Mon Sep 17 00:00:00 2001 From: Marta Lemanczyk Date: Thu, 13 May 2021 19:48:38 +0200 Subject: [PATCH 1161/1270] DOC: Improve description of array scalar in glossary (#18965) Distinction of array scalars and 0-dimension arrays. See issue #17744. Co-authored-by: Jasmin Classen <39028086+jasmincl@users.noreply.github.com> --- doc/source/glossary.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst index 57e3bcf9229d..aa2dc13dff1f 100644 --- a/doc/source/glossary.rst +++ b/doc/source/glossary.rst @@ -166,9 +166,10 @@ Glossary array scalar - For uniformity in handling operands, NumPy treats - a :doc:`scalar ` as an array of zero - dimension. + An :doc:`array scalar ` is an instance of the types/classes float32, float64, + etc.. For uniformity in handling operands, NumPy treats a scalar as + an array of zero dimension. In contrast, a 0-dimensional array is an :doc:`ndarray ` instance + containing precisely one value. axis From 8aaee9fe3015a07c20cc0db8d45ba424b892fc96 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 12 May 2021 21:54:57 +0200 Subject: [PATCH 1162/1270] ENH: Add dtype support to 7 `ndarray`/`generic` methods * `flatten` * `reshape` * `ravel` * `searchsorted` * `nonzero` * `dot` * `argpartition` --- numpy/__init__.pyi | 73 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 57 insertions(+), 16 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fadda4aab23f..5c552e9018b5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -42,6 +42,7 @@ from numpy.typing import ( _ComplexLike_co, _TD64Like_co, _NumberLike_co, + _ScalarLike_co, # `number` precision NBitBase, @@ -1239,19 +1240,9 @@ class _ArrayOrScalarCommon: def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ... def dump(self, file: str) -> None: ... def dumps(self) -> bytes: ... - def flatten(self, order: _OrderKACF = ...) -> ndarray: ... def getfield( self: _ArraySelf, dtype: DTypeLike, offset: int = ... ) -> _ArraySelf: ... - def ravel(self, order: _OrderKACF = ...) -> ndarray: ... - @overload - def reshape( - self, __shape: _ShapeLike, *, order: _OrderACF = ... - ) -> ndarray: ... - @overload - def reshape( - self, *shape: SupportsIndex, order: _OrderACF = ... - ) -> ndarray: ... def tobytes(self, order: _OrderKACF = ...) -> bytes: ... # NOTE: `tostring()` is deprecated and therefore excluded # def tostring(self, order=...): ... 
@@ -1750,7 +1741,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): axis: Optional[SupportsIndex] = ..., kind: _PartitionKind = ..., order: Union[None, str, Sequence[str]] = ..., - ) -> ndarray: ... + ) -> ndarray[Any, dtype[intp]]: ... def diagonal( self, @@ -1759,13 +1750,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): axis2: SupportsIndex = ..., ) -> ndarray[Any, _DType_co]: ... + # 1D + 1D returns a scalar; + # all other with at least 1 non-0D array return an ndarray. @overload - def dot(self, b: ArrayLike, out: None = ...) -> ndarray: ... + def dot(self, b: _ScalarLike_co, out: None = ...) -> ndarray: ... @overload - def dot(self, b: ArrayLike, out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + @overload + def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ... # `nonzero()` is deprecated for 0d arrays/generics - def nonzero(self) -> Tuple[ndarray, ...]: ... + def nonzero(self) -> Tuple[ndarray[Any, dtype[intp]], ...]: ... def partition( self, @@ -1784,12 +1779,20 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): mode: _ModeKind = ..., ) -> None: ... + @overload + def searchsorted( # type: ignore[misc] + self, # >= 1D array + v: _ScalarLike_co, # 0D array-like + side: _SortSide = ..., + sorter: Optional[_ArrayLikeInt_co] = ..., + ) -> intp: ... + @overload def searchsorted( self, # >= 1D array v: ArrayLike, side: _SortSide = ..., - sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array - ) -> ndarray: ... + sorter: Optional[_ArrayLikeInt_co] = ..., + ) -> ndarray[Any, dtype[intp]]: ... def setfield( self, @@ -1855,6 +1858,25 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): axis: Optional[SupportsIndex] = ..., ) -> ndarray[Any, _DType_co]: ... + def flatten( + self, + order: _OrderKACF = ..., + ) -> ndarray[Any, _DType_co]: ... + + def ravel( + self, + order: _OrderKACF = ..., + ) -> ndarray[Any, _DType_co]: ... + + @overload + def reshape( + self, __shape: _ShapeLike, *, order: _OrderACF = ... + ) -> ndarray[Any, _DType_co]: ... + @overload + def reshape( + self, *shape: SupportsIndex, order: _OrderACF = ... + ) -> ndarray[Any, _DType_co]: ... + def __int__(self) -> int: ... def __float__(self) -> float: ... @@ -2854,6 +2876,25 @@ class generic(_ArrayOrScalarCommon): axis: Optional[SupportsIndex] = ..., ) -> ndarray[Any, dtype[_ScalarType]]: ... + def flatten( + self: _ScalarType, + order: _OrderKACF = ..., + ) -> ndarray[Any, dtype[_ScalarType]]: ... + + def ravel( + self: _ScalarType, + order: _OrderKACF = ..., + ) -> ndarray[Any, dtype[_ScalarType]]: ... + + @overload + def reshape( + self: _ScalarType, __shape: _ShapeLike, *, order: _OrderACF = ... + ) -> ndarray[Any, dtype[_ScalarType]]: ... + @overload + def reshape( + self: _ScalarType, *shape: SupportsIndex, order: _OrderACF = ... + ) -> ndarray[Any, dtype[_ScalarType]]: ... + def squeeze( self: _ScalarType, axis: Union[Literal[0], Tuple[()]] = ... ) -> _ScalarType: ... 
From a1a9ffe96fd4b83135c9898c1e096e3fe2193fb6 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 13 May 2021 14:55:16 +0200 Subject: [PATCH 1163/1270] ENH: Add dtype-support to 4 `ndarray` magic methods * `__int__` * `__float__` * `__complex__` * `__index__` --- numpy/__init__.pyi | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 5c552e9018b5..610343dc7f6a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1877,13 +1877,22 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): self, *shape: SupportsIndex, order: _OrderACF = ... ) -> ndarray[Any, _DType_co]: ... - def __int__(self) -> int: ... + # Dispatch to the underlying `generic` via protocols + def __int__( + self: ndarray[Any, dtype[SupportsInt]], # type: ignore[type-var] + ) -> int: ... - def __float__(self) -> float: ... + def __float__( + self: ndarray[Any, dtype[SupportsFloat]], # type: ignore[type-var] + ) -> float: ... - def __complex__(self) -> complex: ... + def __complex__( + self: ndarray[Any, dtype[SupportsComplex]], # type: ignore[type-var] + ) -> complex: ... - def __index__(self) -> int: ... + def __index__( + self: ndarray[Any, dtype[SupportsIndex]], # type: ignore[type-var] + ) -> int: ... def __len__(self) -> int: ... def __setitem__(self, key, value): ... @@ -2987,6 +2996,11 @@ class object_(generic): def real(self: _ArraySelf) -> _ArraySelf: ... @property def imag(self: _ArraySelf) -> _ArraySelf: ... + # The 3 protocols below may or may not raise, + # depending on the underlying object + def __int__(self) -> int: ... + def __float__(self) -> float: ... + def __complex__(self) -> complex: ... object0 = object_ From cc7d3c41d2757079e06e88e7ed0b6f909f323c00 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 13 May 2021 19:57:38 +0200 Subject: [PATCH 1164/1270] DOC: Add a note about `np.timedelta64`, units and scalar conversion --- numpy/__init__.pyi | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 610343dc7f6a..13010ad1292b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3125,6 +3125,9 @@ class timedelta64(generic): __value: Union[None, int, _CharLike_co, dt.timedelta, timedelta64] = ..., __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ..., ) -> None: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` def __int__(self) -> int: ... def __float__(self) -> float: ... def __complex__(self) -> complex: ... 
From d71e1e33d38aecefab0d00d395c75acd66c9744c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 13 May 2021 19:58:06 +0200 Subject: [PATCH 1165/1270] TST: Add typing tests for the newly (re-)annotated `ndarray`/`generic` methods --- numpy/typing/tests/data/fail/ndarray_misc.py | 9 ++ numpy/typing/tests/data/pass/ndarray_misc.py | 21 +++++ numpy/typing/tests/data/pass/scalars.py | 62 ++++++++++--- .../typing/tests/data/reveal/ndarray_misc.py | 34 ++++++- numpy/typing/tests/data/reveal/scalars.py | 93 ++++++++++++------- 5 files changed, 168 insertions(+), 51 deletions(-) diff --git a/numpy/typing/tests/data/fail/ndarray_misc.py b/numpy/typing/tests/data/fail/ndarray_misc.py index 1e1496bfecca..653b9267b32a 100644 --- a/numpy/typing/tests/data/fail/ndarray_misc.py +++ b/numpy/typing/tests/data/fail/ndarray_misc.py @@ -6,9 +6,13 @@ """ +from typing import Any import numpy as np f8: np.float64 +AR_f8: np.ndarray[Any, np.dtype[np.float64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] +AR_b: np.ndarray[Any, np.dtype[np.bool_]] f8.argpartition(0) # E: has no attribute f8.diagonal() # E: has no attribute @@ -19,3 +23,8 @@ f8.setfield(2, np.float64) # E: has no attribute f8.sort() # E: has no attribute f8.trace() # E: has no attribute + +AR_M.__int__() # E: Invalid self argument +AR_M.__float__() # E: Invalid self argument +AR_M.__complex__() # E: Invalid self argument +AR_b.__index__() # E: Invalid self argument diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index ba10ce974506..62024603c949 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -8,7 +8,9 @@ from __future__ import annotations +import operator from typing import cast, Any + import numpy as np class SubClass(np.ndarray): ... @@ -162,3 +164,22 @@ class SubClass(np.ndarray): ... 
A.item(0) C.item(0) + +A.ravel() +C.ravel() + +A.flatten() +C.flatten() + +A.reshape(1) +C.reshape(3) + +int(np.array(1.0, dtype=np.float64)) +int(np.array("1", dtype=np.str_)) + +float(np.array(1.0, dtype=np.float64)) +float(np.array("1", dtype=np.str_)) + +complex(np.array(1.0, dtype=np.float64)) + +operator.index(np.array(1, dtype=np.int64)) diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index f7965e1c54f6..b258db49fd7c 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -4,6 +4,14 @@ import pytest import numpy as np +b = np.bool_() +u8 = np.uint64() +i8 = np.int64() +f8 = np.float64() +c16 = np.complex128() +U = np.str_() +S = np.bytes_() + # Construction class D: @@ -205,18 +213,42 @@ def __float__(self) -> float: np.clongfloat() np.longcomplex() -np.bool_().item() -np.int_().item() -np.uint64().item() -np.float32().item() -np.complex128().item() -np.str_().item() -np.bytes_().item() - -np.bool_().tolist() -np.int_().tolist() -np.uint64().tolist() -np.float32().tolist() -np.complex128().tolist() -np.str_().tolist() -np.bytes_().tolist() +b.item() +i8.item() +u8.item() +f8.item() +c16.item() +U.item() +S.item() + +b.tolist() +i8.tolist() +u8.tolist() +f8.tolist() +c16.tolist() +U.tolist() +S.tolist() + +b.ravel() +i8.ravel() +u8.ravel() +f8.ravel() +c16.ravel() +U.ravel() +S.ravel() + +b.flatten() +i8.flatten() +u8.flatten() +f8.flatten() +c16.flatten() +U.flatten() +S.flatten() + +b.reshape(1) +i8.reshape(1) +u8.reshape(1) +f8.reshape(1) +c16.reshape(1) +U.reshape(1) +S.reshape(1) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py index 8c45eb6f3cda..ecc322251564 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.py +++ b/numpy/typing/tests/data/reveal/ndarray_misc.py @@ -6,14 +6,17 @@ """ +import operator from typing import Any + import numpy as np class SubClass(np.ndarray): ... f8: np.float64 -AR_f8: np.ndarray[Any, np.dtype[np.float64]] B: SubClass +AR_f8: np.ndarray[Any, np.dtype[np.float64]] +AR_i8: np.ndarray[Any, np.dtype[np.int64]] AR_U: np.ndarray[Any, np.dtype[np.str_]] reveal_type(f8.all()) # E: numpy.bool_ @@ -137,19 +140,40 @@ class SubClass(np.ndarray): ... 
reveal_type(AR_f8.var(keepdims=True)) # E: Any reveal_type(AR_f8.var(out=B)) # E: SubClass -reveal_type(AR_f8.argpartition([0])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.argpartition([0])) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] reveal_type(AR_f8.diagonal()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] -reveal_type(AR_f8.dot(1)) # E: Any +reveal_type(AR_f8.dot(1)) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.dot([1])) # E: Any reveal_type(AR_f8.dot(1, out=B)) # E: SubClass -reveal_type(AR_f8.nonzero()) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(AR_f8.nonzero()) # E: tuple[numpy.ndarray[Any, numpy.dtype[{intp}]]] -reveal_type(AR_f8.searchsorted([1])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.searchsorted(1)) # E: {intp} +reveal_type(AR_f8.searchsorted([1])) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] reveal_type(AR_f8.trace()) # E: Any reveal_type(AR_f8.trace(out=B)) # E: SubClass reveal_type(AR_f8.item()) # E: float reveal_type(AR_U.item()) # E: str + +reveal_type(AR_f8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_U.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] + +reveal_type(AR_f8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_U.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] + +reveal_type(AR_f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] + +reveal_type(int(AR_f8)) # E: int +reveal_type(int(AR_U)) # E: int + +reveal_type(float(AR_f8)) # E: float +reveal_type(float(AR_U)) # E: float + +reveal_type(complex(AR_f8)) # E: complex + +reveal_type(operator.index(AR_i8)) # E: int diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py index f50c46c43d50..d98388422e07 100644 --- a/numpy/typing/tests/data/reveal/scalars.py +++ b/numpy/typing/tests/data/reveal/scalars.py @@ -1,28 +1,35 @@ import numpy as np -x = np.complex64(3 + 2j) +b: np.bool_ +u8: np.uint64 +i8: np.int64 +f8: np.float64 +c8: np.complex64 +c16: np.complex128 +U: np.str_ +S: np.bytes_ -reveal_type(x.real) # E: {float32} -reveal_type(x.imag) # E: {float32} +reveal_type(c8.real) # E: {float32} +reveal_type(c8.imag) # E: {float32} -reveal_type(x.real.real) # E: {float32} -reveal_type(x.real.imag) # E: {float32} +reveal_type(c8.real.real) # E: {float32} +reveal_type(c8.real.imag) # E: {float32} -reveal_type(x.itemsize) # E: int -reveal_type(x.shape) # E: Tuple[] -reveal_type(x.strides) # E: Tuple[] +reveal_type(c8.itemsize) # E: int +reveal_type(c8.shape) # E: Tuple[] +reveal_type(c8.strides) # E: Tuple[] -reveal_type(x.ndim) # E: Literal[0] -reveal_type(x.size) # E: Literal[1] +reveal_type(c8.ndim) # E: Literal[0] +reveal_type(c8.size) # E: Literal[1] -reveal_type(x.squeeze()) # E: {complex64} -reveal_type(x.byteswap()) # E: {complex64} -reveal_type(x.transpose()) # E: {complex64} +reveal_type(c8.squeeze()) # E: {complex64} +reveal_type(c8.byteswap()) # E: {complex64} +reveal_type(c8.transpose()) # E: {complex64} -reveal_type(x.dtype) # E: numpy.dtype[{complex64}] +reveal_type(c8.dtype) # E: numpy.dtype[{complex64}] -reveal_type(np.complex64().real) # E: {float32} -reveal_type(np.complex128().imag) # E: {float64} +reveal_type(c8.real) # E: {float32} +reveal_type(c16.imag) # E: {float64} reveal_type(np.unicode_('foo')) # E: numpy.str_ reveal_type(np.str0('foo')) # E: numpy.str_ @@ -68,18 +75,42 @@ reveal_type(np.clongfloat()) # E: {clongdouble} reveal_type(np.longcomplex()) # E: 
{clongdouble} -reveal_type(np.bool_().item()) # E: bool -reveal_type(np.int_().item()) # E: int -reveal_type(np.uint64().item()) # E: int -reveal_type(np.float32().item()) # E: float -reveal_type(np.complex128().item()) # E: complex -reveal_type(np.str_().item()) # E: str -reveal_type(np.bytes_().item()) # E: bytes - -reveal_type(np.bool_().tolist()) # E: bool -reveal_type(np.int_().tolist()) # E: int -reveal_type(np.uint64().tolist()) # E: int -reveal_type(np.float32().tolist()) # E: float -reveal_type(np.complex128().tolist()) # E: complex -reveal_type(np.str_().tolist()) # E: str -reveal_type(np.bytes_().tolist()) # E: bytes +reveal_type(b.item()) # E: bool +reveal_type(i8.item()) # E: int +reveal_type(u8.item()) # E: int +reveal_type(f8.item()) # E: float +reveal_type(c16.item()) # E: complex +reveal_type(U.item()) # E: str +reveal_type(S.item()) # E: bytes + +reveal_type(b.tolist()) # E: bool +reveal_type(i8.tolist()) # E: int +reveal_type(u8.tolist()) # E: int +reveal_type(f8.tolist()) # E: float +reveal_type(c16.tolist()) # E: complex +reveal_type(U.tolist()) # E: str +reveal_type(S.tolist()) # E: bytes + +reveal_type(b.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(u8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]] +reveal_type(f8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(c16.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]] +reveal_type(U.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(S.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(b.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(u8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]] +reveal_type(f8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(c16.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]] +reveal_type(U.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(S.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(b.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(u8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]] +reveal_type(f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(c16.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]] +reveal_type(U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(S.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] From 853dba482e6e71d3763a005b70dfd95d8857dddc Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Thu, 13 May 2021 13:45:23 -0700 Subject: [PATCH 1166/1270] BUG: fix potential use of null pointer --- numpy/core/src/multiarray/nditer_constr.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c index 2197fe798be5..a0154e4744f6 100644 --- a/numpy/core/src/multiarray/nditer_constr.c +++ b/numpy/core/src/multiarray/nditer_constr.c @@ -594,8 +594,10 @@ NpyIter_Copy(NpyIter *iter) if (buffers[iop] == NULL) { out_of_memory = 1; } - if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) { - memset(buffers[iop], '\0', itemsize*buffersize); + else { + if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) { + memset(buffers[iop], '\0', 
itemsize*buffersize); + } } } } From be34216364b4199a2ffebec9d61ecf7bf542d021 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Thu, 13 May 2021 14:00:05 -0700 Subject: [PATCH 1167/1270] BUG: fix variable misprint --- numpy/core/src/multiarray/_multiarray_tests.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src index b2c6d1461b71..a63d91e59d97 100644 --- a/numpy/core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/core/src/multiarray/_multiarray_tests.c.src @@ -416,7 +416,7 @@ test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args) niterx2 = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( (PyArrayIterObject*)niterx1, bounds, mode2, NULL); - if (niterx1 == NULL) { + if (niterx2 == NULL) { goto clean_niterx1; } From 52e9a51fa0b1290c265a6e33eccf6beafca26c87 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Thu, 13 May 2021 14:05:21 -0700 Subject: [PATCH 1168/1270] BUG: fix variable misprint --- numpy/core/src/umath/_umath_tests.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src index 7cc74a4f35ed..2e79d377e3d4 100644 --- a/numpy/core/src/umath/_umath_tests.c.src +++ b/numpy/core/src/umath/_umath_tests.c.src @@ -532,7 +532,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args) } if (f->core_dim_ixs != NULL) { core_dim_ixs = PyTuple_New(core_num_ixs); - if (core_num_dims == NULL) { + if (core_dim_ixs == NULL) { goto fail; } for (i = 0; i < core_num_ixs; i++) { From a8b20ec40d624549cbb0cba77cc53e39f2741835 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Thu, 13 May 2021 20:15:10 -0700 Subject: [PATCH 1169/1270] BUG: fix ValueError in PyArray_Std on win_amd64 --- numpy/core/src/multiarray/calculation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c index 7308c6b714ec..de67b35b53d6 100644 --- a/numpy/core/src/multiarray/calculation.c +++ b/numpy/core/src/multiarray/calculation.c @@ -392,7 +392,7 @@ __New_PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, else { val = PyArray_DIM(arrnew,i); } - PyTuple_SET_ITEM(newshape, i, PyLong_FromLong((long)val)); + PyTuple_SET_ITEM(newshape, i, PyLong_FromSsize_t(val)); } arr2 = (PyArrayObject *)PyArray_Reshape(arr1, newshape); Py_DECREF(arr1); From b0e37849a62255158bff16bbd4f5f90ab1aef638 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 13 May 2021 15:55:46 -0700 Subject: [PATCH 1170/1270] MAINT: Small cleanups in `PyArray_NewFromDescr_int` I admit, the start of this was that I noticed that `memcpy` is using a significant chunk of time in the array creation. `memcpy` is typically optimized for fairly large copies, and seems to be just slow here, replacing it gives a ~10% boost in array creation. We can get another small boost by skipping the contiguity flag reset. But mostly, I think this cleans up the code a bit :). 
--- numpy/core/src/multiarray/ctors.c | 127 ++++++++++++++---------------- 1 file changed, 61 insertions(+), 66 deletions(-) diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 7907fb93046a..671ce49e4187 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -665,13 +665,11 @@ PyArray_NewFromDescr_int( int allow_emptystring) { PyArrayObject_fields *fa; - int i; npy_intp nbytes; - if ((unsigned int)nd > (unsigned int)NPY_MAXDIMS) { + if (nd > NPY_MAXDIMS || nd < 0) { PyErr_Format(PyExc_ValueError, - "number of dimensions must be within [0, %d]", - NPY_MAXDIMS); + "number of dimensions must be within [0, %d]", NPY_MAXDIMS); Py_DECREF(descr); return NULL; } @@ -718,39 +716,6 @@ PyArray_NewFromDescr_int( } } - /* Check dimensions and multiply them to nbytes */ - for (i = 0; i < nd; i++) { - npy_intp dim = dims[i]; - - if (dim == 0) { - /* - * Compare to PyArray_OverflowMultiplyList that - * returns 0 in this case. - */ - continue; - } - - if (dim < 0) { - PyErr_SetString(PyExc_ValueError, - "negative dimensions are not allowed"); - Py_DECREF(descr); - return NULL; - } - - /* - * Care needs to be taken to avoid integer overflow when - * multiplying the dimensions together to get the total size of the - * array. - */ - if (npy_mul_with_overflow_intp(&nbytes, nbytes, dim)) { - PyErr_SetString(PyExc_ValueError, - "array is too big; `arr.size * arr.dtype.itemsize` " - "is larger than the maximum possible size."); - Py_DECREF(descr); - return NULL; - } - } - fa = (PyArrayObject_fields *) subtype->tp_alloc(subtype, 0); if (fa == NULL) { Py_DECREF(descr); @@ -786,26 +751,57 @@ PyArray_NewFromDescr_int( goto fail; } fa->strides = fa->dimensions + nd; - if (nd) { - memcpy(fa->dimensions, dims, sizeof(npy_intp)*nd); + + /* Copy dimensions, check them, and find total array size `nbytes` */ + for (int i = 0; i < nd; i++) { + fa->dimensions[i] = dims[i]; + + if (fa->dimensions[i] == 0) { + /* + * Compare to PyArray_OverflowMultiplyList that + * returns 0 in this case. + */ + continue; + } + + if (fa->dimensions[i] < 0) { + PyErr_SetString(PyExc_ValueError, + "negative dimensions are not allowed"); + goto fail; + } + + /* + * Care needs to be taken to avoid integer overflow when multiplying + * the dimensions together to get the total size of the array. + */ + if (npy_mul_with_overflow_intp(&nbytes, nbytes, fa->dimensions[i])) { + PyErr_SetString(PyExc_ValueError, + "array is too big; `arr.size * arr.dtype.itemsize` " + "is larger than the maximum possible size."); + goto fail; + } } - if (strides == NULL) { /* fill it in */ + + /* Fill the strides (or copy them if they were passed in) */ + if (strides == NULL) { + /* fill the strides and set the contiguity flags */ _array_fill_strides(fa->strides, dims, nd, descr->elsize, flags, &(fa->flags)); } else { - /* - * we allow strides even when we create - * the memory, but be careful with this... 
- */ - if (nd) { - memcpy(fa->strides, strides, sizeof(npy_intp)*nd); + /* User to provided strides (user is responsible for correctness) */ + for (int i = 0; i < nd; i++) { + fa->strides[i] = strides[i]; } + /* Since the strides were passed in must update contiguity */ + PyArray_UpdateFlags((PyArrayObject *)fa, + NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS); } } else { - fa->dimensions = fa->strides = NULL; - fa->flags |= NPY_ARRAY_F_CONTIGUOUS; + fa->dimensions = NULL; + fa->strides = NULL; + fa->flags |= NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS; } if (data == NULL) { @@ -844,12 +840,11 @@ PyArray_NewFromDescr_int( fa->data = data; /* - * always update the flags to get the right CONTIGUOUS, ALIGN properties - * not owned data and input strides may not be aligned and on some - * platforms (debian sparc) malloc does not provide enough alignment for - * long double types + * Always update the aligned flag. Not owned data or input strides may + * not be aligned. Also on some platforms (debian sparc) malloc does not + * provide enough alignment for long double types. */ - PyArray_UpdateFlags((PyArrayObject *)fa, NPY_ARRAY_UPDATE_ALL); + PyArray_UpdateFlags((PyArrayObject *)fa, NPY_ARRAY_ALIGNED); /* Set the base object. It's important to do it here so that * __array_finalize__ below receives it @@ -862,15 +857,20 @@ PyArray_NewFromDescr_int( } /* - * call the __array_finalize__ - * method if a subtype. - * If obj is NULL, then call method with Py_None + * call the __array_finalize__ method if a subtype was requested. + * If obj is NULL use Py_None for the Python callback. */ - if ((subtype != &PyArray_Type)) { - PyObject *res, *func, *args; + if (subtype != &PyArray_Type) { + PyObject *res, *func; func = PyObject_GetAttr((PyObject *)fa, npy_ma_str_array_finalize); - if (func && func != Py_None) { + if (func == NULL) { + goto fail; + } + else if (func == Py_None) { + Py_DECREF(func); + } + else { if (PyCapsule_CheckExact(func)) { /* A C-function is stored here */ PyArray_FinalizeFunc *cfunc; @@ -884,14 +884,10 @@ PyArray_NewFromDescr_int( } } else { - args = PyTuple_New(1); if (obj == NULL) { - obj=Py_None; + obj = Py_None; } - Py_INCREF(obj); - PyTuple_SET_ITEM(args, 0, obj); - res = PyObject_Call(func, args, NULL); - Py_DECREF(args); + res = PyObject_CallFunctionObjArgs(func, obj, NULL); Py_DECREF(func); if (res == NULL) { goto fail; @@ -901,7 +897,6 @@ PyArray_NewFromDescr_int( } } } - else Py_XDECREF(func); } return (PyObject *)fa; From 7e2b82db684bc6c6ad4ea7774fb538abee8e635f Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 13 May 2021 20:59:28 -0700 Subject: [PATCH 1171/1270] TST: Test error when `obj.__array_finalize__` errors When the getattr call failed, the error was not propagated correctly --- numpy/core/tests/test_multiarray.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 6df5e8467a10..25dd76256663 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -5982,6 +5982,7 @@ def __array_finalize__(self, obj): res = dat.var(1) assert_(res.info == dat.info) + class TestVdot: def test_basic(self): dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] @@ -8707,6 +8708,15 @@ def __array_finalize__(self, obj): a = np.array(1).view(SavesBase) assert_(a.saved_base is a.base) + def test_bad_finalize(self): + class BadAttributeArray(np.ndarray): + @property + def __array_finalize__(self): + raise RuntimeError("boohoo!") + + with 
pytest.raises(RuntimeError, match="boohoo!"): + np.arange(10).view(BadAttributeArray) + def test_lifetime_on_error(self): # gh-11237 class RaisesInFinalize(np.ndarray): From 9ae45756b7651f6e5c85b8a84612e6bc12011b61 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 14 May 2021 17:05:05 +0200 Subject: [PATCH 1172/1270] Revert "BUG: Update coordinates in PyArray_ITER_GOTO1D (#18987)" This reverts commit d18083bbf97e9b8cf353a046517d08686b8aaf03. --- numpy/core/include/numpy/ndarraytypes.h | 2 -- numpy/core/src/multiarray/_multiarray_tests.c.src | 15 +++------------ numpy/core/src/multiarray/iterators.c | 3 --- numpy/core/tests/test_multiarray.py | 7 ------- 4 files changed, 3 insertions(+), 24 deletions(-) diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 0f24d814f0fe..dacb720226bb 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -1225,8 +1225,6 @@ struct PyArrayIterObject_tag { _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ __npy_i++) { \ - _PyAIT(it)->coordinates[__npy_i] = \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]); \ _PyAIT(it)->dataptr += \ (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ * _PyAIT(it)->strides[__npy_i]; \ diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src index a63d91e59d97..ba10573d9888 100644 --- a/numpy/core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/core/src/multiarray/_multiarray_tests.c.src @@ -87,7 +87,7 @@ static int copy_@name@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni * For each point in itx, copy the current neighborhood into an array which * is appended at the output list */ - for (i = itx->index; i < itx->size; ++i) { + for (i = 0; i < itx->size; ++i) { PyArrayNeighborhoodIter_Reset(niterx); for (j = 0; j < PyArray_NDIM(itx->ao); ++j) { @@ -130,7 +130,7 @@ static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni * For each point in itx, copy the current neighborhood into an array which * is appended at the output list */ - for (i = itx->index; i < itx->size; ++i) { + for (i = 0; i < itx->size; ++i) { PyArrayNeighborhoodIter_Reset(niterx); for (j = 0; j < PyArray_NDIM(itx->ao); ++j) { @@ -161,11 +161,10 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args) PyArrayObject *ax, *afill; PyArrayIterObject *itx; int i, typenum, mode, st; - Py_ssize_t idxstart = 0; npy_intp bounds[NPY_MAXDIMS*2]; PyArrayNeighborhoodIterObject *niterx; - if (!PyArg_ParseTuple(args, "OOOi|n", &x, &b, &fill, &mode, &idxstart)) { + if (!PyArg_ParseTuple(args, "OOOi", &x, &b, &fill, &mode)) { return NULL; } @@ -225,14 +224,6 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args) } } - if (idxstart >= itx->size || idxstart < 0) { - PyErr_SetString(PyExc_ValueError, - "start index not compatible with x input"); - goto clean_itx; - } - - PyArray_ITER_GOTO1D((PyArrayIterObject*)itx, idxstart); - niterx = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( (PyArrayIterObject*)itx, bounds, mode, afill); if (niterx == NULL) { diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 4a42d742e821..3ebd4c858974 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -1772,9 +1772,6 @@ PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp *bounds, */ x->contiguous = 0; - /* update the 
coordinates if x was contiguous before */ - PyArray_ITER_GOTO1D(x, x->index); - PyArrayNeighborhoodIter_Reset(ret); return (PyObject*)ret; diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 6df5e8467a10..5c91cb9ea534 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -6946,13 +6946,6 @@ def test_simple2d(self, dt): x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) assert_array_equal(l, r) - # Test with start in the middle - r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), - np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2) - assert_array_equal(l, r) - def test_mirror2d(self, dt): x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), From 7e6bcfc00e840a5b894a029f73160fc873fc7931 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 14 May 2021 09:30:00 -0700 Subject: [PATCH 1173/1270] Apply suggestions from code review Co-authored-by: Matti Picus --- numpy/core/include/numpy/ndarraytypes.h | 4 ++-- numpy/core/src/multiarray/common_dtype.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 14e3adfdbb8f..d1acfdf26235 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -858,8 +858,8 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); #if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD /* - * Dual use the ENSUREARRAY flag, to indicate that this was a converted - * python float, int, or complex. + * Dual use of the ENSUREARRAY flag, to indicate that this was converted + * from a python float, int, or complex. * An array using this flag must be a temporary array that can never * leave the C internals of NumPy. Even if it does, ENSUREARRAY is * absolutely safe to abuse, since it already is a base class array :). diff --git a/numpy/core/src/multiarray/common_dtype.c b/numpy/core/src/multiarray/common_dtype.c index 0db368df1e1f..a88085f6f084 100644 --- a/numpy/core/src/multiarray/common_dtype.c +++ b/numpy/core/src/multiarray/common_dtype.c @@ -16,7 +16,7 @@ * operations. This is unfortunately surprisingly complicated to get right * due to the value based logic NumPy uses and the fact that NumPy has * no clear (non-transitive) type promotion hierarchy. - * Unlike most languages `int32 + float2 -> float64` instead of `float2`. + * Unlike most languages `int32 + float32 -> float64` instead of `float32`. * The other complicated thing is value-based-promotion, which means that * in many cases a Python 1, may end up as an `int8` or `uint8`. 
* From 23dad3aae724fc748dc19d0f6501d5795f87583a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 14 May 2021 12:51:52 -0700 Subject: [PATCH 1174/1270] TST: Add complex-complex promotions to test parametrization --- numpy/core/tests/test_dtype.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 3625c2f62961..01c865d59883 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -1102,6 +1102,13 @@ class TestPromotion: (np.longdouble(np.nextafter(1.7e308, 0.)), np.complex128), # Additionally use "nextafter" so the cast can't round down: (np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble), + # repeat for complex scalars: + (np.complex64(2), np.complex64), + (np.clongdouble(2), np.complex64), + # Base of the double value to sidestep any rounding issues: + (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), np.complex128), + # Additionally use "nextafter" so the cast can't round down: + (np.clongdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble), ]) def test_complex_other_value_based(self, other, expected): # This would change if we modify the value based promotion @@ -1120,6 +1127,9 @@ def test_complex_other_value_based(self, other, expected): (np.float32, np.complex64), (np.float64, np.complex128), (np.longdouble, np.clongdouble), + (np.complex64, np.complex64), + (np.complex128, np.complex128), + (np.clongdouble, np.clongdouble), ]) def test_complex_scalar_value_based(self, other, expected): # This would change if we modify the value based promotion From a527b5a3245a7ef9136ff7a4f46304e1550a33cf Mon Sep 17 00:00:00 2001 From: 8bitmp3 <19637339+8bitmp3@users.noreply.github.com> Date: Sat, 15 May 2021 17:22:58 +0100 Subject: [PATCH 1175/1270] Fix "NumPy" <- "numpy" in NumPy Fundamentals - Indexing --- doc/source/user/basics.indexing.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst index 9545bb78c5a5..7ee61b13007f 100644 --- a/doc/source/user/basics.indexing.rst +++ b/doc/source/user/basics.indexing.rst @@ -11,7 +11,7 @@ Indexing :ref:`Indexing routines ` Array indexing refers to any use of the square brackets ([]) to index -array values. There are many options to indexing, which give numpy +array values. There are many options to indexing, which give NumPy indexing great power, but with power comes some complexity and the potential for confusion. This section is just an overview of the various options and issues related to indexing. Aside from single @@ -39,7 +39,7 @@ and accepts negative indices for indexing from the end of the array. :: >>> x[-2] 8 -Unlike lists and tuples, numpy arrays support multidimensional indexing +Unlike lists and tuples, NumPy arrays support multidimensional indexing for multidimensional arrays. That means that it is not necessary to separate each dimension's index into its own set of square brackets. :: @@ -285,7 +285,7 @@ with four True elements to select rows from a 3-D array of shape [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]) -For further details, consult the numpy reference documentation on array indexing. +For further details, consult the NumPy reference documentation on array indexing. 
Combining index arrays with slices ================================== From ad9f4709130d21bc3ff80a31086b821c6abbb668 Mon Sep 17 00:00:00 2001 From: HowJmay Date: Sun, 16 May 2021 22:22:02 +0800 Subject: [PATCH 1176/1270] DOC: Add comment for ifdef macro guard --- numpy/core/src/common/simd/avx2/math.h | 2 +- numpy/core/src/common/simd/avx512/math.h | 2 +- numpy/core/src/common/simd/neon/math.h | 2 +- numpy/core/src/common/simd/sse/math.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/math.h b/numpy/core/src/common/simd/avx2/math.h index 22659e21b1c0..9460183df5bb 100644 --- a/numpy/core/src/common/simd/avx2/math.h +++ b/numpy/core/src/common/simd/avx2/math.h @@ -105,4 +105,4 @@ NPY_FINLINE npyv_s64 npyv_min_s64(npyv_s64 a, npyv_s64 b) return _mm256_blendv_epi8(a, b, _mm256_cmpgt_epi64(a, b)); } -#endif +#endif // _NPY_SIMD_AVX2_MATH_H diff --git a/numpy/core/src/common/simd/avx512/math.h b/numpy/core/src/common/simd/avx512/math.h index b75651962675..0141396d06a3 100644 --- a/numpy/core/src/common/simd/avx512/math.h +++ b/numpy/core/src/common/simd/avx512/math.h @@ -112,4 +112,4 @@ NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b) #define npyv_min_u64 _mm512_min_epu64 #define npyv_min_s64 _mm512_min_epi64 -#endif +#endif // _NPY_SIMD_AVX512_MATH_H diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h index a5508c96b78d..ced82d1de65f 100644 --- a/numpy/core/src/common/simd/neon/math.h +++ b/numpy/core/src/common/simd/neon/math.h @@ -153,4 +153,4 @@ NPY_FINLINE npyv_s64 npyv_min_s64(npyv_s64 a, npyv_s64 b) return vbslq_s64(npyv_cmplt_s64(a, b), a, b); } -#endif // _NPY_SIMD_SSE_MATH_H +#endif // _NPY_SIMD_NEON_MATH_H diff --git a/numpy/core/src/common/simd/sse/math.h b/numpy/core/src/common/simd/sse/math.h index 1f82b546f276..97d35afc5e04 100644 --- a/numpy/core/src/common/simd/sse/math.h +++ b/numpy/core/src/common/simd/sse/math.h @@ -143,4 +143,4 @@ NPY_FINLINE npyv_s64 npyv_min_s64(npyv_s64 a, npyv_s64 b) return npyv_select_s64(npyv_cmplt_s64(a, b), a, b); } -#endif +#endif // _NPY_SIMD_SSE_MATH_H From 848b296c43be65a5d4fa3bd3c171bbcb35fd0a00 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 16 May 2021 09:19:50 -0700 Subject: [PATCH 1177/1270] TST: Add test for rational paths (also delete those lines I forgot to delete) --- numpy/core/src/multiarray/array_coercion.c | 2 +- numpy/core/src/multiarray/array_coercion.h | 3 --- numpy/core/tests/test_dtype.py | 21 +++++++++++++++++++++ 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index 0c4195d51f4b..550e4813a0b8 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -205,7 +205,7 @@ _PyArray_MapPyTypeToDType( * @param pytype Python Type to look up * @return DType, None if it a known non-scalar, or NULL if an unknown object. */ -NPY_NO_EXPORT NPY_INLINE PyArray_DTypeMeta * +static NPY_INLINE PyArray_DTypeMeta * npy_discover_dtype_from_pytype(PyTypeObject *pytype) { PyObject *DType; diff --git a/numpy/core/src/multiarray/array_coercion.h b/numpy/core/src/multiarray/array_coercion.h index d4ebeaf14ef9..c5ccad225549 100644 --- a/numpy/core/src/multiarray/array_coercion.h +++ b/numpy/core/src/multiarray/array_coercion.h @@ -15,9 +15,6 @@ typedef struct coercion_cache_obj { int depth; /* the dimension at which this object was found. 
*/ } coercion_cache_obj; -NPY_NO_EXPORT PyArray_DTypeMeta * -npy_discover_dtype_from_pytype(PyTypeObject *pytype); - NPY_NO_EXPORT int _PyArray_MapPyTypeToDType( PyArray_DTypeMeta *DType, PyTypeObject *pytype, npy_bool userdef); diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 01c865d59883..b248d015b545 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -1141,6 +1141,27 @@ def test_complex_scalar_value_based(self, other, expected): res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype assert res == expected + def test_complex_pyscalar_promote_rational(self): + with pytest.raises(TypeError, + match=r".* do not have a common DType"): + np.result_type(1j, rational) + + with pytest.raises(TypeError, + match=r".* no common DType exists for the given inputs"): + np.result_type(1j, rational(1, 2)) + + @pytest.mark.parametrize(["other", "expected"], + [(1, rational), (1., np.float64)]) + def test_float_int_pyscalar_promote_rational(self, other, expected): + # Note that rationals are a bit akward as they promote with float64 + # or default ints, but not float16 or uint8/int8 (which looks + # inconsistent here) + with pytest.raises(TypeError, + match=r".* do not have a common DType"): + np.result_type(other, rational) + + assert np.result_type(other, rational(1, 2)) == expected + @pytest.mark.parametrize(["dtypes", "expected"], [([np.uint16, np.int16, np.float16], np.float32), ([np.uint16, np.int8, np.float16], np.float32), From 2b7b2561f3c6c6fb0c80fed4ee503647636f14bc Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 16 May 2021 15:26:51 -0700 Subject: [PATCH 1178/1270] TST: Add more tests to cover abstract code paths (hopefully) --- numpy/core/tests/test_dtype.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index b248d015b545..8a6b7dcd5f95 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -1162,10 +1162,21 @@ def test_float_int_pyscalar_promote_rational(self, other, expected): assert np.result_type(other, rational(1, 2)) == expected - @pytest.mark.parametrize(["dtypes", "expected"], - [([np.uint16, np.int16, np.float16], np.float32), + @pytest.mark.parametrize(["dtypes", "expected"], [ + # These promotions are not associative/commutative: + ([np.uint16, np.int16, np.float16], np.float32), ([np.uint16, np.int8, np.float16], np.float32), - ([np.uint8, np.int16, np.float16], np.float32)]) + ([np.uint8, np.int16, np.float16], np.float32), + # The following promotions are not ambiguous, but cover code + # paths of abstract promotion (no particular logic being tested) + ([1, 1, np.float64], np.float64), + ([1, 1., np.complex128], np.complex128), + ([1, 1j, np.float64], np.complex128), + ([1., 1., np.int64], np.float64), + ([1., 1j, np.float64], np.complex128), + ([1j, 1j, np.float64], np.complex128), + ([1, True, np.bool_], np.int_), + ]) def test_permutations_do_not_influence_result(self, dtypes, expected): # Tests that most permutations do not influence the result. In the # above some uint and int combintations promote to a larger integer From 99bb6a48ced27e5ce7bbf1fc7b606c5bd840af3a Mon Sep 17 00:00:00 2001 From: Atharva-Vidwans <57441520+Atharva-Vidwans@users.noreply.github.com> Date: Mon, 17 May 2021 08:03:23 +0530 Subject: [PATCH 1179/1270] DOC: Improve cumsum documentation (#18895) Add note and example potential differences with np.sum. 
Co-authored-by: Matti Picus --- numpy/core/fromnumeric.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 5c7b3372b0f4..65a42eb1ee72 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -2498,6 +2498,10 @@ def cumsum(a, axis=None, dtype=None, out=None): Arithmetic is modular when using integer types, and no error is raised on overflow. + ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point + values since ``sum`` may use a pairwise summation routine, reducing + the roundoff-error. See `sum` for more information. + Examples -------- >>> a = np.array([[1,2,3], [4,5,6]]) @@ -2516,6 +2520,14 @@ def cumsum(a, axis=None, dtype=None, out=None): array([[ 1, 3, 6], [ 4, 9, 15]]) + ``cumsum(b)[-1]`` may not be equal to ``sum(b)`` + + >>> b = np.array([1, 2e-9, 3e-9] * 1000000) + >>> b.cumsum()[-1] + 1000000.0050045159 + >>> b.sum() + 1000000.0050000029 + """ return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out) From cbd1960a1f8d29f323e45a30e72b9c08c332e5d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 May 2021 06:41:16 +0000 Subject: [PATCH 1180/1270] MAINT: Bump pytest-cov from 2.11.1 to 2.12.0 Bumps [pytest-cov](https://github.com/pytest-dev/pytest-cov) from 2.11.1 to 2.12.0. - [Release notes](https://github.com/pytest-dev/pytest-cov/releases) - [Changelog](https://github.com/pytest-dev/pytest-cov/blob/master/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest-cov/compare/v2.11.1...v2.12.0) Signed-off-by: dependabot[bot] --- test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_requirements.txt b/test_requirements.txt index bba27a7b1912..e23ec0333299 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -4,7 +4,7 @@ setuptools<49.2.0 hypothesis==6.12.0 pytest==6.2.4 pytz==2021.1 -pytest-cov==2.11.1 +pytest-cov==2.12.0 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' # for numpy.random.test.test_extending cffi From 3ee6573d8f775a35b83be4199bd6fe996d05dbd0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 May 2021 06:41:38 +0000 Subject: [PATCH 1181/1270] MAINT: Bump sphinx from 4.0.0 to 4.0.1 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 4.0.0 to 4.0.1. 
- [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/4.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v4.0.0...v4.0.1) Signed-off-by: dependabot[bot] --- doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 2606f6270dd3..3403668549b5 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,4 +1,4 @@ -sphinx==4.0.0 +sphinx==4.0.1 numpydoc==1.1.0 ipython scipy From 72cc4dbf336dd5d1db11821110279f5ff8e320a8 Mon Sep 17 00:00:00 2001 From: zoj <44142765+zoj613@users.noreply.github.com> Date: Mon, 17 May 2021 17:22:54 +0200 Subject: [PATCH 1182/1270] DOC: Clarify minimum numpy version needed to use random c-api --- doc/source/reference/random/c-api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/random/c-api.rst b/doc/source/reference/random/c-api.rst index 7c7996115331..15a8947ebc94 100644 --- a/doc/source/reference/random/c-api.rst +++ b/doc/source/reference/random/c-api.rst @@ -3,7 +3,7 @@ C API for random .. currentmodule:: numpy.random -Access to various distributions below is available via Cython or C-wrapper +Since version ``1.19.0``, access to various distributions below is available via Cython or C-wrapper libraries like CFFI. All the functions accept a :c:type:`bitgen_t` as their first argument. To access these from Cython or C, you must link with the ``npyrandom`` library which is part of the NumPy distribution, located in From b129f292c58e36bbb4d9d393087e9236d01cf51a Mon Sep 17 00:00:00 2001 From: zoj <44142765+zoj613@users.noreply.github.com> Date: Mon, 17 May 2021 17:45:47 +0200 Subject: [PATCH 1183/1270] DOC: Use versionadded tag instead of inline comment to specify minimum version needed for random c-api --- doc/source/reference/random/c-api.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/source/reference/random/c-api.rst b/doc/source/reference/random/c-api.rst index 15a8947ebc94..de403ce98673 100644 --- a/doc/source/reference/random/c-api.rst +++ b/doc/source/reference/random/c-api.rst @@ -3,7 +3,9 @@ C API for random .. currentmodule:: numpy.random -Since version ``1.19.0``, access to various distributions below is available via Cython or C-wrapper +.. versionadded:: 1.19.0 + +Access to various distributions below is available via Cython or C-wrapper libraries like CFFI. All the functions accept a :c:type:`bitgen_t` as their first argument. 
To access these from Cython or C, you must link with the ``npyrandom`` library which is part of the NumPy distribution, located in From 39110f34ebc5e445ebea59833e621e4404c38be4 Mon Sep 17 00:00:00 2001 From: Bas van Beek <43369155+BvB93@users.noreply.github.com> Date: Tue, 18 May 2021 22:53:29 +0200 Subject: [PATCH 1184/1270] DEP: Deprecate 4 `ndarray.ctypes` methods (#19031) * DEP: Deprecate 4 `ndarray.ctypes` methods * `get_data` * `get_shape` * `get_strides` * `get_as_parameter` * TST: Add deprecation tests for 4 `ndarray.ctypes` methods * DOC: Add a release note for the `ndarray.ctypes` method deprecation * MAINT: Deprecate via `__getattr__` instead of `__getattribute__ Co-Authored-By: Sebastian Berg * MAINT: Deprecate the methods via proper function definitions Co-Authored-By: Eric Wieser <425260+eric-wieser@users.noreply.github.com> * DOC: Added a missing "the" * DOC: typo fix: property -> properties Co-Authored-By: Eric Wieser <425260+eric-wieser@users.noreply.github.com> Co-authored-by: Sebastian Berg Co-authored-by: Eric Wieser <425260+eric-wieser@users.noreply.github.com> --- .../upcoming_changes/19031.deprecation.rst | 12 +++++ doc/source/user/misc.rst | 11 ++--- numpy/core/_internal.py | 45 ++++++++++++++++--- numpy/core/tests/test_deprecations.py | 21 ++++++++- 4 files changed, 78 insertions(+), 11 deletions(-) create mode 100644 doc/release/upcoming_changes/19031.deprecation.rst diff --git a/doc/release/upcoming_changes/19031.deprecation.rst b/doc/release/upcoming_changes/19031.deprecation.rst new file mode 100644 index 000000000000..de92e18dfd45 --- /dev/null +++ b/doc/release/upcoming_changes/19031.deprecation.rst @@ -0,0 +1,12 @@ +Four `ndarray.ctypes` methods have been deprecated +-------------------------------------------------- +Four methods of the `ndarray.ctypes` object have been deprecated, +as they are (undocumentated) implementation artifacts of their respective +properties. + +The methods in question are: + +* ``_ctypes.get_data`` (use ``_ctypes.data`` instead) +* ``_ctypes.get_shape`` (use ``_ctypes.shape`` instead) +* ``_ctypes.get_strides`` (use ``_ctypes.strides`` instead) +* ``_ctypes.get_as_parameter`` (use ``_ctypes._as_parameter_`` instead) diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst index 031ce4efac9a..f0a7f5e4c896 100644 --- a/doc/source/user/misc.rst +++ b/doc/source/user/misc.rst @@ -149,11 +149,12 @@ Only a survey of the choices. Little detail on how each works. - good numpy support: arrays have all these in their ctypes attribute: :: - a.ctypes.data a.ctypes.get_strides - a.ctypes.data_as a.ctypes.shape - a.ctypes.get_as_parameter a.ctypes.shape_as - a.ctypes.get_data a.ctypes.strides - a.ctypes.get_shape a.ctypes.strides_as + a.ctypes.data + a.ctypes.data_as + a.ctypes.shape + a.ctypes.shape_as + a.ctypes.strides + a.ctypes.strides_as - Minuses: diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 449926f586ac..4eebbaea3256 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -8,6 +8,7 @@ import re import sys import platform +import warnings from .multiarray import dtype, array, ndarray try: @@ -350,11 +351,45 @@ def _as_parameter_(self): """ return self.data_as(ctypes.c_void_p) - # kept for compatibility - get_data = data.fget - get_shape = shape.fget - get_strides = strides.fget - get_as_parameter = _as_parameter_.fget + # Numpy 1.21.0, 2021-05-18 + + def get_data(self): + """Deprecated getter for the `_ctypes.data` property. + + .. 
deprecated:: 1.21 + """ + warnings.warn('"get_data" is deprecated. Use "data" instead', + DeprecationWarning, stacklevel=2) + return self.data + + def get_shape(self): + """Deprecated getter for the `_ctypes.shape` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_shape" is deprecated. Use "shape" instead', + DeprecationWarning, stacklevel=2) + return self.shape + + def get_strides(self): + """Deprecated getter for the `_ctypes.strides` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_strides" is deprecated. Use "strides" instead', + DeprecationWarning, stacklevel=2) + return self.strides + + def get_as_parameter(self): + """Deprecated getter for the `_ctypes._as_parameter_` property. + + .. deprecated:: 1.21 + """ + warnings.warn( + '"get_as_parameter" is deprecated. Use "_as_parameter_" instead', + DeprecationWarning, stacklevel=2, + ) + return self._as_parameter_ def _newnames(datatype, order): diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index ed168837463f..6eab2505d5dc 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -906,7 +906,7 @@ def test_deprecated(self): class TestNoseDecoratorsDeprecated(_DeprecationTestCase): class DidntSkipException(Exception): pass - + def test_slow(self): def _test_slow(): @np.testing.dec.slow @@ -1172,3 +1172,22 @@ def test_deprecated(self): self.assert_deprecated(lambda: np.equal(1, 1, dtype=object)) self.assert_deprecated( lambda: np.equal(1, 1, sig=(None, None, object))) + + +class TestCtypesGetter(_DeprecationTestCase): + # Deprecated 2021-05-18, Numpy 1.21.0 + warning_cls = DeprecationWarning + ctypes = np.array([1]).ctypes + + @pytest.mark.parametrize( + "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"] + ) + def test_deprecated(self, name: str) -> None: + func = getattr(self.ctypes, name) + self.assert_deprecated(lambda: func()) + + @pytest.mark.parametrize( + "name", ["data", "shape", "strides", "_as_parameter_"] + ) + def test_not_deprecated(self, name: str) -> None: + self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) From bdb0e44f931eaedfb45153fd1680f5980b3a459e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 3 Mar 2021 17:18:28 +0100 Subject: [PATCH 1185/1270] ENH: Improve the annotations of `np.core._internal` --- numpy/__init__.pyi | 2 +- numpy/core/_internal.pyi | 45 ++++++++++++++++++++++++++++------------ 2 files changed, 33 insertions(+), 14 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2d23f926de55..f23dee9e2d2a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1668,7 +1668,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ... @property - def ctypes(self) -> _ctypes: ... + def ctypes(self) -> _ctypes[int]: ... @property def shape(self) -> _Shape: ... 
@shape.setter diff --git a/numpy/core/_internal.pyi b/numpy/core/_internal.pyi index 1b3889e51cfe..c24fc2ca3cc4 100644 --- a/numpy/core/_internal.pyi +++ b/numpy/core/_internal.pyi @@ -1,18 +1,37 @@ -from typing import Any +from typing import Any, TypeVar, Type, overload, Optional, Generic +import ctypes as ct -# TODO: add better annotations when ctypes is stubbed out +from numpy import ndarray -class _ctypes: +_CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast` +_CT = TypeVar("_CT", bound=ct._CData) +_PT = TypeVar("_PT", bound=Optional[int]) + +# TODO: Let the likes of `shape_as` and `strides_as` return `None` +# for 0D arrays once we've got shape-support + +class _ctypes(Generic[_PT]): + @overload + def __new__(cls, array: ndarray[Any, Any], ptr: None = ...) -> _ctypes[None]: ... + @overload + def __new__(cls, array: ndarray[Any, Any], ptr: _PT) -> _ctypes[_PT]: ... + + # NOTE: In practice `shape` and `strides` return one of the concrete + # platform dependant c_int64-based array-types (`c_int`, `c_long` or + # `c_longlong`) @property - def data(self) -> int: ... + def data(self) -> _PT: ... @property - def shape(self) -> Any: ... + def shape(self) -> ct.Array[ct.c_int64]: ... @property - def strides(self) -> Any: ... - def data_as(self, obj: Any) -> Any: ... - def shape_as(self, obj: Any) -> Any: ... - def strides_as(self, obj: Any) -> Any: ... - def get_data(self) -> int: ... - def get_shape(self) -> Any: ... - def get_strides(self) -> Any: ... - def get_as_parameter(self) -> Any: ... + def strides(self) -> ct.Array[ct.c_int64]: ... + @property + def _as_parameter_(self) -> ct.c_void_p: ... + + def data_as(self, obj: Type[_CastT]) -> _CastT: ... + def shape_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ... + def strides_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ... + def get_data(self) -> _PT: ... + def get_shape(self) -> ct.Array[ct.c_int64]: ... + def get_strides(self) -> ct.Array[ct.c_int64]: ... + def get_as_parameter(self) -> ct.c_void_p: ... From e8f6438ecb0b3803b03e18f96cbf67c626823405 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 18 May 2021 14:07:54 +0200 Subject: [PATCH 1186/1270] TST: Add typing tests for `np.core._internal._ctypes` --- numpy/typing/tests/data/reveal/ndarray_misc.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py index ecc322251564..1e91240060ce 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.py +++ b/numpy/typing/tests/data/reveal/ndarray_misc.py @@ -7,6 +7,7 @@ """ import operator +import ctypes as ct from typing import Any import numpy as np @@ -19,6 +20,22 @@ class SubClass(np.ndarray): ... 
AR_i8: np.ndarray[Any, np.dtype[np.int64]] AR_U: np.ndarray[Any, np.dtype[np.str_]] +ctypes_obj = AR_f8.ctypes + +reveal_type(ctypes_obj.data) # E: int +reveal_type(ctypes_obj.shape) # E: ctypes.Array[ctypes.c_int64] +reveal_type(ctypes_obj.strides) # E: ctypes.Array[ctypes.c_int64] +reveal_type(ctypes_obj._as_parameter_) # E: ctypes.c_void_p + +reveal_type(ctypes_obj.get_data()) # E: int +reveal_type(ctypes_obj.get_shape()) # E: ctypes.Array[ctypes.c_int64] +reveal_type(ctypes_obj.get_strides()) # E: ctypes.Array[ctypes.c_int64] +reveal_type(ctypes_obj.get_as_parameter()) # E: ctypes.c_void_p + +reveal_type(ctypes_obj.data_as(ct.c_void_p)) # E: ctypes.c_void_p +reveal_type(ctypes_obj.shape_as(ct.c_longlong)) # E: ctypes.Array[ctypes.c_longlong] +reveal_type(ctypes_obj.strides_as(ct.c_ubyte)) # E: ctypes.Array[ctypes.c_ubyte] + reveal_type(f8.all()) # E: numpy.bool_ reveal_type(AR_f8.all()) # E: numpy.bool_ reveal_type(AR_f8.all(axis=0)) # E: Any From 829e04c8aefedccab39727b22d5c5cb31deceff6 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 18 May 2021 16:54:40 +0200 Subject: [PATCH 1187/1270] DOC: Clarify a comment related to `int_ptr_t` Co-Authored-By: Eric Wieser <425260+eric-wieser@users.noreply.github.com> --- numpy/core/_internal.pyi | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/core/_internal.pyi b/numpy/core/_internal.pyi index c24fc2ca3cc4..6c449b1dcd93 100644 --- a/numpy/core/_internal.pyi +++ b/numpy/core/_internal.pyi @@ -17,8 +17,10 @@ class _ctypes(Generic[_PT]): def __new__(cls, array: ndarray[Any, Any], ptr: _PT) -> _ctypes[_PT]: ... # NOTE: In practice `shape` and `strides` return one of the concrete - # platform dependant c_int64-based array-types (`c_int`, `c_long` or - # `c_longlong`) + # platform dependant array-types (`c_int`, `c_long` or `c_longlong`) + # corresponding to C's `int_ptr_t`, as determined by `_getintp_ctype` + # TODO: Hook this in to the mypy plugin so that a more appropiate + # `ctypes._SimpleCData[int]` sub-type can be returned @property def data(self) -> _PT: ... @property From b4d1d3525040f2db7ad6d0be4fde73d4dc9a8590 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 18 May 2021 22:56:41 +0200 Subject: [PATCH 1188/1270] DEP: Remove annotations for 4 deprecated methods --- numpy/core/_internal.pyi | 4 ---- numpy/typing/tests/data/fail/ndarray_misc.py | 7 +++++++ numpy/typing/tests/data/reveal/ndarray_misc.py | 5 ----- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/numpy/core/_internal.pyi b/numpy/core/_internal.pyi index 6c449b1dcd93..1ef1c9fa1564 100644 --- a/numpy/core/_internal.pyi +++ b/numpy/core/_internal.pyi @@ -33,7 +33,3 @@ class _ctypes(Generic[_PT]): def data_as(self, obj: Type[_CastT]) -> _CastT: ... def shape_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ... def strides_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ... - def get_data(self) -> _PT: ... - def get_shape(self) -> ct.Array[ct.c_int64]: ... - def get_strides(self) -> ct.Array[ct.c_int64]: ... - def get_as_parameter(self) -> ct.c_void_p: ... 
diff --git a/numpy/typing/tests/data/fail/ndarray_misc.py b/numpy/typing/tests/data/fail/ndarray_misc.py index 653b9267b32a..cf3fedc45f2c 100644 --- a/numpy/typing/tests/data/fail/ndarray_misc.py +++ b/numpy/typing/tests/data/fail/ndarray_misc.py @@ -14,6 +14,13 @@ AR_M: np.ndarray[Any, np.dtype[np.datetime64]] AR_b: np.ndarray[Any, np.dtype[np.bool_]] +ctypes_obj = AR_f8.ctypes + +reveal_type(ctypes_obj.get_data()) # E: has no attribute +reveal_type(ctypes_obj.get_shape()) # E: has no attribute +reveal_type(ctypes_obj.get_strides()) # E: has no attribute +reveal_type(ctypes_obj.get_as_parameter()) # E: has no attribute + f8.argpartition(0) # E: has no attribute f8.diagonal() # E: has no attribute f8.dot(1) # E: has no attribute diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py index 1e91240060ce..ea01b7aa4e43 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.py +++ b/numpy/typing/tests/data/reveal/ndarray_misc.py @@ -27,11 +27,6 @@ class SubClass(np.ndarray): ... reveal_type(ctypes_obj.strides) # E: ctypes.Array[ctypes.c_int64] reveal_type(ctypes_obj._as_parameter_) # E: ctypes.c_void_p -reveal_type(ctypes_obj.get_data()) # E: int -reveal_type(ctypes_obj.get_shape()) # E: ctypes.Array[ctypes.c_int64] -reveal_type(ctypes_obj.get_strides()) # E: ctypes.Array[ctypes.c_int64] -reveal_type(ctypes_obj.get_as_parameter()) # E: ctypes.c_void_p - reveal_type(ctypes_obj.data_as(ct.c_void_p)) # E: ctypes.c_void_p reveal_type(ctypes_obj.shape_as(ct.c_longlong)) # E: ctypes.Array[ctypes.c_longlong] reveal_type(ctypes_obj.strides_as(ct.c_ubyte)) # E: ctypes.Array[ctypes.c_ubyte] From aab61472c0208345ca20e3d9b1d3797e657687b8 Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Wed, 19 May 2021 11:00:07 +1200 Subject: [PATCH 1189/1270] MAINT: Python3 classes do not need to inherit from object --- doc/neps/nep-0013-ufunc-overrides.rst | 4 ++-- numpy/core/tests/test_cpu_features.py | 2 +- numpy/core/tests/test_regression.py | 2 +- numpy/core/tests/test_umath.py | 4 ++-- numpy/core/tests/test_umath_complex.py | 6 +++--- numpy/distutils/ccompiler_opt.py | 2 +- numpy/distutils/tests/test_ccompiler_opt.py | 2 +- tools/travis-sorter.py | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/doc/neps/nep-0013-ufunc-overrides.rst b/doc/neps/nep-0013-ufunc-overrides.rst index 4a647e9d380c..ceb8b23e9587 100644 --- a/doc/neps/nep-0013-ufunc-overrides.rst +++ b/doc/neps/nep-0013-ufunc-overrides.rst @@ -478,7 +478,7 @@ are not compatible, i.e., implementations should be something like:: except AttributeError: return False - class ArrayLike(object): + class ArrayLike: ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... 
@@ -516,7 +516,7 @@ does not know how to deal with arrays and ufuncs, and thus has set ``__array_ufunc__`` to :obj:`None`, but does know how to do multiplication:: - class MyObject(object): + class MyObject: __array_ufunc__ = None def __init__(self, value): self.value = value diff --git a/numpy/core/tests/test_cpu_features.py b/numpy/core/tests/test_cpu_features.py index bafa5a05fa27..6ef8a02c0e02 100644 --- a/numpy/core/tests/test_cpu_features.py +++ b/numpy/core/tests/test_cpu_features.py @@ -48,7 +48,7 @@ def assert_features_equal(actual, desired, fname): "%s" ) % (fname, actual, desired, error_report)) -class AbstractTest(object): +class AbstractTest: features = [] features_groups = {} features_map = {} diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 25198bba9ea1..dbfb75c9a897 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -2462,7 +2462,7 @@ class T: np.array([T()]) def test_2d__array__shape(self): - class T(object): + class T: def __array__(self): return np.ndarray(shape=(0,0)) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 572e736a0774..345aeb6c022a 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -2203,7 +2203,7 @@ def __array_prepare__(self, arr, context=None): def test_array_too_many_args(self): - class A(object): + class A: def __array__(self, dtype, context): return np.zeros(1) @@ -3220,7 +3220,7 @@ def __new__(subtype, shape): assert_equal(a+a, a) -class TestFrompyfunc(object): +class TestFrompyfunc: def test_identity(self): def mul(a, b): diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py index 90a349da1e05..c051cd61b660 100644 --- a/numpy/core/tests/test_umath_complex.py +++ b/numpy/core/tests/test_umath_complex.py @@ -541,7 +541,7 @@ def check_complex_value(f, x1, y1, x2, y2, exact=True): else: assert_almost_equal(f(z1), z2) -class TestSpecialComplexAVX(object): +class TestSpecialComplexAVX: @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) def test_array(self, stride, astype): @@ -568,7 +568,7 @@ def test_array(self, stride, astype): with np.errstate(invalid='ignore'): assert_equal(np.square(arr[::stride]), sq_true[::stride]) -class TestComplexAbsoluteAVX(object): +class TestComplexAbsoluteAVX: @pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19]) @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) @@ -579,7 +579,7 @@ def test_array(self, arraysize, stride, astype): assert_equal(np.abs(arr[::stride]), abs_true[::stride]) # Testcase taken as is from https://github.com/numpy/numpy/issues/16660 -class TestComplexAbsoluteMixedDTypes(object): +class TestComplexAbsoluteMixedDTypes: @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate']) diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index 316d3a338142..ae1e6a180625 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -850,7 +850,7 @@ def cache_wrap_me(self, *args, **kwargs): return ccb return cache_wrap_me -class _CCompiler(object): +class _CCompiler: """A helper class for `CCompilerOpt` containing all utilities that related to the fundamental compiler's functions. 
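The cleanup in this patch is purely cosmetic: in Python 3 every class already has ``object`` at the root of its MRO, so the explicit base adds nothing. A quick check::

    class WithBase(object):
        pass

    class WithoutBase:
        pass

    assert WithBase.__mro__ == (WithBase, object)
    assert WithoutBase.__mro__ == (WithoutBase, object)
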
diff --git a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py index d2b0a4c581ad..9c54ed66bbd9 100644 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ b/numpy/distutils/tests/test_ccompiler_opt.py @@ -73,7 +73,7 @@ def dist_info(self): def dist_log(*args, stderr=False): pass -class _Test_CCompilerOpt(object): +class _Test_CCompilerOpt: arch = None # x86_64 cc = None # gcc diff --git a/tools/travis-sorter.py b/tools/travis-sorter.py index c13204f7e018..416f9fe761d0 100755 --- a/tools/travis-sorter.py +++ b/tools/travis-sorter.py @@ -103,7 +103,7 @@ def summarise(jobs): print(" " + "-" * end) -class Job(object): +class Job: def __init__(self, length): global count self.id = count From f9cf25e601489dcc3c758e9f2ca630523128faa6 Mon Sep 17 00:00:00 2001 From: Daniel Evans Date: Wed, 19 May 2021 14:53:24 +0100 Subject: [PATCH 1190/1270] Update 17727.performance.rst DOC: Fix typo in release notes for v1.21 --- doc/release/upcoming_changes/17727.performance.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/17727.performance.rst b/doc/release/upcoming_changes/17727.performance.rst index 5dd23b28572e..55ab7752baef 100755 --- a/doc/release/upcoming_changes/17727.performance.rst +++ b/doc/release/upcoming_changes/17727.performance.rst @@ -1,7 +1,7 @@ Improved performance in integer division of NumPy arrays -------------------------------------------------------- Integer division of NumPy arrays now uses `libdivide ` -when the divisor is a constant. With the usage of libdivde and +when the divisor is a constant. With the usage of libdivide and other minor optimizations, there is a large speedup. The ``//`` operator and ``np.floor_divide`` makes use of the new changes. From dbed464aa31069a90637a540cc464e6a59feec30 Mon Sep 17 00:00:00 2001 From: Bhargav v <12525622+brpy@users.noreply.github.com> Date: Wed, 19 May 2021 20:45:09 +0530 Subject: [PATCH 1191/1270] DOC: Improve trapz docstring (#19041) Co-authored-by: Eric Wieser --- numpy/lib/function_base.py | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 0bb41c270c26..9ed7700d2877 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -4089,11 +4089,18 @@ def _trapz_dispatcher(y, x=None, dx=None, axis=None): @array_function_dispatch(_trapz_dispatcher) def trapz(y, x=None, dx=1.0, axis=-1): - """ + r""" Integrate along the given axis using the composite trapezoidal rule. - Integrate `y` (`x`) along given axis. - + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + Parameters ---------- y : array_like @@ -4143,6 +4150,20 @@ def trapz(y, x=None, dx=1.0, axis=-1): 8.0 >>> np.trapz([1,2,3], dx=2) 8.0 + + Using a decreasing `x` corresponds to integrating in reverse: + + >>> np.trapz([1,2,3], x=[8,6,4]) + -8.0 + + More generally `x` is used to integrate along a parametric curve. 
+ This finds the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> np.trapz(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + >>> a = np.arange(6).reshape(2, 3) >>> a array([[0, 1, 2], @@ -4151,7 +4172,6 @@ def trapz(y, x=None, dx=1.0, axis=-1): array([1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) array([2., 8.]) - """ y = asanyarray(y) if x is None: From 9cf54f170accf0b06bc55306c5cc9ae9947b477a Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 19 May 2021 07:27:29 +0300 Subject: [PATCH 1192/1270] BUG: use PyArray_IntTupleFromIntp which handles 'long long' correctly --- numpy/core/src/multiarray/nditer_pywrap.c | 25 ++++------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index 5ac0f8442e01..7698ae43d07e 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -1513,8 +1513,7 @@ npyiter_next(NewNpyArrayIterObject *self) static PyObject *npyiter_shape_get(NewNpyArrayIterObject *self) { - PyObject *ret; - npy_intp idim, ndim, shape[NPY_MAXDIMS]; + npy_intp ndim, shape[NPY_MAXDIMS]; if (self->iter == NULL || self->finished) { PyErr_SetString(PyExc_ValueError, @@ -1524,14 +1523,7 @@ static PyObject *npyiter_shape_get(NewNpyArrayIterObject *self) if (NpyIter_GetShape(self->iter, shape) == NPY_SUCCEED) { ndim = NpyIter_GetNDim(self->iter); - ret = PyTuple_New(ndim); - if (ret != NULL) { - for (idim = 0; idim < ndim; ++idim) { - PyTuple_SET_ITEM(ret, idim, - PyLong_FromLong(shape[idim])); - } - return ret; - } + return PyArray_IntTupleFromIntp(ndim, shape); } return NULL; @@ -1539,8 +1531,7 @@ static PyObject *npyiter_shape_get(NewNpyArrayIterObject *self) static PyObject *npyiter_multi_index_get(NewNpyArrayIterObject *self) { - PyObject *ret; - npy_intp idim, ndim, multi_index[NPY_MAXDIMS]; + npy_intp ndim, multi_index[NPY_MAXDIMS]; if (self->iter == NULL || self->finished) { PyErr_SetString(PyExc_ValueError, @@ -1551,15 +1542,7 @@ static PyObject *npyiter_multi_index_get(NewNpyArrayIterObject *self) if (self->get_multi_index != NULL) { ndim = NpyIter_GetNDim(self->iter); self->get_multi_index(self->iter, multi_index); - ret = PyTuple_New(ndim); - if (ret == NULL) { - return NULL; - } - for (idim = 0; idim < ndim; ++idim) { - PyTuple_SET_ITEM(ret, idim, - PyLong_FromLong(multi_index[idim])); - } - return ret; + return PyArray_IntTupleFromIntp(ndim, multi_index); } else { if (!NpyIter_HasMultiIndex(self->iter)) { From 5bc6926d063866731cbe568366137f9e12182648 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 19 May 2021 13:17:21 -0600 Subject: [PATCH 1193/1270] STY: Break long lines --- numpy/lib/function_base.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 83786adfd334..d1d33b321501 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1495,9 +1495,10 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): difference from their predecessor of more than ``max(discont, period/2)`` to their `period`-complementary values. - For the default case where `period` is :math:`2\pi` and is `discont` is :math:`\pi`, - this unwraps a radian phase `p` such that adjacent differences are never - greater than :math:`\pi` by adding :math:`2k\pi` for some integer :math:`k`. 
+ For the default case where `period` is :math:`2\pi` and is `discont` is + :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences + are never greater than :math:`\pi` by adding :math:`2k\pi` for some + integer :math:`k`. Parameters ---------- @@ -1505,12 +1506,14 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): Input array. discont : float, optional Maximum discontinuity between values, default is ``period/2``. - Values below ``period/2`` are treated as if they were ``period/2``. To have an effect - different from the default, `discont` should be larger than ``period/2``. + Values below ``period/2`` are treated as if they were ``period/2``. + To have an effect different from the default, `discont` should be + larger than ``period/2``. axis : int, optional Axis along which unwrap will operate, default is the last axis. period: float, optional - Size of the range over which the input wraps. By default, it is ``2 pi``. + Size of the range over which the input wraps. By default, it is + ``2 pi``. .. versionadded:: 1.21.0 @@ -1567,9 +1570,11 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): interval_low = -interval_high ddmod = mod(dd - interval_low, period) + interval_low if boundary_ambiguous: - # for `mask = (abs(dd) == period/2)`, the above line made `ddmod[mask] == -period/2`. - # correct these such that `ddmod[mask] == sign(dd[mask])*period/2`. - _nx.copyto(ddmod, interval_high, where=(ddmod == interval_low) & (dd > 0)) + # for `mask = (abs(dd) == period/2)`, the above line made + # `ddmod[mask] == -period/2`. correct these such that + # `ddmod[mask] == sign(dd[mask])*period/2`. + _nx.copyto(ddmod, interval_high, + where=(ddmod == interval_low) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype=dtype) From 6f205497e01bc68113fc4b5bb7589e56de8d0620 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 19 May 2021 19:24:05 +0200 Subject: [PATCH 1194/1270] TST, SIMD: Improve test cases of integer division --- numpy/core/tests/test_simd.py | 11 +-- numpy/core/tests/test_umath.py | 127 ++++++++++++++++++++++++++------- 2 files changed, 107 insertions(+), 31 deletions(-) diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index 3be28c3bb1dd..ea5bbe103900 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -1,6 +1,7 @@ # NOTE: Please avoid the use of numpy.testing since NPYV intrinsics # may be involved in their functionality. import pytest, math, re +import itertools from numpy.core._simd import targets from numpy.core._multiarray_umath import __cpu_baseline__ @@ -820,8 +821,10 @@ def test_arithmetic_intdiv(self): def trunc_div(a, d): """ Divide towards zero works with large integers > 2^53, - equivalent to int(a/d) + and wrap around overflow similar to what C does. 
""" + if d == -1 and a == int_min: + return a sign_a, sign_d = a < 0, d < 0 if a == 0 or sign_a == sign_d: return a // d @@ -833,9 +836,9 @@ def trunc_div(a, d): 0, 1, self.nlanes, int_max-self.nlanes, int_min, int_min//2 + 1 ) - divisors = (1, 2, self.nlanes, int_min, int_max, int_max//2) + divisors = (1, 2, 9, 13, self.nlanes, int_min, int_max, int_max//2) - for x, d in zip(rdata, divisors): + for x, d in itertools.product(rdata, divisors): data = self._data(x) vdata = self.load(data) data_divc = [trunc_div(a, d) for a in data] @@ -848,7 +851,7 @@ def trunc_div(a, d): safe_neg = lambda x: -x-1 if -x > int_max else -x # test round divison for signed integers - for x, d in zip(rdata, divisors): + for x, d in itertools.product(rdata, divisors): d_neg = safe_neg(d) data = self._data(x) data_neg = [safe_neg(a) for a in data] diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 345aeb6c022a..9d1b13b53a86 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -5,6 +5,7 @@ import pytest import sys from fractions import Fraction +from functools import reduce import numpy.core.umath as ncu from numpy.core import _umath_tests as ncu_tests @@ -249,43 +250,115 @@ def test_division_int(self): assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) - @pytest.mark.parametrize("input_dtype", - np.sctypes['int'] + np.sctypes['uint']) - def test_division_int_boundary(self, input_dtype): - iinfo = np.iinfo(input_dtype) - - # Unsigned: - # Create list with 0, 25th, 50th, 75th percentile and max - if iinfo.min == 0: - lst = [0, iinfo.max//4, iinfo.max//2, - int(iinfo.max/1.33), iinfo.max] - divisors = [iinfo.max//4, iinfo.max//2, - int(iinfo.max/1.33), iinfo.max] - # Signed: - # Create list with min, 25th percentile, 0, 75th percentile, max - else: - lst = [iinfo.min, iinfo.min//2, 0, iinfo.max//2, iinfo.max] - divisors = [iinfo.min, iinfo.min//2, iinfo.max//2, iinfo.max] - a = np.array(lst, dtype=input_dtype) + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + np.sctypes['int'] + np.sctypes['uint'], ( + ( + # dividend + "np.arange(fo.max-lsize, fo.max, dtype=dtype)," + # divisors + "np.arange(lsize, dtype=dtype)," + # scalar divisors + "range(15)" + ), + ( + # dividend + "np.arange(fo.min, fo.min+lsize, dtype=dtype)," + # divisors + "np.arange(lsize//-2, lsize//2, dtype=dtype)," + # scalar divisors + "range(fo.min, fo.min + 15)" + ), ( + # dividend + "np.arange(fo.max-lsize, fo.max, dtype=dtype)," + # divisors + "np.arange(lsize, dtype=dtype)," + # scalar divisors + "[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]" + ) + ) + )) + def test_division_int_boundary(self, dtype, ex_val): + fo = np.iinfo(dtype) + neg = -1 if fo.min < 0 else 1 + # Large enough to test SIMD loops and remaind elements + lsize = 512 + 7 + a, b, divisors = eval(ex_val) + a_lst, b_lst = a.tolist(), b.tolist() + + c_div = lambda n, d: ( + 0 if d == 0 or (n and n == fo.min and d == -1) else n//d + ) + with np.errstate(divide='ignore'): + ac = a.copy() + ac //= b + div_ab = a // b + div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)] + + msg = "Integer arrays floor division check (//)" + assert all(div_ab == div_lst), msg + msg_eq = "Integer arrays floor division check (//=)" + assert all(ac == div_lst), msg_eq for divisor in divisors: - div_a = a // divisor - b = a.copy(); b //= divisor - div_lst = [i // divisor for i in lst] + ac = a.copy() + with np.errstate(divide='ignore'): + div_a = a // divisor 
+ ac //= divisor + div_lst = [c_div(i, divisor) for i in a_lst] - msg = "Integer arrays floor division check (//)" assert all(div_a == div_lst), msg - - msg = "Integer arrays floor division check (//=)" - assert all(div_a == b), msg + assert all(ac == div_lst), msg_eq with np.errstate(divide='raise'): + if 0 in b or (fo.min and -1 in b and fo.min in a): + # Verify overflow case + with pytest.raises(FloatingPointError): + a // b + else: + a // b + if fo.min and fo.min in a: + with pytest.raises(FloatingPointError): + a // -1 + elif fo.min: + a // -1 with pytest.raises(FloatingPointError): a // 0 with pytest.raises(FloatingPointError): - a //= 0 + ac = a.copy() + ac //= 0 + + np.array([], dtype=dtype) // 0 + + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + np.sctypes['int'] + np.sctypes['uint'], ( + "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)", + "np.array([fo.min, 1, -2, 1, 1, 2, -3], dtype=dtype)", + "np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)", + "np.arange(fo.max-(100*7), fo.max, 7, dtype=dtype)", + ) + )) + def test_division_int_reduce(self, dtype, ex_val): + fo = np.iinfo(dtype) + a = eval(ex_val) + lst = a.tolist() + c_div = lambda n, d: ( + 0 if d == 0 or (n and n == fo.min and d == -1) else n//d + ) + + with np.errstate(divide='ignore'): + div_a = np.floor_divide.reduce(a) + div_lst = reduce(c_div, lst) + msg = "Reduce floor integer division check" + assert div_a == div_lst, msg - np.array([], dtype=input_dtype) // 0 + with np.errstate(divide='raise'): + with pytest.raises(FloatingPointError): + np.floor_divide.reduce(np.arange(-100, 100, dtype=dtype)) + if fo.min: + with pytest.raises(FloatingPointError): + np.floor_divide.reduce( + np.array([fo.min, 1, -1], dtype=dtype) + ) @pytest.mark.parametrize( "dividend,divisor,quotient", From 519ab995e59b33b68ec28ac0c635158f3acc5447 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 19 May 2021 19:25:12 +0200 Subject: [PATCH 1195/1270] BUG, SIMD: Fix unexpected result of uint8 division on X86 The bug can occur in special cases e.g. when the divisor is scalar and equal to 9 or 13 and the dividend is array contains consecutive duplicate values of 233. 
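The scalar arithmetic behind these precomputed-divisor kernels can be modelled in a few lines of Python. The sketch below follows the unsigned 8-bit formulas used by ``numpy/core/src/common/simd/intdiv.h`` for non-power-of-two divisors; the helper names are illustrative, and the bug fixed below was in how the vector code assembled the high byte of the 16-bit product per lane, not in these scalar formulas::

    def precompute_u8_divisor(d):
        # Non-trivial, non-power-of-two unsigned 8-bit divisors only (e.g. 9 or 13).
        assert 2 < d < 256 and d & (d - 1) != 0
        l = d.bit_length()                     # ceil(log2(d))
        m = (((1 << l) - d) << 8) // d + 1     # 8-bit "magic" multiplier
        return m, 1, l - 1                     # multiplier and the two shift counts

    def divide_u8(a, d):
        m, sh1, sh2 = precompute_u8_divisor(d)
        mulhi = (a * m) >> 8                   # high byte of the 16-bit product
        return (mulhi + ((a - mulhi) >> sh1)) >> sh2

    # Reproduces floor division for the cases named in the commit message:
    for a, d in [(233, 9), (233, 13), (255, 13)]:
        assert divide_u8(a, d) == a // d
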
--- numpy/core/src/common/simd/avx2/arithmetic.h | 8 ++++---- .../core/src/common/simd/avx512/arithmetic.h | 19 ++++++++++--------- numpy/core/src/common/simd/intdiv.h | 4 +++- numpy/core/src/common/simd/sse/arithmetic.h | 8 ++++---- 4 files changed, 21 insertions(+), 18 deletions(-) diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h index b1e297988d51..e1b170863a34 100644 --- a/numpy/core/src/common/simd/avx2/arithmetic.h +++ b/numpy/core/src/common/simd/avx2/arithmetic.h @@ -73,16 +73,16 @@ // divide each unsigned 8-bit element by a precomputed divisor NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) { - const __m256i bmask = _mm256_set1_epi32(0xFF00FF00); + const __m256i bmask = _mm256_set1_epi32(0x00FF00FF); const __m128i shf1 = _mm256_castsi256_si128(divisor.val[1]); const __m128i shf2 = _mm256_castsi256_si128(divisor.val[2]); const __m256i shf1b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf1)); const __m256i shf2b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf2)); // high part of unsigned multiplication - __m256i mulhi_odd = _mm256_mulhi_epu16(a, divisor.val[0]); - __m256i mulhi_even = _mm256_mulhi_epu16(_mm256_slli_epi16(a, 8), divisor.val[0]); + __m256i mulhi_even = _mm256_mullo_epi16(_mm256_and_si256(a, bmask), divisor.val[0]); mulhi_even = _mm256_srli_epi16(mulhi_even, 8); - __m256i mulhi = _mm256_blendv_epi8(mulhi_even, mulhi_odd, bmask); + __m256i mulhi_odd = _mm256_mullo_epi16(_mm256_srli_epi16(a, 8), divisor.val[0]); + __m256i mulhi = _mm256_blendv_epi8(mulhi_odd, mulhi_even, bmask); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m256i q = _mm256_sub_epi8(a, mulhi); q = _mm256_and_si256(_mm256_srl_epi16(q, shf1), shf1b); diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h index 8a2790e93b17..f8632e701790 100644 --- a/numpy/core/src/common/simd/avx512/arithmetic.h +++ b/numpy/core/src/common/simd/avx512/arithmetic.h @@ -116,12 +116,13 @@ NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); const __m128i shf2 = _mm512_castsi512_si128(divisor.val[2]); #ifdef NPY_HAVE_AVX512BW + const __m512i bmask = _mm512_set1_epi32(0x00FF00FF); const __m512i shf1b = _mm512_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf1)); const __m512i shf2b = _mm512_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf2)); // high part of unsigned multiplication - __m512i mulhi_odd = _mm512_mulhi_epu16(a, divisor.val[0]); - __m512i mulhi_even = _mm512_mulhi_epu16(_mm512_slli_epi16(a, 8), divisor.val[0]); + __m512i mulhi_even = _mm512_mullo_epi16(_mm512_and_si512(a, bmask), divisor.val[0]); mulhi_even = _mm512_srli_epi16(mulhi_even, 8); + __m512i mulhi_odd = _mm512_mullo_epi16(_mm512_srli_epi16(a, 8), divisor.val[0]); __m512i mulhi = _mm512_mask_mov_epi8(mulhi_even, 0xAAAAAAAAAAAAAAAA, mulhi_odd); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m512i q = _mm512_sub_epi8(a, mulhi); @@ -130,7 +131,7 @@ NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) q = _mm512_and_si512(_mm512_srl_epi16(q, shf2), shf2b); return q; #else - const __m256i bmask = _mm256_set1_epi32(0xFF00FF00); + const __m256i bmask = _mm256_set1_epi32(0x00FF00FF); const __m256i shf1b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf1)); const __m256i shf2b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf2)); const __m512i shf2bw= npyv512_combine_si256(shf2b, shf2b); @@ -138,10 +139,10 @@ NPY_FINLINE npyv_u8 
npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) //// lower 256-bit __m256i lo_a = npyv512_lower_si256(a); // high part of unsigned multiplication - __m256i mulhi_odd = _mm256_mulhi_epu16(lo_a, mulc); - __m256i mulhi_even = _mm256_mulhi_epu16(_mm256_slli_epi16(lo_a, 8), mulc); + __m256i mulhi_even = _mm256_mullo_epi16(_mm256_and_si256(lo_a, bmask), mulc); mulhi_even = _mm256_srli_epi16(mulhi_even, 8); - __m256i mulhi = _mm256_blendv_epi8(mulhi_even, mulhi_odd, bmask); + __m256i mulhi_odd = _mm256_mullo_epi16(_mm256_srli_epi16(lo_a, 8), mulc); + __m256i mulhi = _mm256_blendv_epi8(mulhi_odd, mulhi_even, bmask); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m256i lo_q = _mm256_sub_epi8(lo_a, mulhi); lo_q = _mm256_and_si256(_mm256_srl_epi16(lo_q, shf1), shf1b); @@ -151,10 +152,10 @@ NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) //// higher 256-bit __m256i hi_a = npyv512_higher_si256(a); // high part of unsigned multiplication - mulhi_odd = _mm256_mulhi_epu16(hi_a, mulc); - mulhi_even = _mm256_mulhi_epu16(_mm256_slli_epi16(hi_a, 8), mulc); + mulhi_even = _mm256_mullo_epi16(_mm256_and_si256(hi_a, bmask), mulc); mulhi_even = _mm256_srli_epi16(mulhi_even, 8); - mulhi = _mm256_blendv_epi8(mulhi_even, mulhi_odd, bmask); + mulhi_odd = _mm256_mullo_epi16(_mm256_srli_epi16(hi_a, 8), mulc); + mulhi = _mm256_blendv_epi8(mulhi_odd, mulhi_even, bmask); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m256i hi_q = _mm256_sub_epi8(hi_a, mulhi); hi_q = _mm256_and_si256(_mm256_srl_epi16(hi_q, shf1), shf1b); diff --git a/numpy/core/src/common/simd/intdiv.h b/numpy/core/src/common/simd/intdiv.h index a6c293d87f64..1ce3b4df834d 100644 --- a/numpy/core/src/common/simd/intdiv.h +++ b/numpy/core/src/common/simd/intdiv.h @@ -204,14 +204,16 @@ NPY_FINLINE npyv_u8x3 npyv_divisor_u8(npy_uint8 d) sh1 = 1; sh2 = l - 1; // shift counts } npyv_u8x3 divisor; - divisor.val[0] = npyv_setall_u8(m); #ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 + divisor.val[0] = npyv_setall_u16(m); divisor.val[1] = npyv_set_u8(sh1); divisor.val[2] = npyv_set_u8(sh2); #elif defined(NPY_HAVE_VSX2) + divisor.val[0] = npyv_setall_u8(m); divisor.val[1] = npyv_setall_u8(sh1); divisor.val[2] = npyv_setall_u8(sh2); #elif defined(NPY_HAVE_NEON) + divisor.val[0] = npyv_setall_u8(m); divisor.val[1] = npyv_reinterpret_u8_s8(npyv_setall_s8(-sh1)); divisor.val[2] = npyv_reinterpret_u8_s8(npyv_setall_s8(-sh2)); #else diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index 1b02a4107b45..bced35108116 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -92,14 +92,14 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b) // divide each unsigned 8-bit element by a precomputed divisor NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) { - const __m128i bmask = _mm_set1_epi32(0xFF00FF00); + const __m128i bmask = _mm_set1_epi32(0x00FF00FF); const __m128i shf1b = _mm_set1_epi8(0xFFU >> _mm_cvtsi128_si32(divisor.val[1])); const __m128i shf2b = _mm_set1_epi8(0xFFU >> _mm_cvtsi128_si32(divisor.val[2])); // high part of unsigned multiplication - __m128i mulhi_odd = _mm_mulhi_epu16(a, divisor.val[0]); - __m128i mulhi_even = _mm_mulhi_epu16(_mm_slli_epi16(a, 8), divisor.val[0]); + __m128i mulhi_even = _mm_mullo_epi16(_mm_and_si128(a, bmask), divisor.val[0]); + __m128i mulhi_odd = _mm_mullo_epi16(_mm_srli_epi16(a, 8), divisor.val[0]); mulhi_even = _mm_srli_epi16(mulhi_even, 8); - __m128i mulhi = npyv_select_u8(bmask, 
mulhi_odd, mulhi_even); + __m128i mulhi = npyv_select_u8(bmask, mulhi_even, mulhi_odd); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m128i q = _mm_sub_epi8(a, mulhi); q = _mm_and_si128(_mm_srl_epi16(q, divisor.val[1]), shf1b); From 8bdeaebe0c82590a135d5b815fa41ffda0b9a9dd Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 19 May 2021 13:52:28 -0600 Subject: [PATCH 1196/1270] BUG: Fix missing "np." in docstring examples. --- numpy/lib/function_base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index d1d33b321501..651343a2c2e4 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1540,14 +1540,14 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary - >>> unwrap([0, 1, 2, -1, 0], period=4) + >>> np.unwrap([0, 1, 2, -1, 0], period=4) array([0, 1, 2, 3, 4]) - >>> unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) + >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) array([1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) + >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) array([2, 3, 4, 5, 6, 7, 8, 9]) >>> phase_deg = np.mod(np.linspace(0 ,720, 19), 360) - 180 - >>> unwrap(phase_deg, period=360) + >>> np.unwrap(phase_deg, period=360) array([-180., -140., -100., -60., -20., 20., 60., 100., 140., 180., 220., 260., 300., 340., 380., 420., 460., 500., 540.]) From 9ee4ed31de1c582366e44c488047b4e9432682e9 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 19 May 2021 13:26:22 -0700 Subject: [PATCH 1197/1270] BUG: Fix duplicate variable names in compiler check for AVX512_SKX The code that checks for AVX512 SKX intrisics compiler support has a bug which always causes the check to fail and hence disables code that used AVX512_SKX features. Introduced in https://github.com/numpy/numpy/pull/16871/. 
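The probe fixed here is a build-time check, so its outcome is not directly visible from Python. After rebuilding, a loose sanity check is to inspect which SIMD features NumPy detected and dispatched at runtime; the module attributes below exist since NumPy 1.20 and are shown for illustration only, as they do not report the older ``setup_common.py`` intrinsics probes themselves::

    import numpy.core._multiarray_umath as _mau

    print(_mau.__cpu_baseline__)                    # features assumed at build time
    print(_mau.__cpu_dispatch__)                    # features with dispatched code paths
    print(_mau.__cpu_features__.get("AVX512_SKX"))  # whether this CPU supports AVX512_SKX
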
--- numpy/core/setup_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index 378d93c066c4..ed3172da8a24 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -194,7 +194,7 @@ def check_api_version(apiversion, codegen_dir): ('__attribute__((target ("avx512f,avx512dq,avx512bw,avx512vl,avx512cd")))', 'attribute_target_avx512_skx_with_intrinsics', '__mmask8 temp = _mm512_fpclass_pd_mask(_mm512_set1_pd(1.0), 0x01);\ - __m512i temp = _mm512_castps_si512(_mm512_set1_ps(1.0));\ + __m512i unused_temp = _mm512_castps_si512(_mm512_set1_ps(1.0));\ _mm_mask_storeu_epi8(NULL, 0xFF, _mm_broadcastmb_epi64(temp))', 'immintrin.h'), ] From 817684ab3a9832b6353fdc898e387a7d827dda9f Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 19 May 2021 13:56:50 -0700 Subject: [PATCH 1198/1270] API,BLD: Always force `-ffp-exception-behavior=strict` on clang This also affects all users of numpy distutils --- numpy/distutils/ccompiler.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index f025c8904c75..061f4862dc19 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -384,6 +384,12 @@ def CCompiler_customize_cmd(self, cmd, ignore=()): """ log.info('customize %s using %s' % (self.__class__.__name__, cmd.__class__.__name__)) + + if hasattr(self, 'compiler') and 'clang' in self.compiler[0]: + # clang defaults to a non-strict floating error point model. + # Since NumPy and most Python libs give warnings for these, override: + self.compiler.append('-ffp-exception-behavior=strict') + def allow(attr): return getattr(cmd, attr, None) is not None and attr not in ignore From 290a0345c8a0eb33ba98172267e6d392110fe905 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 19 May 2021 14:27:49 -0700 Subject: [PATCH 1199/1270] DEP: Deprecate error clearing for special method in array-coercion (#19001) * DEP: Deprecate error clearing for special method in array-coercion When looking up `__array__` and `__array_interface__`, previously most errors were ignored (during the attribute lookup for __array__). This deprecates this, in the future only AttributeError (which is ignored in `LookupSpecial` itself) will be ignored. Closes gh-18723 * DOC: Fixups from Eric's review and prefer exception over error * TST: Fixup the error message in the test as well Co-authored-by: Charles Harris --- .../upcoming_changes/19001.deprecation.rst | 8 ++++ numpy/core/src/multiarray/ctors.c | 48 ++++++++++++++++--- numpy/core/tests/test_deprecations.py | 18 +++++++ 3 files changed, 67 insertions(+), 7 deletions(-) create mode 100644 doc/release/upcoming_changes/19001.deprecation.rst diff --git a/doc/release/upcoming_changes/19001.deprecation.rst b/doc/release/upcoming_changes/19001.deprecation.rst new file mode 100644 index 000000000000..48087f8a5e6a --- /dev/null +++ b/doc/release/upcoming_changes/19001.deprecation.rst @@ -0,0 +1,8 @@ +Exceptions will be raised during array-like creation +---------------------------------------------------- +When an object raised an exception during access of the special +attributes ``__array__`` or ``__array_interface__``, this exception +was usually ignored. +A warning is now given when the exception is anything but AttributeError. +To silence the warning, the type raising the exception has to be adapted +to raise an ``AttributeError``. 
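Concretely, the behaviour described in the release note looks roughly as follows as of NumPy 1.21 (a sketch; in future releases the original exception is expected to propagate directly)::

    import warnings
    import numpy as np

    class WeirdArrayLike:
        @property
        def __array__(self):
            raise RuntimeError("oops!")     # anything other than AttributeError

    with warnings.catch_warnings():
        warnings.simplefilter("error", DeprecationWarning)
        try:
            np.array(WeirdArrayLike())
        except DeprecationWarning as exc:
            # The original RuntimeError is chained to the warning as its cause.
            print(exc, "<-", exc.__cause__)

    class QuietArrayLike:
        @property
        def __array__(self):
            # AttributeError is still treated as "no __array__ here".
            raise AttributeError("__array__")

    np.array(QuietArrayLike())              # coerced to a 0-d object array, no warning
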
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 671ce49e4187..ef28d7797926 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -2100,6 +2100,39 @@ _is_default_descr(PyObject *descr, PyObject *typestr) { return PyObject_RichCompareBool(typestr, typestr2, Py_EQ); } + +/* + * A helper function to transition away from ignoring errors during + * special attribute lookups during array coercion. + */ +static NPY_INLINE int +deprecated_lookup_error_clearing(PyTypeObject *type, char *attribute) +{ + PyObject *exc_type, *exc_value, *traceback; + PyErr_Fetch(&exc_type, &exc_value, &traceback); + + /* DEPRECATED 2021-05-12, NumPy 1.21. */ + int res = PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "An exception was ignored while fetching the attribute `%s` from " + "an object of type '%s'. With the exception of `AttributeError` " + "NumPy will always raise this exception in the future. Raise this " + "deprecation warning to see the original exception. " + "(Warning added NumPy 1.21)", attribute, type->tp_name); + + if (res < 0) { + npy_PyErr_ChainExceptionsCause(exc_type, exc_value, traceback); + return -1; + } + else { + /* `PyErr_Fetch` cleared the original error, delete the references */ + Py_DECREF(exc_type); + Py_XDECREF(exc_value); + Py_XDECREF(traceback); + return 0; + } +} + + /*NUMPY_API*/ NPY_NO_EXPORT PyObject * PyArray_FromInterface(PyObject *origin) @@ -2124,11 +2157,10 @@ PyArray_FromInterface(PyObject *origin) /* RecursionError and MemoryError are considered fatal */ return NULL; } - /* - * This probably be deprecated, but at least shapely raised - * a NotImplementedError expecting it to be cleared (gh-17965) - */ - PyErr_Clear(); + if (deprecated_lookup_error_clearing( + Py_TYPE(origin), "__array_interface__") < 0) { + return NULL; + } } return Py_NotImplemented; } @@ -2401,8 +2433,10 @@ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) /* RecursionError and MemoryError are considered fatal */ return NULL; } - /* This probably be deprecated. 
*/ - PyErr_Clear(); + if (deprecated_lookup_error_clearing( + Py_TYPE(op), "__array__") < 0) { + return NULL; + } } return Py_NotImplemented; } diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 6eab2505d5dc..ffe0147b2d08 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -1174,6 +1174,24 @@ def test_deprecated(self): lambda: np.equal(1, 1, sig=(None, None, object))) +class TestSpecialAttributeLookupFailure(_DeprecationTestCase): + message = r"An exception was ignored while fetching the attribute" + + class WeirdArrayLike: + @property + def __array__(self): + raise RuntimeError("oops!") + + class WeirdArrayInterface: + @property + def __array_interface__(self): + raise RuntimeError("oops!") + + def test_deprecated(self): + self.assert_deprecated(lambda: np.array(self.WeirdArrayLike())) + self.assert_deprecated(lambda: np.array(self.WeirdArrayInterface())) + + class TestCtypesGetter(_DeprecationTestCase): # Deprecated 2021-05-18, Numpy 1.21.0 warning_cls = DeprecationWarning From b0c0ec10e6eaf93ea392a1641d82cd20eadf5d79 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 19 May 2021 21:29:50 +0200 Subject: [PATCH 1200/1270] BUG, SIMD: Fix NumPy build on ppc64le(IBM/Power) for old versions of GCC(<=6) --- numpy/core/src/common/simd/vsx/arithmetic.h | 2 +- numpy/core/src/common/simd/vsx/operators.h | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h index 123fcaf92d6b..eaca536201fb 100644 --- a/numpy/core/src/common/simd/vsx/arithmetic.h +++ b/numpy/core/src/common/simd/vsx/arithmetic.h @@ -222,7 +222,7 @@ NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor) // divide each signed 64-bit element by a precomputed divisor (round towards zero) NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) { - npyv_b64 overflow = vec_and(vec_cmpeq(a, npyv_setall_s64(-1LL << 63)), (npyv_b64)divisor.val[1]); + npyv_b64 overflow = npyv_and_b64(vec_cmpeq(a, npyv_setall_s64(-1LL << 63)), (npyv_b64)divisor.val[1]); npyv_s64 d = vec_sel(divisor.val[0], npyv_setall_s64(1), overflow); return vec_div(a, d); } diff --git a/numpy/core/src/common/simd/vsx/operators.h b/numpy/core/src/common/simd/vsx/operators.h index 230610129eb9..23c5d0dbe70c 100644 --- a/numpy/core/src/common/simd/vsx/operators.h +++ b/numpy/core/src/common/simd/vsx/operators.h @@ -44,7 +44,16 @@ /*************************** * Logical ***************************/ +#define NPYV_IMPL_VSX_BIN_CAST(INTRIN, SFX, CAST) \ + NPY_FINLINE npyv_##SFX npyv_##INTRIN##_##SFX(npyv_##SFX a, npyv_##SFX b) \ + { return (npyv_##SFX)vec_##INTRIN((CAST)a, (CAST)b); } +// Up to GCC 6 logical intrinsics don't support bool long long +#if defined(__GNUC__) && __GNUC__ <= 6 + #define NPYV_IMPL_VSX_BIN_B64(INTRIN) NPYV_IMPL_VSX_BIN_CAST(INTRIN, b64, npyv_u64) +#else + #define NPYV_IMPL_VSX_BIN_B64(INTRIN) NPYV_IMPL_VSX_BIN_CAST(INTRIN, b64, npyv_b64) +#endif // AND #define npyv_and_u8 vec_and #define npyv_and_s8 vec_and @@ -59,7 +68,7 @@ #define npyv_and_b8 vec_and #define npyv_and_b16 vec_and #define npyv_and_b32 vec_and -#define npyv_and_b64 vec_and +NPYV_IMPL_VSX_BIN_B64(and) // OR #define npyv_or_u8 vec_or @@ -75,7 +84,7 @@ #define npyv_or_b8 vec_or #define npyv_or_b16 vec_or #define npyv_or_b32 vec_or -#define npyv_or_b64 vec_or +NPYV_IMPL_VSX_BIN_B64(or) // XOR #define npyv_xor_u8 vec_xor @@ -91,7 +100,7 @@ #define 
npyv_xor_b8 vec_xor #define npyv_xor_b16 vec_xor #define npyv_xor_b32 vec_xor -#define npyv_xor_b64 vec_xor +NPYV_IMPL_VSX_BIN_B64(xor) // NOT // note: we implement npyv_not_b*(boolen types) for internal use*/ @@ -141,7 +150,8 @@ NPY_FINLINE npyv_f64 npyv_not_f64(npyv_f64 a) #define npyv_cmpeq_f64 vec_cmpeq // Int Not Equal -#ifdef NPY_HAVE_VSX3 +#if defined(NPY_HAVE_VSX3) && (!defined(__GNUC__) || defined(vec_cmpne)) + // vec_cmpne supported by gcc since version 7 #define npyv_cmpneq_u8 vec_cmpne #define npyv_cmpneq_s8 vec_cmpne #define npyv_cmpneq_u16 vec_cmpne From a44eecac4306504e7d3e6b8253deeb35e6b1fb43 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 19 May 2021 19:26:41 +0200 Subject: [PATCH 1201/1270] BLD: Add `.pyi` data files to the `numpy.typing` sub-package --- numpy/typing/setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/typing/setup.py b/numpy/typing/setup.py index c444e769fb6d..694a756dc5ab 100644 --- a/numpy/typing/setup.py +++ b/numpy/typing/setup.py @@ -3,6 +3,7 @@ def configuration(parent_package='', top_path=None): config = Configuration('typing', parent_package, top_path) config.add_subpackage('tests') config.add_data_dir('tests/data') + config.add_data_files('*.pyi') return config From 8cf69f4f9b60a04492d9bfb756508daa214313ee Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Thu, 20 May 2021 07:10:24 -0700 Subject: [PATCH 1202/1270] Split line --- numpy/core/setup_common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index ed3172da8a24..fe66bd17381b 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -194,7 +194,8 @@ def check_api_version(apiversion, codegen_dir): ('__attribute__((target ("avx512f,avx512dq,avx512bw,avx512vl,avx512cd")))', 'attribute_target_avx512_skx_with_intrinsics', '__mmask8 temp = _mm512_fpclass_pd_mask(_mm512_set1_pd(1.0), 0x01);\ - __m512i unused_temp = _mm512_castps_si512(_mm512_set1_ps(1.0));\ + __m512i unused_temp = \ + _mm512_castps_si512(_mm512_set1_ps(1.0));\ _mm_mask_storeu_epi8(NULL, 0xFF, _mm_broadcastmb_epi64(temp))', 'immintrin.h'), ] From 2ab785320c2917cf836233177f691942c374ec04 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 20 May 2021 16:15:49 +0200 Subject: [PATCH 1203/1270] STY: Alias `typing.Literal` as `L` --- numpy/__init__.pyi | 52 +++++++++++++++++++++++----------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2d23f926de55..194483816f33 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -176,9 +176,9 @@ from typing import ( ) if sys.version_info >= (3, 8): - from typing import Literal, Protocol, SupportsIndex, Final + from typing import Literal as L, Protocol, SupportsIndex, Final else: - from typing_extensions import Literal, Protocol, SupportsIndex, Final + from typing_extensions import Literal as L, Protocol, SupportsIndex, Final # Ensures that the stubs are picked up from numpy import ( @@ -896,7 +896,7 @@ def where(__condition, __x, __y): ... 
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) -_ByteOrder = Literal["S", "<", ">", "=", "|", "L", "B", "N", "I"] +_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"] class dtype(Generic[_DTypeScalar_co]): names: Optional[Tuple[str, ...]] @@ -1057,9 +1057,9 @@ class dtype(Generic[_DTypeScalar_co]): # NOTE: In the future 1-based multiplications will also yield `void` dtypes @overload - def __mul__(self, value: Literal[0]) -> None: ... # type: ignore[misc] + def __mul__(self, value: L[0]) -> None: ... # type: ignore[misc] @overload - def __mul__(self: _DType, value: Literal[1]) -> _DType: ... + def __mul__(self: _DType, value: L[1]) -> _DType: ... @overload def __mul__(self, value: int) -> dtype[void]: ... @@ -1188,14 +1188,14 @@ class flatiter(Generic[_NdArraySubClass]): @overload def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ... -_OrderKACF = Optional[Literal["K", "A", "C", "F"]] -_OrderACF = Optional[Literal["A", "C", "F"]] -_OrderCF = Optional[Literal["C", "F"]] +_OrderKACF = Optional[L["K", "A", "C", "F"]] +_OrderACF = Optional[L["A", "C", "F"]] +_OrderCF = Optional[L["C", "F"]] -_ModeKind = Literal["raise", "wrap", "clip"] -_PartitionKind = Literal["introselect"] -_SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"] -_SortSide = Literal["left", "right"] +_ModeKind = L["raise", "wrap", "clip"] +_PartitionKind = L["introselect"] +_SortKind = L["quicksort", "mergesort", "heapsort", "stable"] +_SortSide = L["left", "right"] _ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon) @@ -1266,7 +1266,7 @@ class _ArrayOrScalarCommon: self, axis: None = ..., out: None = ..., - keepdims: Literal[False] = ..., + keepdims: L[False] = ..., ) -> bool_: ... @overload def all( @@ -1288,7 +1288,7 @@ class _ArrayOrScalarCommon: self, axis: None = ..., out: None = ..., - keepdims: Literal[False] = ..., + keepdims: L[False] = ..., ) -> bool_: ... @overload def any( @@ -1627,7 +1627,7 @@ _BufferType = Union[ndarray, bytes, bytearray, memoryview] _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) _2Tuple = Tuple[_T, _T] -_Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"] +_Casting = L["no", "equiv", "safe", "same_kind", "unsafe"] _ArrayUInt_co = NDArray[Union[bool_, unsignedinteger[Any]]] _ArrayInt_co = NDArray[Union[bool_, integer[Any]]] @@ -2828,19 +2828,19 @@ class generic(_ArrayOrScalarCommon): @property def base(self) -> None: ... @property - def ndim(self) -> Literal[0]: ... + def ndim(self) -> L[0]: ... @property - def size(self) -> Literal[1]: ... + def size(self) -> L[1]: ... @property def shape(self) -> Tuple[()]: ... @property def strides(self) -> Tuple[()]: ... - def byteswap(self: _ScalarType, inplace: Literal[False] = ...) -> _ScalarType: ... + def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ... @property def flat(self: _ScalarType) -> flatiter[ndarray[Any, dtype[_ScalarType]]]: ... def item( self, - __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., ) -> Any: ... @overload @@ -2894,7 +2894,7 @@ class generic(_ArrayOrScalarCommon): ) -> ndarray[Any, dtype[_ScalarType]]: ... def squeeze( - self: _ScalarType, axis: Union[Literal[0], Tuple[()]] = ... + self: _ScalarType, axis: Union[L[0], Tuple[()]] = ... ) -> _ScalarType: ... def transpose(self: _ScalarType, __axes: Tuple[()] = ...) -> _ScalarType: ... 
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @@ -2934,7 +2934,7 @@ class bool_(generic): def __init__(self, __value: object = ...) -> None: ... def item( self, - __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., ) -> bool: ... def tolist(self) -> bool: ... @property @@ -3045,7 +3045,7 @@ class integer(number[_NBit1]): # type: ignore # sub-classes (`int64`, `uint32`, etc) def item( self, - __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., ) -> int: ... def tolist(self) -> int: ... def __index__(self) -> int: ... @@ -3192,7 +3192,7 @@ class floating(inexact[_NBit1]): def __init__(self, __value: _FloatValue = ...) -> None: ... def item( self, - __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., ) -> float: ... def tolist(self) -> float: ... __add__: _FloatOp[_NBit1] @@ -3231,7 +3231,7 @@ class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): def __init__(self, __value: _ComplexValue = ...) -> None: ... def item( self, - __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., ) -> complex: ... def tolist(self) -> complex: ... @property @@ -3299,7 +3299,7 @@ class bytes_(character, bytes): ) -> None: ... def item( self, - __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., ) -> bytes: ... def tolist(self) -> bytes: ... @@ -3315,7 +3315,7 @@ class str_(character, str): ) -> None: ... def item( self, - __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ..., + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., ) -> str: ... def tolist(self) -> str: ... From 4fc55f5100a5f39bd5fed657ad0bc593c2855527 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 20 May 2021 14:58:24 +0200 Subject: [PATCH 1204/1270] MAINT: Added a missing object to `npt.__all__` --- numpy/typing/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 4f0dc0cf13b2..173dbc22980e 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -173,7 +173,7 @@ def final(f): return f if not TYPE_CHECKING: - __all__ = ["ArrayLike", "DTypeLike", "NBitBase"] + __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] else: # Ensure that all objects within this module are accessible while # static type checking. 
This includes private ones, as we need them From eaf7ad03d64edcbb633a0db0152a41b520125baf Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 15 May 2021 01:20:03 +0200 Subject: [PATCH 1205/1270] ENH: Improve the `np.ufunc` annotations --- numpy/__init__.pyi | 233 +++++++++++----------- numpy/typing/__init__.py | 21 +- numpy/typing/_ufunc.pyi | 405 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 532 insertions(+), 127 deletions(-) create mode 100644 numpy/typing/_ufunc.pyi diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 194483816f33..3415172ed2c6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -105,7 +105,15 @@ from numpy.typing import ( _BytesCodes, _VoidCodes, _ObjectCodes, + + # Ufuncs + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, ) + from numpy.typing._callable import ( _BoolOp, _BoolBitOp, @@ -3400,32 +3408,13 @@ UFUNC_PYVALS_NAME: Final[str] newaxis: None +# See `npt._ufunc` for more concrete nin-/nout-specific stubs class ufunc: @property def __name__(self) -> str: ... - def __call__( - self, - *args: ArrayLike, - out: Optional[Union[ndarray, Tuple[ndarray, ...]]] = ..., - where: Optional[ndarray] = ..., - # The list should be a list of tuples of ints, but since we - # don't know the signature it would need to be - # Tuple[int, ...]. But, since List is invariant something like - # e.g. List[Tuple[int, int]] isn't a subtype of - # List[Tuple[int, ...]], so we can't type precisely here. - axes: List[Any] = ..., - axis: int = ..., - keepdims: bool = ..., - casting: _Casting = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: Union[str, Tuple[str]] = ..., - # In reality this should be a length of list 3 containing an - # int, an int, and a callable, but there's no way to express - # that. - extobj: List[Union[int, Callable]] = ..., - ) -> Any: ... + @property + def __doc__(self) -> str: ... + __call__: Callable[..., Any] @property def nin(self) -> int: ... @property @@ -3455,109 +3444,105 @@ class ufunc: # raise a ValueError ufuncs with that don't accept two input # arguments and return one output argument. Because of that we # can't type them very precisely. - @property - def reduce(self) -> Any: ... - @property - def accumulate(self) -> Any: ... - @property - def reduceat(self) -> Any: ... - @property - def outer(self) -> Any: ... + reduce: Any + accumulate: Any + reduce: Any + outer: Any # Similarly at won't be defined for ufuncs that return multiple # outputs, so we can't type it very precisely. - @property - def at(self) -> Any: ... 
- -absolute: ufunc -add: ufunc -arccos: ufunc -arccosh: ufunc -arcsin: ufunc -arcsinh: ufunc -arctan2: ufunc -arctan: ufunc -arctanh: ufunc -bitwise_and: ufunc -bitwise_not: ufunc -bitwise_or: ufunc -bitwise_xor: ufunc -cbrt: ufunc -ceil: ufunc -conj: ufunc -conjugate: ufunc -copysign: ufunc -cos: ufunc -cosh: ufunc -deg2rad: ufunc -degrees: ufunc -divide: ufunc -divmod: ufunc -equal: ufunc -exp2: ufunc -exp: ufunc -expm1: ufunc -fabs: ufunc -float_power: ufunc -floor: ufunc -floor_divide: ufunc -fmax: ufunc -fmin: ufunc -fmod: ufunc -frexp: ufunc -gcd: ufunc -greater: ufunc -greater_equal: ufunc -heaviside: ufunc -hypot: ufunc -invert: ufunc -isfinite: ufunc -isinf: ufunc -isnan: ufunc -isnat: ufunc -lcm: ufunc -ldexp: ufunc -left_shift: ufunc -less: ufunc -less_equal: ufunc -log10: ufunc -log1p: ufunc -log2: ufunc -log: ufunc -logaddexp2: ufunc -logaddexp: ufunc -logical_and: ufunc -logical_not: ufunc -logical_or: ufunc -logical_xor: ufunc -matmul: ufunc -maximum: ufunc -minimum: ufunc -mod: ufunc -modf: ufunc -multiply: ufunc -negative: ufunc -nextafter: ufunc -not_equal: ufunc -positive: ufunc -power: ufunc -rad2deg: ufunc -radians: ufunc -reciprocal: ufunc -remainder: ufunc -right_shift: ufunc -rint: ufunc -sign: ufunc -signbit: ufunc -sin: ufunc -sinh: ufunc -spacing: ufunc -sqrt: ufunc -square: ufunc -subtract: ufunc -tan: ufunc -tanh: ufunc -true_divide: ufunc -trunc: ufunc + at: Any + +# Parameters: `__name__`, `ntypes` and `identity` +absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] +add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None] +arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] +bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] +bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] +ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] +conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] +conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] +copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] +cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] +cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] +degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] +divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] +equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] +exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] +exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None] +expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None] +fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None] +float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None] +floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None] +fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None] +fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None] +fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None] +frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None] +gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None] 
+heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None] +hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None] +isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None] +isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None] +isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None] +lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None] +less: _UFunc_Nin2_Nout1[L['less'], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None] +log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None] +log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None] +log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None] +log: _UFunc_Nin1_Nout1[L['log'], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None] +maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] +minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] +mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] +modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] +multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None] +positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None] +power: _UFunc_Nin2_Nout1[L['power'], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None] +radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None] +remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None] +rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None] +sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None] +signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None] +sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None] +sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None] +spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None] +square: _UFunc_Nin1_Nout1[L['square'], L[18], None] +subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] +tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] +tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] +true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] abs = absolute diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 173dbc22980e..1bfdf07ae74e 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -247,9 +247,6 @@ class _32Bit(_64Bit): ... # type: ignore[misc] class _16Bit(_32Bit): ... # type: ignore[misc] class _8Bit(_16Bit): ... # type: ignore[misc] -# Clean up the namespace -del TYPE_CHECKING, final, List - from ._nbit import ( _NBitByte, _NBitShort, @@ -358,6 +355,24 @@ class _8Bit(_16Bit): ... 
# type: ignore[misc] _GenericAlias, ) +if TYPE_CHECKING: + from ._ufunc import ( + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, + ) +else: + _UFunc_Nin1_Nout1 = NotImplemented + _UFunc_Nin2_Nout1 = NotImplemented + _UFunc_Nin1_Nout2 = NotImplemented + _UFunc_Nin2_Nout2 = NotImplemented + _GUFunc_Nin2_Nout1 = NotImplemented + +# Clean up the namespace +del TYPE_CHECKING, final, List + if __doc__ is not None: from ._add_docstring import _docstrings __doc__ += _docstrings diff --git a/numpy/typing/_ufunc.pyi b/numpy/typing/_ufunc.pyi new file mode 100644 index 000000000000..b3b9fa95ed39 --- /dev/null +++ b/numpy/typing/_ufunc.pyi @@ -0,0 +1,405 @@ +"""A module with private type-check-only `numpy.ufunc` subclasses. + +The signatures of the ufuncs are too varied to reasonably type +with a single class. So instead, `ufunc` has been expanded into +four private subclasses, one for each combination of +`~ufunc.nin` and `~ufunc.nout`. + +""" + +from typing import ( + Any, + Generic, + List, + Optional, + overload, + Tuple, + TypeVar, + Union, +) + +from numpy import ufunc, _Casting, _OrderKACF +from numpy.typing import NDArray + +from ._shape import _ShapeLike +from ._scalars import _ScalarLike_co +from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._dtype_like import DTypeLike + +from typing_extensions import Literal, SupportsIndex + +_T = TypeVar("_T") +_2Tuple = Tuple[_T, _T] +_3Tuple = Tuple[_T, _T, _T] +_4Tuple = Tuple[_T, _T, _T, _T] + +_NTypes = TypeVar("_NTypes", bound=int) +_IDType = TypeVar("_IDType", bound=Any) +_NameType = TypeVar("_NameType", bound=str) + +# NOTE: In reality `extobj` should be a length of list 3 containing an +# int, an int, and a callable, but there's no way to properly express +# non-homogenous lists. +# Use `Any` over `Union` to avoid issues related to lists invariance. + +# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for +# ufuncs that don't accept two input arguments and return one output argument. +# In such cases the respective methods are simply typed as `None`. + +# NOTE: Similarly, `at` won't be defined for ufuncs that return +# multiple outputs; in such cases `at` is typed as `None` + +# NOTE: If 2 output types are returned then `out` must be a +# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable + +class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def signature(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + out: None = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _2Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> Any: ... 
+ @overload + def __call__( + self, + __x1: ArrayLike, + out: Union[None, NDArray[Any], Tuple[NDArray[Any]]] = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _2Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> NDArray[Any]: ... + + def at( + self, + __a: NDArray[Any], + __indices: _ArrayLikeInt_co, + ) -> None: ... + +class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + out: None = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: Union[None, NDArray[Any], Tuple[NDArray[Any]]] = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> NDArray[Any]: ... + + def at( + self, + __a: NDArray[Any], + __indices: _ArrayLikeInt_co, + __b: ArrayLike, + ) -> None: ... + + def reduce( + self, + array: ArrayLike, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: Optional[NDArray[Any]] = ..., + keepdims: bool = ..., + initial: Any = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: Optional[NDArray[Any]] = ..., + ) -> NDArray[Any]: ... + + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: Optional[NDArray[Any]] = ..., + ) -> NDArray[Any]: ... + + # Expand `**kwargs` into explicit keyword-only arguments + @overload + def outer( + self, + __A: _ScalarLike_co, + __B: _ScalarLike_co, + *, + out: None = ..., + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> Any: ... + @overload + def outer( # type: ignore[misc] + self, + __A: ArrayLike, + __B: ArrayLike, + *, + out: Union[None, NDArray[Any], Tuple[NDArray[Any]]] = ..., + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> NDArray[Any]: ... + +class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... 
+ @property + def at(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __out1: Optional[NDArray[Any]] = ..., + __out2: Optional[NDArray[Any]] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + +class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[4]: ... + @property + def signature(self) -> None: ... + @property + def at(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _4Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + __out1: Optional[NDArray[Any]] = ..., + __out2: Optional[NDArray[Any]] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _4Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + +class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + + # NOTE: In practice the only gufunc in the main name is `matmul`, + # so we can use its signature here + @property + def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + @property + def at(self) -> None: ... 
+ + # Scalar for 1D array-likes; ndarray otherwise + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: None = ..., + *, + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + axes: List[_2Tuple[SupportsIndex]] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: Union[NDArray[Any], Tuple[NDArray[Any]]], + *, + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + axes: List[_2Tuple[SupportsIndex]] = ..., + ) -> NDArray[Any]: ... From bb8f3b5aa8b316f4ec748289cd90495cfe57bc4b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 20 May 2021 16:26:24 +0200 Subject: [PATCH 1206/1270] TST: Update the `ufunc` typing tests --- numpy/typing/tests/data/fail/ufuncs.py | 40 ++++++++++++-- numpy/typing/tests/data/pass/ufuncs.py | 1 + numpy/typing/tests/data/reveal/ufuncs.py | 68 ++++++++++++++++++++++++ 3 files changed, 106 insertions(+), 3 deletions(-) create mode 100644 numpy/typing/tests/data/reveal/ufuncs.py diff --git a/numpy/typing/tests/data/fail/ufuncs.py b/numpy/typing/tests/data/fail/ufuncs.py index 4da9d08baf4c..e827267c6072 100644 --- a/numpy/typing/tests/data/fail/ufuncs.py +++ b/numpy/typing/tests/data/fail/ufuncs.py @@ -1,7 +1,41 @@ import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] np.sin.nin + "foo" # E: Unsupported operand types -np.sin(1, foo="bar") # E: Unexpected keyword argument -np.sin(1, extobj=["foo", "foo", "foo"]) # E: incompatible type +np.sin(1, foo="bar") # E: No overload variant + +np.abs(None) # E: No overload variant + +np.add(1, 1, 1) # E: No overload variant +np.add(1, 1, axis=0) # E: No overload variant + +np.matmul(AR_f8, AR_f8, where=True) # E: No overload variant + +np.frexp(AR_f8, out=None) # E: No overload variant +np.frexp(AR_f8, out=AR_f8) # E: No overload variant + +np.absolute.outer() # E: "None" not callable +np.frexp.outer() # E: "None" not callable +np.divmod.outer() # E: "None" not callable +np.matmul.outer() # E: "None" not callable + +np.absolute.reduceat() # E: "None" not callable +np.frexp.reduceat() # E: "None" not callable +np.divmod.reduceat() # E: "None" not callable +np.matmul.reduceat() # E: "None" not callable + +np.absolute.reduce() # E: "None" not callable +np.frexp.reduce() # E: "None" not callable +np.divmod.reduce() # E: "None" not callable +np.matmul.reduce() # E: "None" not callable + +np.absolute.accumulate() # E: "None" not callable +np.frexp.accumulate() # E: "None" not callable +np.divmod.accumulate() # E: "None" not callable +np.matmul.accumulate() # E: "None" not callable -np.abs(None) # E: incompatible type +np.frexp.at() # E: "None" not callable +np.divmod.at() # E: "None" not callable +np.matmul.at() # E: "None" not callable diff --git a/numpy/typing/tests/data/pass/ufuncs.py b/numpy/typing/tests/data/pass/ufuncs.py index 3c93fb2cf1d0..3cc31ae5e305 100644 --- a/numpy/typing/tests/data/pass/ufuncs.py +++ b/numpy/typing/tests/data/pass/ufuncs.py @@ -12,5 +12,6 @@ # np.sin(1) + np.sin(1) np.sin.types[0] np.sin.__name__ +np.sin.__doc__ np.abs(np.array([1])) diff --git a/numpy/typing/tests/data/reveal/ufuncs.py b/numpy/typing/tests/data/reveal/ufuncs.py new file mode 100644 index 000000000000..ade45577c9a3 --- /dev/null +++ b/numpy/typing/tests/data/reveal/ufuncs.py @@ 
-0,0 +1,68 @@ +import numpy as np +import numpy.typing as npt + +f8: np.float64 +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] + +reveal_type(np.absolute.__doc__) # E: str +reveal_type(np.absolute.types) # E: builtins.list[builtins.str] + +reveal_type(np.absolute.__name__) # E: Literal['absolute'] +reveal_type(np.absolute.ntypes) # E: Literal[20] +reveal_type(np.absolute.identity) # E: None +reveal_type(np.absolute.nin) # E: Literal[1] +reveal_type(np.absolute.nin) # E: Literal[1] +reveal_type(np.absolute.nout) # E: Literal[1] +reveal_type(np.absolute.nargs) # E: Literal[2] +reveal_type(np.absolute.signature) # E: None +reveal_type(np.absolute(f8)) # E: Any +reveal_type(np.absolute(AR_f8)) # E: numpy.ndarray +reveal_type(np.absolute.at(AR_f8, AR_i8)) # E: None + +reveal_type(np.add.__name__) # E: Literal['add'] +reveal_type(np.add.ntypes) # E: Literal[22] +reveal_type(np.add.identity) # E: Literal[0] +reveal_type(np.add.nin) # E: Literal[2] +reveal_type(np.add.nout) # E: Literal[1] +reveal_type(np.add.nargs) # E: Literal[3] +reveal_type(np.add.signature) # E: None +reveal_type(np.add(f8, f8)) # E: Any +reveal_type(np.add(AR_f8, f8)) # E: numpy.ndarray +reveal_type(np.add.at(AR_f8, AR_i8, f8)) # E: None +reveal_type(np.add.reduce(AR_f8, axis=0)) # E: Any +reveal_type(np.add.accumulate(AR_f8)) # E: numpy.ndarray +reveal_type(np.add.reduceat(AR_f8, AR_i8)) # E: numpy.ndarray +reveal_type(np.add.outer(f8, f8)) # E: Any +reveal_type(np.add.outer(AR_f8, f8)) # E: numpy.ndarray + +reveal_type(np.frexp.__name__) # E: Literal['frexp'] +reveal_type(np.frexp.ntypes) # E: Literal[4] +reveal_type(np.frexp.identity) # E: None +reveal_type(np.frexp.nin) # E: Literal[1] +reveal_type(np.frexp.nout) # E: Literal[2] +reveal_type(np.frexp.nargs) # E: Literal[3] +reveal_type(np.frexp.signature) # E: None +reveal_type(np.frexp(f8)) # E: Tuple[Any, Any] +reveal_type(np.frexp(AR_f8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]] + +reveal_type(np.divmod.__name__) # E: Literal['divmod'] +reveal_type(np.divmod.ntypes) # E: Literal[15] +reveal_type(np.divmod.identity) # E: None +reveal_type(np.divmod.nin) # E: Literal[2] +reveal_type(np.divmod.nout) # E: Literal[2] +reveal_type(np.divmod.nargs) # E: Literal[4] +reveal_type(np.divmod.signature) # E: None +reveal_type(np.divmod(f8, f8)) # E: Tuple[Any, Any] +reveal_type(np.divmod(AR_f8, f8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]] + +reveal_type(np.matmul.__name__) # E: Literal['matmul'] +reveal_type(np.matmul.ntypes) # E: Literal[19] +reveal_type(np.matmul.identity) # E: None +reveal_type(np.matmul.nin) # E: Literal[2] +reveal_type(np.matmul.nout) # E: Literal[1] +reveal_type(np.matmul.nargs) # E: Literal[3] +reveal_type(np.matmul.signature) # E: Literal['(n?,k),(k,m?)->(n?,m?)'] +reveal_type(np.matmul.identity) # E: None +reveal_type(np.matmul(AR_f8, AR_f8)) # E: Any +reveal_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)])) # E: Any From 376077268622c12d425bc1b70b8c66c53af14eea Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 19 May 2021 23:21:21 +0200 Subject: [PATCH 1207/1270] DOC: Clarify which keyword arguments are available to `ufunc.outer` --- numpy/core/_add_newdocs.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 538123149933..b8f0ee907b4b 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -5315,7 +5315,7 @@ r = empty(len(A),len(B)) 
for i in range(len(A)): for j in range(len(B)): - r[i,j] = op(A[i], B[j]) # op = ufunc in question + r[i,j] = op(A[i], B[j]) # op = ufunc in question Parameters ---------- @@ -5325,6 +5325,7 @@ Second array kwargs : any Arguments to pass on to the ufunc. Typically `dtype` or `out`. + See `ufunc` for a comprehensive overview of all available arguments. Returns ------- From 81f528ae968830fc837543b2741786ba699a2d3c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 20 May 2021 16:51:05 +0200 Subject: [PATCH 1208/1270] DOC: Add the `axis` and `axes` keywords to gufunc signatures --- numpy/core/_internal.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 4eebbaea3256..3b0c464674b6 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -843,11 +843,13 @@ def _ufunc_doc_signature_formatter(ufunc): ", order='K'" ", dtype=None" ", subok=True" - "[, signature" - ", extobj]" ) + + # NOTE: gufuncs may or may not support the `axis` parameter if ufunc.signature is None: - kwargs = ", where=True" + kwargs + kwargs = f", where=True{kwargs}[, signature, extobj]" + else: + kwargs += "[, signature, extobj, axes, axis]" # join all the parts together return '{name}({in_args}{out_args}, *{kwargs})'.format( From a303b33b7b2a06266f51c86be93ca77ddf55025f Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 19 May 2021 16:30:55 -0700 Subject: [PATCH 1209/1270] DOC: Add release note about clang floating point error flag change --- doc/release/upcoming_changes/19049.compatibility.rst | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 doc/release/upcoming_changes/19049.compatibility.rst diff --git a/doc/release/upcoming_changes/19049.compatibility.rst b/doc/release/upcoming_changes/19049.compatibility.rst new file mode 100644 index 000000000000..5a6eadc7afbc --- /dev/null +++ b/doc/release/upcoming_changes/19049.compatibility.rst @@ -0,0 +1,6 @@ +Distutils forces strict floating point model on clang +----------------------------------------------------- +NumPy distutils will now always add the ``-ffp-exception-behavior=strict`` +compiler flag when compiling with clang. Clang defaults to a non-strict +version, which allows the compiler to generate code that does not set +floating point warnings/errors correctly. From 4c6858a047fbf0df20db4a00e724c03400c26bfd Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 20 May 2021 13:42:17 -0600 Subject: [PATCH 1210/1270] DOC: Forward port missing 1.18.5 release note. Closes #19054. --- doc/changelog/1.18.5-changelog.rst | 18 +++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/1.18.5-notes.rst | 31 +++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 doc/changelog/1.18.5-changelog.rst create mode 100644 doc/source/release/1.18.5-notes.rst diff --git a/doc/changelog/1.18.5-changelog.rst b/doc/changelog/1.18.5-changelog.rst new file mode 100644 index 000000000000..f0bc51e6f2a7 --- /dev/null +++ b/doc/changelog/1.18.5-changelog.rst @@ -0,0 +1,18 @@ + +Contributors +============ + +A total of 3 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Matti Picus +* Siyuan + + +Pull requests merged +==================== + +A total of 2 pull requests were merged for this release. 
+ +* `#16439 `__: ENH: enable pickle protocol 5 support for python3.5 +* `#16441 `__: BUG: relpath fails for different drives on windows diff --git a/doc/source/release.rst b/doc/source/release.rst index b9cdfb9c8b57..6d208d395b90 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -16,6 +16,7 @@ Release Notes 1.19.2 1.19.1 1.19.0 + 1.18.5 1.18.4 1.18.3 1.18.2 diff --git a/doc/source/release/1.18.5-notes.rst b/doc/source/release/1.18.5-notes.rst new file mode 100644 index 000000000000..e704c001a452 --- /dev/null +++ b/doc/source/release/1.18.5-notes.rst @@ -0,0 +1,31 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.18.5 Release Notes +========================== + +This is a short release to allow pickle ``protocol=5`` to be used in +Python3.5. It is motivated by the recent backport of pickle5 to Python3.5. + +The Python versions supported in this release are 3.5-3.8. Downstream +developers should use Cython >= 0.29.15 for Python 3.8 support and +OpenBLAS >= 3.7 to avoid errors on the Skylake architecture. + +Contributors +============ + +A total of 3 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Matti Picus +* Siyuan Zhuang + + +Pull requests merged +==================== + +A total of 2 pull requests were merged for this release. + +* `#16439 `__: ENH: enable pickle protocol 5 support for python3.5 +* `#16441 `__: BUG: relpath fails for different drives on windows + From 0d3bd92b07d605f604ae62b743192b2683bee3c3 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 6 Apr 2021 19:41:13 +0530 Subject: [PATCH 1211/1270] SIMD: Removed umath code --- numpy/core/src/umath/loops.c.src | 86 -------------------------------- 1 file changed, 86 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 04665dc5296e..683bd0178bf0 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -843,92 +843,6 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : (in < 0 ? -1 : 0)); } -/* Libdivide only supports 32 and 64 bit types - * We try to pick the best possible one */ -#if NPY_BITSOF_@TYPE@ <= 32 -#define libdivide_@type@_t libdivide_s32_t -#define libdivide_@type@_gen libdivide_s32_gen -#define libdivide_@type@_do libdivide_s32_do -#else -#define libdivide_@type@_t libdivide_s64_t -#define libdivide_@type@_gen libdivide_s64_gen -#define libdivide_@type@_do libdivide_s64_do -#endif - -NPY_NO_EXPORT void -@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - BINARY_DEFS - - /* When the divisor is a constant, use libdivide for faster division */ - if (steps[1] == 0) { - /* In case of empty array, just return */ - if (n == 0) { - return; - } - - const @type@ in2 = *(@type@ *)ip2; - - /* If divisor is 0, we need not compute anything */ - if (in2 == 0) { - npy_set_floatstatus_divbyzero(); - BINARY_LOOP_SLIDING { - *((@type@ *)op1) = 0; - } - } - else { - struct libdivide_@type@_t fast_d = libdivide_@type@_gen(in2); - BINARY_LOOP_SLIDING { - const @type@ in1 = *(@type@ *)ip1; - /* - * FIXME: On x86 at least, dividing the smallest representable integer - * by -1 causes a SIFGPE (division overflow). We treat this case here - * (to avoid a SIGFPE crash at python level), but a good solution would - * be to treat integer division problems separately from FPU exceptions - * (i.e. 
a different approach than npy_set_floatstatus_divbyzero()). - */ - if (in1 == NPY_MIN_@TYPE@ && in2 == -1) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else { - *((@type@ *)op1) = libdivide_@type@_do(in1, &fast_d); - - /* Negative quotients needs to be rounded down */ - if (((in1 > 0) != (in2 > 0)) && (*((@type@ *)op1) * in2 != in1)) { - *((@type@ *)op1) = *((@type@ *)op1) - 1; - } - } - } - } - } - else { - BINARY_LOOP_SLIDING { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - /* - * FIXME: On x86 at least, dividing the smallest representable integer - * by -1 causes a SIFGPE (division overflow). We treat this case here - * (to avoid a SIGFPE crash at python level), but a good solution would - * be to treat integer division problems separately from FPU exceptions - * (i.e. a different approach than npy_set_floatstatus_divbyzero()). - */ - if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else { - *((@type@ *)op1) = in1/in2; - - /* Negative quotients needs to be rounded down */ - if (((in1 > 0) != (in2 > 0)) && (*((@type@ *)op1) * in2 != in1)) { - *((@type@ *)op1) = *((@type@ *)op1) - 1; - } - } - } - } -} - NPY_NO_EXPORT void @TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { From b000d5d45a94982d330305744737c95557d13317 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 6 Apr 2021 19:42:27 +0530 Subject: [PATCH 1212/1270] SIMD: Add signed to dispatch --- numpy/core/src/umath/loops.h.src | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index 0301aa5ed7b8..bb07e047c372 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -58,7 +58,8 @@ BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void #endif /**begin repeat - * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# + * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG, + BYTE, SHORT, INT, LONG, LONGLONG# */ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_divide, (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))) @@ -151,9 +152,6 @@ NPY_NO_EXPORT void NPY_NO_EXPORT void @S@@TYPE@_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); -NPY_NO_EXPORT void -@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); - NPY_NO_EXPORT void @S@@TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); From 9bb27e8559877eca5159b43b92b7436ffcb621d7 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 6 Apr 2021 19:43:01 +0530 Subject: [PATCH 1213/1270] SIMD: Add dispatch to generate_umath --- numpy/core/code_generators/generate_umath.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 6b6a0fe64ad5..9e94f9cccc47 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -328,7 +328,7 @@ def english_upper(s): docstrings.get('numpy.core.umath.floor_divide'), 'PyUFunc_DivisionTypeResolver', TD(ints, cfunc_alias='divide', - dispatch=[('loops_arithmetic', 'BHILQ')]), + dispatch=[('loops_arithmetic', 'bBhHiIlLqQ')]), TD(flts + cmplx), [TypeDescription('m', FullTypeDescr, 'mq', 'm'), 
TypeDescription('m', FullTypeDescr, 'md', 'm'), From 7f9d342324a730185cdf215c66d73530033436ab Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 6 Apr 2021 19:44:48 +0530 Subject: [PATCH 1214/1270] SIMD: Added dispatch code for signed --- .../src/umath/loops_arithmetic.dispatch.c.src | 59 +++++++++++++------ 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index 7e9f464636c5..c071edb3b656 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -23,7 +23,8 @@ ********************************************************************************/ #if NPY_SIMD /**begin repeat - * #sfx = u8, u16, u32, u64# + * #sfx = u8, u16, u32, u64, s8, s16, s32, s64# + * #signed = 0*4, 1*4# */ static NPY_INLINE void simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) @@ -43,6 +44,12 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) for (; len > 0; --len, ++src, ++dst) { const npyv_lanetype_@sfx@ a = *src; *dst = a / scalar; +#if @signed@ + /* Negative quotients needs to be rounded down */ + if (((a > 0) != (scalar > 0)) && (*dst * scalar != a)) { + *dst = *dst - 1; + } +#endif } npyv_cleanup(); @@ -56,18 +63,25 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) /**begin repeat * Unsigned types - * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# - * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# - * #STYPE = BYTE, SHORT, INT, LONG, LONGLONG# + * #type = byte, short, int, long, longlong# + * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# + */ + +/**begin repeat1 + * #s = , u# + * #S = , U# + * #slen = s, u# + * #signed = 1, 0# */ + #undef TO_SIMD_SFX #if 0 -/**begin repeat1 +/**begin repeat2 * #len = 8, 16, 32, 64# */ -#elif NPY_BITSOF_@STYPE@ == @len@ - #define TO_SIMD_SFX(X) X##_u@len@ -/**end repeat1**/ +#elif NPY_BITSOF_@TYPE@ == @len@ + #define TO_SIMD_SFX(X) X##_@slen@@len@ +/**end repeat2**/ #endif /* * For 64-bit division on Armv7, Aarch64, and IBM/Power, NPYV fall-backs to the scalar division @@ -77,15 +91,15 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) * Power10(VSX4) is an exception here since it has native support for integer vector division, * note neither infrastructure nor NPYV has supported VSX4 yet. 
*/ -#if NPY_BITSOF_@STYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) +#if NPY_BITSOF_@TYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) #undef TO_SIMD_SFX #endif -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@S@@TYPE@_divide) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(@type@) { - const @type@ d = *(@type@ *)ip2; + BINARY_REDUCE_LOOP(npy_@s@@type@) { + const npy_@s@@type@ d = *(npy_@s@@type@ *)ip2; if (NPY_UNLIKELY(d == 0)) { npy_set_floatstatus_divbyzero(); io1 = 0; @@ -93,26 +107,33 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) io1 /= d; } } - *((@type@ *)iop1) = io1; + *((npy_@s@@type@ *)iop1) = io1; } #if NPY_SIMD && defined(TO_SIMD_SFX) // for contiguous block of memory, divisor is a scalar and not 0 - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH) && - (*(@type@ *)args[1]) != 0) { + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(npy_@s@@type@), NPY_SIMD_WIDTH) && + (*(npy_@s@@type@ *)args[1]) != 0) { TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); } #endif else { BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; + const npy_@s@@type@ in1 = *(npy_@s@@type@ *)ip1; + const npy_@s@@type@ in2 = *(npy_@s@@type@ *)ip2; if (NPY_UNLIKELY(in2 == 0)) { npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; + *((npy_@s@@type@ *)op1) = 0; } else{ - *((@type@ *)op1) = in1 / in2; + *((npy_@s@@type@ *)op1) = in1 / in2; +#if @signed@ + /* Negative quotients needs to be rounded down */ + if (((in1 > 0) != (in2 > 0)) && (*((npy_@type@ *)op1) * in2 != in1)) { + *((npy_@type@ *)op1) = *((npy_@type@ *)op1) - 1; + } +#endif } } } } +/**end repeat1**/ /**end repeat**/ From 0b8838ef0c2e4c5d9e66163d260dc30902cc6170 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 13 Apr 2021 21:08:48 +0530 Subject: [PATCH 1215/1270] SIMD: Added floor divide logic for signed --- .../src/umath/loops_arithmetic.dispatch.c.src | 107 +++++++++++++----- 1 file changed, 81 insertions(+), 26 deletions(-) diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index c071edb3b656..55066589f4a3 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -23,9 +23,53 @@ ********************************************************************************/ #if NPY_SIMD /**begin repeat - * #sfx = u8, u16, u32, u64, s8, s16, s32, s64# + * #sfx = u8, u16, u32, u64, s8, s16, s32, s64# + * #len = 8, 16, 32, 64, 8, 16, 32, 64# * #signed = 0*4, 1*4# */ +#if @signed@ +static NPY_INLINE npyv_@sfx@ +simd_floor_divide_@sfx@(npyv_lanetype_@sfx@ *src, const npyv_@sfx@x3 divisor, npyv_lanetype_@sfx@ scalar) +{ + npyv_@sfx@ a, nsign_d, nsign_a, diff_sign, to_ninf, trunc, floor; + npyv_b@len@ greater_min, noverflow; + + nsign_d = npyv_setall_@sfx@(scalar < 0); + a = npyv_load_@sfx@(src); + nsign_a = npyv_cvt_@sfx@_b@len@(npyv_cmplt_@sfx@(a, nsign_d)); + nsign_a = npyv_and_@sfx@((npyv_@sfx@)nsign_a, npyv_setall_@sfx@(1)); + diff_sign = npyv_sub_@sfx@((npyv_@sfx@)nsign_a, nsign_d); + to_ninf = npyv_xor_@sfx@((npyv_@sfx@)nsign_a, nsign_d); + trunc = npyv_divc_@sfx@(npyv_add_@sfx@(a, diff_sign), divisor); + + if (NPY_UNLIKELY(-1 == scalar)) { + greater_min = npyv_cmpgt_@sfx@(a, 
npyv_setall_@sfx@(NPY_MIN_INT@len@)); + noverflow = npyv_cvt_b@len@_@sfx@(npyv_setall_@sfx@(-1)); + noverflow = npyv_and_b@len@(noverflow, greater_min); + if (npyv_tobits_b@len@(noverflow) != (((npy_uint64)1) << npyv_nlanes_@sfx@)-1) { + npy_set_floatstatus_divbyzero(); + } + floor = npyv_ifsub_@sfx@(greater_min, trunc, to_ninf, npyv_zero_@sfx@()); + } + else { + floor = npyv_sub_@sfx@(trunc, to_ninf); + } + + return floor; +} +#else +static NPY_INLINE npyv_@sfx@ +simd_floor_divide_@sfx@(npyv_lanetype_@sfx@ *src, const npyv_@sfx@x3 divisor, npyv_lanetype_@sfx@ scalar) +{ + npyv_@sfx@ a, c; + + a = npyv_load_@sfx@(src); + c = npyv_divc_@sfx@(a, divisor); + + return c; +} +#endif + static NPY_INLINE void simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) { @@ -36,20 +80,24 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) const npyv_@sfx@x3 divisor = npyv_divisor_@sfx@(scalar); for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { - npyv_@sfx@ a = npyv_load_@sfx@(src); - npyv_@sfx@ c = npyv_divc_@sfx@(a, divisor); + npyv_@sfx@ c = simd_floor_divide_@sfx@(src, divisor, scalar); npyv_store_@sfx@(dst, c); } for (; len > 0; --len, ++src, ++dst) { const npyv_lanetype_@sfx@ a = *src; - *dst = a / scalar; + if (scalar == 0 || (a == (npyv_lanetype_@sfx@)NPY_MIN_INT@len@ && scalar == (npyv_lanetype_@sfx@)-1)) { + npy_set_floatstatus_divbyzero(); + *dst = 0; + } else { + *dst = a / scalar; #if @signed@ - /* Negative quotients needs to be rounded down */ - if (((a > 0) != (scalar > 0)) && (*dst * scalar != a)) { - *dst = *dst - 1; - } + /* Negative quotients needs to be rounded down */ + if (((a > 0) != (scalar > 0)) && (*dst * scalar != a)) { + *dst = *dst - 1; + } #endif + } } npyv_cleanup(); @@ -68,19 +116,24 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) */ /**begin repeat1 - * #s = , u# - * #S = , U# - * #slen = s, u# * #signed = 1, 0# */ #undef TO_SIMD_SFX +#undef SIMD_TYPE +#undef SIMD_DIVIDE #if 0 /**begin repeat2 * #len = 8, 16, 32, 64# */ +#elif NPY_BITSOF_@TYPE@ == @len@ && @signed@ + #define TO_SIMD_SFX(X) X##_s@len@ + #define SIMD_TYPE npy_@type@ + #define SIMD_DIVIDE @TYPE@_divide #elif NPY_BITSOF_@TYPE@ == @len@ - #define TO_SIMD_SFX(X) X##_@slen@@len@ + #define TO_SIMD_SFX(X) X##_u@len@ + #define SIMD_TYPE npy_u@type@ + #define SIMD_DIVIDE U@TYPE@_divide /**end repeat2**/ #endif /* @@ -93,42 +146,44 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) */ #if NPY_BITSOF_@TYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) #undef TO_SIMD_SFX + #undef SIMD_TYPE + #undef SIMD_DIVIDE #endif -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@S@@TYPE@_divide) +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(SIMD_DIVIDE) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(npy_@s@@type@) { - const npy_@s@@type@ d = *(npy_@s@@type@ *)ip2; - if (NPY_UNLIKELY(d == 0)) { + BINARY_REDUCE_LOOP(SIMD_TYPE) { + const SIMD_TYPE d = *(SIMD_TYPE *)ip2; + if (NPY_UNLIKELY(d == 0 || (io1 == (SIMD_TYPE)NPY_MIN_@TYPE@ && d == (SIMD_TYPE)-1))) { npy_set_floatstatus_divbyzero(); io1 = 0; } else { io1 /= d; } } - *((npy_@s@@type@ *)iop1) = io1; + *((SIMD_TYPE *)iop1) = io1; } #if NPY_SIMD && defined(TO_SIMD_SFX) // for contiguous block of memory, divisor is a scalar and not 0 - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(npy_@s@@type@), NPY_SIMD_WIDTH) && - (*(npy_@s@@type@ *)args[1]) != 0) { + else if 
(IS_BLOCKABLE_BINARY_SCALAR2(sizeof(SIMD_TYPE), NPY_SIMD_WIDTH) && + (*(SIMD_TYPE *)args[1]) != 0) { TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); } #endif else { BINARY_LOOP { - const npy_@s@@type@ in1 = *(npy_@s@@type@ *)ip1; - const npy_@s@@type@ in2 = *(npy_@s@@type@ *)ip2; - if (NPY_UNLIKELY(in2 == 0)) { + const SIMD_TYPE in1 = *(SIMD_TYPE *)ip1; + const SIMD_TYPE in2 = *(SIMD_TYPE *)ip2; + if (NPY_UNLIKELY(in2 == 0 || (in1 == (SIMD_TYPE)NPY_MIN_@TYPE@ && in2 == (SIMD_TYPE)-1))) { npy_set_floatstatus_divbyzero(); - *((npy_@s@@type@ *)op1) = 0; + *((SIMD_TYPE *)op1) = 0; } else{ - *((npy_@s@@type@ *)op1) = in1 / in2; + *((SIMD_TYPE *)op1) = in1 / in2; #if @signed@ /* Negative quotients needs to be rounded down */ - if (((in1 > 0) != (in2 > 0)) && (*((npy_@type@ *)op1) * in2 != in1)) { - *((npy_@type@ *)op1) = *((npy_@type@ *)op1) - 1; + if (((in1 > 0) != (in2 > 0)) && (*((SIMD_TYPE *)op1) * in2 != in1)) { + *((SIMD_TYPE *)op1) = *((SIMD_TYPE *)op1) - 1; } #endif } From b1c3c98bfa13699dda51642723e3ce849d5950eb Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 13 Apr 2021 21:09:12 +0530 Subject: [PATCH 1216/1270] DOC: Added floor divide doc --- .../core/src/umath/loops_arithmetic.dispatch.c.src | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index 55066589f4a3..30d7a2a99026 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -20,6 +20,19 @@ //############################################################################### /******************************************************************************** ** Defining the SIMD kernels + * + * Floor division of signed is based on T. Granlund and P. L. 
Montgomery + * “Division by invariant integers using multiplication(see [Figure 6.1] + * http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.2556)" + * For details on TRUNC division see simd/intdiv.h for more clarification + *********************************************************************************** + ** Figure 6.1: Signed division by run–time invariant divisor, rounded towards -INF + *********************************************************************************** + * For q = FLOOR(a/d), all sword: + * sword −dsign = SRL(d, N − 1); + * uword −nsign = (n < −dsign); + * uword −qsign = EOR(−nsign, −dsign); + * q = TRUNC((n − (−dsign ) + (−nsign))/d) − (−qsign); ********************************************************************************/ #if NPY_SIMD /**begin repeat From b6b32674d634b6dfe9d92212e8a6ced0f1e14319 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 21 Apr 2021 22:01:17 +0530 Subject: [PATCH 1217/1270] SIMD: Refined signed and unsigned floor divide --- .../src/umath/loops_arithmetic.dispatch.c.src | 108 +++++++++++------- 1 file changed, 68 insertions(+), 40 deletions(-) diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index 30d7a2a99026..5e54a45de2d9 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -41,48 +41,82 @@ * #signed = 0*4, 1*4# */ #if @signed@ -static NPY_INLINE npyv_@sfx@ -simd_floor_divide_@sfx@(npyv_lanetype_@sfx@ *src, const npyv_@sfx@x3 divisor, npyv_lanetype_@sfx@ scalar) +static NPY_INLINE void +simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) { - npyv_@sfx@ a, nsign_d, nsign_a, diff_sign, to_ninf, trunc, floor; + npyv_@sfx@ a, nsign_d, nsign_a, diff_sign, to_ninf, trunc, floor, neg, vzero; npyv_b@len@ greater_min, noverflow; + npy_bool raise; + npy_uint64 tobits; - nsign_d = npyv_setall_@sfx@(scalar < 0); - a = npyv_load_@sfx@(src); - nsign_a = npyv_cvt_@sfx@_b@len@(npyv_cmplt_@sfx@(a, nsign_d)); - nsign_a = npyv_and_@sfx@((npyv_@sfx@)nsign_a, npyv_setall_@sfx@(1)); - diff_sign = npyv_sub_@sfx@((npyv_@sfx@)nsign_a, nsign_d); - to_ninf = npyv_xor_@sfx@((npyv_@sfx@)nsign_a, nsign_d); - trunc = npyv_divc_@sfx@(npyv_add_@sfx@(a, diff_sign), divisor); + npyv_lanetype_@sfx@ *src = (npyv_lanetype_@sfx@ *) args[0]; + npyv_lanetype_@sfx@ scalar = *(npyv_lanetype_@sfx@ *) args[1]; + npyv_lanetype_@sfx@ *dst = (npyv_lanetype_@sfx@ *) args[2]; + const int vstep = npyv_nlanes_@sfx@; + const npyv_@sfx@x3 divisor = npyv_divisor_@sfx@(scalar); if (NPY_UNLIKELY(-1 == scalar)) { - greater_min = npyv_cmpgt_@sfx@(a, npyv_setall_@sfx@(NPY_MIN_INT@len@)); - noverflow = npyv_cvt_b@len@_@sfx@(npyv_setall_@sfx@(-1)); - noverflow = npyv_and_b@len@(noverflow, greater_min); - if (npyv_tobits_b@len@(noverflow) != (((npy_uint64)1) << npyv_nlanes_@sfx@)-1) { + noverflow = npyv_cvt_b@len@_@sfx@(npyv_setall_@sfx@(-1)); + vzero = npyv_zero_@sfx@(); + for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { + a = npyv_load_@sfx@(src); + greater_min = npyv_cmpgt_@sfx@(a, npyv_setall_@sfx@(NPY_MIN_INT@len@)); + noverflow = npyv_and_b@len@(noverflow, greater_min); + neg = npyv_ifsub_@sfx@(greater_min, vzero, a, vzero); + + npyv_store_@sfx@(dst, neg); + } + tobits = npyv_tobits_b@len@(noverflow); + #if npyv_nlanes_@sfx@ == 64 + raise = (~tobits) != 0; + #else + raise = tobits != (1ULL << vstep)-1; + #endif + + for (; len > 0; --len, ++src, ++dst) { + npyv_lanetype_@sfx@ a = *src; + if (a == 
NPY_MIN_INT@len@) { + raise = NPY_TRUE; + *dst = 0; + } else { + *dst = -a; + } + } + if (raise) { npy_set_floatstatus_divbyzero(); } - floor = npyv_ifsub_@sfx@(greater_min, trunc, to_ninf, npyv_zero_@sfx@()); - } - else { - floor = npyv_sub_@sfx@(trunc, to_ninf); - } + } else { + for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { + nsign_d = npyv_setall_@sfx@(scalar < 0); + a = npyv_load_@sfx@(src); + nsign_a = npyv_cvt_@sfx@_b@len@(npyv_cmplt_@sfx@(a, nsign_d)); + nsign_a = npyv_and_@sfx@(nsign_a, npyv_setall_@sfx@(1)); + diff_sign = npyv_sub_@sfx@(nsign_a, nsign_d); + to_ninf = npyv_xor_@sfx@(nsign_a, nsign_d); + trunc = npyv_divc_@sfx@(npyv_add_@sfx@(a, diff_sign), divisor); + floor = npyv_sub_@sfx@(trunc, to_ninf); - return floor; -} -#else -static NPY_INLINE npyv_@sfx@ -simd_floor_divide_@sfx@(npyv_lanetype_@sfx@ *src, const npyv_@sfx@x3 divisor, npyv_lanetype_@sfx@ scalar) -{ - npyv_@sfx@ a, c; + npyv_store_@sfx@(dst, floor); + } - a = npyv_load_@sfx@(src); - c = npyv_divc_@sfx@(a, divisor); + for (; len > 0; --len, ++src, ++dst) { + const npyv_lanetype_@sfx@ a = *src; + if (scalar == 0 || (a == (npyv_lanetype_@sfx@)NPY_MIN_INT@len@ && scalar == (npyv_lanetype_@sfx@)-1)) { + npy_set_floatstatus_divbyzero(); + *dst = 0; + } else { + *dst = a / scalar; + /* Negative quotients needs to be rounded down */ + if (((a > 0) != (scalar > 0)) && (*dst * scalar != a)) { + *dst = *dst - 1; + } + } + } + } - return c; + npyv_cleanup(); } -#endif - +#else static NPY_INLINE void simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) { @@ -93,7 +127,8 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) const npyv_@sfx@x3 divisor = npyv_divisor_@sfx@(scalar); for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { - npyv_@sfx@ c = simd_floor_divide_@sfx@(src, divisor, scalar); + npyv_@sfx@ a = npyv_load_@sfx@(src); + npyv_@sfx@ c = npyv_divc_@sfx@(a, divisor); npyv_store_@sfx@(dst, c); } @@ -104,17 +139,12 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) *dst = 0; } else { *dst = a / scalar; -#if @signed@ - /* Negative quotients needs to be rounded down */ - if (((a > 0) != (scalar > 0)) && (*dst * scalar != a)) { - *dst = *dst - 1; - } -#endif } } npyv_cleanup(); } +#endif /**end repeat**/ #endif @@ -159,8 +189,6 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) */ #if NPY_BITSOF_@TYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) #undef TO_SIMD_SFX - #undef SIMD_TYPE - #undef SIMD_DIVIDE #endif NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(SIMD_DIVIDE) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) From 7c163672933d42e76dd643065acbe36a7274dc00 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 11 May 2021 21:38:51 +0530 Subject: [PATCH 1218/1270] SIMD: Separate signed and unsigned loops --- .../src/umath/loops_arithmetic.dispatch.c.src | 181 ++++++++++-------- 1 file changed, 105 insertions(+), 76 deletions(-) diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index 5e54a45de2d9..a52bb36b7031 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -36,41 +36,35 @@ ********************************************************************************/ #if NPY_SIMD /**begin repeat - * #sfx = u8, u16, u32, u64, s8, s16, s32, s64# - * #len = 8, 16, 32, 64, 8, 16, 32, 64# - * #signed = 0*4, 1*4# + * Signed 
types + * #sfx = s8, s16, s32, s64# + * #len = 8, 16, 32, 64# */ -#if @signed@ static NPY_INLINE void simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) { - npyv_@sfx@ a, nsign_d, nsign_a, diff_sign, to_ninf, trunc, floor, neg, vzero; - npyv_b@len@ greater_min, noverflow; - npy_bool raise; - npy_uint64 tobits; - npyv_lanetype_@sfx@ *src = (npyv_lanetype_@sfx@ *) args[0]; npyv_lanetype_@sfx@ scalar = *(npyv_lanetype_@sfx@ *) args[1]; npyv_lanetype_@sfx@ *dst = (npyv_lanetype_@sfx@ *) args[2]; const int vstep = npyv_nlanes_@sfx@; const npyv_@sfx@x3 divisor = npyv_divisor_@sfx@(scalar); - if (NPY_UNLIKELY(-1 == scalar)) { - noverflow = npyv_cvt_b@len@_@sfx@(npyv_setall_@sfx@(-1)); - vzero = npyv_zero_@sfx@(); + if (scalar == (npyv_lanetype_@sfx@)-1) { + npyv_b@len@ noverflow = npyv_cvt_b@len@_@sfx@(npyv_setall_@sfx@(-1)); + npyv_@sfx@ vzero = npyv_zero_@sfx@(); for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { - a = npyv_load_@sfx@(src); - greater_min = npyv_cmpgt_@sfx@(a, npyv_setall_@sfx@(NPY_MIN_INT@len@)); - noverflow = npyv_and_b@len@(noverflow, greater_min); - neg = npyv_ifsub_@sfx@(greater_min, vzero, a, vzero); + npyv_@sfx@ a = npyv_load_@sfx@(src); + npyv_b@len@ greater_min = npyv_cmpgt_@sfx@(a, npyv_setall_@sfx@(NPY_MIN_INT@len@)); + noverflow = npyv_and_b@len@(noverflow, greater_min); + npyv_@sfx@ neg = npyv_ifsub_@sfx@(greater_min, vzero, a, vzero); npyv_store_@sfx@(dst, neg); } - tobits = npyv_tobits_b@len@(noverflow); + npy_uint64 tobits = npyv_tobits_b@len@(noverflow); #if npyv_nlanes_@sfx@ == 64 - raise = (~tobits) != 0; + int raise = (~tobits) != 0; #else - raise = tobits != (1ULL << vstep)-1; + int raise = tobits != (1ULL << vstep)-1; #endif for (; len > 0; --len, ++src, ++dst) { @@ -87,36 +81,37 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) } } else { for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { - nsign_d = npyv_setall_@sfx@(scalar < 0); - a = npyv_load_@sfx@(src); - nsign_a = npyv_cvt_@sfx@_b@len@(npyv_cmplt_@sfx@(a, nsign_d)); - nsign_a = npyv_and_@sfx@(nsign_a, npyv_setall_@sfx@(1)); - diff_sign = npyv_sub_@sfx@(nsign_a, nsign_d); - to_ninf = npyv_xor_@sfx@(nsign_a, nsign_d); - trunc = npyv_divc_@sfx@(npyv_add_@sfx@(a, diff_sign), divisor); - floor = npyv_sub_@sfx@(trunc, to_ninf); + npyv_@sfx@ nsign_d = npyv_setall_@sfx@(scalar < 0); + npyv_@sfx@ a = npyv_load_@sfx@(src); + npyv_@sfx@ nsign_a = npyv_cvt_@sfx@_b@len@(npyv_cmplt_@sfx@(a, nsign_d)); + nsign_a = npyv_and_@sfx@(nsign_a, npyv_setall_@sfx@(1)); + npyv_@sfx@ diff_sign = npyv_sub_@sfx@(nsign_a, nsign_d); + npyv_@sfx@ to_ninf = npyv_xor_@sfx@(nsign_a, nsign_d); + npyv_@sfx@ trunc = npyv_divc_@sfx@(npyv_add_@sfx@(a, diff_sign), divisor); + npyv_@sfx@ floor = npyv_sub_@sfx@(trunc, to_ninf); npyv_store_@sfx@(dst, floor); } for (; len > 0; --len, ++src, ++dst) { const npyv_lanetype_@sfx@ a = *src; - if (scalar == 0 || (a == (npyv_lanetype_@sfx@)NPY_MIN_INT@len@ && scalar == (npyv_lanetype_@sfx@)-1)) { - npy_set_floatstatus_divbyzero(); - *dst = 0; - } else { - *dst = a / scalar; - /* Negative quotients needs to be rounded down */ - if (((a > 0) != (scalar > 0)) && (*dst * scalar != a)) { - *dst = *dst - 1; - } + *dst = a / scalar; + /* Negative quotients needs to be rounded down */ + if (((a > 0) != (scalar > 0)) && (*dst * scalar != a)) { + *dst = *dst - 1; } } } npyv_cleanup(); } -#else +/**end repeat**/ + +/**begin repeat + * Unsigned types + * #sfx = u8, u16, u32, u64# + * #len = 8, 16, 32, 64# + */ static NPY_INLINE void 
simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) { @@ -134,17 +129,11 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) for (; len > 0; --len, ++src, ++dst) { const npyv_lanetype_@sfx@ a = *src; - if (scalar == 0 || (a == (npyv_lanetype_@sfx@)NPY_MIN_INT@len@ && scalar == (npyv_lanetype_@sfx@)-1)) { - npy_set_floatstatus_divbyzero(); - *dst = 0; - } else { - *dst = a / scalar; - } + *dst = a / scalar; } npyv_cleanup(); } -#endif /**end repeat**/ #endif @@ -153,31 +142,78 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) ********************************************************************************/ /**begin repeat - * Unsigned types + * Signed types * #type = byte, short, int, long, longlong# * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# */ - +#undef TO_SIMD_SFX +#if 0 /**begin repeat1 - * #signed = 1, 0# + * #len = 8, 16, 32, 64# + */ +#elif NPY_BITSOF_@TYPE@ == @len@ + #define TO_SIMD_SFX(X) X##_s@len@ +/**end repeat1**/ +#endif + +#if NPY_BITSOF_@TYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) + #undef TO_SIMD_SFX +#endif +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + if (IS_BINARY_REDUCE) { + BINARY_REDUCE_LOOP(npy_@type@) { + const npy_@type@ d = *(npy_@type@ *)ip2; + if (NPY_UNLIKELY(d == 0 || (io1 == (npy_@type@)NPY_MIN_@TYPE@ && d == (npy_@type@)-1))) { + npy_set_floatstatus_divbyzero(); + io1 = 0; + } else { + io1 /= d; + } + } + *((npy_@type@ *)iop1) = io1; + } +#if NPY_SIMD && defined(TO_SIMD_SFX) + // for contiguous block of memory, divisor is a scalar and not 0 + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(npy_@type@), NPY_SIMD_WIDTH) && + (*(npy_@type@ *)args[1]) != 0) { + TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); + } +#endif + else { + BINARY_LOOP { + const npy_@type@ in1 = *(npy_@type@ *)ip1; + const npy_@type@ in2 = *(npy_@type@ *)ip2; + if (NPY_UNLIKELY(in2 == 0 || (in1 == (npy_@type@)NPY_MIN_@TYPE@ && in2 == (npy_@type@)-1))) { + npy_set_floatstatus_divbyzero(); + *((npy_@type@ *)op1) = 0; + } else{ + *((npy_@type@ *)op1) = in1 / in2; + /* Negative quotients needs to be rounded down */ + if (((in1 > 0) != (in2 > 0)) && (*((npy_@type@ *)op1) * in2 != in1)) { + *((npy_@type@ *)op1) = *((npy_@type@ *)op1) - 1; + } + } + } + } +} +/**end repeat**/ + +/**begin repeat + * Unsigned types + * #type = byte, short, int, long, longlong# + * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# */ #undef TO_SIMD_SFX -#undef SIMD_TYPE -#undef SIMD_DIVIDE #if 0 -/**begin repeat2 +/**begin repeat1 * #len = 8, 16, 32, 64# */ -#elif NPY_BITSOF_@TYPE@ == @len@ && @signed@ - #define TO_SIMD_SFX(X) X##_s@len@ - #define SIMD_TYPE npy_@type@ - #define SIMD_DIVIDE @TYPE@_divide #elif NPY_BITSOF_@TYPE@ == @len@ #define TO_SIMD_SFX(X) X##_u@len@ - #define SIMD_TYPE npy_u@type@ - #define SIMD_DIVIDE U@TYPE@_divide -/**end repeat2**/ +/**end repeat1**/ #endif /* * For 64-bit division on Armv7, Aarch64, and IBM/Power, NPYV fall-backs to the scalar division @@ -190,46 +226,39 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) #if NPY_BITSOF_@TYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) #undef TO_SIMD_SFX #endif -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(SIMD_DIVIDE) +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(U@TYPE@_divide) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { if (IS_BINARY_REDUCE) { - 
BINARY_REDUCE_LOOP(SIMD_TYPE) { - const SIMD_TYPE d = *(SIMD_TYPE *)ip2; - if (NPY_UNLIKELY(d == 0 || (io1 == (SIMD_TYPE)NPY_MIN_@TYPE@ && d == (SIMD_TYPE)-1))) { + BINARY_REDUCE_LOOP(npy_u@type@) { + const npy_u@type@ d = *(npy_u@type@ *)ip2; + if (NPY_UNLIKELY(d == 0 || (io1 == (npy_u@type@)NPY_MIN_@TYPE@ && d == (npy_u@type@)-1))) { npy_set_floatstatus_divbyzero(); io1 = 0; } else { io1 /= d; } } - *((SIMD_TYPE *)iop1) = io1; + *((npy_u@type@ *)iop1) = io1; } #if NPY_SIMD && defined(TO_SIMD_SFX) // for contiguous block of memory, divisor is a scalar and not 0 - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(SIMD_TYPE), NPY_SIMD_WIDTH) && - (*(SIMD_TYPE *)args[1]) != 0) { + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(npy_u@type@), NPY_SIMD_WIDTH) && + (*(npy_u@type@ *)args[1]) != 0) { TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); } #endif else { BINARY_LOOP { - const SIMD_TYPE in1 = *(SIMD_TYPE *)ip1; - const SIMD_TYPE in2 = *(SIMD_TYPE *)ip2; - if (NPY_UNLIKELY(in2 == 0 || (in1 == (SIMD_TYPE)NPY_MIN_@TYPE@ && in2 == (SIMD_TYPE)-1))) { + const npy_u@type@ in1 = *(npy_u@type@ *)ip1; + const npy_u@type@ in2 = *(npy_u@type@ *)ip2; + if (NPY_UNLIKELY(in2 == 0 || (in1 == (npy_u@type@)NPY_MIN_@TYPE@ && in2 == (npy_u@type@)-1))) { npy_set_floatstatus_divbyzero(); - *((SIMD_TYPE *)op1) = 0; + *((npy_u@type@ *)op1) = 0; } else{ - *((SIMD_TYPE *)op1) = in1 / in2; -#if @signed@ - /* Negative quotients needs to be rounded down */ - if (((in1 > 0) != (in2 > 0)) && (*((SIMD_TYPE *)op1) * in2 != in1)) { - *((SIMD_TYPE *)op1) = *((SIMD_TYPE *)op1) - 1; - } -#endif + *((npy_u@type@ *)op1) = in1 / in2; } } } } -/**end repeat1**/ /**end repeat**/ From 4619081d1faca58dd3e25db76164c1c7ad928a0f Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 11 May 2021 21:08:39 +0200 Subject: [PATCH 1219/1270] MAINT, SIMD: Several fixes to integer division - Revert unsigned integer division changes, overflow check only required by signed division. 
- Fix floor round for reduce divison - cleanup - revert fixme comment --- .../src/umath/loops_arithmetic.dispatch.c.src | 124 +++++++++--------- 1 file changed, 61 insertions(+), 63 deletions(-) diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index a52bb36b7031..19e05f2b57b0 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -34,6 +34,7 @@ * uword −qsign = EOR(−nsign, −dsign); * q = TRUNC((n − (−dsign ) + (−nsign))/d) − (−qsign); ********************************************************************************/ + #if NPY_SIMD /**begin repeat * Signed types @@ -49,34 +50,28 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) const int vstep = npyv_nlanes_@sfx@; const npyv_@sfx@x3 divisor = npyv_divisor_@sfx@(scalar); - if (scalar == (npyv_lanetype_@sfx@)-1) { + if (scalar == -1) { npyv_b@len@ noverflow = npyv_cvt_b@len@_@sfx@(npyv_setall_@sfx@(-1)); npyv_@sfx@ vzero = npyv_zero_@sfx@(); for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { - npyv_@sfx@ a = npyv_load_@sfx@(src); - npyv_b@len@ greater_min = npyv_cmpgt_@sfx@(a, npyv_setall_@sfx@(NPY_MIN_INT@len@)); - noverflow = npyv_and_b@len@(noverflow, greater_min); - npyv_@sfx@ neg = npyv_ifsub_@sfx@(greater_min, vzero, a, vzero); - + npyv_@sfx@ a = npyv_load_@sfx@(src); + npyv_b@len@ gt_min = npyv_cmpgt_@sfx@(a, npyv_setall_@sfx@(NPY_MIN_INT@len@)); + noverflow = npyv_and_b@len@(noverflow, gt_min); + npyv_@sfx@ neg = npyv_ifsub_@sfx@(gt_min, vzero, a, vzero); npyv_store_@sfx@(dst, neg); } - npy_uint64 tobits = npyv_tobits_b@len@(noverflow); - #if npyv_nlanes_@sfx@ == 64 - int raise = (~tobits) != 0; - #else - int raise = tobits != (1ULL << vstep)-1; - #endif + int raise_err = npyv_tobits_b@len@(npyv_not_b@len@(noverflow)) != 0; for (; len > 0; --len, ++src, ++dst) { npyv_lanetype_@sfx@ a = *src; if (a == NPY_MIN_INT@len@) { - raise = NPY_TRUE; + raise_err = 1; *dst = 0; } else { *dst = -a; } } - if (raise) { + if (raise_err) { npy_set_floatstatus_divbyzero(); } } else { @@ -89,20 +84,19 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) npyv_@sfx@ to_ninf = npyv_xor_@sfx@(nsign_a, nsign_d); npyv_@sfx@ trunc = npyv_divc_@sfx@(npyv_add_@sfx@(a, diff_sign), divisor); npyv_@sfx@ floor = npyv_sub_@sfx@(trunc, to_ninf); - npyv_store_@sfx@(dst, floor); } for (; len > 0; --len, ++src, ++dst) { const npyv_lanetype_@sfx@ a = *src; - *dst = a / scalar; - /* Negative quotients needs to be rounded down */ - if (((a > 0) != (scalar > 0)) && (*dst * scalar != a)) { - *dst = *dst - 1; + npyv_lanetype_@sfx@ r = a / scalar; + // Negative quotients needs to be rounded down + if (((a > 0) != (scalar > 0)) && ((r * scalar) != a)) { + r--; } + *dst = r; } } - npyv_cleanup(); } /**end repeat**/ @@ -131,7 +125,6 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) const npyv_lanetype_@sfx@ a = *src; *dst = a / scalar; } - npyv_cleanup(); } /**end repeat**/ @@ -143,8 +136,8 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) /**begin repeat * Signed types - * #type = byte, short, int, long, longlong# - * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# + * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong# + * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# */ #undef TO_SIMD_SFX #if 0 @@ -159,42 +152,47 @@ simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) #if NPY_BITSOF_@TYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || 
defined(NPY_HAVE_NEON)) #undef TO_SIMD_SFX #endif + +NPY_FINLINE @type@ floor_div_@TYPE@(const @type@ n, const @type@ d) +{ + /* + * FIXME: On x86 at least, dividing the smallest representable integer + * by -1 causes a SIFGPE (division overflow). We treat this case here + * (to avoid a SIGFPE crash at python level), but a good solution would + * be to treat integer division problems separately from FPU exceptions + * (i.e. a different approach than npy_set_floatstatus_divbyzero()). + */ + if (NPY_UNLIKELY(d == 0 || (n == NPY_MIN_@TYPE@ && d == -1))) { + npy_set_floatstatus_divbyzero(); + return 0; + } + @type@ r = n / d; + // Negative quotients needs to be rounded down + if (((n > 0) != (d > 0)) && ((r * d) != n)) { + r--; + } + return r; +} + NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(npy_@type@) { - const npy_@type@ d = *(npy_@type@ *)ip2; - if (NPY_UNLIKELY(d == 0 || (io1 == (npy_@type@)NPY_MIN_@TYPE@ && d == (npy_@type@)-1))) { - npy_set_floatstatus_divbyzero(); - io1 = 0; - } else { - io1 /= d; - } + BINARY_REDUCE_LOOP(@type@) { + io1 = floor_div_@TYPE@(io1, *(@type@*)ip2); } - *((npy_@type@ *)iop1) = io1; + *((@type@ *)iop1) = io1; } #if NPY_SIMD && defined(TO_SIMD_SFX) // for contiguous block of memory, divisor is a scalar and not 0 - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(npy_@type@), NPY_SIMD_WIDTH) && - (*(npy_@type@ *)args[1]) != 0) { + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH) && + (*(@type@ *)args[1]) != 0) { TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); } #endif else { BINARY_LOOP { - const npy_@type@ in1 = *(npy_@type@ *)ip1; - const npy_@type@ in2 = *(npy_@type@ *)ip2; - if (NPY_UNLIKELY(in2 == 0 || (in1 == (npy_@type@)NPY_MIN_@TYPE@ && in2 == (npy_@type@)-1))) { - npy_set_floatstatus_divbyzero(); - *((npy_@type@ *)op1) = 0; - } else{ - *((npy_@type@ *)op1) = in1 / in2; - /* Negative quotients needs to be rounded down */ - if (((in1 > 0) != (in2 > 0)) && (*((npy_@type@ *)op1) * in2 != in1)) { - *((npy_@type@ *)op1) = *((npy_@type@ *)op1) - 1; - } - } + *((@type@ *)op1) = floor_div_@TYPE@(*(@type@*)ip1, *(@type@*)ip2); } } } @@ -202,16 +200,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) /**begin repeat * Unsigned types - * #type = byte, short, int, long, longlong# - * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# + * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# + * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# + * #STYPE = BYTE, SHORT, INT, LONG, LONGLONG# */ - #undef TO_SIMD_SFX #if 0 /**begin repeat1 * #len = 8, 16, 32, 64# */ -#elif NPY_BITSOF_@TYPE@ == @len@ +#elif NPY_BITSOF_@STYPE@ == @len@ #define TO_SIMD_SFX(X) X##_u@len@ /**end repeat1**/ #endif @@ -223,40 +221,40 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) * Power10(VSX4) is an exception here since it has native support for integer vector division, * note neither infrastructure nor NPYV has supported VSX4 yet. 
*/ -#if NPY_BITSOF_@TYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) +#if NPY_BITSOF_@STYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) #undef TO_SIMD_SFX #endif -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(U@TYPE@_divide) +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(npy_u@type@) { - const npy_u@type@ d = *(npy_u@type@ *)ip2; - if (NPY_UNLIKELY(d == 0 || (io1 == (npy_u@type@)NPY_MIN_@TYPE@ && d == (npy_u@type@)-1))) { + BINARY_REDUCE_LOOP(@type@) { + const @type@ d = *(@type@ *)ip2; + if (NPY_UNLIKELY(d == 0)) { npy_set_floatstatus_divbyzero(); io1 = 0; } else { io1 /= d; } } - *((npy_u@type@ *)iop1) = io1; + *((@type@ *)iop1) = io1; } #if NPY_SIMD && defined(TO_SIMD_SFX) // for contiguous block of memory, divisor is a scalar and not 0 - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(npy_u@type@), NPY_SIMD_WIDTH) && - (*(npy_u@type@ *)args[1]) != 0) { + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH) && + (*(@type@ *)args[1]) != 0) { TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); } #endif else { BINARY_LOOP { - const npy_u@type@ in1 = *(npy_u@type@ *)ip1; - const npy_u@type@ in2 = *(npy_u@type@ *)ip2; - if (NPY_UNLIKELY(in2 == 0 || (in1 == (npy_u@type@)NPY_MIN_@TYPE@ && in2 == (npy_u@type@)-1))) { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + if (NPY_UNLIKELY(in2 == 0)) { npy_set_floatstatus_divbyzero(); - *((npy_u@type@ *)op1) = 0; + *((@type@ *)op1) = 0; } else{ - *((npy_u@type@ *)op1) = in1 / in2; + *((@type@ *)op1) = in1 / in2; } } } From f74f5003090a4c7ec83dead46567bd96de6dfce8 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 21 May 2021 04:40:29 +0200 Subject: [PATCH 1220/1270] SIMD: Fix computing the fast int32 division parameters When the divisor is equal to the minimum integer value, It was affected by gcc 9.3 only and under certain conditions of aggressive optimization. 
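As a rough illustration (not part of the patch itself), the affected case is integer floor
division where the divisor is the most negative representable value. A quick sanity check
from Python, assuming NumPy's usual floor-division semantics for signed integers:

    import numpy as np

    # Divisor equal to the minimum int32 value -- the case whose fast-division
    # parameters were computed incorrectly under gcc 9.3 with aggressive optimization.
    d = np.int32(-2**31)
    x = np.array([0, 1, 5, 2**31 - 1, -(2**31)], dtype=np.int32)

    print(x // d)                          # expected: [ 0 -1 -1 -1  1]
    print([int(v) // int(d) for v in x])   # pure-Python reference values

Note this only checks results, not the code path: whether the SIMD divide-by-scalar loop
is actually taken depends on the array being contiguous and large enough.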
--- numpy/core/src/common/simd/intdiv.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/core/src/common/simd/intdiv.h b/numpy/core/src/common/simd/intdiv.h index 1ce3b4df834d..f6ea9abf254e 100644 --- a/numpy/core/src/common/simd/intdiv.h +++ b/numpy/core/src/common/simd/intdiv.h @@ -368,18 +368,18 @@ NPY_FINLINE npyv_s32x3 npyv_divisor_s32(npy_int32 d) { npy_int32 d1 = abs(d); npy_int32 sh, m; - if (d1 > 1) { + // Handel abs overflow + if ((npy_uint32)d == 0x80000000U) { + m = 0x80000001; + sh = 30; + } + else if (d1 > 1) { sh = npyv__bitscan_revnz_u32(d1 - 1); // ceil(log2(abs(d))) - 1 m = (1ULL << (32 + sh)) / d1 + 1; // multiplier } else if (d1 == 1) { sh = 0; m = 1; } - // fix abs overflow - else if (d == (1 << 31)) { - m = d + 1; - sh = 30; - } else { // raise arithmetic exception for d == 0 sh = m = 1 / ((npy_int32 volatile *)&d)[0]; // LCOV_EXCL_LINE @@ -445,18 +445,18 @@ NPY_FINLINE npyv_s64x3 npyv_divisor_s64(npy_int64 d) #else npy_int64 d1 = llabs(d); npy_int64 sh, m; - if (d1 > 1) { + // Handel abs overflow + if ((npy_uint64)d == 0x8000000000000000ULL) { + m = 0x8000000000000001LL; + sh = 62; + } + else if (d1 > 1) { sh = npyv__bitscan_revnz_u64(d1 - 1); // ceil(log2(abs(d))) - 1 m = npyv__divh128_u64(1ULL << sh, d1) + 1; // multiplier } else if (d1 == 1) { sh = 0; m = 1; } - // fix abs overflow - else if (d == (1LL << 63)) { - m = d + 1; - sh = 62; - } else { // raise arithmetic exception for d == 0 sh = m = 1 / ((npy_int64 volatile *)&d)[0]; // LCOV_EXCL_LINE From 641587086a4f5cd1413f72d27cea64f1bddc1772 Mon Sep 17 00:00:00 2001 From: Ismael Jimenez Date: Sat, 22 May 2021 10:34:37 +0200 Subject: [PATCH 1221/1270] fix gh-19056 --- numpy/core/numeric.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi index 6b9ef4b20b98..f579514349da 100644 --- a/numpy/core/numeric.pyi +++ b/numpy/core/numeric.pyi @@ -238,6 +238,6 @@ def isclose( equal_nan: bool = ..., ) -> Any: ... -def array_equal(a1: ArrayLike, a2: ArrayLike) -> bool: ... +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... 
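A minimal sketch of the call this stub change is meant to cover; the ``equal_nan`` keyword
already exists at runtime (since NumPy 1.19), and gh-19056 is only about the missing
parameter in the type annotation:

    import numpy as np

    a = np.array([1.0, np.nan])
    b = np.array([1.0, np.nan])

    # NaN does not compare equal to itself, so the default result is False
    print(np.array_equal(a, b))                  # False
    # With equal_nan=True, NaNs in matching positions are treated as equal
    print(np.array_equal(a, b, equal_nan=True))  # True

With the updated stub, the second call also passes static type checking instead of being
flagged for an unexpected keyword argument.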
From f91f4bcd050299c930092390b54ce9ba51fd70e0 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 22 May 2021 15:48:23 +0200 Subject: [PATCH 1222/1270] BUG: Fixed an issue wherein `nanmedian` could return an array with the wrong dtype --- numpy/lib/nanfunctions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index a02ad779f3a7..02ad01a980c6 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -965,7 +965,9 @@ def _nanmedian1d(arr1d, overwrite_input=False): arr1d, overwrite_input = _remove_nan_1d(arr1d, overwrite_input=overwrite_input) if arr1d.size == 0: - return np.nan + # Ensure that a nan-esque scalar of the appropiate type (and unit) + # is returned for `timedelta64` and `complexfloating` + return np.array(np.nan).astype(arr1d.dtype, copy=False)[()] return np.median(arr1d, overwrite_input=overwrite_input) From a8b825c0379972234a86f30b76ae9fc853a88b5e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 22 May 2021 16:04:26 +0200 Subject: [PATCH 1223/1270] BUG: Fixed an issue wherein `_nanmedian_small` would fail for `timedelta64`-based dtypes --- numpy/lib/nanfunctions.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 02ad01a980c6..719c529c14d7 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -1010,10 +1010,12 @@ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): for i in range(np.count_nonzero(m.mask.ravel())): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4) + + fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan if out is not None: - out[...] = m.filled(np.nan) + out[...] = m.filled(fill_value) return out - return m.filled(np.nan) + return m.filled(fill_value) def _nanmedian_dispatcher( From 410924efc5f65d09ca1c128369099b2a4c7551b1 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 22 May 2021 15:51:14 +0200 Subject: [PATCH 1224/1270] TST: Update the `np.nanmedian` tests for all-nan arrays Test multiple dtypes in the `test_allnans` + a minor refactor --- numpy/lib/tests/test_nanfunctions.py | 50 +++++++++++++++++++--------- 1 file changed, 34 insertions(+), 16 deletions(-) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index e0f723a3c22f..1f1f5601b19c 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -588,6 +588,15 @@ def test_empty(self): assert_(len(w) == 0) +_TIME_UNITS = ( + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" +) + +# All `inexact` + `timdelta64` type codes +_TYPE_CODES = list(np.typecodes["AllFloat"]) +_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS] + + class TestNanFunctions_Median: def test_mutation(self): @@ -662,23 +671,32 @@ def test_result_values(self): res = np.nanmedian(_ndat, axis=1) assert_almost_equal(res, tgt) - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for axis in [None, 0, 1]: - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", _TYPE_CODES) + def test_allnans(self, dtype, axis): + mat = np.full((3, 3), np.nan).astype(dtype) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) - assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) - if axis is None: - assert_(len(sup.log) == 1) - else: - assert_(len(sup.log) == 3) - # Check scalar - 
assert_(np.isnan(np.nanmedian(np.nan))) - if axis is None: - assert_(len(sup.log) == 2) - else: - assert_(len(sup.log) == 4) + output = np.nanmedian(mat, axis=axis) + assert output.dtype == mat.dtype + assert np.isnan(output).all() + + if axis is None: + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 3) + + # Check scalar + scalar = np.array(np.nan).astype(dtype)[()] + output_scalar = np.nanmedian(scalar) + assert output_scalar.dtype == scalar.dtype + assert np.isnan(output_scalar) + + if axis is None: + assert_(len(sup.log) == 2) + else: + assert_(len(sup.log) == 4) def test_empty(self): mat = np.zeros((0, 3)) From ae9314eff5d539122bf87800a1bc50a9f99762a8 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 22 May 2021 16:18:47 +0200 Subject: [PATCH 1225/1270] MAINT: Directly grab `nan` from the input array Directly grab a nan-esque object from the input array, rather than constructing a new one from scratch --- numpy/lib/nanfunctions.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 719c529c14d7..2c2c3435bc1f 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -962,14 +962,16 @@ def _nanmedian1d(arr1d, overwrite_input=False): Private function for rank 1 arrays. Compute the median ignoring NaNs. See nanmedian for parameter usage """ - arr1d, overwrite_input = _remove_nan_1d(arr1d, - overwrite_input=overwrite_input) - if arr1d.size == 0: + arr1d_parsed, overwrite_input = _remove_nan_1d( + arr1d, overwrite_input=overwrite_input, + ) + + if arr1d_parsed.size == 0: # Ensure that a nan-esque scalar of the appropiate type (and unit) # is returned for `timedelta64` and `complexfloating` - return np.array(np.nan).astype(arr1d.dtype, copy=False)[()] + return arr1d[-1] - return np.median(arr1d, overwrite_input=overwrite_input) + return np.median(arr1d_parsed, overwrite_input=overwrite_input) def _nanmedian(a, axis=None, out=None, overwrite_input=False): From 4c3e90643ce2077d083cb3ea2f5925a7f8c2470b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 22 May 2021 14:16:07 -0600 Subject: [PATCH 1226/1270] MAINT: Update mailmap - Add new names from development branch - Simplify previous entries --- .mailmap | 641 +++++++++++++++++++++++++++++++++---------------------- 1 file changed, 381 insertions(+), 260 deletions(-) diff --git a/.mailmap b/.mailmap index 4726668e8b40..30a0981cdbeb 100644 --- a/.mailmap +++ b/.mailmap @@ -8,301 +8,422 @@ # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. 
-Aaron Baecker abaecker -Aarthi Agurusa agurusa -Alan Fontenot logeaux -Alan Fontenot logeaux <36168460+logeaux@users.noreply.github.com> -Abdul Muneer abdulmuneer -Abhilash Barigidad abhilash42 <64172584+abhilash42@users.noreply.github.com> -Abhinav Reddy tabhi0797 -Adam Ginsburg Adam Ginsburg -Albert Jornet Puig jurnix -Alex Rockhill Alex -Alex Griffing alex -Alex Griffing argriffing -Alex Griffing argriffing -Alex Griffing argriffing -Alex Thomas alexthomas93 -Alexander Belopolsky Alexander Belopolsky -Alexander Belopolsky Alexander Belopolsky +Aaron Baecker +Aarthi Agurusa +Alan Fontenot +Alan Fontenot <36168460+logeaux@users.noreply.github.com> +Abdul Muneer +Abhilash Barigidad +Abhilash Barigidad <64172584+abhilash42@users.noreply.github.com> +Abhinav Reddy +Adam Ginsburg +Aerik Pawson <45904740+aerikpawson@users.noreply.github.com> +Albert Jornet Puig +Alex Rockhill +Alex Griffing +Alex Griffing +Alex Griffing +Alex Henrie +Alex Rogozhnikov +Alex Thomas +Alexander Belopolsky +Alexander Belopolsky +Alexander Belopolsky Alexander Belopolsky sasha -Alexander Jung aleju -Alexander Shadchin Alexandr Shadchin -Alexander Shadchin shadchin -Allan Haldane ahaldane +Alexander Hunt +Alexander Jung +Alexander Shadchin +Alexander Shadchin +Alizé Papp <68250865+alize-papp@users.noreply.github.com> +Allan Haldane +Al-Baraa El-Hag <48454648+a-elhag@users.noreply.github.com> Alok Singhal Alok Singhal -Alyssa Quek alyssaq -Amir Sarabadani amir -Anatoly Techtonik anatoly techtonik -Andras Deak adeak -Andrea Olivo andryandrew@gmail.com andryandrew -Andrea Pattori patto90 -Andrea Sangalli and-sang <53617841+and-sang@users.noreply.github.com> -Andrei Kucharavy chiffa -Anne Archibald aarchiba -Anne Archibald Anne Archibald -Anže Starič astaric -Aron Ahmadia ahmadia -Arun Persaud Arun Persaud -Ashutosh Singh ashutosh619-sudo -Ashutosh Singh Ashutosh singh <55102089+Ashutosh619-sudo@users.noreply.github.com> -Åsmund Hjulstad Åsmund Hjulstad -Auke Wiggers auke -Badhri Narayanan Krishnakumar badhrink -Bangcheng Yang DumbMice -Behzad Nouri behzad nouri -Ben Nathanson bjnath -Benjamin Root Ben Root +Alyssa Quek +Amir Sarabadani +Anas Khan +Anatoly Techtonik +Andras Deak +Andrea Olivo +Andrea Pattori +Andrea Sangalli <53617841+and-sang@users.noreply.github.com> +Andreas Klöckner +Andreas Schwab +Andrei Kucharavy +Andrew Lawson +Anirudh Subramanian +Anne Archibald +Anne Archibald +Anne Bonner <35413198+bonn0062@users.noreply.github.com> +Anthony Vo <43098273+anthonyhvo12@users.noreply.github.com> +Antoine Pitrou +Anže Starič +Aron Ahmadia +Arun Persaud +Ashutosh Singh +Ashutosh Singh <55102089+Ashutosh619-sudo@users.noreply.github.com> +Åsmund Hjulstad +Auke Wiggers +Badhri Narayanan Krishnakumar +Bangcheng Yang +Bas van Beek <43369155+BvB93@users.noreply.github.com> +Behzad Nouri +Ben Nathanson +Benjamin Root Benjamin Root weathergod -Bernardt Duvenhage bduvenhage -Bernie Gray bernie gray -Bertrand Lefebvre bertrand -Bertrand Lefebvre Bertrand -Bharat Raghunathan Bharat123Rox -Bill Spotz William Spotz -Bill Spotz wfspotz@sandia.gov -Bob Eldering bobeldering -Brett R Murphy brettrmurphy +Bernardt Duvenhage +Bernie Gray +Bertrand Lefebvre +Bharat Raghunathan +Bharat Raghunathan +Bob Eldering +Brett R Murphy +Brigitta Sipocz +Brian Soto +Brian Soto +Brian Soto Bryan Van de Ven Bryan Van de Ven Bryan Van de Ven Bryan Van de Ven -Bui Duc Minh Mibu287 <41239569+Mibu287@users.noreply.github.com> -Carl Kleffner carlkl -Carl Leake leakec -Chris Burns chris.burns -Chris Kerr Chris Kerr -Christian Clauss cclauss 
-Christopher Hanley chanley +Bui Duc Minh <41239569+Mibu287@users.noreply.github.com> +Carl Kleffner +Carl Leake +Charles Stern <62192187+cisaacstern@users.noreply.github.com> +Chris Barker +Chris Burns +Chris Holland <41524756+ChrisAHolland@users.noreply.github.com> +Chris Kerr +Chris Vavaliaris +Christian Clauss +Christopher Dahlin +Christopher Hanley +Christoph Gohlke +Christoph Gohlke Christoph Gohlke cgholke -Christoph Gohlke cgohlke -Christoph Gohlke Christolph Gohlke -Chunlin Fang Qiyu8 -Chunlin Fang qiyu8 -Chunlin Fang qiyu8 -Chunlin Fang Chunlin -Colin Snyder <47012605+colinsnyder@users.noreply.github.com> colinsnyder <47012605+colinsnyder@users.noreply.github.com> -Daniel B Allan danielballan -Daniel da Silva Daniel da Silva -Daniel da Silva Daniel da Silva -Daniel Hrisca danielhrisca -Daniel J Farrell danieljfarrell -Daniel Montes <53720019+Aerysv@users.noreply.github.com> Aerysv <53720019+Aerysv@users.noreply.github.com> +Chunlin Fang +Chunlin Fang +Chunlin Fang <834352945@qq.com> +Colin Snyder <8csnyder@gmail.com> <47012605+colinsnyder@users.noreply.github.com> +Constanza Fierro +Daniel B Allan +Daniel da Silva +Daniel da Silva +Daniel Hrisca +Daniel J Farrell +Daniel Montes <53720019+Aerysv@users.noreply.github.com> +Daniel Müllner Daniel Müllner Daniel -Daniel Müllner dmuellner -Daniel Rasmussen drasmuss +Daniel Rasmussen +Daniel G. A. Smith +Daniel G. A. Smith +Dario Mory David Huard dhuard -David M Cooke cookedm -David Nicholson davidjn -David Ochoa ochoadavid -Davide Dal Bosco davidedalbosco <62077652+davidedalbosco@users.noreply.github.com> -Dawid Zych silenc3r -Dennis Zollo denniszollo -Derek Homeier Derek Homeier -Derek Homeier Derek Homeir -Derek Homeier Derek Homier -Derrick Williams derrick -Dmitriy Shalyga zuko3d -Dustan Levenstein dustanlevenstein <43019642+dustanlevenstein@users.noreply.github.com> -Ed Schofield edschofield -Egor Zindy zindy +David M Cooke +David Nicholson +David Ochoa +David Pitchford +Davide Dal Bosco <62077652+davidedalbosco@users.noreply.github.com> +Dawid Zych +Dennis Zollo +Derek Homeier +Derek Homeier +Derek Homeier +Derrick Williams +Dmitriy Shalyga +Dustan Levenstein <43019642+dustanlevenstein@users.noreply.github.com> +Dylan Cutler +Ed Schofield +Egor Zindy +Elliott M. Forney Endolith -Erik M. Bray E. M. Bray -Erik M. Bray Erik Bray +Erik M. Bray +Erik M. Bray +Erik M. Bray Eric Fode Eric Fode -Eric Quintero e-q -Ernest N. Mamikonyan mamikony -Etienne Guesnet EGuesnet <51407514+EGuesnet@users.noreply.github.com> +Eric Quintero +Ernest N. Mamikonyan +Etienne Guesnet <51407514+EGuesnet@users.noreply.github.com> +Eva Jau Evgeni Burovski Evgeni Burovski -Evgeny Toder eltjpm -Fernando Perez Fernando Perez +Evgeny Toder +Fernando Perez +Filip Trojan +François Le Lay +Frank Breitling Friedrich Dunne dunneff Frederic Bastien Frederic -Gael Varoquaux GaelVaroquaux -Gerrit Holl Gerrit Holl -Giuseppe Venturini ggventurini -Golnaz Irannejad golnazir -Gopal Singh Meena gopalmeena -Greg Knoll gkBCCN -Greg Yang eulerreich -Greg Young gfyoung -Greg Young gfyoung +Gael Varoquaux +Gerrit Holl +Gerrit Holl +Giuseppe Venturini +Golnaz Irannejad +Gopal Singh Meena +Greg Knoll +Greg Yang +Greg Young +Greg Young +Gregory R. Lee +Gregory R. Lee Guo Ci guoci -Han Genuit 87 -Han Genuit hangenuit@gmail.com -Han Genuit Han +Hameer Abbasi +Han Genuit Hanno Klemm hklemm -Hemil Desai hemildesai -Hiroyuki V. 
Yamazaki hvy -Hugo van Kemenade Hugo -Irvin Probst I--P -Isabela Presedo-Floyd isabela-pf -Gerhard Hobler hobler -Guillaume Peillex hippo91 -Jaime Fernandez Jaime Fernandez -Jaime Fernandez jaimefrio -Jaime Fernandez Jaime -Jakob Jakobson Jakob -Jakob Jakobson Jakob Jacobson -Jakob Jakobson jakobjakobson13 <43045863+jakobjakobson13@users.noreply.github.com> -James Webber jamestwebber +Helder Oliveira +Hemil Desai +Hiroyuki V. Yamazaki +Hugo van Kemenade +Inessa Pawson +Irvin Probst +Isabela Presedo-Floyd +Gerhard Hobler +Giannis Zapantis +Guillaume Peillex +Jack J. Woehr +Jaime Fernandez +Jaime Fernandez +Jaime Fernandez +Jamie Macey +Jakob Jakobson +Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> +James Bourbeau +James Webber +Jan Schlüter Jarrod Millman Jarrod Millman -Jason Grout Jason Grout -Jason King jason king -Jay Bourque jayvius -Jean Utke jutke -Jeffrey Yancey Jeff <3820914+jeffyancey@users.noreply.github.com> -Jeremy Lay jeremycl01 +Jason Grout +Jason King +Jay Bourque +Jean Utke +Jeff VanOss +Jeffrey Yancey <3820914+jeffyancey@users.noreply.github.com> +Jeremy Lay Jérémie du Boisberranger jeremiedbb <34657725+jeremiedbb@users.noreply.github.com> -Jerome Kelleher jeromekelleher -Johannes Hampp euronion <42553970+euronion@users.noreply.github.com> -Johannes Schönberger Johannes Schönberger -Johann Faouzi johann.faouzi -John Darbyshire <24256554+attack68@users.noreply.github.com> attack68 <24256554+attack68@users.noreply.github.com> -John Hagen johnthagen -John Kirkham jakirkham -Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz -Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz -Joseph Fox-Rabinovitz Mad Physicist -Joseph Martinot-Lagarde Joseph Martinot-Lagarde -Julian Taylor Julian Taylor -Julian Taylor Julian Taylor +Jérome Eertmans +Jerome Kelleher +Johannes Hampp <42553970+euronion@users.noreply.github.com> +Johannes Schönberger +Johann Faouzi +John Darbyshire <24256554+attack68@users.noreply.github.com> <24256554+attack68@users.noreply.github.com> +John Hagen +John Kirkham +John Kirkham +Joseph Fox-Rabinovitz +Joseph Fox-Rabinovitz +Joseph Fox-Rabinovitz +Joseph Martinot-Lagarde +Julian Taylor +Julian Taylor +Julian Taylor Julien Lhermitte Julien Lhermitte -Julien Schueller jschueller -Justus Magin keewis -Justus Magin Keewis -Kai Striega kai -Kai Striega kai-striega -Kai Striega kai-striega -Karan Dhir karan-dhir -Khaled Ben Abdallah Okuda KhaledTo +Julien Schueller +Justus Magin +Justus Magin +Kai Striega +Kai Striega +Karan Dhir +Kevin Sheppard +Kevin Sheppard +Kerem Hallaç +Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso -Konrad Kapp k_kapp@yahoo.com -Kriti Singh kritisingh1 -Kmol Yuan Yuan +Konrad Kapp +Kriti Singh +Kmol Yuan +Kumud Lakara <55556183+kumudlakara@users.noreply.github.com> Lars Buitinck Lars Buitinck Lars Buitinck Lars Buitinck -Lars Grüter Lars G -Luis Pedro Coelho Luis Pedro Coelho -Luke Zoltan Kelley lzkelley -Madhulika Jain Chambers madhulikajc <53166646+madhulikajc@users.noreply.github.com> -Magdalena Proszewska mpro -Magdalena Proszewska mproszewska <38814059+mproszewska@users.noreply.github.com> -Manoj Kumar MechCoder -Marcin Podhajski m-podhajski <36967358+m-podhajski@users.noreply.github.com> -Mark DePristo markdepristo -Mark Weissman m-d-w -Mark Wiebe Mark -Mark Wiebe Mark Wiebe -Mark Wiebe Mark Wiebe -Mark Wiebe Mark Wiebe -Martin Goodson martingoodson -Martin Reinecke mreineck -Martin Teichmann Martin Teichmann -Matt Hancock matt -Martino Sorbaro martinosorb -Mattheus Ueckermann empeeu -Matthew Harrigan 
MattHarrigan -Matti Picus mattip -Maximilian Konrad MLK97 -Melissa Weber Mendonça Melissa Weber Mendonca -Melissa Weber Mendonça melissawm +Lars Grüter +Lars Grüter +Leonardus Chen +Licht Takeuchi +Luis Pedro Coelho +Luke Zoltan Kelley +Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> +Magdalena Proszewska +Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> +Manoj Kumar +Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> +Mark DePristo +Mark Weissman +Mark Wiebe +Mark Wiebe +Mark Wiebe +Mark Wiebe +Martin Goodson +Martin Reinecke +Martin Teichmann +Mary Conley +Matheus Vieira Portela +Mathieu Lamarre +Matías Ríos +Matt Ord <55235095+Matt-Ord@users.noreply.github.com> +Matt Hancock +Martino Sorbaro +Mattheus Ueckermann +Matthew Harrigan +Matthias Bussonnier +Matti Picus +Maximilian Konrad +Melissa Weber Mendonça +Meltem Eren Copur Michael Behrisch behrisch Michael Droettboom mdroe -Michael K. Tran mtran -Michael Martin mmartin -Michael Schnaitter schnaitterm -Muhammad Kasim mfkasim91 -Masashi Kishimoto kishimoto-banana -Nathaniel J. Smith njsmith +Michael Dubravski +Michael Dubravski <41096057+mdubravski@users.noreply.github.com> +Michael Felt +Michael Hirsch +Michael K. Tran +Michael Martin +Michael Schnaitter +Michael Seifert +Michel Fruchart +Mike Toews +Mircea Akos Bruma +Mircea Akos Bruma +Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Muhammad Kasim +Masashi Kishimoto +Mukulikaa Parhari <60316606+Mukulikaa@users.noreply.github.com> +Nathaniel J. Smith Naveen Arunachalam naveenarun +Neil Girdhar +Nick Papior +Nicola Soranzo Nicolas Scheffer Nicolas Scheffer Nicholas A. Del Grosso nickdg -Nicholas McKibben mckib2 +Nicholas McKibben Nick Minkyu Lee fivemok <9394929+fivemok@users.noreply.github.com> -Ondřej Čertík Ondrej Certik -Óscar Villellas Guillén ovillellas +Oliver Eberle +Ondřej Čertík +Óscar Villellas Guillén +Panos Mavrogiorgos Pat Miller patmiller -Paul Ivanov Paul Ivanov +Paul Ivanov +Paul Ivanov Paul YS Lee Paul -Paul Jacobson hpaulj -Pearu Peterson Pearu Peterson -Pete Peeradej Tanruangporn petetanru -Peter Bell peterbell10 -Peter J Cock peterjc +Paul Jacobson +Pearu Peterson +Pete Peeradej Tanruangporn +Peter Bell +Peter J Cock Phil Elson -Pierre GM pierregm +Pierre GM Pierre GM pierregm -Piotr Gaiński panpiort8 +Piotr Gaiński Piotr Gaiński Pan Jan Prabhu Ramachandran prabhu -Przemyslaw Bartosik przemb +Przemyslaw Bartosik +Raghuveer Devulapalli +Raghuveer Devulapalli <44766858+r-devulap@users.noreply.github.com> Rajas Rade lkdmttg7 -Ralf Gommers Ralf Gommers -Ralf Gommers rgommers -Rehas Sachdeva rehassachdeva -Ritta Narita RittaNarita -Riya Sharma ayir -Robert Kern Robert Kern -Robert LU RobberPhex +Rakesh Vasudevan +Ralf Gommers +Rehas Sachdeva +Ritta Narita +Riya Sharma +Robert Kern +Robert LU +Robert T. 
McGibbon +Roland Kaufmann +Roman Yurchak Ronan Lamy Ronan Lamy -Russell Hewett rhewett -Ryan Blakemore ryanblak -Sam Preston jspreston -Sam Radhakrishnan = <=> -Sam Radhakrishnan sam09 -Sanchez Gonzalez Alvaro alvarosg -Saullo Giovani saullogiovani +Russell Hewett +Ryan Blakemore +Ryan Polley +Ryan Soklaski +Ryan Soklaski +Sabrina Simao +Sabrina Simao SabrinaSimao +Sam Preston +Sam Radhakrishnan = <=> # committed without an email address +Samesh Lakhotia +Samesh Lakhotia <43701530+sameshl@users.noreply.github.com> +Sami Salonen +Sanchez Gonzalez Alvaro +Saullo Giovani Saurabh Mehta -Sebastian Berg seberg -Sergei Vorfolomeev vorfol <39548292+vorfol@users.noreply.github.com> -Shekhar Prasad Rajak shekharrajak -Shota Kawabuchi skwbc -Siavash Eliasi siavashserver -Simon Gasse sgasse -Søren Rasmussen sorenrasmussenai <47032123+sorenrasmussenai@users.noreply.github.com> -Stefan Behnel scoder -Stefan van der Walt Stefan van der Walt -Stefan van der Walt Stefan van der Walt -Stephan Hoyer Stephan Hoyer +Sebastian Berg +Sergei Vorfolomeev <39548292+vorfol@users.noreply.github.com> +Shekhar Prasad Rajak +Shen Zhou +Shota Kawabuchi +Siavash Eliasi +Simon Conseil +Simon Gasse +Simon Gasse +Søren Rasmussen <47032123+sorenrasmussenai@users.noreply.github.com> +Spencer Hill +Stefan Behnel +Stefan van der Walt +Stefan van der Walt +Stephan Hoyer +Stephan Hoyer +Steve Stagg Steven J Kern -Stuart Archibald stuartarchibald -SuryaChand P Surya P -SuryaChand P psschand -Takanori Hirano takanori-pskq -Takanori H takanori-pskq -Thomas A Caswell Thomas A Caswell -Tim Cera tim cera -Tim Teichmann tteichmann -Tim Teichmann tteichmann <44259103+tteichmann@users.noreply.github.com> -Tirth Patel tirthasheshpatel -Tobias Pitters CloseChoice -Tobias Pitters <31857876+CloseChoice@users.noreply.github.com> -Tom Boyd pezcore -Tom Poole tpoole -Tony LaTorre tlatorre -Travis Oliphant Travis E. 
Oliphant -Travis Oliphant Travis Oliphant -Valentin Haenel Valentin Haenel -Rakesh Vasudevan vrakesh -Vrinda Narayan vrindaaa <48102157+vrindaaa@users.noreply.github.com> -Wansoo Kim marload -Warren Weckesser Warren Weckesser -Weitang Li wtli@Dirac -Weitang Li wtli -Wendell Smith Wendell Smith -Wim Glenn wim glenn -Wojtek Ruszczewski wrwrwr -Yogesh Raisinghani raisinghanii <46864533+raisinghanii@users.noreply.github.com> -Yuji Kanagawa kngwyu -Yury Kirienko kirienko -Zac Hatfield-Dodds Zac-HD -Zixu Zhao ZZhaoTireless -Ziyan Zhou Ziyan -Zieji Pohz jpoh -Zieji Pohz zjpoh -Zieji Pohz Zijie (ZJ) Poh <8103276+zjpoh@users.noreply.github.com> -Zolisa Bleki zoj613 <44142765+zoj613@users.noreply.github.com> -Zolisa Bleki RedRuM <44142765+zoj613@users.noreply.github.com> -luzpaz luz.paz -luzpaz luzpaz -spacescientist spacescientist +Stuart Archibald +Stuart Archibald +SuryaChand P +Takanori Hirano +Thomas A Caswell +Thomas Kluyver +Thomas Orgis +Tim Cera +Tim Teichmann +Tim Teichmann <44259103+tteichmann@users.noreply.github.com> +Tirth Patel +Tobias Pitters +Tobias Pitters <31857876+CloseChoice@users.noreply.github.com> +Tobias Uelwer +Tom Boyd +Tom Poole +Tong Zou +Tony LaTorre +Toshiki Kataoka +Travis Oliphant +Travis Oliphant +Travis Oliphant +Valentin Haenel +Valentin Haenel +Varun Nayyar +Vrinda Narayan +Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> +Wansoo Kim +Warren Weckesser +Weitang Li +Wendell Smith +William Spotz +Wim Glenn +Wojtek Ruszczewski +Wojciech Rzadkowski <33913808+wrzadkow@users.noreply.github.com> +Yang Hau +Yang Hau +Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> +Yu Feng +Yuji Kanagawa +Yury Kirienko +Zac Hatfield-Dodds +Zé Vinícius +Zixu Zhao +Ziyan Zhou +Zieji Pohz +Zieji Pohz <8103276+zjpoh@users.noreply.github.com> +Zolboo Erdenebaatar +Zolisa Bleki <44142765+zoj613@users.noreply.github.com> +~DWesl <22566757+DWesl@users.noreply.github.com> +~Endolith +~FX Coudert +~Illviljan <14371165+Illviljan@users.noreply.github.com> +~LSchroefl <65246829+LSchroefl@users.noreply.github.com> +~Lbogula +~Patrick <39380924+xamm@users.noreply.github.com> +~Scian <65375075+hoony6134@users.noreply.github.com> +~h-vetinari +~h6197627 <44726212+h6197627@users.noreply.github.com> +~jbCodeHub +~legoffant <58195095+legoffant@users.noreply.github.com> +~luzpaz +~luzpaz +~sfolje0 +~spacescientist +~tautaus +~xoviat <49173759+xoviat@users.noreply.github.com> +~xoviat <49173759+xoviat@users.noreply.github.com> +~yetanothercheer From 8dc768964b5578a8aa9db1ef2c55134a00731e10 Mon Sep 17 00:00:00 2001 From: Carl Michal Date: Sat, 22 May 2021 20:43:10 -0700 Subject: [PATCH 1227/1270] Fix compile-time test of POPCNT The compile-time test of POPCNT, cpu_popcnt.c produced code that would execute without error even if the machine didn't support the popcnt instruction. This patch attempts to use popcnt on random numbers so the compiler can't substitute the answer at compile time. 
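For context, a small sketch of how the outcome of CPU feature detection can be inspected
at runtime; the exact attribute path is an assumption on my part (it should be
``numpy.core._multiarray_umath.__cpu_features__`` on NumPy 1.20+), and it reflects what the
running CPU supports rather than this compile-time probe itself:

    import numpy as np

    # Assumed attribute: a dict mapping feature names such as "POPCNT"
    # to whether they were detected on the running CPU.
    features = np.core._multiarray_umath.__cpu_features__
    print(features.get("POPCNT"))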
--- numpy/distutils/checks/cpu_popcnt.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/numpy/distutils/checks/cpu_popcnt.c b/numpy/distutils/checks/cpu_popcnt.c index e6a80fb40be4..f6c785dd2a97 100644 --- a/numpy/distutils/checks/cpu_popcnt.c +++ b/numpy/distutils/checks/cpu_popcnt.c @@ -4,20 +4,26 @@ #include #endif +#include + int main(void) { long long a = 0; int b; + + a = random(); + b = random(); + #ifdef _MSC_VER #ifdef _M_X64 - a = _mm_popcnt_u64(1); + a = _mm_popcnt_u64(a); #endif - b = _mm_popcnt_u32(1); + b = _mm_popcnt_u32(b); #else #ifdef __x86_64__ - a = __builtin_popcountll(1); + a = __builtin_popcountll(a); #endif - b = __builtin_popcount(1); + b = __builtin_popcount(b); #endif return (int)a + b; } From 52d5fe1ede45083d0783c3e2bbaee5c44df9d553 Mon Sep 17 00:00:00 2001 From: Carl Michal Date: Sun, 23 May 2021 08:24:52 -0700 Subject: [PATCH 1228/1270] Change fix of cpu_popcnt.c to use _mm_popcnt_u64/_mm_popcnt_u32 on GCC _builtin_popcount is always available, so the compile-time check always succeeds. --- numpy/distutils/checks/cpu_popcnt.c | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/numpy/distutils/checks/cpu_popcnt.c b/numpy/distutils/checks/cpu_popcnt.c index f6c785dd2a97..540c98dab05d 100644 --- a/numpy/distutils/checks/cpu_popcnt.c +++ b/numpy/distutils/checks/cpu_popcnt.c @@ -4,26 +4,16 @@ #include #endif -#include - -int main(void) +int main(int argc, char **argv) { - long long a = 0; - int b; - - a = random(); - b = random(); - -#ifdef _MSC_VER - #ifdef _M_X64 + // To make sure popcnt instructions are generated + // and been tested against the assembler + unsigned long long a = *((unsigned long long*)argv[argc-1]); + unsigned int b = *((unsigned int*)argv[argc-2]); + +#if defined(_M_X64) || defined(__x86_64__) a = _mm_popcnt_u64(a); - #endif - b = _mm_popcnt_u32(b); -#else - #ifdef __x86_64__ - a = __builtin_popcountll(a); - #endif - b = __builtin_popcount(b); #endif + b = _mm_popcnt_u32(b); return (int)a + b; } From 3c84e9ae0cc0082a31aa87252a48e3ae5f77bb93 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 23 May 2021 07:36:51 -0600 Subject: [PATCH 1229/1270] BUG: Fix test_numpy_version. - Make versions of the form '1.22.0.dev0' valid for non-releases. - Put empty match at end of groups instead of at the beginning. - Require eol in match, do not allow trailing characters. --- numpy/tests/test_numpy_version.py | 25 +++++++++++++++++++++---- numpy/version.py | 2 +- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index 7fd56681550a..28595026e212 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -1,3 +1,20 @@ +""" +Check the numpy version is valid. + +Note that a development version is marked by the presence of 'dev0' or '+' +in the version string, all else is treated as a release. The version string +itself is set from the output of ``git describe`` which relies on tags. + +Examples +-------- + +Valid Development: 1.22.0.dev0 1.22.0.dev0+5-g7999db4df2 1.22.0+5-g7999db4df2 +Valid Release: 1.21.0.rc1, 1.21.0.b1, 1.21.0 +Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a + +Note that a release is determined by the version string, which in turn +is controlled by the result of the ``git describe`` command. 
+""" import re import numpy as np @@ -7,11 +24,11 @@ def test_valid_numpy_version(): # Verify that the numpy version is a valid one (no .post suffix or other # nonsense). See gh-6431 for an issue caused by an invalid version. - version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])" - dev_suffix = r"\.dev0\+[0-9]*\.g[0-9a-f]+" + version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9]|)" + dev_suffix = r"(\.dev0|)(\+[0-9]*\.g[0-9a-f]+|)" if np.version.release: - res = re.match(version_pattern, np.__version__) + res = re.match(version_pattern + '$', np.__version__) else: - res = re.match(version_pattern + dev_suffix, np.__version__) + res = re.match(version_pattern + dev_suffix + '$', np.__version__) assert_(res is not None, np.__version__) diff --git a/numpy/version.py b/numpy/version.py index 8a1d05aa481a..48bdb32dac07 100644 --- a/numpy/version.py +++ b/numpy/version.py @@ -6,6 +6,6 @@ version: str = vinfo["version"] full_version: str = vinfo['version'] git_revision: str = vinfo['full-revisionid'] -release = 'dev0' not in version +release = 'dev0' not in version and '+' not in version del get_versions, vinfo From b167abfa820bb3da6ee0f2a35b11a16bd8506b95 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 22 May 2021 17:49:10 -0600 Subject: [PATCH 1230/1270] MAINT: Update cversions. No changes for the NumPy 1.21.x release. --- numpy/core/code_generators/cversions.txt | 1 + numpy/core/setup_common.py | 1 + 2 files changed, 2 insertions(+) diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt index 2d3a653913a3..09c1c31e03ef 100644 --- a/numpy/core/code_generators/cversions.txt +++ b/numpy/core/code_generators/cversions.txt @@ -55,4 +55,5 @@ # Version 14 (NumPy 1.20) # DType related API additions. # A new field was added to the end of PyArrayObject_fields. +# Version 14 (NumPy 1.21) No change. 0x0000000e = 17a0f366e55ec05e5c5c149123478452 diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index fe66bd17381b..a700610d3dec 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -42,6 +42,7 @@ # 0x0000000d - 1.16.x # 0x0000000d - 1.19.x # 0x0000000e - 1.20.x +# 0x0000000e - 1.21.x C_API_VERSION = 0x0000000e class MismatchCAPIWarning(Warning): From e6ce4340c3125895bc6091a474075a6109de370f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 22 May 2021 20:27:11 -0600 Subject: [PATCH 1231/1270] MAINT: Update mailmap. --- .mailmap | 50 ++++++++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/.mailmap b/.mailmap index 30a0981cdbeb..3fb20e3ce46b 100644 --- a/.mailmap +++ b/.mailmap @@ -8,6 +8,27 @@ # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. 
+@8bitmp3 <19637339+8bitmp3@users.noreply.github.com> +@DWesl <22566757+DWesl@users.noreply.github.com> +@Endolith +@Illviljan <14371165+Illviljan@users.noreply.github.com> +@LSchroefl <65246829+LSchroefl@users.noreply.github.com> +@Lbogula +@Lisa <34400837+lyzlisa@users.noreply.github.com> +@Patrick <39380924+xamm@users.noreply.github.com> +@Scian <65375075+hoony6134@users.noreply.github.com> +@h-vetinari +@h6197627 <44726212+h6197627@users.noreply.github.com> +@jbCodeHub +@legoffant <58195095+legoffant@users.noreply.github.com> +@luzpaz +@luzpaz +@sfolje0 +@spacescientist +@tautaus +@xoviat <49173759+xoviat@users.noreply.github.com> +@xoviat <49173759+xoviat@users.noreply.github.com> +@yetanothercheer Aaron Baecker Aarthi Agurusa Alan Fontenot @@ -65,6 +86,7 @@ Ashutosh Singh <55102089+Ashutosh619-sudo@users.nor Auke Wiggers Badhri Narayanan Krishnakumar Bangcheng Yang +Bhargav V <12525622+brpy@users.noreply.github.com> Bas van Beek <43369155+BvB93@users.noreply.github.com> Behzad Nouri Ben Nathanson @@ -133,7 +155,6 @@ Dylan Cutler Ed Schofield Egor Zindy Elliott M. Forney -Endolith Erik M. Bray Erik M. Bray Erik M. Bray @@ -150,6 +171,7 @@ François Le Lay Frank Breitling Friedrich Dunne dunneff Frederic Bastien Frederic +FX Coudert Gael Varoquaux Gerrit Holl Gerrit Holl @@ -217,7 +239,10 @@ Justus Magin Justus Magin Kai Striega Kai Striega +Kasia Leszek +Kasia Leszek <39829548+katleszek@users.noreply.github.com> Karan Dhir +Keller Meier Kevin Sheppard Kevin Sheppard Kerem Hallaç @@ -254,7 +279,8 @@ Mary Conley Matheus Vieira Portela Mathieu Lamarre Matías Ríos -Matt Ord <55235095+Matt-Ord@users.noreply.github.com> +Matt Ord +Matt Ord <55235095+Matt-Ord@users.noreply.github.com> Matt Hancock Martino Sorbaro Mattheus Ueckermann @@ -407,23 +433,3 @@ Zieji Pohz Zieji Pohz <8103276+zjpoh@users.noreply.github.com> Zolboo Erdenebaatar Zolisa Bleki <44142765+zoj613@users.noreply.github.com> -~DWesl <22566757+DWesl@users.noreply.github.com> -~Endolith -~FX Coudert -~Illviljan <14371165+Illviljan@users.noreply.github.com> -~LSchroefl <65246829+LSchroefl@users.noreply.github.com> -~Lbogula -~Patrick <39380924+xamm@users.noreply.github.com> -~Scian <65375075+hoony6134@users.noreply.github.com> -~h-vetinari -~h6197627 <44726212+h6197627@users.noreply.github.com> -~jbCodeHub -~legoffant <58195095+legoffant@users.noreply.github.com> -~luzpaz -~luzpaz -~sfolje0 -~spacescientist -~tautaus -~xoviat <49173759+xoviat@users.noreply.github.com> -~xoviat <49173759+xoviat@users.noreply.github.com> -~yetanothercheer From 11c7008ebf6f7e59ecfb92fb3716eae2313c7d67 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 13 May 2021 17:39:29 -0600 Subject: [PATCH 1232/1270] DOC: Create 1.21.0-changelog.rst --- doc/changelog/1.21.0-changelog.rst | 745 +++++++++++++++++++++++++++++ 1 file changed, 745 insertions(+) create mode 100644 doc/changelog/1.21.0-changelog.rst diff --git a/doc/changelog/1.21.0-changelog.rst b/doc/changelog/1.21.0-changelog.rst new file mode 100644 index 000000000000..a0891b232120 --- /dev/null +++ b/doc/changelog/1.21.0-changelog.rst @@ -0,0 +1,745 @@ + +Contributors +============ + +A total of 171 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* @8bitmp3 + +* @DWesl + +* @Endolith +* @Illviljan + +* @Lbogula + +* @Lisa + +* @Patrick + +* @Scian + +* @h-vetinari + +* @h6197627 + +* @jbCodeHub + +* @legoffant + +* @sfolje0 + +* @tautaus + +* @yetanothercheer + +* Abhay Raghuvanshi + +* Adrian Price-Whelan + +* Aerik Pawson + +* Agbonze Osazuwa + +* Aitik Gupta + +* Al-Baraa El-Hag +* Alex Henrie +* Alexander Hunt + +* Alizé Papp + +* Allan Haldane +* Amarnath1904 + +* Amrit Krishnan + +* Andras Deak +* AngelGris + +* Anne Archibald +* Anthony Vo + +* Antony Lee +* Atharva-Vidwans + +* Ayush Verma + +* Bas van Beek +* Bharat Raghunathan +* Bhargav V + +* Brian Soto +* Carl Michal + +* Charles Harris +* Charles Stern + +* Chiara Marmo + +* Chris Barnes + +* Chris Vavaliaris +* Christina Hedges + +* Christoph Gohlke +* Christopher Dahlin + +* Christos Efstathiou + +* Chunlin Fang +* Constanza Fierro + +* Daniel Evans + +* Daniel Montes + +* Dario Mory + +* David Carlier + +* David Stansby +* Deepyaman Datta + +* Derek Homeier +* Dong Keun Oh + +* Dylan Cutler + +* Eric Larson +* Eric Wieser +* Eva Jau + +* Evgeni Burovski +* FX Coudert + +* Faris A Chugthai + +* Filip Ter + +* Filip Trojan + +* François Le Lay + +* Ganesh Kathiresan +* Giannis Zapantis + +* Giulio Procopio + +* Greg Lucas + +* Hollow Man + +* Holly Corbett + +* Inessa Pawson +* Isabela Presedo-Floyd +* Ismael Jimenez + +* Isuru Fernando +* Jakob Jakobson +* James Gerity + +* Jamie Macey + +* Jasmin Classen + +* Jody Klymak + +* Joseph Fox-Rabinovitz +* Jérome Eertmans + +* Kamil Choudhury + +* Kasia Leszek + +* Keller Meier + +* Kevin Sheppard +* Kulin Seth + +* Kumud Lakara + +* Laura Kopf + +* Laura Martens + +* Leo Singer + +* Leonardus Chen + +* Lima Tango + +* Lumir Balhar + +* Maia Kaplan + +* Mainak Debnath + +* Marco Aurélio da Costa + +* Marta Lemanczyk + +* Marten van Kerkwijk +* Mary Conley + +* Marysia Winkels + +* Mateusz Sokół + +* Matt Haberland +* Matt Hall + +* Matt Ord + +* Matthew Badin + +* Matthias Bussonnier +* Matthias Geier +* Matti Picus +* Matías Ríos + +* Maxim Belkin + +* Melissa Weber Mendonça +* Meltem Eren Copur + +* Michael Dubravski + +* Michael Lamparski +* Michal W. Tarnowski + +* Michał Górny + +* Mike Boyle + +* Mike Toews +* Misal Raj + +* Mitchell Faas + +* Mukulikaa Parhari + +* Neil Girdhar + +* Nicholas McKibben + +* Nico Schlömer +* Nicolas Hug + +* Nilo Kruchelski + +* Nirjas Jakilim + +* Ohad Ravid + +* Olivier Grisel +* Pamphile ROY + +* Panos Mavrogiorgos + +* Patrick T. Komiske III + +* Pearu Peterson +* Raghuveer Devulapalli +* Ralf Gommers +* Raúl Montón Pinillos + +* Rin Arakaki + +* Robert Kern +* Rohit Sanjay +* Roman Yurchak +* Ronan Lamy +* Ross Barnowski +* Ryan C Cooper +* Ryan Polley + +* Ryan Soklaski +* Sabrina Simao + +* Sayed Adel +* Sebastian Berg +* Shen Zhou + +* Stefan van der Walt +* Sylwester Arabas + +* Takanori Hirano +* Tania Allard + +* Thomas J. Fan + +* Thomas Orgis + +* Tim Hoffmann +* Tomoki, Karatsu + +* Tong Zou + +* Touqir Sajed + +* Tyler Reddy +* Wansoo Kim +* Warren Weckesser +* Weh Andreas + +* Yang Hau +* Yashasvi Misra + +* Zolboo Erdenebaatar + +* Zolisa Bleki + +Pull requests merged +==================== + +A total of 561 pull requests were merged for this release. + +* `#13578 `__: DEP: Deprecate `data_type.dtype` if attribute is not already... 
+* `#15269 `__: ENH: Implement faster keyword argument parsing capable of ``METH_FASTCALL`` +* `#15271 `__: ENH: Optimize and cleanup ufunc calls and ufunc CheckOverrides +* `#15392 `__: BUG: Remove temporary change of descr/flags in VOID functions +* `#16164 `__: DOC: Add more information about poly1d -> polynomial to reference... +* `#16241 `__: ENH: Warn when reloading numpy or using numpy in sub-interpreter +* `#16370 `__: DOC: Fix for building with sphinx 3 +* `#16588 `__: DOC: unify the docs for np.transpose and ndarray.transpose +* `#16818 `__: DOC: added examples section for rfft2 and irfft2 docstring +* `#16855 `__: DOC: Fix Typo (Wrong argument name) +* `#16987 `__: ENH: Phase unwrapping generalized to arbitrary interval size +* `#17102 `__: SIMD: Optimize the performance of np.packbits in AVX2/AVX512F/VSX. +* `#17122 `__: MAINT: Use numpy version for f2py version. +* `#17492 `__: DEP: Shift correlate mode parsing to C and deprecate inexact... +* `#17586 `__: DEP: Formally deprecate `np.typeDict` +* `#17587 `__: SIMD: Replace raw SIMD of sin/cos with NPYV(universal intrinsics) +* `#17636 `__: MAINT: Bump pydata-sphinx-theme and set logo link to index +* `#17637 `__: DOC: Add module template +* `#17719 `__: ENH: Make `ndarray` generic w.r.t. its shape and dtype +* `#17727 `__: ENH: Added libdivide for floor divide +* `#17736 `__: BUG, Benchmark: fix passing optimization build options to asv +* `#17737 `__: MAINT, Benchmark: print the supported CPU features during the... +* `#17778 `__: ENH: Add annotations for comparison operations +* `#17782 `__: SIMD: Optimize the performance of einsum's submodule multiply... +* `#17789 `__: ENH, SIMD: Add new NPYV intrinsics pack(0) +* `#17790 `__: ENH, SIMD: Add new NPYV intrinsics pack(1) +* `#17791 `__: BLD: Enable Werror=undef in travis +* `#17792 `__: ENH: add support for fujitsu compiler to numpy. +* `#17795 `__: ENH: Add two new `_Like` unions +* `#17817 `__: BUG: Ignore fewer errors during array-coercion +* `#17836 `__: MAINT: Add git rules to ignore all SIMD generated files +* `#17843 `__: ENH: Add a mypy plugin for inferring platform-specific `np.number`... +* `#17847 `__: TST: use latest pypy37 not pypy36 +* `#17852 `__: DOC: Doc for deprecate_with_doc +* `#17853 `__: DOC: Clarify docs of np.resize(). +* `#17861 `__: MAINT: Update master after 1.20.x branch. +* `#17862 `__: Make it clearer that np.interp input must be monotonically increasing +* `#17863 `__: MAINT: Implement new casting loops based on NEP 42 and 43 +* `#17866 `__: DOC: fix typo in glossary.rst +* `#17868 `__: BUG, TST: use python-version not PYTHON_VERSION +* `#17872 `__: DOC: update the release howto for oldest-supported-numpy +* `#17874 `__: MAINT: clean up a spurious warning in numpy/typing/setup.py +* `#17875 `__: DOC: Prepare for 1.20.0 release +* `#17876 `__: DOC: fixed typo in np-indexing.png explaining [-2:] slice in... +* `#17877 `__: BUG: Fix buffer readflag errors and small leaks +* `#17878 `__: BUG: np.arange: Allow `stop` not `start` as sole kwargs. +* `#17881 `__: MAINT: Bump hypothesis from 5.41.3 to 5.41.4 +* `#17883 `__: MAINT: Remove duplicate dictionary entry +* `#17884 `__: BUG: numpy.putmask not respecting writeable flag +* `#17886 `__: ENH: Timestamp development versions. +* `#17887 `__: DOC: Update arraycreation +* `#17888 `__: DOC: Correct sentence/statement composition +* `#17889 `__: DOC: Rename basics to fundamentals + added description +* `#17895 `__: MAINT: Remove remaining uses of Python 3.6. 
+* `#17896 `__: ENH: Speed up default `where` in the reduce-like method +* `#17897 `__: BUG: merging PR to use -Werror=undef broke another PR +* `#17900 `__: DEP: Finalize unravel_index `dims` alias for `shape` keyword +* `#17906 `__: BUG: Fix a MacOS build failure +* `#17907 `__: BUG: 'bool' object has no attribute 'ndim' +* `#17912 `__: BUG: remove stray '+' from f-string upgrade in numba/extending.py +* `#17914 `__: DOC: Update release notes to mention `type(dtype) is not np.dtype` +* `#17920 `__: NEP: Update NEP 42 and 43 according to the current implementation +* `#17921 `__: BUG: Enforce high >= low on uniform number generators +* `#17929 `__: MAINT: Replace `contextlib_nullcontext` with `contextlib.nullcontext` +* `#17934 `__: DOC: Add information about leak checking and valgrind +* `#17936 `__: TST: Fixed an issue where the typing tests would fail for comparison... +* `#17942 `__: DOC: Clarify savez documentation of naming arrays in output file +* `#17943 `__: [DOC]: Wrong length for underline in docstring. +* `#17945 `__: MAINT: Bump hypothesis from 5.41.4 to 5.41.5 +* `#17950 `__: BUG: Removed empty String from Nag Compiler's Flags +* `#17953 `__: NEP: Accept NEP 42 -- New and extensible DTypes +* `#17955 `__: DOC: Replace {var} in docstrings type annotation with `scalar... +* `#17956 `__: ENH: Use versioneer to manage numpy versions. +* `#17957 `__: TST: Fix crosstalk issues with polynomial str tests. +* `#17958 `__: MAINT: Optimize the performance of count_nonzero by using universal... +* `#17960 `__: TST, BUILD: Add a native x86 baseline build running on ubuntu-20.04 +* `#17962 `__: TST: Ensure tests are not sensitive to execution order +* `#17966 `__: BUG: Add missing decref to arange +* `#17968 `__: ENH: Use more typevars in `np.dtype` +* `#17971 `__: BUG, SIMD: Fix direactive check for AVX512BW of intrinsics npyv_tobits_* +* `#17973 `__: DEP: Futurewarn on requiring __len__ on array-likes +* `#17974 `__: BLD: Fixes for versioneer and setup.py sdist. +* `#17976 `__: DOC: Add/remove spaces in snippets and re-format here and there +* `#17978 `__: MAINT: Update test_requirements and release_requirements. +* `#17981 `__: ENH: Add proper dtype-support to `np.flatiter` +* `#17985 `__: ENH, SIMD: Ditching the old CPU dispatcher(Arithmetic) +* `#17992 `__: DOC: Replace verbatim with reference to local parameter +* `#17993 `__: [DOC] np.kron use double backticks for non-references +* `#17994 `__: SIMD: Optimize the performance of einsum's submodule dot . +* `#17995 `__: MAINT: Bump pytest from 6.0.2 to 6.2.0 +* `#17996 `__: MAINT: Update wheel requirement from <=0.35.1 to <0.36.3 +* `#17997 `__: MAINT: Bump hypothesis from 5.41.5 to 5.43.3 +* `#17998 `__: TST: ignore pytest warning +* `#17999 `__: Replace Numpy with NumPy +* `#18001 `__: BLD, BUG: Fix detecting aarch64 on macOS +* `#18002 `__: DOC: Fix and extend the docstring for np.inner +* `#18007 `__: DOC: Add a brief explanation of float printing +* `#18008 `__: DOC: fix for doctests +* `#18011 `__: BLD: update to OpenBLAS 0.3.13 +* `#18012 `__: SIMD: Optimize the performance of einsum's submodule sum. +* `#18014 `__: DOC: random: add some examples for SeedSequence +* `#18027 `__: DOC, MAINT: Minor fixes to refguide_check.py documentation. +* `#18030 `__: BUG: make a variable volatile to work around clang compiler bug +* `#18031 `__: DOC: Parameter name typo axes -> axis in numpy.fft._pocketfft. 
+* `#18032 `__: ENH: Add annotations for `np.core.arrayprint` +* `#18034 `__: DOC: Fix a couple of reference to verbatim and vice versa +* `#18042 `__: MAINT: Add dist_info to "other" setup.py commands. +* `#18045 `__: MAINT: Bump pytest from 6.2.0 to 6.2.1 +* `#18046 `__: TST: add back sdist test run +* `#18047 `__: BLD,DOC: pin sphinx to 3.3.1 +* `#18048 `__: DOC: Update TESTS.rst.txt +* `#18050 `__: MAINT: Add aliases for commonly used `ArrayLike` objects +* `#18051 `__: DEP: deprecate np.testing.dec +* `#18052 `__: BUG: Fix concatenation when the output is "S" or "U" +* `#18054 `__: DOC: Update stack docstrings +* `#18057 `__: BLD: ensure we give the right error message for old Python versions +* `#18062 `__: DOC: add missing details to linalg.lstsq docstring +* `#18065 `__: MAINT: CPUs that support unaligned access. +* `#18066 `__: TST: Allow mypy output types to be specified via aliases +* `#18067 `__: MAINT: Remove obsolete workaround to set ndarray.__hash__ = None +* `#18070 `__: BUG: Fix unique handling of nan entries. +* `#18072 `__: MAINT: crackfortran regex simplify +* `#18074 `__: MAINT: exprtype regex simplify +* `#18075 `__: ENH, SIMD: Dispatch for unsigned floor division +* `#18077 `__: NEP: mark NEP 28 on website redesign as final +* `#18078 `__: Fix build warnings in NEPs +* `#18079 `__: MAINT: Bump sphinx from 3.3.1 to 3.4.1 +* `#18080 `__: MAINT: Bump pytz from 2020.4 to 2020.5 +* `#18081 `__: MAINT: Bump hypothesis from 5.43.3 to 5.43.4 +* `#18082 `__: DOC: roadmap update +* `#18083 `__: MAINT: regex char class improve +* `#18084 `__: NEP: NumPy sponsorship guidelines (NEP 46) +* `#18085 `__: DOC: replace 'this platform' with the actual platform in the... +* `#18086 `__: BUG, SIMD: Fix _simd module build for 64bit Arm/neon clang +* `#18088 `__: DOC: Update reference to verbatim in a few location. +* `#18090 `__: MAINT: multiline regex class simplify +* `#18091 `__: DOC: Avoid using "set of" when talking about an ordered list. +* `#18097 `__: NEP: update backwards compatibility and deprecation policy NEP +* `#18100 `__: BUG, BLD: Generate the main dispatcher config header into the... +* `#18101 `__: ENH: move exp, log, frexp, ldexp to SIMD dispatching +* `#18103 `__: TST: Avoid changing odd tempfile names in tests' site.cfg +* `#18104 `__: TST: Turn some tests with loops into parametrized tests. +* `#18109 `__: MAINT: Fix exception cause in mingw32ccompiler.py +* `#18110 `__: API: make piecewise subclass safe using use zeros_like. +* `#18111 `__: MAINT: Bump hypothesis from 5.43.4 to 5.46.0 +* `#18115 `__: BUG: Fix promotion of half and string +* `#18116 `__: DEP: Deprecate promotion of numbers and bool to string +* `#18118 `__: BUG, MAINT: improve avx512 mask logical operations +* `#18126 `__: REL: Update master after 1.19.5 release. 
+* `#18128 `__: ENH: Add dtype support to the array comparison ops +* `#18136 `__: ENH: Adding keyboard interrupt support for array creation +* `#18144 `__: BLD: add found Cython version to check in cythonize.py +* `#18148 `__: MAINT: Bump sphinx from 3.4.1 to 3.4.3 +* `#18149 `__: MAINT: Bump hypothesis from 5.46.0 to 6.0.0 +* `#18150 `__: BUG: Ensure too many advanced indices raises an exception +* `#18152 `__: BUG: Promotion between strings and objects was assymetric +* `#18156 `__: MAINT: Remove redundant null check before free +* `#18157 `__: BUG: Initialize value of no_castable_output used in ufunc_loop_matches +* `#18161 `__: MAINT: Make keyword arrays static +* `#18164 `__: TST: add a pypy37 windows 64-bit build +* `#18166 `__: Use sinus based formula for ``chebpts1`` +* `#18169 `__: ENH: cpu features detection implementation on FreeBSD ARM +* `#18173 `__: TST: Clear the mypy cache before running any typing tests +* `#18174 `__: MAINT: Changed the `NBitBase` variancy in `number` from co- to... +* `#18176 `__: ENH: Improve performance of tril_indices and triu_indices +* `#18178 `__: SIMD: add fast integer division intrinsics for all supported... +* `#18180 `__: BUG: threads.h existence test requires GLIBC > 2.12. +* `#18181 `__: ENH: [f2py] Add external attribute support. +* `#18182 `__: MAINT: Bump hypothesis from 6.0.0 to 6.0.2 +* `#18183 `__: MAINT: Optimize numpy.count_nonzero for int types using SIMD... +* `#18184 `__: BUG: Fix f2py bugs when wrapping F90 subroutines. +* `#18185 `__: MAINT: Give the `_Like` and `_ArrayLike` type aliases a... +* `#18187 `__: STY: unify imports in __init__.py +* `#18191 `__: STY: Use explicit reexports for numpy.typing objects +* `#18193 `__: MAINT: Fix typo in docstring example +* `#18194 `__: MAINT: einsum: Optimize the sub function two-operands by using... +* `#18196 `__: BLD: update OpenBLAS to af2b0d02 +* `#18197 `__: BUG: Keep ignoring most errors during array-protocol lookup +* `#18200 `__: ENH: Add new intrinsics sum_u8/u16/u64. +* `#18204 `__: TST: Speed up the typing tests +* `#18205 `__: MAINT: Update pavement.py to work with versioneer. +* `#18208 `__: TST: raise memory limit for test +* `#18210 `__: DOC: typo in post-loop return +* `#18211 `__: MAINT: random shuffle: warn on unrecognized objects, fix empty... +* `#18213 `__: DOC: Formatting consistency. +* `#18214 `__: DOC: Double backticks for inline code example. +* `#18217 `__: MAINT: Ignore ComplexWarning in ``test_iter_copy_casts``. +* `#18221 `__: DOC: Misc single to double backticks fixes. +* `#18223 `__: DOC: Improve doc for numpy.random.Generator.choice +* `#18224 `__: MAINT: Bump pydata-sphinx-theme from 0.4.1 to 0.4.2 +* `#18225 `__: MAINT: Bump mypy from 0.790 to 0.800 +* `#18226 `__: MAINT: Bump hypothesis from 6.0.2 to 6.0.3 +* `#18227 `__: MAINT: Bump pytest-cov from 2.10.1 to 2.11.1 +* `#18228 `__: ENH: Add dtype-support to the ufunc-based `ndarray` magic methods... +* `#18229 `__: MAINT: Clean up all module-level dunders +* `#18230 `__: DOC: Clarify the type alias deprecation message +* `#18232 `__: DOC: lib/shape_base numpydoc formatting. +* `#18233 `__: NEP: accept NEP 23 (backwards compatibility policy) +* `#18234 `__: NEP: accept NEP 46 (sponsorship guidelines) +* `#18235 `__: DOC: Fix command in "Writing custom array containers" guide +* `#18236 `__: ENH: Add aliases for commonly used dtype-like objects +* `#18238 `__: DOC: __array__ accepts a dtype argument +* `#18245 `__: BLD: fix issue with `bdist_egg`, which made `make dist` in doc/... 
+* `#18247 `__: DOC: Misc numpydoc format fixes +* `#18248 `__: DOC: See also -> See Also (casing) +* `#18251 `__: DOC: more misc fixes of syntax +* `#18252 `__: DOC: cleanup of numpy/polynomial. +* `#18253 `__: DOC: improve description of `_NoValue` +* `#18255 `__: MAINT: add an 'apt update' +* `#18262 `__: REL: Update master after 1.20.0 release. +* `#18263 `__: ENH: Added sanity check to printoptions +* `#18264 `__: BUG: Use C linkage for random distributions +* `#18269 `__: DOC: Numpydoc format space before `:` in Parameters +* `#18272 `__: DOC: Numpydoc warning incorrect underline length. +* `#18274 `__: MAINT: Chain exceptions in linalg +* `#18275 `__: MAINT: Bump hypothesis from 6.0.3 to 6.1.1 +* `#18276 `__: MAINT: Bump pytest from 6.2.1 to 6.2.2 +* `#18277 `__: MAINT: Bump pydata-sphinx-theme from 0.4.2 to 0.4.3 +* `#18278 `__: MAINT: defer the import of shutil +* `#18282 `__: MAINT: gracefully shuffle memoryviews +* `#18284 `__: ENH: Add annotations for the remaining `np.generic` aliases +* `#18285 `__: TST: Pin `typing_extensions` to the latest version +* `#18289 `__: MAINT: Move transferdata into buffer-wise struct +* `#18293 `__: BUG: Fix typo in ``numpy.__init__.py`` +* `#18295 `__: BUG: don't mutate list of fake libraries while iterating over... +* `#18301 `__: MAINT: avoid chaining exceptions in conv_template.py +* `#18302 `__: MAINT: Add missing placeholder annotations +* `#18303 `__: MAINT: Fix typo in PyArray_RegisterDataType error +* `#18307 `__: DOC: Corrected numpy.power example. +* `#18313 `__: Numpy logo fix on README +* `#18315 `__: CI: rearrange Azure build jobs +* `#18317 `__: MAINT: Fixed chain exception for array_split func +* `#18320 `__: DOC: add links to polynomial function/class listing +* `#18322 `__: ENH: Add a mypy plugin for exposing platform-specific extended-precision... +* `#18323 `__: ENH: Add dtype-support to the ufunc-based `ndarray` magic methods... +* `#18324 `__: MAINT: Avoid moveaxis overhead in median. +* `#18329 `__: BUG: Allow unmodified use of isclose, allclose, etc. with timedelta +* `#18331 `__: MAINT: Update openblas_support for macosx-arm64 +* `#18332 `__: BUG: Allow pickling all relevant DType types/classes +* `#18333 `__: CI: fix when GitHub Actions builds trigger, and allow ci skips +* `#18334 `__: TST: use setup-python action for pypy, disable win64 pypy +* `#18338 `__: DOC: Fix whitespace before "last updated" on overview page +* `#18339 `__: DOC: Discussion on the @ operator and the matrix class +* `#18340 `__: DOC: remove pygments_style from conf.py +* `#18342 `__: DOC: Specified all possible return types for trapz function #18140 +* `#18344 `__: DOC: Added sentence to docstring of histogram_bin_edges to explain... +* `#18346 `__: DOC: Change license date 2020 -> 2021 +* `#18347 `__: MAINT: Delete unused "dst" clearing functions +* `#18348 `__: DEP: doc-deprecate BLAS_SRC/LAPACK_SRC +* `#18349 `__: CI: CircleCI seems to occasionally time out, increase the limit +* `#18350 `__: BUG: Fix missing signed_char dependency. +* `#18361 `__: ENH: Share memory of read-only intent(in) arrays. +* `#18362 `__: REL: Update master after 1.20.1 release. +* `#18364 `__: DOC: Update landing page to match table of contents +* `#18366 `__: MAINT: Disable TravisCI git clone depth. +* `#18367 `__: MAINT: Bump pytz from 2020.5 to 2021.1 +* `#18369 `__: BUG: np.in1d bug on the object array (issue 17923) +* `#18372 `__: DOC: improve standard_t example in numpy.random. 
+* `#18374 `__: TST: Add a test for nditer write masked with references +* `#18375 `__: BUG: fix regression in a hidden callback use case in f2py. +* `#18377 `__: ENH: Add annotations for `np.lib.ufunclike` +* `#18379 `__: DOC: Fix docstring of _median_nancheck. +* `#18384 `__: BUG: improve the interface of `tofile` method +* `#18389 `__: MAINT: Fix version of wheel to support Python 3.10 +* `#18390 `__: ENH: Add annotations for `np.core.einsumfunc` +* `#18392 `__: BUG: Remove check in shuffle for non-ndarrays +* `#18394 `__: MAINT: Added Chain exceptions where appropriate +* `#18395 `__: ENH: Initial typing of random +* `#18396 `__: MAINT: Threading and Unicode strings +* `#18397 `__: ENH: Add annotations for `np.lib.index_tricks` +* `#18398 `__: MAINT: Fix casting signatures to align with NEP 43 signature +* `#18400 `__: MAINT: Added Chain exceptions where appropriate +* `#18402 `__: BUG: Fix typo in char_codes +* `#18404 `__: BUG: Fix iterator shape in advanced index assignment broadcast... +* `#18405 `__: DOC: Mention `scipy.signal.correlate` and FFT method in `np.correlate`closes... +* `#18413 `__: MAINT: Bump sphinx from 3.4.3 to 3.5.0 +* `#18414 `__: MAINT: Bump hypothesis from 6.1.1 to 6.2.0 +* `#18415 `__: MAINT: Update END statements parsing for recent Fortran standards. +* `#18416 `__: BUG: Fix f2py parsing continued lines that follow comment lines. +* `#18417 `__: ENH: Add dtype-support to the ufunc-based `ndarray` magic methods... +* `#18418 `__: DOC: remove layout overrides for headers +* `#18420 `__: BUG: Fix tiny memory leaks when ``like=`` overrides are used +* `#18423 `__: ENH: Lint checks for PR diffs +* `#18428 `__: DOC: remove explanations.rst +* `#18429 `__: DOC: point intersphinx to matplotlib/stable... +* `#18432 `__: MAINT: Correct code producing warnings +* `#18433 `__: ENH: Add typing for RandomState +* `#18436 `__: BUG: Fix refcount leak in f2py `complex_double_from_pyobj` +* `#18437 `__: TST: Fix some uninitialized memory in the tests +* `#18438 `__: BUG: Correct shuffling of objects in 1-d array likes +* `#18439 `__: MAINT: random: Use 'from exc' when raising a ValueError in choice. +* `#18443 `__: BUG: fix stacklevel in warning within random.shuffle +* `#18448 `__: DOC: Remove unfinished Linear Algebra section from Quickstart... +* `#18450 `__: BUG: Segfault in nditer buffer dealloc for Object arrays +* `#18454 `__: NEP: add Spending NumPy Project Funds (NEP 48) +* `#18455 `__: BUG: ``diagflat`` could overflow on windows or 32-bit platforms +* `#18456 `__: NEP: array API standard adoption (NEP 47) +* `#18458 `__: DOC: update NEP status for accepted/finished NEPs +* `#18463 `__: MAINT: Bump mypy from 0.800 to 0.812 +* `#18464 `__: MAINT: Bump sphinx from 3.5.0 to 3.5.1 +* `#18465 `__: MAINT: Bump cython from 0.29.21 to 0.29.22 +* `#18466 `__: MAINT: Bump hypothesis from 6.2.0 to 6.3.0 +* `#18475 `__: ENH: Added type annotations to eye() function +* `#18476 `__: BUG: Remove suspicious type casting +* `#18477 `__: BUG: remove nonsensical comparison of pointer < 0 +* `#18478 `__: BUG: verify pointer against NULL before using it +* `#18479 `__: BUG: check if PyArray_malloc succeeded +* `#18481 `__: DOC: Generator and RandomState doc improvements +* `#18482 `__: ENH: Improve error message in multinomial +* `#18489 `__: DOC: Rename "Ones and zeros" section in array-creation documentation. 
+* `#18493 `__: BUG: Fix non-versioneer uses of numpy.distutils +* `#18497 `__: TST: Remove the `einsum` typing tests reliance on issuing a `ComplexWarning` +* `#18498 `__: BUG: Fixed Von Mises distribution for big values of kappa +* `#18499 `__: TST: Branch coverage improvement for `np.polynomial` +* `#18502 `__: DOC: Fix links to landing page +* `#18505 `__: DOC: add guide for downstream package authors +* `#18509 `__: DOC: trunc, floor, ceil, rint, fix should all link to each other +* `#18513 `__: BLD: add _2_24 to valid manylinux names +* `#18515 `__: MAINT: Improve error message when common type not found. +* `#18517 `__: MAINT: Bump hypothesis from 6.3.0 to 6.3.4 +* `#18518 `__: DOC Improve formatting in the depending_on_numpy documentation +* `#18522 `__: BUG: remove extraneous ARGOUTVIEWM dim. 4 typemaps +* `#18526 `__: MAINT: Specify color in RGB in the docs about the new NumPy logo +* `#18530 `__: BUG: incorrect error fallthrough in nditer +* `#18531 `__: CI: Use Ubuntu 18.04 to run "full" test. +* `#18537 `__: [BLD] use the new openblas lib +* `#18538 `__: Fix the numpy Apple M1 build +* `#18539 `__: BUG: NameError in numpy.distutils.fcompiler.compaq +* `#18544 `__: MAINT: Update master to main after branch rename +* `#18545 `__: ENH: Add annotations for `np.lib.arrayterator` +* `#18554 `__: CI: Pin docker image for Linux_Python_38_32bit_full_with_asserts... +* `#18560 `__: BUG: Fixed ``where`` keyword for ``np.mean`` & ``np.var`` methods +* `#18566 `__: CI: another master -> main fix +* `#18567 `__: CI: skip lint check on merges with main +* `#18569 `__: CI: Ensure that doc-build uses "main" as branch name +* `#18570 `__: CI: Use `git branch -m` instead of `--initial-branch=main` +* `#18571 `__: BUG: Fix overflow warning on apple silicon +* `#18572 `__: CI: Set git default branch to "main" in CircleCI. +* `#18574 `__: MAINT: Update the Call for Contributions section +* `#18575 `__: MAINT: Bump sphinx from 3.5.1 to 3.5.2 +* `#18576 `__: MAINT: Bump hypothesis from 6.3.4 to 6.6.0 +* `#18578 `__: MAINT: Bump pycodestyle from 2.5.0 to 2.6.0 +* `#18579 `__: MAINT: OrderedDict is no longer necessary from Python 3.7 +* `#18582 `__: BLD, TST: use pypy nightly to work around bug +* `#18583 `__: DOC: Clarify docs for fliplr() / flipud() +* `#18584 `__: DOC: Added documentation for linter (#18423) +* `#18593 `__: MAINT: Do not claim input to binops is `self` (array object) +* `#18594 `__: MAINT: Remove strange `op == NULL` check +* `#18596 `__: MAINT: Chain exceptions in index_tricks.py and mrecords.py +* `#18598 `__: MAINT: Add annotations for `dtype.__getitem__`, `__mul__` and... +* `#18602 `__: CI: Do not fail CI on lint error +* `#18605 `__: BUG: Fix ma coercion list-of-ma-arrays if they do not cast to... +* `#18614 `__: MAINT: Bump pycodestyle from 2.6.0 to 2.7.0 +* `#18615 `__: MAINT: Bump hypothesis from 6.6.0 to 6.8.1 +* `#18616 `__: CI: Update apt package list before Python install +* `#18618 `__: MAINT: Ensure that re-exported sub-modules are properly annotated +* `#18622 `__: DOC: Consistently use rng as variable name for random generators +* `#18629 `__: BUG, ENH: fix array2string rounding bug by adding min_digits... 
+* `#18630 `__: DOC: add note to numpy.rint() docstrings +* `#18634 `__: BUG: Use npy_log1p where appropriate in random generation +* `#18635 `__: ENH: Improve the exception for default low in Generator.integers +* `#18641 `__: MAINT: Remove useless declarations in `bad_commands` +* `#18642 `__: ENH: Use new argument parsing for array creation functions +* `#18643 `__: DOC: Remove mention of nose from README +* `#18645 `__: DOC: Minor fix in inline code example of ufunc reference +* `#18648 `__: MAINT: use super() as described by PEP 3135 +* `#18649 `__: MAINT: Add missing type to cdef statement +* `#18651 `__: BUG: Fix small valgrind-found issues +* `#18652 `__: DOC: Update some plotting code to current Matplotlib idioms +* `#18657 `__: ENH: Improve performance of `np.save` for small arrays +* `#18658 `__: BLD: remove /usr/include from default include dirs +* `#18659 `__: DEV: add a conda environment.yml with all development dependencies +* `#18660 `__: DOC: add release note for removal of /usr/include from include... +* `#18664 `__: MAINT: Bump sphinx from 3.5.2 to 3.5.3 +* `#18666 `__: ENH: Use exponentials in place of inversion in Rayleigh and geometric +* `#18670 `__: BUG: Fix small issues found with pytest-leaks +* `#18676 `__: MAINT: Implement new style promotion for `np.result_type`, etc. +* `#18679 `__: BUG: Changed METH_VARARGS to METH_NOARGS +* `#18680 `__: Docs: simd-optimizations.rst: fix typo (basline ~> baseline) +* `#18685 `__: REL: Update main after 1.20.2 release. +* `#18686 `__: BUG: Fix test_ccompiler_opt when path contains dots +* `#18689 `__: DOC: Change matrix size in absolute beginners doc. +* `#18690 `__: BUG: Correct datetime64 missing type overload for datetime.date... +* `#18691 `__: BUG: fix segfault in object/longdouble operations +* `#18692 `__: MAINT: Bump pydata-sphinx-theme from 0.5.0 to 0.5.2 +* `#18693 `__: MAINT: Bump hypothesis from 6.8.1 to 6.8.3 +* `#18694 `__: TST: pin pypy version to 7.3.4rc1 +* `#18695 `__: ENH: Support parsing Fortran abstract interface blocks. +* `#18697 `__: DEP: Disable PyUFunc_GenericFunction and PyUFunc_SetUsesArraysAsData +* `#18698 `__: MAINT: Specify the color space in all new NumPy logo files +* `#18701 `__: BLD: Strip extra newline when dumping gfortran version on MacOS +* `#18705 `__: DOC: update Steering Council membership and people on governance... +* `#18706 `__: DOC: Add release notes to upcoming_changes +* `#18708 `__: TST: add tests for using np.meshgrid for higher dimensional grids. +* `#18712 `__: DOC: Simplifies Mandelbrot set plot in Quickstart guide +* `#18718 `__: API, DEP: Move ufunc signature parsing to the start +* `#18722 `__: DOC: deduplicate dtype basic types (2) +* `#18725 `__: MAINT: Bump pytest from 6.2.2 to 6.2.3 +* `#18726 `__: MAINT: Bump hypothesis from 6.8.3 to 6.8.4 +* `#18728 `__: MAINT: Add exception chaining where appropriate +* `#18731 `__: BUG: Check out requirements and raise when not satisfied +* `#18733 `__: DEV: Adds gitpod to numpy +* `#18737 `__: BLD: introduce use of BLAS_LIBS and LAPACK_LIBS in distutils/system_info +* `#18739 `__: MAINT: Add exception chaining where appropriate +* `#18741 `__: DOC: Emphasize distinctions between np.copy and ndarray.copy +* `#18745 `__: CI: remove shippable CI +* `#18750 `__: MAINT: Allow more recursion depth for scalar tests. 
+* `#18751 `__: BUG: Regression #18075 | Fixing Ufunc TD generation order +* `#18753 `__: BLD: Negative zero handling with ifort +* `#18755 `__: MAINT: Bump sphinx from 3.5.3 to 3.5.4 +* `#18757 `__: MAINT: Bump hypothesis from 6.8.4 to 6.9.1 +* `#18758 `__: DOC: Update howto-docs with link to NumPy tutorials. +* `#18761 `__: DOC: Small fixes (including formatting) for NEP 43 +* `#18765 `__: ENH: Improve the placeholder annotations for the main numpy namespace +* `#18766 `__: ENH, SIMD: Replace libdivide functions of signed integer division... +* `#18770 `__: DOC: More concise "How to import NumPy" description +* `#18771 `__: DOC: Use: from numpy.testing import ... +* `#18772 `__: CI: Use informational mode for codecov +* `#18773 `__: CI: Fixing typo in Azure job run +* `#18777 `__: DOC: update random and asserts in test guidelines +* `#18778 `__: MAINT: Relax the integer-type-constraint of `npt._ShapeLike` +* `#18779 `__: DOC: fix spelling of "reccomended" ("recommended") +* `#18780 `__: ENH: Improve the placeholder annotations for the main numpy namespace... +* `#18781 `__: ENH: Add `__all__` to a number of public modules +* `#18785 `__: DOC: change `dec.parametrize` to `pytest.mark.parametrize` +* `#18786 `__: DOC: add note for clip() special case a_min > a_max See #18782 +* `#18787 `__: DOC: Document newer pytest conventions +* `#18789 `__: DEV: Pin pydata-sphinx-theme to 0.5.2. +* `#18790 `__: CI: Use `towncrier build` explicitly +* `#18791 `__: DOC: Fixes small things in the genfromtext docstring +* `#18792 `__: MAINT: Use recent towncrier releases on PyPI. +* `#18795 `__: SIMD, TEST: Workaround for misaligned stack GCC BUG ABI on WIN64 +* `#18796 `__: DOC: Misc Numpydoc and formatting for proper parsing. +* `#18797 `__: DOC: Update random c-api documentation +* `#18799 `__: MAINT: Improve the placeholder annotations for the main numpy... +* `#18800 `__: MAINT: Relax miscellaneous integer-type constraints +* `#18801 `__: DOC: fix typo in frexp docstring +* `#18802 `__: DOC: Improve random.choice() documentation +* `#18805 `__: NEP: propose new nep for allocator policies +* `#18806 `__: MAINT: Bump hypothesis from 6.9.1 to 6.10.0 +* `#18807 `__: MAINT: Bump cython from 0.29.22 to 0.29.23 +* `#18809 `__: MAINT: runtests help text cleanup +* `#18812 `__: DOC: Document howto build documentation in a virtual environment +* `#18813 `__: BUG: Initialize the full nditer buffer in case of error +* `#18818 `__: ENH: Add annotations for 4 objects in `np.core.numerictypes` +* `#18820 `__: MAINT: Remove incorrect inline +* `#18822 `__: DEV: general Gitpod enhancements +* `#18823 `__: MAINT: Minor fix to add reference link to numpy.fill_diagonal... +* `#18825 `__: MAINT: Update README.md +* `#18831 `__: BUG: Prevent nan being used in percentile +* `#18834 `__: DOC: Fix typo in random docs +* `#18836 `__: MAINT: Generalize and shorten the ufunc "trivially iterable"... +* `#18837 `__: ENH, SIMD: Add support for dispatching C++ sources +* `#18839 `__: DOC: Add Gitpod development documentation +* `#18841 `__: DOC: Add favicon +* `#18842 `__: ENH: Improve the placeholder annotations within sub-modules +* `#18843 `__: DOC: Clarify isreal docstring +* `#18845 `__: DOC: Move Sphinx numpy target in reference index. +* `#18851 `__: MAINT: Disable pip version check for azure lint check. +* `#18853 `__: ENH: Improve the placeholder annotations within sub-modules (part... 
+* `#18855 `__: STY: change CRLF line terminators to Unix +* `#18856 `__: MAINT: Fix the typo "implment" +* `#18862 `__: TST: Skip f2py TestSharedMemory for LONGDOUBLE on macos/arm64 +* `#18863 `__: ENH: Add max values comparison for floating point +* `#18864 `__: MAINT: Remove dead codepath in generalized ufuncs +* `#18868 `__: Upgrade to GitHub-native Dependabot +* `#18869 `__: MAINT: Fix azure linter problems with pip 21.1 +* `#18871 `__: MAINT: Bump hypothesis from 6.10.0 to 6.10.1 +* `#18874 `__: BLD, ENH: Enable Accelerate Framework +* `#18877 `__: MAINT: Update PyPy version used by CI +* `#18880 `__: API: Ensure that casting does not affect ufunc loop +* `#18882 `__: ENH: Add min values comparison for floating point +* `#18885 `__: MAINT: Remove unsafe unions and ABCs from return-annotations +* `#18889 `__: ENH: Add SIMD operations for min and max value comparision +* `#18890 `__: MAINT: ssize_t -> Py_ssize_t and other fixes for Python v3.10.0 +* `#18891 `__: MAINT: Bump typing-extensions from 3.7.4.3 to 3.10.0.0 +* `#18893 `__: DOC: Add a set of standard replies. +* `#18895 `__: DOC: Improve cumsum documentation +* `#18896 `__: MAINT: Explicitly mark text files in .gitattributes. +* `#18897 `__: MAINT: Add ".csv" some data file names. +* `#18899 `__: BLD, BUG: Fix compiler optimization log AttributeError +* `#18900 `__: BLD: remove unnecessary flag `-faltivec` on macOS +* `#18903 `__: MAINT, CI: treats _SIMD module build warnings as errors through... +* `#18906 `__: ENH: Add PCG64DXSM BitGenerator +* `#18908 `__: MAINT: Adjust NumPy float hashing to Python's slightly changed... +* `#18909 `__: ENH: Improve the placeholder annotations within sub-modules (part... +* `#18910 `__: BUG : for MINGW, threads.h existence test requires GLIBC > 2.12 +* `#18911 `__: BLD, BUG: Fix bdist_wheel duplicate building +* `#18912 `__: CI: fix the GitHub Actions trigger in docker.yml +* `#18918 `__: DOC: fix documentation of cloning over ssh +* `#18919 `__: ENH: Add placeholder annotations for two missing `np.testing`... +* `#18920 `__: BUG: Report underflow condition in AVX implementation of np.exp +* `#18927 `__: NEP: add mailing list thread, fixes from review +* `#18930 `__: BUG: Make changelog recognize ``gh-`` as a PR number prefix. +* `#18931 `__: BUG: Fix refcounting in string-promotion deprecation code path +* `#18933 `__: BUG: Fix underflow error in AVX512 implementation of ufunc exp/f64 +* `#18934 `__: DOC: Add a release note for the improved placeholder annotations +* `#18935 `__: API: Add `npt.NDArray`, a runtime-subscriptable alias for `np.ndarray` +* `#18936 `__: DOC: Update performance for new PRNG +* `#18940 `__: ENH: manually inline PCG64DXSM code for performance. +* `#18943 `__: TST: xfail `TestCond.test_nan` unconditionally +* `#18944 `__: ENH: Add annotations for `np.lib.utils` +* `#18954 `__: DOC: Update beginners docu for sum function with axis +* `#18955 `__: DOC: add an extra example in runtests.py help test +* `#18956 `__: DOC: change copyright SciPy to NumPy +* `#18957 `__: DOC: Improve datetime64 docs. 
+* `#18958 `__: MAINT: Do not use deprecated ``mktemp()`` +* `#18959 `__: DOC: improve numpy.histogram2d() documentation +* `#18960 `__: BUG: fixed ma.average ignoring masked weights +* `#18961 `__: DOC: add note and examples to `isrealobj` docstring +* `#18962 `__: DOC: Update a page title with proper case +* `#18963 `__: DEP: remove PolyBase from np.polynomial.polyutils +* `#18965 `__: DOC: Improve description of array scalar in glossary +* `#18967 `__: BUG: fix np.ma.masked_where(copy=False) when input has no mask +* `#18970 `__: MAINT, SIMD: Hardened the AVX compile-time tests +* `#18972 `__: ENH: Include co-authors in changelog. +* `#18973 `__: MAINT: Bump sphinx from 3.5.4 to 4.0.0 +* `#18974 `__: MAINT: Bump hypothesis from 6.10.1 to 6.12.0 +* `#18976 `__: MAINT: Bump pytest from 6.2.3 to 6.2.4 +* `#18980 `__: DOC: Gitpod documentation enhancements +* `#18982 `__: MAINT: Cleanup tools/changelog.py +* `#18983 `__: REL: Update main after 1.20.3 release. +* `#18985 `__: MAINT: Remove usage of the PEP 604 pipe operator +* `#18987 `__: BUG: Update coordinates in PyArray_ITER_GOTO1D +* `#18989 `__: BUG: fix potential buffer overflow(#18939) +* `#18990 `__: ENH: Add annotations for `np.lib.NumpyVersion` +* `#18996 `__: MAINT: Remove warning when checking AVX512f on MSVC +* `#18998 `__: ENH: Improve annotations of the `item`, `tolist`, `take` and... +* `#18999 `__: DEP: Ensure the string promotion FutureWarning is raised +* `#19001 `__: DEP: Deprecate error clearing for special method in array-coercion +* `#19002 `__: ENH: Add annotations for `np.broadcast` and `np.DataSource` +* `#19005 `__: ENH: Add dtype-support to 11 `ndarray` / `generic` methods +* `#19007 `__: BUG: fix potential use of null pointer in nditer buffers +* `#19008 `__: BUG: fix variable misprint in multiarray test code +* `#19009 `__: BUG: fix variable misprint checking wrong variable in umath tests +* `#19011 `__: BUG: fix ValueError in PyArray_Std on win_amd64 +* `#19012 `__: MAINT: Small cleanups in `PyArray_NewFromDescr_int` +* `#19014 `__: Revert "BUG: Update coordinates in PyArray_ITER_GOTO1D" +* `#19018 `__: DOC: "NumPy" <- "numpy" in NumPy Fundamentals - Indexing +* `#19021 `__: DOC: Add comment for ifdef macro guard +* `#19024 `__: MAINT: Bump pytest-cov from 2.11.1 to 2.12.0 +* `#19025 `__: MAINT: Bump sphinx from 4.0.0 to 4.0.1 +* `#19026 `__: DOC: Clarify minimum numpy version needed to use random c-api +* `#19029 `__: ENH: Improve the annotations of `np.core._internal` +* `#19031 `__: DEP: Deprecate 4 `ndarray.ctypes` methods +* `#19035 `__: MAINT: Python3 classes do not need to inherit from object +* `#19037 `__: BUG: do not use PyLong_FromLong for intp +* `#19041 `__: DOC: Improve trapz docstring +* `#19043 `__: DOC: Fix typo in release notes for v1.21 +* `#19046 `__: BUG, SIMD: Fix unexpected result of uint8 division on X86 +* `#19047 `__: BUG, SIMD: Fix NumPy build on ppc64le(IBM/Power) for old versions... +* `#19048 `__: BUG: Fix duplicate variable names in compiler check for AVX512_SKX +* `#19049 `__: BLD,API: (distutils) Force strict floating point error model... +* `#19052 `__: ENH: Improve the `np.ufunc` annotations +* `#19055 `__: DOC: Forward port missing 1.18.5 release note. +* `#19063 `__: ENH: Stubs for array_equal appear out of date. +* `#19066 `__: BUG: Fixed an issue wherein `nanmedian` could return an array... +* `#19068 `__: MAINT: Update mailmap +* `#19073 `__: REL: Prepare 1.21.0 release +* `#19074 `__: BUG: Fix compile-time test of POPCNT +* `#19075 `__: BUG: Fix test_numpy_version. 
From 86a4dd08444313b17b4e698017f505431e379fd3 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 22 May 2021 17:56:04 -0600 Subject: [PATCH 1233/1270] DOC: Create 1.21.0 release note --- .../upcoming_changes/15271.compatibility.rst | 31 - .../upcoming_changes/16987.improvement.rst | 17 - .../upcoming_changes/17492.deprecation.rst | 7 - .../upcoming_changes/17586.deprecation.rst | 7 - .../upcoming_changes/17727.performance.rst | 7 - .../upcoming_changes/17843.new_feature.rst | 22 - .../upcoming_changes/17900.expired.rst | 2 - .../upcoming_changes/17921.compatibility.rst | 6 - .../upcoming_changes/18070.improvement.rst | 12 - doc/release/upcoming_changes/18110.change.rst | 5 - doc/release/upcoming_changes/18116.future.rst | 29 - .../upcoming_changes/18322.new_feature.rst | 22 - .../upcoming_changes/18629.new_feature.rst | 10 - .../upcoming_changes/18657.performance.rst | 10 - .../upcoming_changes/18658.compatibility.rst | 6 - .../upcoming_changes/18666.improvement.rst | 9 - .../upcoming_changes/18695.new_feature.rst | 3 - .../upcoming_changes/18697.expired.rst | 5 - doc/release/upcoming_changes/18718.c_api.rst | 13 - .../upcoming_changes/18718.compatibility.rst | 59 -- .../upcoming_changes/18737.new_feature.rst | 12 - doc/release/upcoming_changes/18874.change.rst | 11 - .../upcoming_changes/18880.compatibility.rst | 34 -- .../upcoming_changes/18906.new_function.rst | 17 - .../upcoming_changes/18934.improvement.rst | 5 - .../upcoming_changes/18935.new_feature.rst | 28 - .../upcoming_changes/18963.expired.rst | 8 - .../upcoming_changes/19001.deprecation.rst | 8 - .../upcoming_changes/19031.deprecation.rst | 12 - .../upcoming_changes/19049.compatibility.rst | 6 - doc/source/release/1.21.0-notes.rst | 557 ++++++++++++++++++ 31 files changed, 557 insertions(+), 423 deletions(-) delete mode 100644 doc/release/upcoming_changes/15271.compatibility.rst delete mode 100644 doc/release/upcoming_changes/16987.improvement.rst delete mode 100644 doc/release/upcoming_changes/17492.deprecation.rst delete mode 100644 doc/release/upcoming_changes/17586.deprecation.rst delete mode 100755 doc/release/upcoming_changes/17727.performance.rst delete mode 100644 doc/release/upcoming_changes/17843.new_feature.rst delete mode 100644 doc/release/upcoming_changes/17900.expired.rst delete mode 100644 doc/release/upcoming_changes/17921.compatibility.rst delete mode 100644 doc/release/upcoming_changes/18070.improvement.rst delete mode 100644 doc/release/upcoming_changes/18110.change.rst delete mode 100644 doc/release/upcoming_changes/18116.future.rst delete mode 100644 doc/release/upcoming_changes/18322.new_feature.rst delete mode 100644 doc/release/upcoming_changes/18629.new_feature.rst delete mode 100644 doc/release/upcoming_changes/18657.performance.rst delete mode 100644 doc/release/upcoming_changes/18658.compatibility.rst delete mode 100644 doc/release/upcoming_changes/18666.improvement.rst delete mode 100644 doc/release/upcoming_changes/18695.new_feature.rst delete mode 100644 doc/release/upcoming_changes/18697.expired.rst delete mode 100644 doc/release/upcoming_changes/18718.c_api.rst delete mode 100644 doc/release/upcoming_changes/18718.compatibility.rst delete mode 100644 doc/release/upcoming_changes/18737.new_feature.rst delete mode 100644 doc/release/upcoming_changes/18874.change.rst delete mode 100644 doc/release/upcoming_changes/18880.compatibility.rst delete mode 100644 doc/release/upcoming_changes/18906.new_function.rst delete mode 100644 doc/release/upcoming_changes/18934.improvement.rst delete mode 
100644 doc/release/upcoming_changes/18935.new_feature.rst delete mode 100644 doc/release/upcoming_changes/18963.expired.rst delete mode 100644 doc/release/upcoming_changes/19001.deprecation.rst delete mode 100644 doc/release/upcoming_changes/19031.deprecation.rst delete mode 100644 doc/release/upcoming_changes/19049.compatibility.rst diff --git a/doc/release/upcoming_changes/15271.compatibility.rst b/doc/release/upcoming_changes/15271.compatibility.rst deleted file mode 100644 index 7deefe2566a2..000000000000 --- a/doc/release/upcoming_changes/15271.compatibility.rst +++ /dev/null @@ -1,31 +0,0 @@ -Error type changes in universal functions ------------------------------------------ -The universal functions may now raise different errors -on invalid input in some cases. -The main changes should be that a ``RuntimeError`` was -replaced with a more fitting ``TypeError``. -When multiple errors were present in the same call, -NumPy may now raise a different one. - - -``__array_ufunc__`` argument validation ---------------------------------------- -NumPy will now partially validate arguments before calling -``__array_ufunc__``. Previously, it was possible to pass -on invalid arguments (such as a non-existing keyword -argument) when dispatch was known to occur. - - -``__array_ufunc__`` and additional positional arguments -------------------------------------------------------- -Previously, all positionally passed arguments were checked for -``__array_ufunc__`` support. In the case of ``reduce``, -``accumulate``, and ``reduceat`` all arguments may be passed by -position. This means that when they were passed by -position, they could previously have been asked to handle -the ufunc call via ``__array_ufunc__``. -Since this depended on the way the arguments were passed -(by position or by keyword), NumPy will now only dispatch -on the input and output array. -For example, NumPy will never dispatch on the ``where`` array -in a reduction such as ``np.add.reduce``. diff --git a/doc/release/upcoming_changes/16987.improvement.rst b/doc/release/upcoming_changes/16987.improvement.rst deleted file mode 100644 index dc592a068401..000000000000 --- a/doc/release/upcoming_changes/16987.improvement.rst +++ /dev/null @@ -1,17 +0,0 @@ -Arbitrary ``period`` option for `numpy.unwrap` ----------------------------------------------- -The size of the interval over which phases are unwrapped is no longer restricted to ``2 * pi``. -This is especially useful for unwrapping degrees, but can also be used for other intervals. - -.. code:: python - - >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 - >>> phase_deg - array([-180., -140., -100., -60., -20., 20., 60., 100., 140., - -180., -140., -100., -60., -20., 20., 60., 100., 140., - -180.]) - - >>> unwrap(phase_deg, period=360) - array([-180., -140., -100., -60., -20., 20., 60., 100., 140., - 180., 220., 260., 300., 340., 380., 420., 460., 500., - 540.]) diff --git a/doc/release/upcoming_changes/17492.deprecation.rst b/doc/release/upcoming_changes/17492.deprecation.rst deleted file mode 100644 index 50005aed75b6..000000000000 --- a/doc/release/upcoming_changes/17492.deprecation.rst +++ /dev/null @@ -1,7 +0,0 @@ -Inexact matches for `numpy.convolve` and `numpy.correlate` are deprecated -------------------------------------------------------------------------- - -`numpy.convolve` and `numpy.correlate` now emits a warning when there are case -insensitive and/or inexact matches found for ``mode`` argument in the functions. 
-Pass full ``"same"``, ``"valid"``, ``"full"`` strings instead of -``"s"``, ``"v"``, ``"f"`` for the ``mode`` argument. diff --git a/doc/release/upcoming_changes/17586.deprecation.rst b/doc/release/upcoming_changes/17586.deprecation.rst deleted file mode 100644 index 845615315447..000000000000 --- a/doc/release/upcoming_changes/17586.deprecation.rst +++ /dev/null @@ -1,7 +0,0 @@ -``np.typeDict`` has been formally deprecated --------------------------------------------- -``np.typeDict`` is a deprecated alias for ``np.sctypeDict`` and -has been so for over 14 years (6689502_). -A deprecation warning will now be issued whenever getting ``np.typeDict``. - -.. _6689502: https://github.com/numpy/numpy/commit/668950285c407593a368336ff2e737c5da84af7d diff --git a/doc/release/upcoming_changes/17727.performance.rst b/doc/release/upcoming_changes/17727.performance.rst deleted file mode 100755 index 55ab7752baef..000000000000 --- a/doc/release/upcoming_changes/17727.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Improved performance in integer division of NumPy arrays --------------------------------------------------------- -Integer division of NumPy arrays now uses `libdivide ` -when the divisor is a constant. With the usage of libdivide and -other minor optimizations, there is a large speedup. -The ``//`` operator and ``np.floor_divide`` makes use -of the new changes. diff --git a/doc/release/upcoming_changes/17843.new_feature.rst b/doc/release/upcoming_changes/17843.new_feature.rst deleted file mode 100644 index d2b9d2adc84c..000000000000 --- a/doc/release/upcoming_changes/17843.new_feature.rst +++ /dev/null @@ -1,22 +0,0 @@ -Added a mypy plugin for handling platform-specific `numpy.number` precisions ----------------------------------------------------------------------------- - -A mypy_ plugin is now available for automatically assigning the (platform-dependent) -precisions of certain `~numpy.number` subclasses, including the likes of -`~numpy.int_`, `~numpy.intp` and `~numpy.longlong`. See the documentation on -:ref:`scalar types ` for a comprehensive overview -of the affected classes. - -Note that while usage of the plugin is completely optional, without it the -precision of above-mentioned classes will be inferred as `~typing.Any`. - -To enable the plugin, one must add it to their mypy `configuration file`_: - -.. code-block:: ini - - [mypy] - plugins = numpy.typing.mypy_plugin - - -.. _mypy: http://mypy-lang.org/ -.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html diff --git a/doc/release/upcoming_changes/17900.expired.rst b/doc/release/upcoming_changes/17900.expired.rst deleted file mode 100644 index 810d672414e9..000000000000 --- a/doc/release/upcoming_changes/17900.expired.rst +++ /dev/null @@ -1,2 +0,0 @@ -* The ``shape`` argument `numpy.unravel_index` cannot be passed - as ``dims`` keyword argument anymore. (Was deprecated in NumPy 1.16.) diff --git a/doc/release/upcoming_changes/17921.compatibility.rst b/doc/release/upcoming_changes/17921.compatibility.rst deleted file mode 100644 index a1e2fb2d0408..000000000000 --- a/doc/release/upcoming_changes/17921.compatibility.rst +++ /dev/null @@ -1,6 +0,0 @@ -Validate input values in ``Generator.uniform`` ----------------------------------------------- -Checked that ``high - low >= 0`` in ``np.random.Generator.uniform``. Raises -``ValueError`` if ``low > high``. Previously out-of-order inputs were accepted -and silently swapped, so that if ``low > high``, the value generated was -``high + (low - high) * random()``. 
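[Editor's note] The ``Generator.uniform`` bounds validation removed from the fragment above can be made concrete with a short, hedged sketch; the seed and bounds below are illustrative only and are not taken from this patch:

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(12345)

    # Ordinary call with low <= high behaves as before.
    print(rng.uniform(low=0.0, high=10.0))

    # With the validation described above, reversed bounds now raise
    # ValueError instead of being silently swapped.
    try:
        rng.uniform(low=10.0, high=0.0)
    except ValueError as exc:
        print("rejected:", exc)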
diff --git a/doc/release/upcoming_changes/18070.improvement.rst b/doc/release/upcoming_changes/18070.improvement.rst deleted file mode 100644 index ae750fb1287c..000000000000 --- a/doc/release/upcoming_changes/18070.improvement.rst +++ /dev/null @@ -1,12 +0,0 @@ -``np.unique`` now returns single ``NaN`` ----------------------------------------- -When ``np.unique`` operated on an array with multiple ``NaN`` entries, -its return included a ``NaN`` for each entry that was ``NaN`` in the original array. -This is now improved such that the returned array contains just one ``NaN`` as the -last element. - -Also for complex arrays all ``NaN`` values are considered equivalent -(no matter whether the ``NaN`` is in the real or imaginary part). As the -representant for the returned array the smallest one in the -lexicographical order is chosen - see ``np.sort`` for how the lexicographical -order is defined for complex arrays. \ No newline at end of file diff --git a/doc/release/upcoming_changes/18110.change.rst b/doc/release/upcoming_changes/18110.change.rst deleted file mode 100644 index 7dbf8e5b79be..000000000000 --- a/doc/release/upcoming_changes/18110.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -`numpy.piecewise` output class now matches the input class ----------------------------------------------------------- -When `numpy.ndarray` subclasses are used on input to `numpy.piecewise`, -they are passed on to the functions. The output will now be of the -same subclass as well. diff --git a/doc/release/upcoming_changes/18116.future.rst b/doc/release/upcoming_changes/18116.future.rst deleted file mode 100644 index 1eb14d5f785c..000000000000 --- a/doc/release/upcoming_changes/18116.future.rst +++ /dev/null @@ -1,29 +0,0 @@ -Promotion of strings with numbers and bools is deprecated ---------------------------------------------------------- -Any promotion of numbers and strings is deprecated and will -give a ``FutureWarning`` the main affected functionalities -are: - -* `numpy.promote_types` and `numpy.result_type` which will raise - an error in this case in the future. -* `numpy.concatenate` will raise an error when concatenating a string - and numeric array. You can use ``dtype="S"`` to explicitly request - a string result. -* `numpy.array` and related functions will start returning ``object`` - arrays because these functions use ``object`` as a fallback when - no common dtype can be found. However, it may happen that future - releases of NumPy will generally error in these cases. - -This will mainly affect code such as:: - - np.asarray(['string', 0]) - -and:: - - np.concatenate((['string'], [0])) - -in both cases adding ``dtype="U"`` or ``dtype="S"`` will give the -previous (string) result, while ``dtype=object`` will ensure an array with -object dtype is returned. - -Comparisons, universal functions, and casting are not affected by this. diff --git a/doc/release/upcoming_changes/18322.new_feature.rst b/doc/release/upcoming_changes/18322.new_feature.rst deleted file mode 100644 index 5330b9a978e0..000000000000 --- a/doc/release/upcoming_changes/18322.new_feature.rst +++ /dev/null @@ -1,22 +0,0 @@ -Let the mypy plugin manage extended-precision `numpy.number` subclasses ------------------------------------------------------------------------ - -The mypy_ plugin, introduced in `numpy/numpy#17843`_, has been expanded: -the plugin now removes annotations for platform-specific extended-precision -types that are not available to the platform in question. 
-For example, it will remove `~numpy.float128` when not available. - -Without the plugin *all* extended-precision types will, as far as mypy is concerned, -be available on all platforms. - -To enable the plugin, one must add it to their mypy `configuration file`_: - -.. code-block:: ini - - [mypy] - plugins = numpy.typing.mypy_plugin - - -.. _mypy: http://mypy-lang.org/ -.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html -.. _`numpy/numpy#17843`: https://github.com/numpy/numpy/pull/17843 diff --git a/doc/release/upcoming_changes/18629.new_feature.rst b/doc/release/upcoming_changes/18629.new_feature.rst deleted file mode 100644 index 7d75c323f40b..000000000000 --- a/doc/release/upcoming_changes/18629.new_feature.rst +++ /dev/null @@ -1,10 +0,0 @@ -New ``min_digits`` argument for printing float values ------------------------------------------------------ -A new ``min_digits`` argument has been added to the dragon4 float printing -functions `np.format_float_positional` and `np.format_float_scientific` . This -kwd guarantees that at least the given number of digits will be printed when -printing in unique=True mode, even if the extra digits are unnecessary to -uniquely specify the value. It is the counterpart to the precision argument -which sets the maximum number of digits to be printed. When unique=False in -fixed precision mode, it has no effect and the precision argument fixes the -number of digits. diff --git a/doc/release/upcoming_changes/18657.performance.rst b/doc/release/upcoming_changes/18657.performance.rst deleted file mode 100644 index b9d436725620..000000000000 --- a/doc/release/upcoming_changes/18657.performance.rst +++ /dev/null @@ -1,10 +0,0 @@ -Improve performance of ``np.save`` and ``np.load`` for small arrays -------------------------------------------------------------------- -``np.save`` is now a lot faster for small arrays. - -``np.load`` is also faster for small arrays, -but only when serializing with a version >= `(3, 0)`. - -Both are done by removing checks that are only relevant for Python 2, -while still maintaining compatibility with arrays -which might have been created by Python 2. diff --git a/doc/release/upcoming_changes/18658.compatibility.rst b/doc/release/upcoming_changes/18658.compatibility.rst deleted file mode 100644 index bb4052b9daef..000000000000 --- a/doc/release/upcoming_changes/18658.compatibility.rst +++ /dev/null @@ -1,6 +0,0 @@ -``/usr/include`` removed from default include paths ---------------------------------------------------- -The default include paths when building a package with ``numpy.distutils`` no -longer include ``/usr/include``. This path is normally added by the compiler, -and hardcoding it can be problematic. In case this causes a problem, please -open an issue. A workaround is documented in PR 18658. diff --git a/doc/release/upcoming_changes/18666.improvement.rst b/doc/release/upcoming_changes/18666.improvement.rst deleted file mode 100644 index 70b87ecf4e35..000000000000 --- a/doc/release/upcoming_changes/18666.improvement.rst +++ /dev/null @@ -1,9 +0,0 @@ -``Generator.rayleigh`` and ``Generator.geometric`` performance improved ------------------------------------------------------------------------ -The performance of Rayleigh and geometric random variate generation -in ``Generator`` has improved. These are both transformation of exponential -random variables and the slow log-based inverse cdf transformation has -been replaced with the Ziggurat-based exponential variate generator. 
- -This change breaks the stream of variates generated when variates from -either of these distributions are produced. diff --git a/doc/release/upcoming_changes/18695.new_feature.rst b/doc/release/upcoming_changes/18695.new_feature.rst deleted file mode 100644 index a1f1081768f3..000000000000 --- a/doc/release/upcoming_changes/18695.new_feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -f2py now recognizes Fortran abstract interface blocks ------------------------------------------------------ -`np.f2py` can now parse abstract interface blocks. diff --git a/doc/release/upcoming_changes/18697.expired.rst b/doc/release/upcoming_changes/18697.expired.rst deleted file mode 100644 index 5a45ce216924..000000000000 --- a/doc/release/upcoming_changes/18697.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -* The function ``PyUFunc_GenericFunction`` has been disabled. - It was deprecated in NumPy 1.19. Users should call the ufunc - directly using the Python API. -* The function ``PyUFunc_SetUsesArraysAsData`` has been disabled. - It was deprecated in NumPy 1.19. diff --git a/doc/release/upcoming_changes/18718.c_api.rst b/doc/release/upcoming_changes/18718.c_api.rst deleted file mode 100644 index eb9121ab645f..000000000000 --- a/doc/release/upcoming_changes/18718.c_api.rst +++ /dev/null @@ -1,13 +0,0 @@ -Use of ``ufunc->type_resolver`` and "type tuple" ------------------------------------------------- -NumPy now normalizes the "type tuple" argument to the -type resolver functions before calling it. Note that in -the use of this type resolver is legacy behaviour and NumPy -will not do so when possible. -Calling ``ufunc->type_resolver`` or ``PyUFunc_DefaultTypeResolver`` -is strongly discouraged and will now enforce a normalized -type tuple if done. -Note that this does not affect providing a type resolver, which -is expected to keep working in most circumstances. -If you have an unexpected use-case for calling the type resolver, -please inform the NumPy developers so that a solution can be found. diff --git a/doc/release/upcoming_changes/18718.compatibility.rst b/doc/release/upcoming_changes/18718.compatibility.rst deleted file mode 100644 index 18bf7158da50..000000000000 --- a/doc/release/upcoming_changes/18718.compatibility.rst +++ /dev/null @@ -1,59 +0,0 @@ -Changes to comparisons with ``dtype=...`` ------------------------------------------ -When the ``dtype=`` (or ``signature``) arguments to comparison -ufuncs (``equal``, ``less``, etc.) is used, this will denote -the desired output dtype in the future. -This means that: - - np.equal(2, 3, dtype=object) - -will give a ``FutureWarning`` that it will return an ``object`` -array in the future, which currently happens for: - - np.equal(None, None, dtype=object) - -due to the fact that ``np.array(None)`` is already an object -array. (This also happens for some other dtypes.) - -Since comparisons normally only return boolean arrays, providing -any other dtype will always raise an error in the future and -give a ``DeprecationWarning`` now. - - -Changes to ``dtype`` and ``signature`` arguments in ufuncs ----------------------------------------------------------- -The universal function arguments ``dtype`` and ``signature`` -which are also valid for reduction such as ``np.add.reduce`` -(which is the implementation for ``np.sum``) will now issue -a warning when the ``dtype`` provided is not a "basic" dtype. - -NumPy almost always ignored metadata, byteorder or time units -on these inputs. 
NumPy will now always ignore it and raise an -error if byteorder or time unit changed. -The following are the most important examples of changes which -will give the error. In some cases previously the information -stored was not ignored, in all of these an error is now raised:: - - # Previously ignored the byte-order (affect if non-native) - np.add(3, 5, dtype=">i32") - - # The biggest impact is for timedelta or datetimes: - arr = np.arange(10, dtype="m8[s]") - # The examples always ignored the time unit "ns": - np.add(arr, arr, dtype="m8[ns]") - np.maximum.reduce(arr, dtype="m8[ns]") - - # The following previously did use "ns" (as opposed to `arr.dtype`) - np.add(3, 5, dtype="m8[ns]") # Now return generic time units - np.maximum(arr, arr, dtype="m8[ns]") # Now returns "s" (from `arr`) - -The same applies for functions like ``np.sum`` which use these internally. -This change is necessary to achieve consistent handling within NumPy. - -If you run into these, in most cases pass for example ``dtype=np.timedelta64`` -which clearly denotes a general ``timedelta64`` without any unit or byte-order -defined. If you need to specify the output dtype precisely, you may do so -by either casting the inputs or providing an output array using `out=`. - -NumPy may choose to allow providing an exact output ``dtype`` here in the -future, which would be preceded by a ``FutureWarning``. diff --git a/doc/release/upcoming_changes/18737.new_feature.rst b/doc/release/upcoming_changes/18737.new_feature.rst deleted file mode 100644 index e451ac90ab69..000000000000 --- a/doc/release/upcoming_changes/18737.new_feature.rst +++ /dev/null @@ -1,12 +0,0 @@ -BLAS and LAPACK configuration via environment variables -------------------------------------------------------- -Autodetection of installed BLAS and LAPACK libraries can be bypassed by using -the ``NPY_BLAS_LIBS`` and ``NPY_LAPACK_LIBS`` environment variables. Instead, -the link flags in these environment variables will be used directly, and the -language is assumed to be F77. This is especially useful in automated builds -where the BLAS and LAPACK that are installed are known exactly. A use case is -replacing the actual implementation at runtime via stub library links. - -If ``NPY_CBLAS_LIBS`` is set (optional in addition to ``NPY_BLAS_LIBS``), this -will be used as well, by defining ``HAVE_CBLAS`` and appending the environment -variable content to the link flags. diff --git a/doc/release/upcoming_changes/18874.change.rst b/doc/release/upcoming_changes/18874.change.rst deleted file mode 100644 index c86fed83e739..000000000000 --- a/doc/release/upcoming_changes/18874.change.rst +++ /dev/null @@ -1,11 +0,0 @@ -Enable Accelerate Framework ----------------------------- -With the release of macOS 11.3, several different issues that -numpy was encountering when using Accelerate Framework's -implementation of BLAS and LAPACK should be resolved. This -change enables the Accelerate Framework as an option on macOS. -If additional issues are found, please file a bug report -against Accelerate using the developer feedback assistant -tool (https://developer.apple.com/bug-reporting/). We -intend to address issues promptly and plan to continue -supporting and updating our BLAS and LAPACK libraries. 
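[Editor's note] As a hedged illustration of the ``NPY_BLAS_LIBS`` / ``NPY_LAPACK_LIBS`` mechanism described above, the sketch below drives a ``numpy.distutils``-based build with explicit link flags. The library paths and the ``setup.py build`` invocation are assumptions for illustration, not values from this patch:

.. code-block:: python

    import os
    import subprocess

    env = dict(os.environ)
    # Bypass BLAS/LAPACK autodetection with explicit F77-style link flags.
    env["NPY_BLAS_LIBS"] = "-L/opt/myblas/lib -lblas"        # assumed install location
    env["NPY_LAPACK_LIBS"] = "-L/opt/mylapack/lib -llapack"  # assumed install location
    # Optional: also defines HAVE_CBLAS and appends these flags to the link step.
    env["NPY_CBLAS_LIBS"] = "-lcblas"

    # Hypothetical package using numpy.distutils; substitute the real build command.
    subprocess.run(["python", "setup.py", "build"], env=env, check=True)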
diff --git a/doc/release/upcoming_changes/18880.compatibility.rst b/doc/release/upcoming_changes/18880.compatibility.rst deleted file mode 100644 index 4951463cf032..000000000000 --- a/doc/release/upcoming_changes/18880.compatibility.rst +++ /dev/null @@ -1,34 +0,0 @@ -Ufunc ``signature=...`` and ``dtype=`` generalization and ``casting`` ---------------------------------------------------------------------- -The behaviour for ``np.ufunc(1.0, 1.0, signature=...)`` or -``np.ufunc(1.0, 1.0, dtype=...)`` can now yield different loops in 1.21 -compared to 1.20 because of changes in promotion. -When ``signature`` was previously used, the casting check on inputs -was relaxed, which could lead to downcasting inputs unsafely especially -if combined with ``casting="unsafe"``. - -Casting is now guaranteed to be safe. If a signature is only -partially provided, for example using ``signature=("float64", None, None)``, -this could lead to no loop being found (an error). -In that case, it is necessary to provide the complete signature -to enforce casting the inputs. -If ``dtype="float64"`` is used or only outputs are set (e.g. -``signature=(None, None, "float64")`` the is unchanged. -We expect that very few users are affected by this change. - -Further, the meaning of ``dtype="float64"`` has been slightly modified and -now strictly enforces only the correct output (and not input) DTypes. -This means it is now always equivalent to:: - - signature=(None, None, "float64") - -(If the ufunc has two inputs and one output). Since this could lead -to no loop being found in some cases, NumPy will normally also search -for the loop:: - - signature=("float64", "float64", "float64") - -if the first search failed. -In the future, this behaviour may be customized to achieve the expected -results for more complex ufuncs. (For some universal functions such as -``np.ldexp`` inputs can have different DTypes.) diff --git a/doc/release/upcoming_changes/18906.new_function.rst b/doc/release/upcoming_changes/18906.new_function.rst deleted file mode 100644 index 38444009d3cd..000000000000 --- a/doc/release/upcoming_changes/18906.new_function.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. currentmodule:: numpy.random - -Add `PCG64DXSM` `BitGenerator` ------------------------------- - -Uses of the `PCG64` `BitGenerator` in a massively-parallel context have been -shown to have statistical weaknesses that were not apparent at the first -release in numpy 1.17. Most users will never observe this weakness and are -safe to continue to use `PCG64`. We have introduced a new `PCG64DXSM` -`BitGenerator` that will eventually become the new default `BitGenerator` -implementation used by `default_rng` in future releases. `PCG64DXSM` solves -the statistical weakness while preserving the performance and the features of -`PCG64`. - -See :ref:`upgrading-pcg64` for more details. - -.. currentmodule:: numpy diff --git a/doc/release/upcoming_changes/18934.improvement.rst b/doc/release/upcoming_changes/18934.improvement.rst deleted file mode 100644 index 582062f2f5fb..000000000000 --- a/doc/release/upcoming_changes/18934.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -Placeholder annotations have been improved ------------------------------------------- -All placeholder annotations, that were previously annotated as ``typing.Any``, -have been improved. Where appropiate they have been replaced with explicit -function definitions, classes or other miscellaneous objects. 
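[Editor's note] Readers who want to try the ``PCG64DXSM`` bit generator described above before it becomes the default can opt in explicitly; a minimal sketch with an arbitrarily chosen seed:

.. code-block:: python

    import numpy as np

    # Opt in explicitly; np.random.default_rng() continues to use PCG64 in this release.
    rng = np.random.Generator(np.random.PCG64DXSM(seed=42))
    print(rng.standard_normal(3))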
diff --git a/doc/release/upcoming_changes/18935.new_feature.rst b/doc/release/upcoming_changes/18935.new_feature.rst deleted file mode 100644 index 8c6e25da6800..000000000000 --- a/doc/release/upcoming_changes/18935.new_feature.rst +++ /dev/null @@ -1,28 +0,0 @@ -A runtime-subcriptable alias has been added for `ndarray` ---------------------------------------------------------- -`numpy.typing.NDArray` has been added, a runtime-subscriptable alias for -``np.ndarray[Any, np.dtype[~Scalar]]``. The new type alias can be used -for annotating arrays with a given dtype and unspecified shape. :sup:`1` - -:sup:`1` NumPy does not support the annotating of array shapes as of 1.21, -this is expected to change in the future though (see :pep:`646`). - -Examples -~~~~~~~~ - -.. code-block:: python - - >>> import numpy as np - >>> import numpy.typing as npt - - >>> print(npt.NDArray) - numpy.ndarray[typing.Any, numpy.dtype[~ScalarType]] - - >>> print(npt.NDArray[np.float64]) - numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] - - >>> NDArrayInt = npt.NDArray[np.int_] - >>> a: NDArrayInt = np.arange(10) - - >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]: - ... return np.array(a) diff --git a/doc/release/upcoming_changes/18963.expired.rst b/doc/release/upcoming_changes/18963.expired.rst deleted file mode 100644 index d93b8a06a24b..000000000000 --- a/doc/release/upcoming_changes/18963.expired.rst +++ /dev/null @@ -1,8 +0,0 @@ -Remove deprecated ``PolyBase`` and unused ``PolyError`` and ``PolyDomainError`` -------------------------------------------------------------------------------- - -The class ``PolyBase`` has been removed (deprecated in numpy 1.9.0). Please use -the abstract ``ABCPolyBase`` class instead. - -Furthermore, the unused ``PolyError`` and ``PolyDomainError`` exceptions are -removed from the `numpy.polynomial`. diff --git a/doc/release/upcoming_changes/19001.deprecation.rst b/doc/release/upcoming_changes/19001.deprecation.rst deleted file mode 100644 index 48087f8a5e6a..000000000000 --- a/doc/release/upcoming_changes/19001.deprecation.rst +++ /dev/null @@ -1,8 +0,0 @@ -Exceptions will be raised during array-like creation ----------------------------------------------------- -When an object raised an exception during access of the special -attributes ``__array__`` or ``__array_interface__``, this exception -was usually ignored. -A warning is now given when the exception is anything but AttributeError. -To silence the warning, the type raising the exception has to be adapted -to raise an ``AttributeError``. diff --git a/doc/release/upcoming_changes/19031.deprecation.rst b/doc/release/upcoming_changes/19031.deprecation.rst deleted file mode 100644 index de92e18dfd45..000000000000 --- a/doc/release/upcoming_changes/19031.deprecation.rst +++ /dev/null @@ -1,12 +0,0 @@ -Four `ndarray.ctypes` methods have been deprecated --------------------------------------------------- -Four methods of the `ndarray.ctypes` object have been deprecated, -as they are (undocumentated) implementation artifacts of their respective -properties. 
- -The methods in question are: - -* ``_ctypes.get_data`` (use ``_ctypes.data`` instead) -* ``_ctypes.get_shape`` (use ``_ctypes.shape`` instead) -* ``_ctypes.get_strides`` (use ``_ctypes.strides`` instead) -* ``_ctypes.get_as_parameter`` (use ``_ctypes._as_parameter_`` instead) diff --git a/doc/release/upcoming_changes/19049.compatibility.rst b/doc/release/upcoming_changes/19049.compatibility.rst deleted file mode 100644 index 5a6eadc7afbc..000000000000 --- a/doc/release/upcoming_changes/19049.compatibility.rst +++ /dev/null @@ -1,6 +0,0 @@ -Distutils forces strict floating point model on clang ------------------------------------------------------ -NumPy distutils will now always add the ``-ffp-exception-behavior=strict`` -compiler flag when compiling with clang. Clang defaults to a non-strict -version, which allows the compiler to generate code that does not set -floating point warnings/errors correctly. diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst index 5fda1f631804..4b6d4d11595f 100644 --- a/doc/source/release/1.21.0-notes.rst +++ b/doc/source/release/1.21.0-notes.rst @@ -3,4 +3,561 @@ ========================== NumPy 1.21.0 Release Notes ========================== +The NumPy 1.21.0 release highlights are +* continued SIMD work covering more functions and platforms, +* initial work on the new dtype infrastructure and casting, +* improved documentation, +* improved annotations, +* the new ``PCG64DXSM`` bitgenerator for random numbers. + +In addition there are the usual large number of bug fixes and other improvements. + +The Python versions supported for this release are 3.7-3.9. Official support +for Python 3.10 will be added when it is released. + + +New functions +============= + +.. currentmodule:: numpy.random + +Add `PCG64DXSM` `BitGenerator` +------------------------------ + +Uses of the ``PCG64`` ``BitGenerator`` in a massively-parallel context have been +shown to have statistical weaknesses that were not apparent at the first +release in numpy 1.17. Most users will never observe this weakness and are +safe to continue to use ``PCG64``. We have introduced a new ``PCG64DXSM`` +``BitGenerator`` that will eventually become the new default ``BitGenerator`` +implementation used by ``default_rng`` in future releases. ``PCG64DXSM`` solves +the statistical weakness while preserving the performance and the features of +``PCG64``. + +See :ref:`upgrading-pcg64` for more details. + +.. currentmodule:: numpy + +(`gh-18906 `__) + + +Expired deprecations +==================== + +* The ``shape`` argument `~numpy.unravel_index` cannot be passed + as ``dims`` keyword argument anymore. (Was deprecated in NumPy 1.16.) + + (`gh-17900 `__) + +* The function ``PyUFunc_GenericFunction`` has been disabled. + It was deprecated in NumPy 1.19. Users should call the ufunc + directly using the Python API. + + (`gh-18697 `__) + +* The function ``PyUFunc_SetUsesArraysAsData`` has been disabled. + It was deprecated in NumPy 1.19. + + (`gh-18697 `__) + +* The class ``PolyBase`` has been removed (deprecated in numpy 1.9.0). Please + use the abstract ``ABCPolyBase`` class instead. + + (`gh-18963 `__) + +* The unused ``PolyError`` and ``PolyDomainError`` exceptions are + removed. 
+ + (`gh-18963 `__) + + +Deprecations +============ + +Inexact matches for ``numpy.convolve`` and ``numpy.correlate`` are deprecated +----------------------------------------------------------------------------- + +`~numpy.convolve` and `~numpy.correlate` now emit a warning when there are case +insensitive and/or inexact matches found for ``mode`` argument in the functions. +Pass full ``"same"``, ``"valid"``, ``"full"`` strings instead of +``"s"``, ``"v"``, ``"f"`` for the ``mode`` argument. + +(`gh-17492 `__) + +``np.typeDict`` has been formally deprecated +-------------------------------------------- +``np.typeDict`` is a deprecated alias for ``np.sctypeDict`` and +has been so for over 14 years (6689502_). +A deprecation warning will now be issued whenever getting ``np.typeDict``. + +.. _6689502: https://github.com/numpy/numpy/commit/668950285c407593a368336ff2e737c5da84af7d + +(`gh-17586 `__) + +Exceptions will be raised during array-like creation +---------------------------------------------------- +When an object raised an exception during access of the special +attributes ``__array__`` or ``__array_interface__``, this exception +was usually ignored. +A warning is now given when the exception is anything but AttributeError. +To silence the warning, the type raising the exception has to be adapted +to raise an ``AttributeError``. + +(`gh-19001 `__) + +Four ``ndarray.ctypes`` methods have been deprecated +---------------------------------------------------- +Four methods of the `ndarray.ctypes` object have been deprecated, +as they are (undocumentated) implementation artifacts of their respective +properties. + +The methods in question are: + +* ``_ctypes.get_data`` (use ``_ctypes.data`` instead) +* ``_ctypes.get_shape`` (use ``_ctypes.shape`` instead) +* ``_ctypes.get_strides`` (use ``_ctypes.strides`` instead) +* ``_ctypes.get_as_parameter`` (use ``_ctypes._as_parameter_`` instead) + +(`gh-19031 `__) + + +Future Changes +============== + +Promotion of strings with numbers and bools will be deprecated +-------------------------------------------------------------- +Any promotion of numbers and strings is deprecated and will +give a ``FutureWarning`` the main affected functionalities +are: + +* `~numpy.promote_types` and `~numpy.result_type` which will raise + an error in this case in the future. +* `~numpy.concatenate` will raise an error when concatenating a string + and numeric array. You can use ``dtype="S"`` to explicitly request + a string result. +* `~numpy.array` and related functions will start returning ``object`` + arrays because these functions use ``object`` as a fallback when + no common dtype can be found. However, it may happen that future + releases of NumPy will generally error in these cases. + +This will mainly affect code such as:: + + np.asarray(['string', 0]) + +and:: + + np.concatenate((['string'], [0])) + +in both cases adding ``dtype="U"`` or ``dtype="S"`` will give the +previous (string) result, while ``dtype=object`` will ensure an array with +object dtype is returned. + +Comparisons, universal functions, and casting are not affected by this. + +(`gh-18116 `__) + + +Compatibility notes +=================== + +Error type changes in universal functions +----------------------------------------- +The universal functions may now raise different errors on invalid input in some +cases. The main changes should be that a ``RuntimeError`` was replaced with a +more fitting ``TypeError``. 
When multiple errors were present in the same +call, NumPy may now raise a different one. + +(`gh-15271 `__) + +``__array_ufunc__`` argument validation +--------------------------------------- +NumPy will now partially validate arguments before calling ``__array_ufunc__``. +Previously, it was possible to pass on invalid arguments (such as a +non-existing keyword argument) when dispatch was known to occur. + +(`gh-15271 `__) + +``__array_ufunc__`` and additional positional arguments +------------------------------------------------------- +Previously, all positionally passed arguments were checked for +``__array_ufunc__`` support. In the case of ``reduce``, ``accumulate``, and +``reduceat`` all arguments may be passed by position. This means that when +they were passed by position, they could previously have been asked to handle +the ufunc call via ``__array_ufunc__``. Since this depended on the way the +arguments were passed (by position or by keyword), NumPy will now only dispatch +on the input and output array. For example, NumPy will never dispatch on the +``where`` array in a reduction such as ``np.add.reduce``. + +(`gh-15271 `__) + +Validate input values in ``Generator.uniform`` +---------------------------------------------- +Checked that ``high - low >= 0`` in ``np.random.Generator.uniform``. Raises +``ValueError`` if ``low > high``. Previously out-of-order inputs were accepted +and silently swapped, so that if ``low > high``, the value generated was +``high + (low - high) * random()``. + +(`gh-17921 `__) + +``/usr/include`` removed from default include paths +--------------------------------------------------- +The default include paths when building a package with ``numpy.distutils`` no +longer include ``/usr/include``. This path is normally added by the compiler, +and hardcoding it can be problematic. In case this causes a problem, please +open an issue. A workaround is documented in PR 18658. + +(`gh-18658 `__) + +Changes to comparisons with ``dtype=...`` +----------------------------------------- +When the ``dtype=`` (or ``signature``) arguments to comparison +ufuncs (``equal``, ``less``, etc.) is used, this will denote +the desired output dtype in the future. +This means that: + + np.equal(2, 3, dtype=object) + +will give a ``FutureWarning`` that it will return an ``object`` +array in the future, which currently happens for: + + np.equal(None, None, dtype=object) + +due to the fact that ``np.array(None)`` is already an object +array. (This also happens for some other dtypes.) + +Since comparisons normally only return boolean arrays, providing +any other dtype will always raise an error in the future and +give a ``DeprecationWarning`` now. + +(`gh-18718 `__) + +Changes to ``dtype`` and ``signature`` arguments in ufuncs +---------------------------------------------------------- +The universal function arguments ``dtype`` and ``signature`` +which are also valid for reduction such as ``np.add.reduce`` +(which is the implementation for ``np.sum``) will now issue +a warning when the ``dtype`` provided is not a "basic" dtype. + +NumPy almost always ignored metadata, byteorder or time units +on these inputs. NumPy will now always ignore it and raise an +error if byteorder or time unit changed. +The following are the most important examples of changes which +will give the error. 
In some cases previously the information stored was not ignored; in all of these an error is now raised::
+
+    # Previously ignored the byte-order (affects results only if non-native)
+    np.add(3, 5, dtype=">i32")
+
+    # The biggest impact is for timedelta or datetimes:
+    arr = np.arange(10, dtype="m8[s]")
+    # The examples always ignored the time unit "ns":
+    np.add(arr, arr, dtype="m8[ns]")
+    np.maximum.reduce(arr, dtype="m8[ns]")
+
+    # The following previously did use "ns" (as opposed to `arr.dtype`)
+    np.add(3, 5, dtype="m8[ns]") # Now returns generic time units
+    np.maximum(arr, arr, dtype="m8[ns]") # Now returns "s" (from `arr`)
+
+The same applies for functions like ``np.sum`` which use these internally.
+This change is necessary to achieve consistent handling within NumPy.
+
+If you run into these, in most cases pass for example ``dtype=np.timedelta64``,
+which clearly denotes a general ``timedelta64`` without any unit or byte-order
+defined. If you need to specify the output dtype precisely, you may do so
+by either casting the inputs or providing an output array using `out=`.
+
+NumPy may choose to allow providing an exact output ``dtype`` here in the
+future, which would be preceded by a ``FutureWarning``.
+
+(`gh-18718 `__)
+
+Ufunc ``signature=...`` and ``dtype=`` generalization and ``casting``
+---------------------------------------------------------------------
+The behaviour for ``np.ufunc(1.0, 1.0, signature=...)`` or
+``np.ufunc(1.0, 1.0, dtype=...)`` can now yield different loops in 1.21
+compared to 1.20 because of changes in promotion.
+When ``signature`` was previously used, the casting check on inputs
+was relaxed, which could lead to downcasting inputs unsafely, especially
+if combined with ``casting="unsafe"``.
+
+Casting is now guaranteed to be safe. If a signature is only
+partially provided, for example using ``signature=("float64", None, None)``,
+this could lead to no loop being found (an error).
+In that case, it is necessary to provide the complete signature
+to enforce casting the inputs.
+If ``dtype="float64"`` is used or only outputs are set (e.g.
+``signature=(None, None, "float64")``), the behaviour is unchanged.
+We expect that very few users are affected by this change.
+
+Further, the meaning of ``dtype="float64"`` has been slightly modified and
+now strictly enforces only the correct output (and not input) DTypes.
+This means it is now always equivalent to::
+
+    signature=(None, None, "float64")
+
+(If the ufunc has two inputs and one output). Since this could lead
+to no loop being found in some cases, NumPy will normally also search
+for the loop::
+
+    signature=("float64", "float64", "float64")
+
+if the first search failed.
+In the future, this behaviour may be customized to achieve the expected
+results for more complex ufuncs. (For some universal functions such as
+``np.ldexp``, inputs can have different DTypes.)
+
+(`gh-18880 `__)
+
+Distutils forces strict floating point model on clang
+-----------------------------------------------------
+NumPy distutils will now always add the ``-ffp-exception-behavior=strict``
+compiler flag when compiling with clang. Clang defaults to a non-strict
+version, which allows the compiler to generate code that does not set
+floating point warnings/errors correctly.
+
+(`gh-19049 `__)
+
+
+C API changes
+=============
+
+Use of ``ufunc->type_resolver`` and "type tuple"
+------------------------------------------------
+NumPy now normalizes the "type tuple" argument to the type resolver functions
+before calling it.
Note that in the use of this type resolver is legacy +behaviour and NumPy will not do so when possible. Calling +``ufunc->type_resolver`` or ``PyUFunc_DefaultTypeResolver`` is strongly +discouraged and will now enforce a normalized type tuple if done. Note that +this does not affect providing a type resolver, which is expected to keep +working in most circumstances. If you have an unexpected use-case for calling +the type resolver, please inform the NumPy developers so that a solution can be +found. + +(`gh-18718 `__) + + +New Features +============ + +Added a mypy plugin for handling platform-specific ``numpy.number`` precisions +------------------------------------------------------------------------------ +A mypy_ plugin is now available for automatically assigning the (platform-dependent) +precisions of certain `~numpy.number` subclasses, including the likes of +`~numpy.int_`, `~numpy.intp` and `~numpy.longlong`. See the documentation on +:ref:`scalar types ` for a comprehensive overview +of the affected classes. + +Note that while usage of the plugin is completely optional, without it the +precision of above-mentioned classes will be inferred as `~typing.Any`. + +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + + +.. _mypy: http://mypy-lang.org/ +.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html + +(`gh-17843 `__) + +Let the mypy plugin manage extended-precision ``numpy.number`` subclasses +------------------------------------------------------------------------- +The mypy_ plugin, introduced in `numpy/numpy#17843`_, has been expanded: +the plugin now removes annotations for platform-specific extended-precision +types that are not available to the platform in question. +For example, it will remove `~numpy.float128` when not available. + +Without the plugin *all* extended-precision types will, as far as mypy is concerned, +be available on all platforms. + +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + + +.. _mypy: http://mypy-lang.org/ +.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html +.. _`numpy/numpy#17843`: https://github.com/numpy/numpy/pull/17843 + +(`gh-18322 `__) + +New ``min_digits`` argument for printing float values +----------------------------------------------------- +A new ``min_digits`` argument has been added to the dragon4 float printing +functions `~numpy.format_float_positional` and `~numpy.format_float_scientific` +. This kwd guarantees that at least the given number of digits will be printed +when printing in unique=True mode, even if the extra digits are unnecessary to +uniquely specify the value. It is the counterpart to the precision argument +which sets the maximum number of digits to be printed. When unique=False in +fixed precision mode, it has no effect and the precision argument fixes the +number of digits. + +(`gh-18629 `__) + +f2py now recognizes Fortran abstract interface blocks +----------------------------------------------------- +`~numpy.f2py` can now parse abstract interface blocks. + +(`gh-18695 `__) + +BLAS and LAPACK configuration via environment variables +------------------------------------------------------- +Autodetection of installed BLAS and LAPACK libraries can be bypassed by using +the ``NPY_BLAS_LIBS`` and ``NPY_LAPACK_LIBS`` environment variables. 
Instead, +the link flags in these environment variables will be used directly, and the +language is assumed to be F77. This is especially useful in automated builds +where the BLAS and LAPACK that are installed are known exactly. A use case is +replacing the actual implementation at runtime via stub library links. + +If ``NPY_CBLAS_LIBS`` is set (optional in addition to ``NPY_BLAS_LIBS``), this +will be used as well, by defining ``HAVE_CBLAS`` and appending the environment +variable content to the link flags. + +(`gh-18737 `__) + +A runtime-subcriptable alias has been added for ``ndarray`` +----------------------------------------------------------- +``numpy.typing.NDArray`` has been added, a runtime-subscriptable alias for +``np.ndarray[Any, np.dtype[~Scalar]]``. The new type alias can be used +for annotating arrays with a given dtype and unspecified shape. :sup:`1` + +:sup:`1` NumPy does not support the annotating of array shapes as of 1.21, +this is expected to change in the future though (see :pep:`646`). + +Examples +~~~~~~~~ + +.. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> print(npt.NDArray) + numpy.ndarray[typing.Any, numpy.dtype[~ScalarType]] + + >>> print(npt.NDArray[np.float64]) + numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] + + >>> NDArrayInt = npt.NDArray[np.int_] + >>> a: NDArrayInt = np.arange(10) + + >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]: + ... return np.array(a) + +(`gh-18935 `__) + + +Improvements +============ + +Arbitrary ``period`` option for ``numpy.unwrap`` +------------------------------------------------ +The size of the interval over which phases are unwrapped is no longer restricted to ``2 * pi``. +This is especially useful for unwrapping degrees, but can also be used for other intervals. + +.. code:: python + + >>> phase_deg = np.mod(np.linspace(0,720,19), 360) - 180 + >>> phase_deg + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + -180., -140., -100., -60., -20., 20., 60., 100., 140., + -180.]) + + >>> unwrap(phase_deg, period=360) + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) + +(`gh-16987 `__) + +``np.unique`` now returns single ``NaN`` +---------------------------------------- +When ``np.unique`` operated on an array with multiple ``NaN`` entries, +its return included a ``NaN`` for each entry that was ``NaN`` in the original array. +This is now improved such that the returned array contains just one ``NaN`` as the +last element. + +Also for complex arrays all ``NaN`` values are considered equivalent +(no matter whether the ``NaN`` is in the real or imaginary part). As the +representant for the returned array the smallest one in the +lexicographical order is chosen - see ``np.sort`` for how the lexicographical +order is defined for complex arrays. + +(`gh-18070 `__) + +``Generator.rayleigh`` and ``Generator.geometric`` performance improved +----------------------------------------------------------------------- +The performance of Rayleigh and geometric random variate generation +in ``Generator`` has improved. These are both transformation of exponential +random variables and the slow log-based inverse cdf transformation has +been replaced with the Ziggurat-based exponential variate generator. + +This change breaks the stream of variates generated when variates from +either of these distributions are produced. 
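A quick, illustrative sketch (not part of the release notes; the seed, scale, and
probability below are arbitrary) of drawing from the two affected distributions:

.. code-block:: python

    import numpy as np

    # Both distributions are derived from exponential variates, so the new
    # Ziggurat-based exponential generator changes the exact values drawn
    # for a given seed, while the statistical properties stay the same.
    rng = np.random.default_rng(12345)        # seed chosen arbitrarily
    rayleigh_draws = rng.rayleigh(scale=2.0, size=5)
    geometric_draws = rng.geometric(p=0.35, size=5)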
+
+(`gh-18666 `__)
+
+Placeholder annotations have been improved
+------------------------------------------
+All placeholder annotations that were previously annotated as ``typing.Any``
+have been improved. Where appropriate they have been replaced with explicit
+function definitions, classes or other miscellaneous objects.
+
+(`gh-18934 `__)
+
+
+Performance improvements
+========================
+
+Improved performance in integer division of NumPy arrays
+--------------------------------------------------------
+Integer division of NumPy arrays now uses
+`libdivide `__ when the divisor is a constant. With the
+usage of libdivide and other minor optimizations, there is a large speedup.
+The ``//`` operator and ``np.floor_divide`` make use of the new changes.
+
+(`gh-17727 `__)
+
+Improve performance of ``np.save`` and ``np.load`` for small arrays
+-------------------------------------------------------------------
+``np.save`` is now a lot faster for small arrays.
+
+``np.load`` is also faster for small arrays,
+but only when serializing with a version >= ``(3, 0)``.
+
+Both are done by removing checks that are only relevant for Python 2,
+while still maintaining compatibility with arrays
+which might have been created by Python 2.
+
+(`gh-18657 `__)
+
+
+Changes
+=======
+
+`numpy.piecewise` output class now matches the input class
+----------------------------------------------------------
+When `~numpy.ndarray` subclasses are used on input to `~numpy.piecewise`,
+they are passed on to the functions. The output will now be of the
+same subclass as well.
+
+(`gh-18110 `__)
+
+Enable Accelerate Framework
+----------------------------
+With the release of macOS 11.3, several different issues that NumPy was
+encountering when using Accelerate Framework's implementation of BLAS and
+LAPACK should be resolved. This change enables the Accelerate Framework as an
+option on macOS. If additional issues are found, please file a bug report
+against Accelerate using the developer feedback assistant tool
+(https://developer.apple.com/bug-reporting/). We intend to address issues
+promptly and plan to continue supporting and updating our BLAS and LAPACK
+libraries.
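As a side note (not part of this change), one way to check which BLAS/LAPACK
implementation a given NumPy build actually linked against is the build-time
configuration dump; the exact output depends on the platform and build options:

.. code-block:: python

    import numpy as np

    # Prints the BLAS/LAPACK libraries and link flags recorded at build time;
    # a macOS build using Accelerate should mention the framework here.
    np.show_config()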
+ +(`gh-18874 `__) From 106088d14d54e8ccff79a3d04fb42c3a0b449c27 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 25 May 2021 15:29:22 +0200 Subject: [PATCH 1234/1270] BUG: Fixed an issue wherein `_GenericAlias.__getitem__` would raise for underlying types with >1 parameters --- numpy/typing/_generic_alias.py | 3 ++- numpy/typing/tests/test_generic_alias.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py index f98fca62edf3..d83979aaf913 100644 --- a/numpy/typing/_generic_alias.py +++ b/numpy/typing/_generic_alias.py @@ -63,7 +63,8 @@ def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T: elif isinstance(i, _GenericAlias): value = _reconstruct_alias(i, parameters) elif hasattr(i, "__parameters__"): - value = i[next(parameters)] + prm_tup = tuple(next(parameters) for _ in i.__parameters__) + value = i[prm_tup] else: value = i args.append(value) diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py index 13072051a83f..5f86c40012d2 100644 --- a/numpy/typing/tests/test_generic_alias.py +++ b/numpy/typing/tests/test_generic_alias.py @@ -11,6 +11,8 @@ from numpy.typing._generic_alias import _GenericAlias ScalarType = TypeVar("ScalarType", bound=np.generic) +T1 = TypeVar("T1") +T2 = TypeVar("T2") DType = _GenericAlias(np.dtype, (ScalarType,)) NDArray = _GenericAlias(np.ndarray, (Any, DType)) @@ -50,6 +52,7 @@ class TestGenericAlias: ("__getitem__", lambda n: n[np.float64]), ("__getitem__", lambda n: n[ScalarType][np.float64]), ("__getitem__", lambda n: n[Union[np.int64, ScalarType]][np.float64]), + ("__getitem__", lambda n: n[Union[T1, T2]][np.float32, np.float64]), ("__eq__", lambda n: n == n), ("__ne__", lambda n: n != np.ndarray), ("__dir__", lambda n: dir(n)), From 2ed19415920fff8e6fd7b54af747e4aed3a2bff9 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Tue, 25 May 2021 15:30:08 +0200 Subject: [PATCH 1235/1270] MAINT: Change the variance of the `npt.ArrayLike` to covariant --- numpy/typing/_add_docstring.py | 4 ++-- numpy/typing/_generic_alias.py | 2 +- numpy/typing/tests/test_generic_alias.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/typing/_add_docstring.py b/numpy/typing/_add_docstring.py index 34dbdb0c6e77..56ef41cfdb3c 100644 --- a/numpy/typing/_add_docstring.py +++ b/numpy/typing/_add_docstring.py @@ -114,7 +114,7 @@ def _parse_docstrings() -> str: add_newdoc('NDArray', repr(NDArray), """ A :term:`generic ` version of - `np.ndarray[Any, np.dtype[~ScalarType]] `. + `np.ndarray[Any, np.dtype[+ScalarType]] `. Can be used during runtime for typing arrays with a given dtype and unspecified shape. 
@@ -127,7 +127,7 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[typing.Any, numpy.dtype[~ScalarType]] + numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]] >>> print(npt.NDArray[np.float64]) numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py index d83979aaf913..68523827a71a 100644 --- a/numpy/typing/_generic_alias.py +++ b/numpy/typing/_generic_alias.py @@ -196,7 +196,7 @@ def __getattribute__(self, name: str) -> Any: else: _GENERIC_ALIAS_TYPE = (_GenericAlias,) -ScalarType = TypeVar("ScalarType", bound=np.generic) +ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True) if TYPE_CHECKING: NDArray = np.ndarray[Any, np.dtype[ScalarType]] diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py index 5f86c40012d2..0b99174392f0 100644 --- a/numpy/typing/tests/test_generic_alias.py +++ b/numpy/typing/tests/test_generic_alias.py @@ -10,7 +10,7 @@ import numpy as np from numpy.typing._generic_alias import _GenericAlias -ScalarType = TypeVar("ScalarType", bound=np.generic) +ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True) T1 = TypeVar("T1") T2 = TypeVar("T2") DType = _GenericAlias(np.dtype, (ScalarType,)) From c3eb82ae4136b382e8e987c9db83bef1ca5bfd94 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 24 May 2021 14:24:47 -0600 Subject: [PATCH 1236/1270] BUG: Linter should on run on pull requests. That was the original intent, but linter was only disabled for merges to the main branch and still ran, and failed, on maintenance branches. Closes #19079. --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index dd189b8e9131..14a59e8800af 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -19,7 +19,7 @@ stages: jobs: - job: Lint - condition: and(succeeded(), ne(variables['Build.SourceBranch'], 'refs/heads/main')) # skip for PR merges + condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) pool: vmImage: 'ubuntu-18.04' steps: From 9b297420e2500c5ab7a7eeb9564a866f40f182ed Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 25 May 2021 13:35:42 -0600 Subject: [PATCH 1237/1270] BUG: Fix setup.py to work in maintenance branches. This fixes setup.py to correctly detect the release status and base version from the full version string provided by versioneer. Before this fix, versions like '1.22.0.dev0+...' were correctly handled, but versions like '1.21.0rc1+...' were not. --- setup.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 247dc512a2b1..82661046637f 100755 --- a/setup.py +++ b/setup.py @@ -25,6 +25,7 @@ import textwrap import warnings import builtins +import re # Python supported version checks. Keep right after stdlib imports to ensure we @@ -46,8 +47,14 @@ # The version components are changed from ints to strings, but only VERSION # seems to matter outside of this module and it was already a str. FULLVERSION = versioneer.get_version() -ISRELEASED = 'dev' not in FULLVERSION -MAJOR, MINOR, MICRO = FULLVERSION.split('.')[:3] + +# Capture the version string: +# 1.22.0.dev0+ ... -> ISRELEASED == False, VERSION == 1.22.0 +# 1.22.0rc1+ ... -> ISRELEASED == False, VERSION == 1.22.0 +# 1.22.0 ... -> ISRELEASED == True, VERSION == 1.22.0 +# 1.22.0rc1 ... 
-> ISRELEASED == True, VERSION == 1.22.0 +ISRELEASED = re.search(r'(dev|\+)', FULLVERSION) is None +MAJOR, MINOR, MICRO = re.match(r'(\d+)\.(\d+)\.(\d+)', FULLVERSION).groups() VERSION = '{}.{}.{}'.format(MAJOR, MINOR, MICRO) # The first version not in the `Programming Language :: Python :: ...` classifiers above From 264cc43729acb08913f6c4759c325e07bc9facff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Kieffer?= Date: Mon, 31 May 2021 15:46:54 +0200 Subject: [PATCH 1238/1270] expose `short_version` as previously in version.py --- numpy/version.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/version.py b/numpy/version.py index 48bdb32dac07..70f9f098254f 100644 --- a/numpy/version.py +++ b/numpy/version.py @@ -5,6 +5,7 @@ vinfo = get_versions() version: str = vinfo["version"] full_version: str = vinfo['version'] +short_version: str = vinfo['version'] git_revision: str = vinfo['full-revisionid'] release = 'dev0' not in version and '+' not in version From e297b3dcd5dfbb5de0a895be60e00e0a2bc13009 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Kieffer?= Date: Mon, 31 May 2021 16:21:10 +0200 Subject: [PATCH 1239/1270] BUG: re-introduce short-version as it was --- numpy/tests/test_numpy_version.py | 8 ++++++++ numpy/version.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index 28595026e212..f9399cc93d93 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -32,3 +32,11 @@ def test_valid_numpy_version(): res = re.match(version_pattern + dev_suffix + '$', np.__version__) assert_(res is not None, np.__version__) + + +def test_short_version(): + # Check numpy.short_version actually exists + if np.version.release: + assert_(numpy.__version__ == numpy.version.short_version, "short_version in release") + else: + assert_(numpy.__version__.split("+")[0] == numpy.version.short_version, "short_version not in release") diff --git a/numpy/version.py b/numpy/version.py index 70f9f098254f..4159a1c0eb82 100644 --- a/numpy/version.py +++ b/numpy/version.py @@ -5,8 +5,8 @@ vinfo = get_versions() version: str = vinfo["version"] full_version: str = vinfo['version'] -short_version: str = vinfo['version'] git_revision: str = vinfo['full-revisionid'] release = 'dev0' not in version and '+' not in version +short_version: str = vinfo['version'].split("+")[0] del get_versions, vinfo From 10f444a5f9cda8096dd0ccb204624b509459b398 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Kieffer?= Date: Mon, 31 May 2021 16:26:45 +0200 Subject: [PATCH 1240/1270] TST: fix pylint --- numpy/tests/test_numpy_version.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index f9399cc93d93..01f5ea0e199a 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -37,6 +37,8 @@ def test_valid_numpy_version(): def test_short_version(): # Check numpy.short_version actually exists if np.version.release: - assert_(numpy.__version__ == numpy.version.short_version, "short_version in release") + assert_(numpy.__version__ == numpy.version.short_version, + "short_version in release") else: - assert_(numpy.__version__.split("+")[0] == numpy.version.short_version, "short_version not in release") + assert_(numpy.__version__.split("+")[0] == numpy.version.short_version, + "short_version not in release") From 8cd38bb7d1966bafdd058e19af548ca397244d99 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Kieffer?= Date: Mon, 31 May 2021 16:31:18 +0200 Subject: [PATCH 1241/1270] TST Fix typo --- numpy/tests/test_numpy_version.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index 01f5ea0e199a..eddd0710d0f2 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -37,8 +37,8 @@ def test_valid_numpy_version(): def test_short_version(): # Check numpy.short_version actually exists if np.version.release: - assert_(numpy.__version__ == numpy.version.short_version, + assert_(np.__version__ == np.version.short_version, "short_version in release") else: - assert_(numpy.__version__.split("+")[0] == numpy.version.short_version, + assert_(np.__version__.split("+")[0] == np.version.short_version, "short_version not in release") From a9594086c765b3b6d81c32d55eaa9610b3f50a86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Kieffer?= Date: Mon, 31 May 2021 20:23:21 +0200 Subject: [PATCH 1242/1270] TST fix comment of assert+indentation --- numpy/tests/test_numpy_version.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index eddd0710d0f2..bccbcb8e9cf7 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -38,7 +38,7 @@ def test_short_version(): # Check numpy.short_version actually exists if np.version.release: assert_(np.__version__ == np.version.short_version, - "short_version in release") + "short_version mismatch in release version") else: assert_(np.__version__.split("+")[0] == np.version.short_version, - "short_version not in release") + "short_version mismatch in development version") From 334b7cd7bdcebbeb75e2e45b4aa55a8b22176832 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 4 Jun 2021 12:32:10 -0500 Subject: [PATCH 1243/1270] API: Delay string and number promotion deprecation/future warning Unfortunately, this seems to cause some pain in pandas at least to circumvent the warning. Delaying may complicate the creation of ufuncs for strings, but otherwise should not be a big problem. We have to make sure that we can reimplement the change quickly though, it is an important change in my opinion. --- doc/source/release/1.21.0-notes.rst | 45 ++++++++---------- numpy/core/src/multiarray/dtypemeta.c | 13 ------ numpy/core/tests/test_deprecations.py | 35 -------------- numpy/core/tests/test_half.py | 6 +-- numpy/core/tests/test_numeric.py | 67 ++++++++++++--------------- numpy/core/tests/test_regression.py | 4 +- numpy/lib/tests/test_regression.py | 3 +- 7 files changed, 54 insertions(+), 119 deletions(-) diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst index 4b6d4d11595f..754c90aa9283 100644 --- a/doc/source/release/1.21.0-notes.rst +++ b/doc/source/release/1.21.0-notes.rst @@ -121,40 +121,35 @@ The methods in question are: (`gh-19031 `__) -Future Changes -============== +Expired deprecations +==================== + +* The ``shape`` argument `numpy.unravel_index` cannot be passed + as ``dims`` keyword argument anymore. (Was deprecated in NumPy 1.16.) 
-Promotion of strings with numbers and bools will be deprecated --------------------------------------------------------------- -Any promotion of numbers and strings is deprecated and will -give a ``FutureWarning`` the main affected functionalities -are: + (`gh-17900 `__) -* `~numpy.promote_types` and `~numpy.result_type` which will raise - an error in this case in the future. -* `~numpy.concatenate` will raise an error when concatenating a string - and numeric array. You can use ``dtype="S"`` to explicitly request - a string result. -* `~numpy.array` and related functions will start returning ``object`` - arrays because these functions use ``object`` as a fallback when - no common dtype can be found. However, it may happen that future - releases of NumPy will generally error in these cases. +* The function ``PyUFunc_GenericFunction`` has been disabled. + It was deprecated in NumPy 1.19. Users should call the ufunc + directly using the Python API. -This will mainly affect code such as:: + (`gh-18697 `__) - np.asarray(['string', 0]) +* The function ``PyUFunc_SetUsesArraysAsData`` has been disabled. + It was deprecated in NumPy 1.19. -and:: + (`gh-18697 `__) - np.concatenate((['string'], [0])) +Remove deprecated ``PolyBase`` and unused ``PolyError`` and ``PolyDomainError`` +------------------------------------------------------------------------------- -in both cases adding ``dtype="U"`` or ``dtype="S"`` will give the -previous (string) result, while ``dtype=object`` will ensure an array with -object dtype is returned. +The class ``PolyBase`` has been removed (deprecated in numpy 1.9.0). Please use +the abstract ``ABCPolyBase`` class instead. -Comparisons, universal functions, and casting are not affected by this. +Furthermore, the unused ``PolyError`` and ``PolyDomainError`` exceptions are +removed from the `numpy.polynomial`. -(`gh-18116 `__) +(`gh-18963 `__) Compatibility notes diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index 40ca9ee2a396..4ee721964a91 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -415,19 +415,6 @@ string_unicode_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) Py_INCREF(Py_NotImplemented); return (PyArray_DTypeMeta *)Py_NotImplemented; } - if (other->type_num != NPY_STRING && other->type_num != NPY_UNICODE) { - /* Deprecated 2020-12-19, NumPy 1.21. */ - if (DEPRECATE_FUTUREWARNING( - "Promotion of numbers and bools to strings is deprecated. " - "In the future, code such as `np.concatenate((['string'], [0]))` " - "will raise an error, while `np.asarray(['string', 0])` will " - "return an array with `dtype=object`. To avoid the warning " - "while retaining a string result use `dtype='U'` (or 'S'). " - "To get an array of Python objects use `dtype=object`. " - "(Warning added in NumPy 1.21)") < 0) { - return NULL; - } - } /* * The builtin types are ordered by complexity (aside from object) here. 
* Arguably, we should not consider numbers and strings "common", but diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index ffe0147b2d08..42e632e4aa2a 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -1105,41 +1105,6 @@ def check_parametrize(base, power, expected): self.assert_deprecated(_test_parametrize) -class TestStringPromotion(_DeprecationTestCase): - # Deprecated 2020-12-19, NumPy 1.21 - warning_cls = FutureWarning - message = "Promotion of numbers and bools to strings is deprecated." - - @pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG") - @pytest.mark.parametrize("string_dt", ["S", "U"]) - def test_deprecated(self, dtype, string_dt): - self.assert_deprecated(lambda: np.promote_types(dtype, string_dt)) - - # concatenate has to be able to promote to find the result dtype: - arr1 = np.ones(3, dtype=dtype) - arr2 = np.ones(3, dtype=string_dt) - self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=0)) - self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=None)) - - self.assert_deprecated(lambda: np.array([arr1[0], arr2[0]])) - - @pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG") - @pytest.mark.parametrize("string_dt", ["S", "U"]) - def test_not_deprecated(self, dtype, string_dt): - # The ufunc type resolvers run into this, but giving a futurewarning - # here is unnecessary (it ends up as an error anyway), so test that - # no warning is given: - arr1 = np.ones(3, dtype=dtype) - arr2 = np.ones(3, dtype=string_dt) - - # Adding two arrays uses result_type normally, which would fail: - with pytest.raises(TypeError): - self.assert_not_deprecated(lambda: arr1 + arr2) - # np.equal uses a different type resolver: - with pytest.raises(TypeError): - self.assert_not_deprecated(lambda: np.equal(arr1, arr2)) - - class TestSingleElementSignature(_DeprecationTestCase): # Deprecated 2021-04-01, NumPy 1.21 message = r"The use of a length 1" diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py index 449a01d211f1..1b6fd21e14bb 100644 --- a/numpy/core/tests/test_half.py +++ b/numpy/core/tests/test_half.py @@ -71,10 +71,8 @@ def test_half_conversions(self): def test_half_conversion_to_string(self, string_dt): # Currently uses S/U32 (which is sufficient for float32) expected_dt = np.dtype(f"{string_dt}32") - with pytest.warns(FutureWarning): - assert np.promote_types(np.float16, string_dt) == expected_dt - with pytest.warns(FutureWarning): - assert np.promote_types(string_dt, np.float16) == expected_dt + assert np.promote_types(np.float16, string_dt) == expected_dt + assert np.promote_types(string_dt, np.float16) == expected_dt arr = np.ones(3, dtype=np.float16).astype(string_dt) assert arr.dtype == expected_dt diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index aba90ece5e11..f5113150e8f7 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -848,12 +848,10 @@ def test_promote_types_endian(self): assert_equal(np.promote_types('i8', '>i8'), np.dtype('i8')) - with pytest.warns(FutureWarning, - match="Promotion of numbers and bools to strings"): - assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) - assert_equal(np.promote_types('U16', '>i8'), np.dtype('U21')) - assert_equal(np.promote_types('i8', '>U16'), np.dtype('U21')) + assert_equal(np.promote_types('U16', '>i8'), np.dtype('U21')) + assert_equal(np.promote_types('S5', '>U8'), np.dtype('U8')) @@ -901,37 +899,32 @@ def 
test_promote_types_strings(self, swap, string_dtype): S = string_dtype - with pytest.warns(FutureWarning, - match="Promotion of numbers and bools to strings") as record: - # Promote numeric with unsized string: - assert_equal(promote_types('bool', S), np.dtype(S+'5')) - assert_equal(promote_types('b', S), np.dtype(S+'4')) - assert_equal(promote_types('u1', S), np.dtype(S+'3')) - assert_equal(promote_types('u2', S), np.dtype(S+'5')) - assert_equal(promote_types('u4', S), np.dtype(S+'10')) - assert_equal(promote_types('u8', S), np.dtype(S+'20')) - assert_equal(promote_types('i1', S), np.dtype(S+'4')) - assert_equal(promote_types('i2', S), np.dtype(S+'6')) - assert_equal(promote_types('i4', S), np.dtype(S+'11')) - assert_equal(promote_types('i8', S), np.dtype(S+'21')) - # Promote numeric with sized string: - assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5')) - assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('b', S+'1'), np.dtype(S+'4')) - assert_equal(promote_types('b', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3')) - assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5')) - assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10')) - assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20')) - assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30')) - # Promote with object: - assert_equal(promote_types('O', S+'30'), np.dtype('O')) - - assert len(record) == 22 # each string promotion gave one warning - + # Promote numeric with unsized string: + assert_equal(promote_types('bool', S), np.dtype(S+'5')) + assert_equal(promote_types('b', S), np.dtype(S+'4')) + assert_equal(promote_types('u1', S), np.dtype(S+'3')) + assert_equal(promote_types('u2', S), np.dtype(S+'5')) + assert_equal(promote_types('u4', S), np.dtype(S+'10')) + assert_equal(promote_types('u8', S), np.dtype(S+'20')) + assert_equal(promote_types('i1', S), np.dtype(S+'4')) + assert_equal(promote_types('i2', S), np.dtype(S+'6')) + assert_equal(promote_types('i4', S), np.dtype(S+'11')) + assert_equal(promote_types('i8', S), np.dtype(S+'21')) + # Promote numeric with sized string: + assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5')) + assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('b', S+'1'), np.dtype(S+'4')) + assert_equal(promote_types('b', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3')) + assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5')) + assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10')) + assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20')) + assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30')) + # Promote with object: + assert_equal(promote_types('O', S+'30'), np.dtype('O')) @pytest.mark.parametrize(["dtype1", "dtype2"], [[np.dtype("V6"), np.dtype("V10")], diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index dbfb75c9a897..312d0683de0e 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -782,9 +782,7 @@ def test_mem_string_arr(self): # Ticket #514 s = 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" t = [] - with pytest.warns(FutureWarning, - match="Promotion of numbers and bools to strings"): - np.hstack((t, s)) + np.hstack((t, s)) def test_arr_transpose(self): # Ticket #516 diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 94fac7ef0079..373226277acd 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -64,8 +64,7 @@ def test_mem_polymul(self): def test_mem_string_concat(self): # Ticket #469 x = np.array([]) - with pytest.warns(FutureWarning): - np.append(x, 'asdasd\tasdasd') + np.append(x, 'asdasd\tasdasd') def test_poly_div(self): # Ticket #553 From 838aae0efcca716241ec8cb43b8295be3a5b92bc Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 26 May 2021 00:34:57 +0200 Subject: [PATCH 1244/1270] BUG, SIMD: Fix detect host/native CPU features on ICC during compile-time Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, whether or not the build options for those features are specified. Therefore, we must test #definitions of CPU features when option native/host is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise the test will be broken and leads to enable all possible features. --- numpy/distutils/ccompiler_opt.py | 57 ++++++++++++++++--------- numpy/distutils/checks/cpu_avx.c | 13 ++++++ numpy/distutils/checks/cpu_avx2.c | 13 ++++++ numpy/distutils/checks/cpu_avx512_clx.c | 13 ++++++ numpy/distutils/checks/cpu_avx512_cnl.c | 13 ++++++ numpy/distutils/checks/cpu_avx512_icl.c | 13 ++++++ numpy/distutils/checks/cpu_avx512_knl.c | 13 ++++++ numpy/distutils/checks/cpu_avx512_knm.c | 13 ++++++ numpy/distutils/checks/cpu_avx512_skx.c | 13 ++++++ numpy/distutils/checks/cpu_avx512cd.c | 13 ++++++ numpy/distutils/checks/cpu_avx512f.c | 13 ++++++ numpy/distutils/checks/cpu_f16c.c | 13 ++++++ numpy/distutils/checks/cpu_fma3.c | 13 ++++++ numpy/distutils/checks/cpu_popcnt.c | 15 ++++++- numpy/distutils/checks/cpu_sse.c | 13 ++++++ numpy/distutils/checks/cpu_sse2.c | 13 ++++++ numpy/distutils/checks/cpu_sse3.c | 13 ++++++ numpy/distutils/checks/cpu_sse41.c | 13 ++++++ numpy/distutils/checks/cpu_sse42.c | 13 ++++++ numpy/distutils/checks/cpu_ssse3.c | 13 ++++++ 20 files changed, 284 insertions(+), 22 deletions(-) diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index ae1e6a180625..47d07ad4ab22 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -552,7 +552,7 @@ def dist_compile(self, sources, flags, ccompiler=None, **kwargs): ccompiler = self._ccompiler return ccompiler.compile(sources, extra_postargs=flags, **kwargs) - def dist_test(self, source, flags): + def dist_test(self, source, flags, macros=[]): """Return True if 'CCompiler.compile()' able to compile a source file with certain flags. """ @@ -569,7 +569,7 @@ def dist_test(self, source, flags): test = False try: self.dist_compile( - [source], flags, output_dir=self.conf_tmp_path + [source], flags, macros=macros, output_dir=self.conf_tmp_path ) test = True except CompileError as e: @@ -1172,20 +1172,23 @@ def __init__(self): self.feature_is_cached = True - def feature_names(self, names=None, force_flags=None): + def feature_names(self, names=None, force_flags=None, macros=[]): """ Returns a set of CPU feature names that supported by platform and the **C** compiler. 
Parameters ---------- - 'names': sequence or None, optional + names: sequence or None, optional Specify certain CPU features to test it against the **C** compiler. if None(default), it will test all current supported features. **Note**: feature names must be in upper-case. - 'force_flags': list or None, optional - If None(default), default compiler flags for every CPU feature will be used - during the test. + force_flags: list or None, optional + If None(default), default compiler flags for every CPU feature will + be used during the test. + + macros : list of tuples, optional + A list of C macro definitions. """ assert( names is None or ( @@ -1198,7 +1201,9 @@ def feature_names(self, names=None, force_flags=None): names = self.feature_supported.keys() supported_names = set() for f in names: - if self.feature_is_supported(f, force_flags=force_flags): + if self.feature_is_supported( + f, force_flags=force_flags, macros=macros + ): supported_names.add(f) return supported_names @@ -1433,20 +1438,23 @@ def feature_flags(self, names): return self.cc_normalize_flags(flags) @_Cache.me - def feature_test(self, name, force_flags=None): + def feature_test(self, name, force_flags=None, macros=[]): """ Test a certain CPU feature against the compiler through its own check file. Parameters ---------- - 'name': str + name: str Supported CPU feature name. - 'force_flags': list or None, optional + force_flags: list or None, optional If None(default), the returned flags from `feature_flags()` will be used. - """ + + macros : list of tuples, optional + A list of C macro definitions. + """ if force_flags is None: force_flags = self.feature_flags(name) @@ -1462,24 +1470,29 @@ def feature_test(self, name, force_flags=None): if not os.path.exists(test_path): self.dist_fatal("feature test file is not exist", test_path) - test = self.dist_test(test_path, force_flags + self.cc_flags["werror"]) + test = self.dist_test( + test_path, force_flags + self.cc_flags["werror"], macros=macros + ) if not test: self.dist_log("testing failed", stderr=True) return test @_Cache.me - def feature_is_supported(self, name, force_flags=None): + def feature_is_supported(self, name, force_flags=None, macros=[]): """ Check if a certain CPU feature is supported by the platform and compiler. Parameters ---------- - 'name': str + name: str CPU feature name in uppercase. - 'force_flags': list or None, optional - If None(default), default compiler flags for every CPU feature will be used - during test. + force_flags: list or None, optional + If None(default), default compiler flags for every CPU feature will + be used during test. + + macros : list of tuples, optional + A list of C macro definitions. 
""" assert(name.isupper()) assert(force_flags is None or isinstance(force_flags, list)) @@ -1487,9 +1500,9 @@ def feature_is_supported(self, name, force_flags=None): supported = name in self.feature_supported if supported: for impl in self.feature_implies(name): - if not self.feature_test(impl, force_flags): + if not self.feature_test(impl, force_flags, macros=macros): return False - if not self.feature_test(name, force_flags): + if not self.feature_test(name, force_flags, macros=macros): return False return supported @@ -1812,7 +1825,9 @@ def _parse_arg_features(self, arg_name, req_features): self.dist_fatal(arg_name, "native option isn't supported by the compiler" ) - features_to = self.feature_names(force_flags=native) + features_to = self.feature_names( + force_flags=native, macros=[("DETECT_FEATURES", 1)] + ) elif TOK == "MAX": features_to = self.feature_supported.keys() elif TOK == "MIN": diff --git a/numpy/distutils/checks/cpu_avx.c b/numpy/distutils/checks/cpu_avx.c index cee4f36ab3f4..26ae18466740 100644 --- a/numpy/distutils/checks/cpu_avx.c +++ b/numpy/distutils/checks/cpu_avx.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX__ + #error "HOST/ARCH doesn't support AVX" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_avx2.c b/numpy/distutils/checks/cpu_avx2.c index 15b6c919b089..ddde868f1b58 100644 --- a/numpy/distutils/checks/cpu_avx2.c +++ b/numpy/distutils/checks/cpu_avx2.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX2__ + #error "HOST/ARCH doesn't support AVX2" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_avx512_clx.c b/numpy/distutils/checks/cpu_avx512_clx.c index 4baa8fea0475..81edcd067005 100644 --- a/numpy/distutils/checks/cpu_avx512_clx.c +++ b/numpy/distutils/checks/cpu_avx512_clx.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
+ */ + #ifndef __AVX512VNNI__ + #error "HOST/ARCH doesn't support CascadeLake AVX512 features" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_avx512_cnl.c b/numpy/distutils/checks/cpu_avx512_cnl.c index f2ff3725ea93..5799f122b511 100644 --- a/numpy/distutils/checks/cpu_avx512_cnl.c +++ b/numpy/distutils/checks/cpu_avx512_cnl.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__) + #error "HOST/ARCH doesn't support CannonLake AVX512 features" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_avx512_icl.c b/numpy/distutils/checks/cpu_avx512_icl.c index 085b947e05bf..3cf44d73164b 100644 --- a/numpy/distutils/checks/cpu_avx512_icl.c +++ b/numpy/distutils/checks/cpu_avx512_icl.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512VPOPCNTDQ__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__) + #error "HOST/ARCH doesn't support IceLake AVX512 features" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_avx512_knl.c b/numpy/distutils/checks/cpu_avx512_knl.c index 10ba52bcc5a7..b3f4f6976514 100644 --- a/numpy/distutils/checks/cpu_avx512_knl.c +++ b/numpy/distutils/checks/cpu_avx512_knl.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512ER__) || !defined(__AVX512PF__) + #error "HOST/ARCH doesn't support Knights Landing AVX512 features" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_avx512_knm.c b/numpy/distutils/checks/cpu_avx512_knm.c index d03b0fe8beb3..2c426462bd34 100644 --- a/numpy/distutils/checks/cpu_avx512_knm.c +++ b/numpy/distutils/checks/cpu_avx512_knm.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
+ */ + #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__) + #error "HOST/ARCH doesn't support Knights Mill AVX512 features" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_avx512_skx.c b/numpy/distutils/checks/cpu_avx512_skx.c index 04761876295f..8840efb7e5ee 100644 --- a/numpy/distutils/checks/cpu_avx512_skx.c +++ b/numpy/distutils/checks/cpu_avx512_skx.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__) + #error "HOST/ARCH doesn't support SkyLake AVX512 features" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_avx512cd.c b/numpy/distutils/checks/cpu_avx512cd.c index 52f4c7f8be0d..5e29c79e34a7 100644 --- a/numpy/distutils/checks/cpu_avx512cd.c +++ b/numpy/distutils/checks/cpu_avx512cd.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX512CD__ + #error "HOST/ARCH doesn't support AVX512CD" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_avx512f.c b/numpy/distutils/checks/cpu_avx512f.c index 22d861471ced..d0eb7b1ad5c6 100644 --- a/numpy/distutils/checks/cpu_avx512f.c +++ b/numpy/distutils/checks/cpu_avx512f.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX512F__ + #error "HOST/ARCH doesn't support AVX512F" + #endif +#endif + #include int main(int argc, char **argv) diff --git a/numpy/distutils/checks/cpu_f16c.c b/numpy/distutils/checks/cpu_f16c.c index 678c582e410c..fdf36cec580c 100644 --- a/numpy/distutils/checks/cpu_f16c.c +++ b/numpy/distutils/checks/cpu_f16c.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
+ */ + #ifndef __F16C__ + #error "HOST/ARCH doesn't support F16C" + #endif +#endif + #include #include diff --git a/numpy/distutils/checks/cpu_fma3.c b/numpy/distutils/checks/cpu_fma3.c index 2f879c3b357f..bfeef22b5f0e 100644 --- a/numpy/distutils/checks/cpu_fma3.c +++ b/numpy/distutils/checks/cpu_fma3.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__FMA__) && !defined(__AVX2__) + #error "HOST/ARCH doesn't support FMA3" + #endif +#endif + #include #include diff --git a/numpy/distutils/checks/cpu_popcnt.c b/numpy/distutils/checks/cpu_popcnt.c index 540c98dab05d..813c461f05b3 100644 --- a/numpy/distutils/checks/cpu_popcnt.c +++ b/numpy/distutils/checks/cpu_popcnt.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env vr `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__SSE4_2__) && !defined(__POPCNT__) + #error "HOST/ARCH doesn't support POPCNT" + #endif +#endif + #ifdef _MSC_VER #include #else @@ -11,7 +24,7 @@ int main(int argc, char **argv) unsigned long long a = *((unsigned long long*)argv[argc-1]); unsigned int b = *((unsigned int*)argv[argc-2]); -#if defined(_M_X64) || defined(__x86_64__) +#if defined(_M_X64) || defined(__x86_64__) a = _mm_popcnt_u64(a); #endif b = _mm_popcnt_u32(b); diff --git a/numpy/distutils/checks/cpu_sse.c b/numpy/distutils/checks/cpu_sse.c index bb98bf63c0b9..602b74e7bc43 100644 --- a/numpy/distutils/checks/cpu_sse.c +++ b/numpy/distutils/checks/cpu_sse.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE__ + #error "HOST/ARCH doesn't support SSE" + #endif +#endif + #include int main(void) diff --git a/numpy/distutils/checks/cpu_sse2.c b/numpy/distutils/checks/cpu_sse2.c index 658afc9b4abf..33826a9ed1a5 100644 --- a/numpy/distutils/checks/cpu_sse2.c +++ b/numpy/distutils/checks/cpu_sse2.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
+ */ + #ifndef __SSE2__ + #error "HOST/ARCH doesn't support SSE2" + #endif +#endif + #include int main(void) diff --git a/numpy/distutils/checks/cpu_sse3.c b/numpy/distutils/checks/cpu_sse3.c index aece1e60174c..d47c20f74be1 100644 --- a/numpy/distutils/checks/cpu_sse3.c +++ b/numpy/distutils/checks/cpu_sse3.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE3__ + #error "HOST/ARCH doesn't support SSE3" + #endif +#endif + #include int main(void) diff --git a/numpy/distutils/checks/cpu_sse41.c b/numpy/distutils/checks/cpu_sse41.c index bfdb9feacc47..7c80238a3bc1 100644 --- a/numpy/distutils/checks/cpu_sse41.c +++ b/numpy/distutils/checks/cpu_sse41.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE4_1__ + #error "HOST/ARCH doesn't support SSE41" + #endif +#endif + #include int main(void) diff --git a/numpy/distutils/checks/cpu_sse42.c b/numpy/distutils/checks/cpu_sse42.c index 24f5d93fe1d4..f60e18f3c4f1 100644 --- a/numpy/distutils/checks/cpu_sse42.c +++ b/numpy/distutils/checks/cpu_sse42.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE4_2__ + #error "HOST/ARCH doesn't support SSE42" + #endif +#endif + #include int main(void) diff --git a/numpy/distutils/checks/cpu_ssse3.c b/numpy/distutils/checks/cpu_ssse3.c index ad0abc1e66fb..fde390d6a37d 100644 --- a/numpy/distutils/checks/cpu_ssse3.c +++ b/numpy/distutils/checks/cpu_ssse3.c @@ -1,3 +1,16 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSSE3__ + #error "HOST/ARCH doesn't support SSSE3" + #endif +#endif + #include int main(void) From e5d26244ac933dd98976620a6a6a62b40b9cbfa3 Mon Sep 17 00:00:00 2001 From: i-shenl Date: Tue, 9 Mar 2021 07:38:30 -0800 Subject: [PATCH 1245/1270] Set c99 to intel icc compiler so numpy will build Numpy will not build if this is not set because it has code that follows the c99 standard. icc is set to c89 by default. 
Look below at IntelCCompilerW which is the icc equivalent on Windows and that one already has c99 set. --- numpy/distutils/intelccompiler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py index 0388ad577518..0fa1c11dd676 100644 --- a/numpy/distutils/intelccompiler.py +++ b/numpy/distutils/intelccompiler.py @@ -58,7 +58,7 @@ def __init__(self, verbose=0, dry_run=0, force=0): v = self.get_version() mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 ' + self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' '-fomit-frame-pointer -{}').format(mpopt) compiler = self.cc_exe From bbdc753a9ccb038a13ad4ec8e3cb9361483dc0de Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 7 Jun 2021 13:17:45 -0500 Subject: [PATCH 1246/1270] NEP: Accept NEP 35 as final This accepts NEP 35 as final. There has been no discussion about it in a long time. The current mode is strict about type input (`like=` must be an array-like). So that most of the "open" points are OK to remain open. Unless we need to discuss the name `like` or the fact that we pass an array-like itself, the previously noted open points gh-17075 all seem not very relevant anymore. --- ...nep-0035-array-creation-dispatch-with-array-function.rst | 4 ++-- numpy/core/overrides.py | 6 +----- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst index 427d91b7d4d6..3a689a4dcd24 100644 --- a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst +++ b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst @@ -5,11 +5,11 @@ NEP 35 — Array Creation Dispatching With __array_function__ =========================================================== :Author: Peter Andreas Entschev -:Status: Draft +:Status: Final :Type: Standards Track :Created: 2019-10-15 :Updated: 2020-11-06 -:Resolution: +:Resolution: https://mail.python.org/pipermail/numpy-discussion/2021-May/081761.html Abstract -------- diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py index c2b5fb7fa34d..70085d896fe7 100644 --- a/numpy/core/overrides.py +++ b/numpy/core/overrides.py @@ -18,11 +18,7 @@ NumPy arrays. If an array-like passed in as ``like`` supports the ``__array_function__`` protocol, the result will be defined by it. In this case, it ensures the creation of an array object - compatible with that passed in via this argument. - - .. note:: - The ``like`` keyword is an experimental feature pending on - acceptance of :ref:`NEP 35 `.""" + compatible with that passed in via this argument.""" ) def set_array_function_like_doc(public_api): From 82622df5b5d1850b1c376ad7b6425849083d22ea Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 5 Jun 2021 13:34:23 -0500 Subject: [PATCH 1247/1270] MAINT,BUG: Adapt `castingimpl.casting` to denote a minimal level This also allows skipping the call to `resolve_descriptors()` when the casting level guarantees that the cast will be successfull. This is sufficient to "tape over" problems with object to string casts, because it means that an "unsafe" cast is known to work (no matter the string length). 
Note that there is a theoretical anomaly remaining: Casting object to string without first discovering the correct string length will fail (because it is unclear which string length to pick), even though no matter which string length is picked "unsafe" is correct. NumPy should never do this, we always first discover the string length when casting from `object` DType specifically (object is special that way -- not string itself). In general, this seems fine to me, in practice, the behaviour would probably not be quite fixed yet. --- doc/neps/nep-0042-new-dtypes.rst | 9 +- doc/neps/nep-0043-extensible-ufuncs.rst | 10 +- numpy/core/src/multiarray/array_method.c | 65 +++--- numpy/core/src/multiarray/convert_datatype.c | 207 ++++++++++++------- numpy/core/src/multiarray/datetime.c | 3 +- numpy/core/tests/test_api.py | 13 ++ 6 files changed, 201 insertions(+), 106 deletions(-) diff --git a/doc/neps/nep-0042-new-dtypes.rst b/doc/neps/nep-0042-new-dtypes.rst index 1738bd1abf3e..bb85f1d10cba 100644 --- a/doc/neps/nep-0042-new-dtypes.rst +++ b/doc/neps/nep-0042-new-dtypes.rst @@ -1334,7 +1334,7 @@ Although verbose, the API will mimic the one for creating a new DType: typedef struct{ int flags; /* e.g. whether the cast requires the API */ int nin, nout; /* Number of Input and outputs (always 1) */ - NPY_CASTING casting; /* The default casting level */ + NPY_CASTING casting; /* The "minimal casting level" */ PyArray_DTypeMeta *dtypes; /* input and output DType class */ /* NULL terminated slots defining the methods */ PyType_Slot *slots; @@ -1342,7 +1342,7 @@ Although verbose, the API will mimic the one for creating a new DType: The focus differs between casting and general ufuncs. For example, for casts ``nin == nout == 1`` is always correct, while for ufuncs ``casting`` is -expected to be usually `"safe"`. +expected to be usually `"no"`. **Notes:** We may initially allow users to define only a single loop. Internally NumPy optimizes far more, and this should be made public @@ -1357,6 +1357,11 @@ incrementally in one of two ways: * Or, more likely, expose the ``get_loop`` function which is passed additional information, such as the fixed strides (similar to our internal API). +* The casting level denotes the minimal guaranteed casting level and can be + ``-1`` if the cast may be impossible. For most non-parametric casts, this + value will be the casting level. NumPy may skip the ``resolve_descriptors`` + call for ``np.can_cast()`` when the result is ``True`` based on this level. + The example does not yet include setup and error handling. Since these are similar to the UFunc machinery, they will be defined in :ref:`NEP 43 ` and then incorporated identically into casting. diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst index 3c64077284de..cd73108e4fbb 100644 --- a/doc/neps/nep-0043-extensible-ufuncs.rst +++ b/doc/neps/nep-0043-extensible-ufuncs.rst @@ -262,8 +262,8 @@ to define string equality, will be added to a ufunc. 
if given_descrs[2] is None: out_descr = DTypes[2]() - # The operation is always "safe" casting (most ufuncs are) - return (given_descrs[0], given_descrs[1], out_descr), "safe" + # The operation is always "no" casting (most ufuncs are) + return (given_descrs[0], given_descrs[1], out_descr), "no" def strided_loop(context, dimensions, data, strides, innerloop_data): """The 1-D strided loop, similar to those used in current ufuncs""" @@ -434,7 +434,7 @@ a new ``ArrayMethod`` object: # Casting safety information (almost always "safe", necessary to # unify casting and universal functions) - casting: Casting = "safe" + casting: Casting = "no" # More general flags: flags: int @@ -751,7 +751,7 @@ This step is required to allocate output arrays and has to happen before casting can be prepared. While the returned casting-safety (``NPY_CASTING``) will almost always be -"safe" for universal functions, including it has two big advantages: +"no" for universal functions, including it has two big advantages: * ``-1`` indicates that an error occurred. If a Python error is set, it will be raised. If no Python error is set this will be considered an "impossible" @@ -767,7 +767,7 @@ While the returned casting-safety (``NPY_CASTING``) will almost always be perspective. Currently, this would use ``int64 + int64 -> int64`` and then cast to ``int32``. An implementation that skips the cast would have to signal that it effectively includes the "same-kind" cast and is - thus not considered "safe". + thus not considered "no". ``get_loop`` method diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c index e13da12de5f9..3ecc20d1d60c 100644 --- a/numpy/core/src/multiarray/array_method.c +++ b/numpy/core/src/multiarray/array_method.c @@ -210,10 +210,12 @@ validate_spec(PyArrayMethod_Spec *spec) case NPY_UNSAFE_CASTING: break; default: - PyErr_Format(PyExc_TypeError, - "ArrayMethod has invalid casting `%d`. (method: %s)", - spec->casting, spec->name); - return -1; + if (spec->casting != -1) { + PyErr_Format(PyExc_TypeError, + "ArrayMethod has invalid casting `%d`. (method: %s)", + spec->casting, spec->name); + return -1; + } } for (int i = 0; i < nargs; i++) { @@ -301,6 +303,13 @@ fill_arraymethod_from_slots( /* Check whether the slots are valid: */ if (meth->resolve_descriptors == &default_resolve_descriptors) { + if (spec->casting == -1) { + PyErr_Format(PyExc_TypeError, + "Cannot set casting to -1 (invalid) when not providing " + "the default `resolve_descriptors` function. " + "(method: %s)", spec->name); + return -1; + } for (int i = 0; i < meth->nin + meth->nout; i++) { if (res->dtypes[i] == NULL) { if (i < meth->nin) { @@ -573,6 +582,8 @@ boundarraymethod__resolve_descripors( /* * The casting flags should be the most generic casting level (except the * cast-is-view flag. If no input is parametric, it must match exactly. + * + * (Note that these checks are only debugging checks.) */ int parametric = 0; for (int i = 0; i < nin + nout; i++) { @@ -581,34 +592,34 @@ boundarraymethod__resolve_descripors( break; } } - if (!parametric) { - /* - * Non-parametric can only mismatch if it switches from no to equiv - * (e.g. due to byteorder changes). 
- */ - if (self->method->casting != (casting & ~_NPY_CAST_IS_VIEW) && - !(self->method->casting == NPY_NO_CASTING && - casting == NPY_EQUIV_CASTING)) { - PyErr_Format(PyExc_RuntimeError, - "resolve_descriptors cast level did not match stored one " - "(expected %d, got %d) for method %s", - self->method->casting, (casting & ~_NPY_CAST_IS_VIEW), - self->method->name); - Py_DECREF(result_tuple); - return NULL; - } - } - else { + if (self->method->casting != -1) { NPY_CASTING cast = casting & ~_NPY_CAST_IS_VIEW; - if (cast != PyArray_MinCastSafety(cast, self->method->casting)) { + if (self->method->casting != + PyArray_MinCastSafety(cast, self->method->casting)) { PyErr_Format(PyExc_RuntimeError, - "resolve_descriptors cast level did not match stored one " - "(expected %d, got %d) for method %s", - self->method->casting, (casting & ~_NPY_CAST_IS_VIEW), - self->method->name); + "resolve_descriptors cast level did not match stored one. " + "(set level is %d, got %d for method %s)", + self->method->casting, cast, self->method->name); Py_DECREF(result_tuple); return NULL; } + if (!parametric) { + /* + * Non-parametric can only mismatch if it switches from equiv to no + * (e.g. due to byteorder changes). + */ + if (cast != self->method->casting && + self->method->casting != NPY_EQUIV_CASTING) { + PyErr_Format(PyExc_RuntimeError, + "resolve_descriptors cast level changed even though " + "the cast is non-parametric where the only possible " + "change should be from equivalent to no casting. " + "(set level is %d, got %d for method %s)", + self->method->casting, cast, self->method->name); + Py_DECREF(result_tuple); + return NULL; + } + } } return Py_BuildValue("iN", casting, result_tuple); diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 01ee56d1681d..1a962ef788d7 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -358,6 +358,45 @@ PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) } +static NPY_CASTING +_get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, + PyArray_DTypeMeta *dtypes[2], PyArray_Descr *from, PyArray_Descr *to) +{ + PyArray_Descr *descrs[2] = {from, to}; + PyArray_Descr *out_descrs[2]; + + NPY_CASTING casting = castingimpl->resolve_descriptors( + castingimpl, dtypes, descrs, out_descrs); + if (casting < 0) { + return -1; + } + /* The returned descriptors may not match, requiring a second check */ + if (out_descrs[0] != descrs[0]) { + NPY_CASTING from_casting = PyArray_GetCastSafety( + descrs[0], out_descrs[0], NULL); + casting = PyArray_MinCastSafety(casting, from_casting); + if (casting < 0) { + goto finish; + } + } + if (descrs[1] != NULL && out_descrs[1] != descrs[1]) { + NPY_CASTING from_casting = PyArray_GetCastSafety( + descrs[1], out_descrs[1], NULL); + casting = PyArray_MinCastSafety(casting, from_casting); + if (casting < 0) { + goto finish; + } + } + + finish: + Py_DECREF(out_descrs[0]); + Py_DECREF(out_descrs[1]); + /* NPY_NO_CASTING has to be used for (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW) */ + assert(casting != (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW)); + return casting; +} + + /** * Given two dtype instances, find the correct casting safety. 
* @@ -375,7 +414,6 @@ NPY_NO_EXPORT NPY_CASTING PyArray_GetCastSafety( PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype) { - NPY_CASTING casting; if (to != NULL) { to_dtype = NPY_DTYPE(to); } @@ -389,41 +427,67 @@ PyArray_GetCastSafety( } PyArrayMethodObject *castingimpl = (PyArrayMethodObject *)meth; - PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype}; - PyArray_Descr *descrs[2] = {from, to}; - PyArray_Descr *out_descrs[2]; - - casting = castingimpl->resolve_descriptors( - castingimpl, dtypes, descrs, out_descrs); + NPY_CASTING casting = _get_cast_safety_from_castingimpl(castingimpl, + dtypes, from, to); Py_DECREF(meth); - if (casting < 0) { + + return casting; +} + + +/** + * Check whether a cast is safe, see also `PyArray_GetCastSafety` for + * a similiar function. Unlike GetCastSafety, this function checks the + * `castingimpl->casting` when available. This allows for two things: + * + * 1. It avoids calling `resolve_descriptors` in some cases. + * 2. Strings need to discover the length, but in some cases we know that the + * cast is valid (assuming the string length is discovered first). + * + * The latter means that a `can_cast` could return True, but the cast fail + * because the parametric type cannot guess the correct output descriptor. + * (I.e. if `object_arr.astype("S")` did _not_ inspect the objects, and the + * user would have to guess the string length.) + * + * @param casting the requested casting safety. + * @param from + * @param to The descriptor to cast to (may be NULL) + * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this + * is ignored). + * @return 0 for an invalid cast, 1 for a valid and -1 for an error. + */ +static int +PyArray_CheckCastSafety(NPY_CASTING casting, + PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype) +{ + if (to != NULL) { + to_dtype = NPY_DTYPE(to); + } + PyObject *meth = PyArray_GetCastingImpl(NPY_DTYPE(from), to_dtype); + if (meth == NULL) { return -1; } - /* The returned descriptors may not match, requiring a second check */ - if (out_descrs[0] != descrs[0]) { - NPY_CASTING from_casting = PyArray_GetCastSafety( - descrs[0], out_descrs[0], NULL); - casting = PyArray_MinCastSafety(casting, from_casting); - if (casting < 0) { - goto finish; - } + if (meth == Py_None) { + Py_DECREF(Py_None); + return -1; } - if (descrs[1] != NULL && out_descrs[1] != descrs[1]) { - NPY_CASTING from_casting = PyArray_GetCastSafety( - descrs[1], out_descrs[1], NULL); - casting = PyArray_MinCastSafety(casting, from_casting); - if (casting < 0) { - goto finish; - } + PyArrayMethodObject *castingimpl = (PyArrayMethodObject *)meth; + + if (PyArray_MinCastSafety(castingimpl->casting, casting) == casting) { + /* No need to check using `castingimpl.resolve_descriptors()` */ + return 1; } - finish: - Py_DECREF(out_descrs[0]); - Py_DECREF(out_descrs[1]); - /* NPY_NO_CASTING has to be used for (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW) */ - assert(casting != (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW)); - return casting; + PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype}; + NPY_CASTING safety = _get_cast_safety_from_castingimpl(castingimpl, + dtypes, from, to); + Py_DECREF(meth); + /* If casting is the smaller (or equal) safety we match */ + if (safety < 0) { + return -1; + } + return PyArray_MinCastSafety(safety, casting) == casting; } @@ -565,6 +629,8 @@ NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, NPY_CASTING casting) { + PyArray_DTypeMeta *to_dtype = NPY_DTYPE(to); + 
/* * NOTE: This code supports U and S, this is identical to the code * in `ctors.c` which does not allow these dtypes to be attached @@ -576,21 +642,21 @@ PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * TODO: We should grow support for `np.can_cast("d", "S")` being * different from `np.can_cast("d", "S0")` here, at least for * the python side API. + * The `to = NULL` branch, which considers "S0" to be "flexible" + * should probably be deprecated. + * (This logic is duplicated in `PyArray_CanCastArrayTo`) */ - NPY_CASTING safety; if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) { - safety = PyArray_GetCastSafety(from, NULL, NPY_DTYPE(to)); - } - else { - safety = PyArray_GetCastSafety(from, to, NPY_DTYPE(to)); + to = NULL; /* consider mainly S0 and U0 as S and U */ } - if (safety < 0) { + int is_valid = PyArray_CheckCastSafety(casting, from, to, to_dtype); + /* Clear any errors and consider this unsafe (should likely be changed) */ + if (is_valid < 0) { PyErr_Clear(); return 0; } - /* If casting is the smaller (or equal) safety we match */ - return PyArray_MinCastSafety(safety, casting) == casting; + return is_valid; } @@ -610,28 +676,22 @@ can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, /* * If the two dtypes are actually references to the same object * or if casting type is forced unsafe then always OK. + * + * TODO: Assuming that unsafe casting always works is not actually correct */ if (scal_type == to || casting == NPY_UNSAFE_CASTING ) { return 1; } - /* NOTE: This is roughly the same code as `PyArray_CanCastTypeTo`: */ - NPY_CASTING safety; - if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) { - safety = PyArray_GetCastSafety(scal_type, NULL, NPY_DTYPE(to)); - } - else { - safety = PyArray_GetCastSafety(scal_type, to, NPY_DTYPE(to)); - } - if (safety < 0) { - PyErr_Clear(); - return 0; - } - safety = PyArray_MinCastSafety(safety, casting); - if (safety == casting) { + int valid = PyArray_CheckCastSafety(casting, scal_type, to, NPY_DTYPE(to)); + if (valid == 1) { /* This is definitely a valid cast. */ return 1; } + if (valid < 0) { + /* Probably must return 0, but just keep trying for now. */ + PyErr_Clear(); + } /* * If the scalar isn't a number, value-based casting cannot kick in and @@ -692,14 +752,29 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, NPY_CASTING casting) { PyArray_Descr *from = PyArray_DESCR(arr); + PyArray_DTypeMeta *to_dtype = NPY_DTYPE(to); - /* If it's a scalar, check the value */ - if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr)) { + /* NOTE, TODO: The same logic as `PyArray_CanCastTypeTo`: */ + if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) { + to = NULL; + } + + /* + * If it's a scalar, check the value. (This only currently matters for + * numeric types and for `to == NULL` it can't be numeric.) 
+ */ + if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr) && to != NULL) { return can_cast_scalar_to(from, PyArray_DATA(arr), to, casting); } - /* Otherwise, use the standard rules */ - return PyArray_CanCastTypeTo(from, to, casting); + /* Otherwise, use the standard rules (same as `PyArray_CanCastTypeTo`) */ + int is_valid = PyArray_CheckCastSafety(casting, from, to, to_dtype); + /* Clear any errors and consider this unsafe (should likely be changed) */ + if (is_valid < 0) { + PyErr_Clear(); + return 0; + } + return is_valid; } @@ -2122,13 +2197,6 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth) meth->method->name); return -1; } - if ((meth->method->casting & ~_NPY_CAST_IS_VIEW) != NPY_NO_CASTING) { - PyErr_Format(PyExc_TypeError, - "A cast where input and output DType (class) are identical " - "must signal `no-casting`. (method: %s)", - meth->method->name); - return -1; - } if (meth->dtypes[0]->within_dtype_castingimpl != NULL) { PyErr_Format(PyExc_RuntimeError, "A cast was already added for %S -> %S. (method: %s)", @@ -2400,7 +2468,7 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) /* Find the correct casting level, and special case no-cast */ if (dtypes[0]->kind == dtypes[1]->kind && from_itemsize == to_itemsize) { - spec.casting = NPY_NO_CASTING; + spec.casting = NPY_EQUIV_CASTING; /* When there is no casting (equivalent C-types) use byteswap loops */ slots[0].slot = NPY_METH_resolve_descriptors; @@ -2558,7 +2626,6 @@ cast_to_string_resolve_descriptors( dtypes[1]->type_num == NPY_STRING); return NPY_UNSAFE_CASTING; } - assert(self->casting == NPY_SAFE_CASTING); if (loop_descrs[1]->elsize >= size) { return NPY_SAFE_CASTING; @@ -2600,9 +2667,9 @@ add_other_to_and_from_string_cast( .dtypes = dtypes, .slots = slots, }; - /* Almost everything can be safely cast to string (except unicode) */ + /* Almost everything can be same-kind cast to string (except unicode) */ if (other->type_num != NPY_UNICODE) { - spec.casting = NPY_SAFE_CASTING; + spec.casting = NPY_SAME_KIND_CASTING; /* same-kind if too short */ } else { spec.casting = NPY_UNSAFE_CASTING; @@ -2722,7 +2789,7 @@ PyArray_InitializeStringCasts(void) {0, NULL}}; PyArrayMethod_Spec spec = { .name = "string_to_string_cast", - .casting = NPY_NO_CASTING, + .casting = NPY_UNSAFE_CASTING, .nin = 1, .nout = 1, .flags = (NPY_METH_REQUIRES_PYAPI | @@ -2935,7 +3002,7 @@ PyArray_GetGenericToVoidCastingImpl(void) method->name = "any_to_void_cast"; method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = NPY_SAFE_CASTING; + method->casting = -1; method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; method->get_strided_loop = &nonstructured_to_structured_get_loop; @@ -3074,7 +3141,7 @@ PyArray_GetVoidToGenericCastingImpl(void) method->name = "void_to_any_cast"; method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = NPY_UNSAFE_CASTING; + method->casting = -1; method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; method->get_strided_loop = &structured_to_nonstructured_get_loop; @@ -3306,7 +3373,7 @@ PyArray_InitializeVoidToVoidCast(void) {0, NULL}}; PyArrayMethod_Spec spec = { .name = "void_to_void_cast", - .casting = NPY_NO_CASTING, + .casting = -1, /* may not cast at all */ .nin = 1, .nout = 1, .flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_SUPPORTS_UNALIGNED, diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index fdf4c083955d..48dc153406aa 100644 --- 
a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -3952,7 +3952,6 @@ time_to_string_resolve_descriptors( return -1; } - assert(self->casting == NPY_UNSAFE_CASTING); return NPY_UNSAFE_CASTING; } @@ -4059,7 +4058,7 @@ PyArray_InitializeDatetimeCasts() .name = "datetime_casts", .nin = 1, .nout = 1, - .casting = NPY_NO_CASTING, + .casting = NPY_UNSAFE_CASTING, .flags = NPY_METH_SUPPORTS_UNALIGNED, .slots = slots, .dtypes = dtypes, diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py index 9e99e0bc3a06..291cdae8908f 100644 --- a/numpy/core/tests/test_api.py +++ b/numpy/core/tests/test_api.py @@ -281,6 +281,19 @@ class MyNDArray(np.ndarray): a = np.array(1000, dtype='i4') assert_raises(TypeError, a.astype, 'U1', casting='safe') +@pytest.mark.parametrize("dt", ["S", "U"]) +def test_array_astype_to_string_discovery_empty(dt): + # See also gh-19085 + arr = np.array([""], dtype=object) + # Note, the itemsize is the `0 -> 1` logic, which should change. + # The important part the test is rather that it does not error. + assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize + + # check the same thing for `np.can_cast` (since it accepts arrays) + assert np.can_cast(arr, dt, casting="unsafe") + assert not np.can_cast(arr, dt, casting="same_kind") + # as well as for the object as a descriptor: + assert np.can_cast("O", dt, casting="unsafe") @pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"]) def test_array_astype_to_void(dt): From 5567fa98069f412e7b69cd0c1088329145e1c635 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 4 Jun 2021 10:55:33 -0600 Subject: [PATCH 1248/1270] DOC: Prepare for NumPy 1.20.0rc2 release. --- .mailmap | 1 + doc/changelog/1.21.0-changelog.rst | 17 +++++++++++++++-- doc/source/release/1.21.0-notes.rst | 3 ++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/.mailmap b/.mailmap index 3fb20e3ce46b..3f45904fcd8b 100644 --- a/.mailmap +++ b/.mailmap @@ -192,6 +192,7 @@ Helder Oliveira Hemil Desai Hiroyuki V. Yamazaki Hugo van Kemenade +I-Shen Leong Inessa Pawson Irvin Probst Isabela Presedo-Floyd diff --git a/doc/changelog/1.21.0-changelog.rst b/doc/changelog/1.21.0-changelog.rst index a0891b232120..b9ccd748ea38 100644 --- a/doc/changelog/1.21.0-changelog.rst +++ b/doc/changelog/1.21.0-changelog.rst @@ -2,7 +2,7 @@ Contributors ============ -A total of 171 people contributed to this release. People with a "+" by their +A total of 173 people contributed to this release. People with a "+" by their names contributed a patch for the first time. * @8bitmp3 + @@ -79,6 +79,7 @@ names contributed a patch for the first time. * Greg Lucas + * Hollow Man + * Holly Corbett + +* I-Shen Leong + * Inessa Pawson * Isabela Presedo-Floyd * Ismael Jimenez + @@ -90,6 +91,7 @@ names contributed a patch for the first time. * Jody Klymak + * Joseph Fox-Rabinovitz * Jérome Eertmans + +* Jérôme Kieffer + * Kamil Choudhury + * Kasia Leszek + * Keller Meier + @@ -180,7 +182,7 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 561 pull requests were merged for this release. +A total of 571 pull requests were merged for this release. * `#13578 `__: DEP: Deprecate `data_type.dtype` if attribute is not already... * `#15269 `__: ENH: Implement faster keyword argument parsing capable of ``METH_FASTCALL`` @@ -743,3 +745,14 @@ A total of 561 pull requests were merged for this release. 
* `#19073 `__: REL: Prepare 1.21.0 release * `#19074 `__: BUG: Fix compile-time test of POPCNT * `#19075 `__: BUG: Fix test_numpy_version. +* `#19094 `__: BUG: Fixed an issue wherein `_GenericAlias.__getitem__` would... +* `#19100 `__: BUG: Linter should only run on pull requests. +* `#19120 `__: BUG: Fix setup.py to work in maintenance branches. +* `#19144 `__: BUG: expose short_version as previously in version.py +* `#19175 `__: API: Delay string and number promotion deprecation/future warning +* `#19178 `__: BUG, SIMD: Fix detect host/native CPU features on ICC at compile-time +* `#19180 `__: BUG: Add -std=c99 to intel icc compiler flags on linux +* `#19193 `__: NEP: Accept NEP 35 as final +* `#19194 `__: MAINT, BUG: Adapt `castingimpl.casting` to denote a minimal level +* `#19197 `__: DOC: Prepare for NumPy 1.20.0rc2 release. + diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst index 754c90aa9283..5b634aa5844e 100644 --- a/doc/source/release/1.21.0-notes.rst +++ b/doc/source/release/1.21.0-notes.rst @@ -7,9 +7,10 @@ The NumPy 1.21.0 release highlights are * continued SIMD work covering more functions and platforms, * initial work on the new dtype infrastructure and casting, +* universal2 wheels for Python 3.8 and Python 3.9 on Mac, * improved documentation, * improved annotations, -* the new ``PCG64DXSM`` bitgenerator for random numbers. +* new ``PCG64DXSM`` bitgenerator for random numbers. In addition there are the usual large number of bug fixes and other improvements. From 4d1442ad896b22516dd4d7029204b2ad3168b1cc Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 10 Jun 2021 10:28:25 +0200 Subject: [PATCH 1249/1270] MAINT: Add annotations for the missing `period` parameter to `np.unwrap` --- numpy/lib/function_base.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi index da24ab21dfe7..69c615c9c603 100644 --- a/numpy/lib/function_base.pyi +++ b/numpy/lib/function_base.pyi @@ -30,7 +30,7 @@ def gradient(f, *varargs, axis=..., edge_order=...): ... def diff(a, n=..., axis=..., prepend = ..., append = ...): ... def interp(x, xp, fp, left=..., right=..., period=...): ... def angle(z, deg=...): ... -def unwrap(p, discont = ..., axis=...): ... +def unwrap(p, discont = ..., axis=..., *, period=...): ... def sort_complex(a): ... def trim_zeros(filt, trim=...): ... def extract(condition, arr): ... 
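For reference, a minimal sketch of what the newly annotated ``period`` keyword does at runtime. This is illustrative only and not part of the patch series; it assumes a NumPy build that already ships the keyword (1.21+), and the sample phase values are made up::

    import numpy as np

    # Phase data in degrees that wraps around 360.
    phase_deg = np.array([350.0, 355.0, 2.0, 8.0])

    # With period=360 the jump from 355 to 2 is treated as a wrap-around,
    # so the unwrapped signal keeps increasing past 360.
    np.unwrap(phase_deg, period=360)   # -> array([350., 355., 362., 368.])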
From 7a29f548c4fbb12d5ad77f0f2ac0fdcaad367451 Mon Sep 17 00:00:00 2001 From: Bas van Beek <43369155+BvB93@users.noreply.github.com> Date: Thu, 10 Jun 2021 18:10:27 +0200 Subject: [PATCH 1250/1270] MAINT: Add `complex` as allowed type for the `np.complexfloating` constructor --- numpy/__init__.pyi | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ac37eb8ad409..a3722cd1ccb0 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3042,11 +3042,24 @@ class datetime64(generic): if sys.version_info >= (3, 8): _IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex] _FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex] - _ComplexValue = Union[None, _CharLike_co, SupportsFloat, SupportsComplex, SupportsIndex] + _ComplexValue = Union[ + None, + _CharLike_co, + SupportsFloat, + SupportsComplex, + SupportsIndex, + complex, # `complex` is not a subtype of `SupportsComplex` + ] else: _IntValue = Union[SupportsInt, _CharLike_co] _FloatValue = Union[None, _CharLike_co, SupportsFloat] - _ComplexValue = Union[None, _CharLike_co, SupportsFloat, SupportsComplex] + _ComplexValue = Union[ + None, + _CharLike_co, + SupportsFloat, + SupportsComplex, + complex, + ] class integer(number[_NBit1]): # type: ignore # NOTE: `__index__` is technically defined in the bottom-most From 2def753bb508369b7de17d7e56a0d7befe996c26 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 9 Jun 2021 09:57:27 -0700 Subject: [PATCH 1251/1270] TST: Ignore exp FP exceptions test for glibc ver < 2.17 --- numpy/core/tests/test_umath.py | 35 +++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 9d1b13b53a86..44e59be742b4 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -4,6 +4,7 @@ import itertools import pytest import sys +import os from fractions import Fraction from functools import reduce @@ -17,6 +18,15 @@ _gen_alignment_data, assert_array_almost_equal_nulp, assert_warns ) +def get_glibc_version(): + ver = 0.0 + try: + ver = float(os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]) + except: + print("Couldn't capture GLIBC version") + + return ver + def on_powerpc(): """ True if we are running on a Power PC platform.""" return platform.processor() == 'powerpc' or \ @@ -986,17 +996,20 @@ def test_exp_values(self): yf = np.array(y, dtype=dt) assert_equal(np.exp(yf), xf) - with np.errstate(over='raise'): - assert_raises(FloatingPointError, np.exp, np.float32(100.)) - assert_raises(FloatingPointError, np.exp, np.float32(1E19)) - assert_raises(FloatingPointError, np.exp, np.float64(800.)) - assert_raises(FloatingPointError, np.exp, np.float64(1E19)) - - with np.errstate(under='raise'): - assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) - assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) - assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) - assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) + # Older version of glibc may not raise the correct FP exceptions + # See: https://github.com/numpy/numpy/issues/19192 + if (get_glibc_version() >= 2.17): + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.exp, np.float32(100.)) + assert_raises(FloatingPointError, np.exp, np.float32(1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(800.)) + assert_raises(FloatingPointError, np.exp, np.float64(1E19)) + + with 
np.errstate(under='raise'): + assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) def test_log_values(self): with np.errstate(all='ignore'): From 45a322c2a908facb34e2d54d425d9e7977c6fda4 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 9 Jun 2021 12:45:50 -0700 Subject: [PATCH 1252/1270] BUG: Do not use bare except --- numpy/core/tests/test_umath.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 44e59be742b4..66e75098aa76 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -22,8 +22,8 @@ def get_glibc_version(): ver = 0.0 try: ver = float(os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]) - except: - print("Couldn't capture GLIBC version") + except Exception as inst: + print(inst) return ver From 9ad1ccb59c9ebfe29aaae7803d31be981966d9c3 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Thu, 10 Jun 2021 20:47:54 -0700 Subject: [PATCH 1253/1270] TST: Use conditional xfail --- numpy/core/tests/test_umath.py | 38 ++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 66e75098aa76..8fa2d22a4b76 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -19,14 +19,19 @@ ) def get_glibc_version(): - ver = 0.0 + ver = '0.0' try: - ver = float(os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]) + ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1] except Exception as inst: print(inst) return ver +glibcver = get_glibc_version() +glibc_newerthan_2_17 = pytest.mark.xfail( + glibcver != '0.0' and glibcver < '2.17', + reason="Older glibc versions may not raise appropriate FP exceptions") + def on_powerpc(): """ True if we are running on a Power PC platform.""" return platform.processor() == 'powerpc' or \ @@ -996,20 +1001,21 @@ def test_exp_values(self): yf = np.array(y, dtype=dt) assert_equal(np.exp(yf), xf) - # Older version of glibc may not raise the correct FP exceptions - # See: https://github.com/numpy/numpy/issues/19192 - if (get_glibc_version() >= 2.17): - with np.errstate(over='raise'): - assert_raises(FloatingPointError, np.exp, np.float32(100.)) - assert_raises(FloatingPointError, np.exp, np.float32(1E19)) - assert_raises(FloatingPointError, np.exp, np.float64(800.)) - assert_raises(FloatingPointError, np.exp, np.float64(1E19)) - - with np.errstate(under='raise'): - assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) - assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) - assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) - assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) + # Older version of glibc may not raise the correct FP exceptions + # See: https://github.com/numpy/numpy/issues/19192 + @glibc_newerthan_2_17 + def test_exp_exceptions(self): + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.exp, np.float32(100.)) + assert_raises(FloatingPointError, np.exp, np.float32(1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(800.)) + assert_raises(FloatingPointError, np.exp, np.float64(1E19)) + + with np.errstate(under='raise'): + assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) + assert_raises(FloatingPointError, np.exp, 
np.float32(-1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) def test_log_values(self): with np.errstate(all='ignore'): From 58805d7f8b481d40bcac0d1cf565fafd219df192 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Thu, 10 Jun 2021 20:54:55 -0700 Subject: [PATCH 1254/1270] TST: 2 blank lines after class def --- numpy/core/tests/test_umath.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 8fa2d22a4b76..555ed24b559d 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -27,6 +27,7 @@ def get_glibc_version(): return ver + glibcver = get_glibc_version() glibc_newerthan_2_17 = pytest.mark.xfail( glibcver != '0.0' and glibcver < '2.17', From 1342b96edfec6cd276d6d6dc61df1a113cf1d55d Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Fri, 11 Jun 2021 20:55:50 -0700 Subject: [PATCH 1255/1270] TST: Getting rid of print --- numpy/core/tests/test_umath.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 555ed24b559d..2378b11e95a4 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -19,11 +19,10 @@ ) def get_glibc_version(): - ver = '0.0' try: ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1] except Exception as inst: - print(inst) + ver = '0.0' return ver From 2aab9c8df5335ec850a1e376d31e680cda6f6f59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Thu, 27 May 2021 06:52:05 -0300 Subject: [PATCH 1256/1270] MAINT: replace imgmath with mathjax for docs (#19036) --- .gitmodules | 3 +++ doc/source/_static/scipy-mathjax | 1 + doc/source/conf.py | 6 +++--- 3 files changed, 7 insertions(+), 3 deletions(-) create mode 160000 doc/source/_static/scipy-mathjax diff --git a/.gitmodules b/.gitmodules index e69de29bb2d1..0d6857868837 100644 --- a/.gitmodules +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "doc/source/_static/scipy-mathjax"] + path = doc/source/_static/scipy-mathjax + url = https://github.com/scipy/scipy-mathjax.git diff --git a/doc/source/_static/scipy-mathjax b/doc/source/_static/scipy-mathjax new file mode 160000 index 000000000000..3d21c58225c0 --- /dev/null +++ b/doc/source/_static/scipy-mathjax @@ -0,0 +1 @@ +Subproject commit 3d21c58225c09243d5a088b1557654d280925e02 diff --git a/doc/source/conf.py b/doc/source/conf.py index bf2fbdce9623..5ba7f70b8ff5 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -83,11 +83,9 @@ class PyTypeObject(ctypes.Structure): 'matplotlib.sphinxext.plot_directive', 'IPython.sphinxext.ipython_console_highlighting', 'IPython.sphinxext.ipython_directive', - 'sphinx.ext.imgmath', + 'sphinx.ext.mathjax', ] -imgmath_image_format = 'svg' - # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -188,6 +186,8 @@ def setup(app): pngmath_use_preview = True pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent'] +mathjax_path = "scipy-mathjax/MathJax.js?config=scipy-mathjax" + plot_html_show_formats = False plot_html_show_source_link = False From 1befa1bc58114901b9d9faa3c4a1654389688e13 Mon Sep 17 00:00:00 2001 From: Peter Hawkins Date: Tue, 8 Jun 2021 20:14:26 -0400 Subject: [PATCH 1257/1270] Fix out-of-bounds array access in convert_datetime_divisor_to_multiple(). 
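The glibc gating above leans on ``os.confstr``; here is a small sketch (not part of the patches) of why the ``rsplit(' ')[1]`` and the ``'0.0'`` fallback work. The concrete version string is an assumption: on a glibc-based Linux the call typically returns something like ``"glibc 2.31"``, and it fails or returns ``None`` elsewhere::

    import os

    try:
        # e.g. "glibc 2.31" -> keep the last whitespace-separated token.
        ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]
    except Exception:
        # Mirrors the helper in the patch: platforms without this
        # configuration value fall back to a sentinel that keeps the
        # xfail condition disabled.
        ver = '0.0'
    print(ver)   # e.g. "2.31", or "0.0" where unavailable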
If the NPY_DATETIMEUNIT is >= NPY_FR_s, the _multiples_table lookup will be out of bounds. It appears the correct logic appears a few lines down and should be used instead in this case. --- numpy/core/src/multiarray/datetime.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index 48dc153406aa..b9d81e8368c5 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -950,10 +950,6 @@ convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta, return -1; } - ind = ((int)meta->base - (int)NPY_FR_Y)*2; - totry = _multiples_table[ind]; - baseunit = _multiples_table[ind + 1]; - num = 3; if (meta->base == NPY_FR_W) { num = 4; @@ -962,6 +958,7 @@ convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta, num = 2; } if (meta->base >= NPY_FR_s) { + /* _multiplies_table only has entries up to NPY_FR_s */ ind = ((int)NPY_FR_s - (int)NPY_FR_Y)*2; totry = _multiples_table[ind]; baseunit = _multiples_table[ind + 1]; @@ -974,6 +971,11 @@ convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta, num = 0; } } + else { + ind = ((int)meta->base - (int)NPY_FR_Y)*2; + totry = _multiples_table[ind]; + baseunit = _multiples_table[ind + 1]; + } for (i = 0; i < num; i++) { q = totry[i] / den; From d336c26e394b90b3a5c7d8b4de21acc42d121e63 Mon Sep 17 00:00:00 2001 From: Kenichi Maehashi Date: Thu, 10 Jun 2021 12:02:15 +0000 Subject: [PATCH 1258/1270] ENH: support major version larger than 9 in numpy.lib.NumpyVersion --- numpy/lib/_version.py | 2 +- numpy/lib/tests/test__version.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index 00e00e9a78ad..145c54855d69 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -54,7 +54,7 @@ class NumpyVersion(): def __init__(self, vstring): self.vstring = vstring - ver_main = re.match(r'\d\.\d+\.\d+', vstring) + ver_main = re.match(r'\d+\.\d+\.\d+', vstring) if not ver_main: raise ValueError("Not a valid numpy version string") diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py index 1825046316a9..e6d41ad93932 100644 --- a/numpy/lib/tests/test__version.py +++ b/numpy/lib/tests/test__version.py @@ -7,7 +7,7 @@ def test_main_versions(): assert_(NumpyVersion('1.8.0') == '1.8.0') - for ver in ['1.9.0', '2.0.0', '1.8.1']: + for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']: assert_(NumpyVersion('1.8.0') < ver) for ver in ['1.7.0', '1.7.1', '0.9.9']: From 34c7db0e6b33012f18b4b8e863a4858f3a8faf5d Mon Sep 17 00:00:00 2001 From: Kenichi Maehashi Date: Thu, 10 Jun 2021 12:07:16 +0000 Subject: [PATCH 1259/1270] DOC: fix typo --- numpy/lib/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index 145c54855d69..bfac5f814501 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -15,7 +15,7 @@ class NumpyVersion(): """Parse and compare numpy version strings. NumPy has the following versioning scheme (numbers given are examples; they - can be > 9) in principle): + can be > 9 in principle): - Released version: '1.8.0', '1.8.1', etc. - Alpha: '1.8.0a1', '1.8.0a2', etc. 
From be642f5e0e615d1ef48aaa8ff09ab784609f68f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Thu, 17 Jun 2021 12:41:40 -0300 Subject: [PATCH 1260/1270] DOC: fix duplicate navbar in development documentation index (#19264) Also update pinned version of sphinx in environment.yml Fixes gh-19028 --- doc/source/dev/index.rst | 17 ----------------- environment.yml | 2 +- 2 files changed, 1 insertion(+), 18 deletions(-) diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 8843d380a165..aaf9fe4a091c 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -4,23 +4,6 @@ Contributing to NumPy ##################### -.. TODO: this is hidden because there's a bug in the pydata theme that won't render TOC items under headers - -.. toctree:: - :hidden: - - Git Basics - development_environment - development_gitpod - development_workflow - development_advanced_debugging - ../benchmarking - NumPy C style guide - releasing - governance/index - howto-docs - - Not a coder? Not a problem! NumPy is multi-faceted, and we can use a lot of help. These are all activities we'd like to get help with (they're all important, so we list them in alphabetical order): diff --git a/environment.yml b/environment.yml index 5e1d4df235d4..22ce617af4b2 100644 --- a/environment.yml +++ b/environment.yml @@ -21,7 +21,7 @@ dependencies: - mypy=0.812 - typing_extensions # For building docs - - sphinx=3.5.4 + - sphinx=4.0.1 - numpydoc=1.1.0 - ipython - scipy From a831463ba80c17c12aebc6ed78b86fb71eef9289 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sat, 12 Jun 2021 19:38:50 +0530 Subject: [PATCH 1261/1270] BUG: Remove TypeError on invalid dtypes comparison --- numpy/core/src/multiarray/descriptor.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index f0dfac55dee8..3f06acd5822e 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -3228,7 +3228,22 @@ arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) { PyArray_Descr *new = _convert_from_any(other, 0); if (new == NULL) { - return NULL; + /* Cannot convert `other` to dtype */ + + PyErr_Clear(); + + switch (cmp_op) { + case Py_LT: + case Py_LE: + case Py_EQ: + case Py_GT: + case Py_GE: + Py_RETURN_FALSE; + case Py_NE: + Py_RETURN_TRUE; + default: + Py_RETURN_NOTIMPLEMENTED; + } } npy_bool ret; From 73245321ce34b1cd36913a29b950ea2f4c44ded6 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Sat, 12 Jun 2021 19:38:59 +0530 Subject: [PATCH 1262/1270] TST: Check invalid dtypes comparison --- numpy/core/tests/test_dtype.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 8a6b7dcd5f95..3a43bba6885c 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -88,6 +88,19 @@ def test_invalid_types(self): assert_raises(TypeError, np.dtype, 'q8') assert_raises(TypeError, np.dtype, 'Q8') + @pytest.mark.parametrize( + ['operator', 'expected'], + [('==', False), ('!=', True), + ('>', False), ('<', False), + ('>=', False), ('<=', False)]) + def test_richcompare_invalid_dtype(self, operator, expected): + # Make sure objects that cannot be converted to valid + # dtypes results in False when compared to valid dtypes. + # Here 7 cannot be converted to dtype. 
No exceptions should be raised + + assert eval(f"np.dtype(np.int32) {operator} 7") == expected,\ + f"dtype richcompare failed for {operator}" + @pytest.mark.parametrize("dtype", ['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64', 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64', From 5f8680970907fb507aeaf2edc74b2aacaf0070a9 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 15 Jun 2021 20:27:41 +0530 Subject: [PATCH 1263/1270] BUG: Return NotImplemented for unrecognized dtypes --- numpy/core/src/multiarray/descriptor.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index 3f06acd5822e..b8b477e5d70f 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -3229,21 +3229,8 @@ arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) PyArray_Descr *new = _convert_from_any(other, 0); if (new == NULL) { /* Cannot convert `other` to dtype */ - PyErr_Clear(); - - switch (cmp_op) { - case Py_LT: - case Py_LE: - case Py_EQ: - case Py_GT: - case Py_GE: - Py_RETURN_FALSE; - case Py_NE: - Py_RETURN_TRUE; - default: - Py_RETURN_NOTIMPLEMENTED; - } + Py_RETURN_NOTIMPLEMENTED; } npy_bool ret; From 98d88c6f60fd04ab4d8587f0e13e529bc96a41a5 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 15 Jun 2021 20:28:51 +0530 Subject: [PATCH 1264/1270] TST: Check invalid dtypes for equality | Check TypeError for comparisions --- numpy/core/tests/test_dtype.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 3a43bba6885c..3d15009ea765 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -88,18 +88,23 @@ def test_invalid_types(self): assert_raises(TypeError, np.dtype, 'q8') assert_raises(TypeError, np.dtype, 'Q8') - @pytest.mark.parametrize( - ['operator', 'expected'], - [('==', False), ('!=', True), - ('>', False), ('<', False), - ('>=', False), ('<=', False)]) - def test_richcompare_invalid_dtype(self, operator, expected): + def test_richcompare_invalid_dtype_equality(self): # Make sure objects that cannot be converted to valid - # dtypes results in False when compared to valid dtypes. + # dtypes results in False/True when compared to valid dtypes. # Here 7 cannot be converted to dtype. No exceptions should be raised - assert eval(f"np.dtype(np.int32) {operator} 7") == expected,\ - f"dtype richcompare failed for {operator}" + assert not np.dtype(np.int32) == 7, "dtype richcompare failed for ==" + assert np.dtype(np.int32) != 7, "dtype richcompare failed for !=" + + @pytest.mark.parametrize( + 'operation', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_richcompare_invalid_dtype_comparison(self, operation): + # Make sure TypeError is raised for comparison operators + # for invalid dtypes. Here 7 is an invalid dtype. 
+ + with pytest.raises(TypeError): + operation(np.dtype(np.int32), 7) @pytest.mark.parametrize("dtype", ['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64', From d80e4738f781a1d206bbc04a2e863299e5f2e104 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 15 Jun 2021 20:30:33 +0530 Subject: [PATCH 1265/1270] BUG: Removed typing for == and != in dtypes --- numpy/__init__.pyi | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a3722cd1ccb0..4ec46aea01a4 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1075,8 +1075,6 @@ class dtype(Generic[_DTypeScalar_co]): # literals as of mypy 0.800. Set the return-type to `Any` for now. def __rmul__(self, value: int) -> Any: ... - def __eq__(self, other: DTypeLike) -> bool: ... - def __ne__(self, other: DTypeLike) -> bool: ... def __gt__(self, other: DTypeLike) -> bool: ... def __ge__(self, other: DTypeLike) -> bool: ... def __lt__(self, other: DTypeLike) -> bool: ... From fa5754e8c159a37fcd9345df261cf82821088ea0 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 18 Jun 2021 15:57:17 -0500 Subject: [PATCH 1266/1270] BUG: Add missing DECREF in new path Pretty, harmless reference count leak (the method is a singleton) --- numpy/core/src/multiarray/convert_datatype.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 1a962ef788d7..e7407535e5f7 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -476,6 +476,7 @@ PyArray_CheckCastSafety(NPY_CASTING casting, if (PyArray_MinCastSafety(castingimpl->casting, casting) == casting) { /* No need to check using `castingimpl.resolve_descriptors()` */ + Py_DECREF(meth); return 1; } From 7d25b81025a50cc0368f5727c65e875ca769469a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 18 Jun 2021 16:49:58 -0500 Subject: [PATCH 1267/1270] BUG: Fix refcount leak in ResultType This slightly reorganizes the error path handling (duplicating the freeing for the non-error path). It just seemed a bit clearer. 
--- numpy/core/src/multiarray/convert_datatype.c | 23 +++++++++++--------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index e7407535e5f7..d197a4bea31e 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -1649,14 +1649,14 @@ PyArray_ResultType( Py_DECREF(all_DTypes[i]); } if (common_dtype == NULL) { - goto finish; + goto error; } if (common_dtype->abstract) { /* (ab)use default descriptor to define a default */ PyArray_Descr *tmp_descr = common_dtype->default_descr(common_dtype); if (tmp_descr == NULL) { - goto finish; + goto error; } Py_INCREF(NPY_DTYPE(tmp_descr)); Py_SETREF(common_dtype, NPY_DTYPE(tmp_descr)); @@ -1689,20 +1689,18 @@ PyArray_ResultType( PyObject *tmp = PyArray_GETITEM( arrs[i-ndtypes], PyArray_BYTES(arrs[i-ndtypes])); if (tmp == NULL) { - Py_SETREF(result, NULL); - goto finish; + goto error; } curr = common_dtype->discover_descr_from_pyobject(common_dtype, tmp); Py_DECREF(tmp); } if (curr == NULL) { - Py_SETREF(result, NULL); - goto finish; + goto error; } Py_SETREF(result, common_dtype->common_instance(result, curr)); Py_DECREF(curr); if (result == NULL) { - goto finish; + goto error; } } } @@ -1723,16 +1721,21 @@ PyArray_ResultType( * Going from error to success should not really happen, but is * probably OK if it does. */ - Py_SETREF(result, NULL); - goto finish; + goto error; } /* Return the old "legacy" result (could warn here if different) */ Py_SETREF(result, legacy_result); } - finish: + Py_DECREF(common_dtype); PyMem_Free(info_on_heap); return result; + + error: + Py_XDECREF(result); + Py_XDECREF(common_dtype); + PyMem_Free(info_on_heap); + return NULL; } From 07d7e72ab6880c05b5fdd98482cf88982e778393 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Jun 2021 09:30:35 -0600 Subject: [PATCH 1268/1270] MAINT: Remove accidentally created directory. The removed directory also contained a release fragment that this commit adds to the 1.21.0-release notes. --- changelog/13578.deprecation.rst | 7 ------- doc/source/release/1.21.0-notes.rst | 10 ++++++++++ 2 files changed, 10 insertions(+), 7 deletions(-) delete mode 100644 changelog/13578.deprecation.rst diff --git a/changelog/13578.deprecation.rst b/changelog/13578.deprecation.rst deleted file mode 100644 index 58ec7e58967f..000000000000 --- a/changelog/13578.deprecation.rst +++ /dev/null @@ -1,7 +0,0 @@ -The ``.dtype`` attribute must return a ``dtype`` ------------------------------------------------- - -A ``DeprecationWarning`` is now given if the ``.dtype`` attribute -of an object passed into ``np.dtype`` or as a ``dtype=obj`` argument -is not a dtype. NumPy will stop attempting to recursively coerce the -result of ``.dtype``. diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst index 5b634aa5844e..7787a38cc5c0 100644 --- a/doc/source/release/1.21.0-notes.rst +++ b/doc/source/release/1.21.0-notes.rst @@ -75,6 +75,16 @@ Expired deprecations Deprecations ============ +The ``.dtype`` attribute must return a ``dtype`` +------------------------------------------------ + +A ``DeprecationWarning`` is now given if the ``.dtype`` attribute +of an object passed into ``np.dtype`` or as a ``dtype=obj`` argument +is not a dtype. NumPy will stop attempting to recursively coerce the +result of ``.dtype``. 
+ +(`gh-13578 `__) + Inexact matches for ``numpy.convolve`` and ``numpy.correlate`` are deprecated ----------------------------------------------------------------------------- From 493b64bfe9c5396498325b87e5e80e1917555c41 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Jun 2021 09:53:37 -0600 Subject: [PATCH 1269/1270] MAINT: Update 1.21.0-changelog.rst --- doc/changelog/1.21.0-changelog.rst | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/doc/changelog/1.21.0-changelog.rst b/doc/changelog/1.21.0-changelog.rst index b9ccd748ea38..947da4da740e 100644 --- a/doc/changelog/1.21.0-changelog.rst +++ b/doc/changelog/1.21.0-changelog.rst @@ -2,7 +2,7 @@ Contributors ============ -A total of 173 people contributed to this release. People with a "+" by their +A total of 175 people contributed to this release. People with a "+" by their names contributed a patch for the first time. * @8bitmp3 + @@ -95,6 +95,7 @@ names contributed a patch for the first time. * Kamil Choudhury + * Kasia Leszek + * Keller Meier + +* Kenichi Maehashi * Kevin Sheppard * Kulin Seth + * Kumud Lakara + @@ -144,6 +145,7 @@ names contributed a patch for the first time. * Panos Mavrogiorgos + * Patrick T. Komiske III + * Pearu Peterson +* Peter Hawkins + * Raghuveer Devulapalli * Ralf Gommers * Raúl Montón Pinillos + @@ -182,7 +184,7 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 571 pull requests were merged for this release. +A total of 581 pull requests were merged for this release. * `#13578 `__: DEP: Deprecate `data_type.dtype` if attribute is not already... * `#15269 `__: ENH: Implement faster keyword argument parsing capable of ``METH_FASTCALL`` @@ -754,5 +756,14 @@ A total of 571 pull requests were merged for this release. * `#19180 `__: BUG: Add -std=c99 to intel icc compiler flags on linux * `#19193 `__: NEP: Accept NEP 35 as final * `#19194 `__: MAINT, BUG: Adapt `castingimpl.casting` to denote a minimal level -* `#19197 `__: DOC: Prepare for NumPy 1.20.0rc2 release. - +* `#19197 `__: REL: Prepare for NumPy 1.20.0rc2 release. +* `#19213 `__: MAINT: Add annotations for the missing `period` parameter to... +* `#19219 `__: MAINT: Add `complex` as allowed type for the `np.complexfloating`... +* `#19233 `__: TST: Ignore exp FP exceptions test for glibc ver < 2.17 +* `#19238 `__: MAINT: replace imgmath with mathjax for docs +* `#19239 `__: BUG: Fix out-of-bounds access in convert_datetime_divisor_to_multiple +* `#19240 `__: ENH: Support major version larger than 9 in NumpyVersion +* `#19268 `__: DOC: fix duplicate navbar in development documentation index +* `#19269 `__: BUG: Invalid dtypes comparison should not raise TypeError +* `#19280 `__: BUG: Add missing DECREF in new path +* `#19283 `__: REL: Prepare for 1.21.0 release From 34aebc2824cf8c2bdbe19040b82f98f18557c8ba Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Jun 2021 10:12:23 -0600 Subject: [PATCH 1270/1270] MAINT: Update 1.21.0-notes.rst --- doc/source/release/1.21.0-notes.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst index 7787a38cc5c0..270cc32de918 100644 --- a/doc/source/release/1.21.0-notes.rst +++ b/doc/source/release/1.21.0-notes.rst @@ -17,6 +17,16 @@ In addition there are the usual large number of bug fixes and other improvements The Python versions supported for this release are 3.7-3.9. 
Official support for Python 3.10 will be added when it is released. +.. warning:: +    There are unresolved problems compiling NumPy 1.20.0 with gcc-11.1. + +    * Optimization level `-O3` results in many incorrect warnings when +      running the tests. +    * On some hardware NumPy will hang in an infinite loop. + + + + New functions =============

    B-s5JyyQY(R*#s& z(z$%zUwktrPcI*rzy97v%wqo1KbOQcgbYb<2h|Prmv_pqQ^DAIz{m>7cwh>r0~)aza(BeH!o!+yVpj?KW? zM&awK>y!P!i`|#9RJH~R6PpeETdu7P?p|YM{b%Wm*O=q7%K+VfYHvmOBAcZAI6XI2 z$6`}iZ!S$Yw4G3g9ph3f!WoXsT5A8^t@7S7qh#vqZg~$Dk6#EF=!>zqurKsvLuh~s z+@GsA!Gm6FsR1J--N)X3?;Sycl-)iODiG6%s5d2g`%we$FY}+Yg%=7!zxa^C%={Ei z9k|E?Q>f#F*C&&r2LRJ|?Qs?{$ipYle0@~ioJst!`S|sc8Nf>vKo=q+^IpUg-Hvyt zF@N`%II#or!BbHFCE&Q$Mj~s1)#XC?^@Q{Jxo_=vNp2Y&m|(iro z_U27k0a){$Q!g<8!6PL;HUPhpwVDf39NVn^`22kiE)>`6m z*|p}3rh@~5GDqA0$b_&}o?1{5K*LiI9MFe~KJ*^a<#vdZg0t4Eb`F`iqMZtdRkfdS zx8=uxk$X{0O)IVre@;Msg7e}-LzGgM~a?S1}=MexI{3xZk@p>}Q zf5Tj|zAaT!p6$F|Kkzu4?}{THoWJz6S(Ik>dgF4>v*(7F%1u{)UZv1<9CjRS;MXn@ zcJU(o)h>V$c;q@IZALgZoSY(L`O!WvuJWCE#6Wa$jr+5b=Vo(6S2vbz^$vegPrGNW5HsT{4H#c$N?6*bK_4{j$rJ0tPU@3^D}l1qoWh!ZJs zAWz|q+HBy?_bXPLct0qXd_#2(gtB2GEy6vhf)f z@YId8lC+sO_vXdU1xkctQnY+@G{vq`pIC}72y$kVgQK}B=}|8%*OdF*gFs;)$}@h^ zQp~7pkTJ-Hhtn*}?L>gSKT4X=F~Ig~sSxuRFIWL4)Jfc+vY%b3{oR*VGJT&UoWg#T z2M5rwqYC7l1|QD~A z_#vfU1T{u$0?-G{W_Y9T#eNbSN00Dxq0wP93kTc)pySYX@k?8uxsyct?U<5O@c!ZW zwoBW+Yu_LcWBE=ju&k6a2fkelw?Y_b2Pm!5RA&hk$Iq~#;>2-id7!^&Jt@7;lIGa1 zYV2d621`^S0hgQO(83rH;@uEa8&cg4Ou?BtEL^qrn55+CaoaNzqS!Quu{6BDl&5hqOPDA#7` z92Qc8%bns&%Orx*hdcT~BU?LF2NjkL9Fm%l1&9t3|`4!WMBnyME{!30=rh zV$Y%DF!MF7(EnAkA5r1$hP)^kkxl9zpY?eu<0<34g6nDaD51{fs{E7*HNjuX5WLT! zalTZRedW#wR;((p%GdKXFBerju1^VE*#XFNC6zTaao}zXKPkq^kafT>>2SukEqI?~ zdZEkM3g_*kgmH{>BjQb?)eVIMP*MzqA(o9i^aW)*HF6vD#A1Wd-zUN0(G-iI=c}%& ztGUYI-0Eo}mD|ME%M?^6TbP=|H03f$GX;?_A=A8Z6zz4>}L#HO^G#v-EFmk4^w>9 z4@36u^BxyKwG&pp3?(-0m4NTDnTorsO&+q4Aau!xuL$Z{SfC3WcUJi-LmGZBW~vH_ ziRa%*@Wy89$%}dTxeUwi2$5acM4JAgRHh0q5=Nt_Q_jSNmfGw9aDF1OKq5GAa9qU; zq*Tf=IA9_C6oztcJoFQFLzi#!m|X}2gEi|E|i9F7ur~7@f(UrE%7GA4*?GQXM*5qB0lsz-mmU3 zUYY4Dt(ED3=$)&S>;1ynuz!{6dz!rTLY5!fse^tS0+_;P}Z5IIw7mo#I>9UQDGb(Vom%nWT zvY7>QB6?4Sfj_6CdT8b~^1AJ)OzQaQzI>oa_0f>^0gjfRGB6Pa;D|uphxc8wHy4USR?dW&pXCsA_kc5QWb`U&Ay1F43(-@%XIz%qG0U!T-A?_*R1vI;cY zD=6OCId%Q^oDbZz-ct`Vqz+uYnAzQQR6q4KE0=@QsI$~@;?6_`99kcnVL2({;b*vI6W8m)YGC8Ee7PYLAF3O3q@sb9$m!bj`NZdjZ7)`I9yZeY4a#X!Y#gFb;_nfPHJ2 zxhH*pa3Ovp_X8+21Ji7hYt0M88g5;_sh+zhV_1iWu2u9*h`eEd{WF7js3VE+g-SL^ zF{NihJvT85eQX-E51Uz_dObH?l?DGDk?wWp1RG- za$W4Twy>D1jtjC0rcrD|QDC&4YoLoP8t1*2fji-hS)TiH4bM<|f0ww1um5tuqlc%Y z_@ezxzY6%Ed_ovN^MVeX_Qw^20t}rHh2V`WRggA<_XUy&9hM5KJp$h(eu-T&gUt+) zJim>-dWZ9n`+Q9wL{5lEG2&S9R#ja$TDhfx+sFhZ{{l1Bp4t=_fczW)`X37$Y|(Ao zP1jv*D|0)?vSRv6v>yQ)`bN0?S2XegREyMH62A^n5|D#mda|LOv7)XPcYz8w#bLo3 zCO)NN%?JqAH=o7&|D`(-;44ui&v6nfD)`-sc|a+aJ(&1BxX;moL$9-4!^q%K=RD$Wxmc zzX43w)kc{9waQZ3KNqz-zmenMZvLr0LNEcsnwnJJr*6N!3_LCXqr*t)GKgBLKeFi( zO?fx`6YuB5gjt>*YzTw2hFO4V&{wfjLYGgcC?e4HFEW0J%TR!NOWmsF66P3(JRS~%% zGXb3IW(GoPfUzV6*l~QJqz$pl@-`^`{y~vd%wjkPL$>HPIVT`4q`sNZ|BJ1^0E)7G z-~Vw1MOs2qI;EG87LaZP>0X4Tkx)`VWa*M#Qjt=+LlAUnc4>(Pq(cN$y5WEEJkR?x z-}%jdXPD6$ad+?gy019S<9MC$6Oi{wV6!^=`s{i8QN%MUfx^@;MN$_43JFd+q-zAA z9`?fZv*#IRD5(2TYEj}YSph)uO{0x{e~0w{%&^auz`pIlwt~|)_X$LN>uOZ=Yc2>6fHy(I8|;o0nZ8#NtL;N44GL5oH%Ix~NY@NP5`xc{fpk1sf_g{N zceT}d)OK_oQ+X~*u${HZa`U z-);M&8g^zis~-jm%YVE~q;HrV?;&=FB@mr%VBc%d95lBCjynR?+AlW$Yz7-rf%R2L zojqXWgVNI(L-kK0rQ3~LoRF^}2K(TRCC=7mvwU`x?saC3RbJ23*YyD?!%a_WL7~j( z9&h&UL_G3yd_vW5i~E9}io=86!lc^LP23+~>DiwBT!U#_ri8e?oJvE~P*JOCuc1d; zDU;yRBx_54(m?25fF6@m@UyXZmm)VKsUM#IRFTc}V?%T=TEjw?qk7io8&ol{ELS!r zF3FRXWOBdLjpFMN%>)<76#D!eVn&!&20M1wES-43L&`90K08MvpD zh)*`Sv|K2L@7Z(~^QIPzHg6PVaqAKk=d%3BZ>hTN{3AbT-V?=VcTHSpf$+TibKwU{ zFKxsI%0y$w9pc~=ZzrYwrEJ$%{?@QW^53>1f=%)Bza0ice8D?0Rw5LCG^DZoX)Hgq zb^qz}tMjrb{Xr~qZ0$%au|g8Z#PCjyl(Jmi{F5u6uNucqH6a#A?hMd6%6WJ$K!}ay zi?^tZs_blPXZUaZ0=m|NC_)^vpKhZ;p`n&fCh{T}GLJ=K`ZX$Psy`_@vH!6ste#uM 
z0L^kv-I4CsV;x;asLbKiGp|tE#|j5+OpnfIEmW6B%F%ZiF8j>Na`E41nLU>DRDAi;}iS3S)`tY9uadeDR?u7lax@iW{TBq1P9`+xT@!$!O{8qRIgz@8NUe}jwz6_-M7FKn`*9?MGJeRS)TtwW9RzcH35v$Mb* zQLWiggpOAC=EJ$QlKjCw7LtX`q|FpBNy;d6$F03jDrzTtb-t&SlQSB7i-~1m#k1V6 zSi-?-i3Oce7SZC={h-OoeJ2eEuH~M}PuG@%?&|OWrWmwf4=%i*ncJ3QrKirgm8A;T zZq*bn>X+;P>yrwd2mIFJeerUx8cfw34e9OW&s>>!jqzT7)Gtel6;sqqkl$G!8Ieuu zXT>?Bq-{tOKuXH`m-zzJ{hOE#xum3i0wC)ns-U%PJk!F_N;kn4(N`nc%+^GkcWy3 ziF{i`Lc)5d9FFkY8rRZ^TjwAzPuyQBJj!KB8!i1MS9?G5ULBZ2mZi&N*`Cnse3N_A zn(8)i_F*$@%sCtvx~1D%3MZA{v3x|tPT~Ol151b8#~J7LDPc+s1c@NBdcS_9DUPis zLM(r75o2nKd$NE3>de_3=r>s4F zx;2n;6_YCd1|_WDFF;WZrHZK3{LiSz$F0;$m%3$xVn$C8+jNw9k;2GeOLiTRlGlRz z)<#%VL#k=3?}!qW<$@<2LZw;}U%c~cPSShDVZ}+-+-h*Uoe7+!ryY_yaaeUm`|pYE zNo*RG`s>OXcZ~ecS(qOVEKzq>`UG7D*@?rvygLW|!2a5UvSG?51~+-YqmZrVkP%v= zNi6yEePq+q7fBNA7&HlNk8%u@4VPy>;_fNmciji&!NSt7QPPU`W?724&uD-B#v!AE zDpyxI)Q%Sj9zC?@(^JgWlZl{{IYZYXvAA%(ryV&>9N(#a&K4>cVWepK`5`6=C(!fb zzGfnq0Y|P)x#MZoM8%s~BemqW56ap~R3OQ=MF-+lH>A88vsKHK4{m{?ktYE1M?5e* zA@}^)pvF<^*|_$$d#!&qAUIcn8Ca+-r|z`=TTlno#E{nFCVQ) zPd7JH%JxBl8WP(@?#N*oKxUG#ZU~py&fCXWL?U&_VJyN$eql#R(7;C;f_Sm)4ZWT%;cB=b(w5Lc0Gw6&Su{Psb zUOw=8Fv^9uL~?e1`6b<68QE(64a?rnV?bu_o7PAiu(cmQUPhAVDO|quj z@k|hjoz~5=Np|yNt=&4E6JK>Lai2Epo3zpIJ;Ct4wWcV|@FK0Gj20=oHgRFNi58k* zz@|kQHb$*QwV12sv@jg)=bN5U_&R{p5li%K6Ykzx)^p36CbE=`9Mx2wxs|3{QCLY7 zd8xr-T8CT|y_&SQ=rXNf(Jma*b`OF2YTKt}a1q93AMW$gFaIfsqnE$-tCiOB(;8E_ z(T(S2Aaf8~$M-gPT18sd^5iUT!OiaX7l9#1X7rC-mdfNAxv`8Lo@wmY_mtEz@0!vf z31UH3gUfIUoec<-<5?LCJ~kX4Nmj2zqZsP&UXm zx{E!E`sjkGCl+Z5>HTnDqI?2-&nq#k?bhmkDR(Gj`B{CEUfP$H&&_Q zu=_GmwqG<$80)=VDN5)X2=S=N{vRvvwqQIBSeu=k37IW%G(177(MB1u-65T*?%fM< zGp|`ajjChz8vUq_RhkIV8O#vWIhB`5!^%~?`*ev0cSnyYX;pId<7my{{ubzy!ax;z zul%U<%*IcR?Qqc7klS7oFNK}0!6SRzD$%C(uhVLo{6XC9T=S0+PI)G%2|A=RnHGLtZvc;Z=TZwn2*c9T8 zjg1RZ`XF36v{ZIR9DPu8??=M$jNMu;risa%2C#-tW)k9l|0M2*e`e?B;N}Wja%b`& zg?W*cBuEyDFn8F0kTerR^$zE$4YCyoZcVdCP5$uMmDEJ!dr4t^ZksYnZ(Nqg&gPqq za%LIxPiBk&wG_!;Fs&a7!lgljRZB+zCQ#i5?)in5{jSN~*k*^6(lat0ZN!-;!^Ui) zGiid;9XS~VM1IqKNh*!!F^0bPDfwiS}=o&wfZwfBR2$yh(=+h@c&2nmQiW`xE+ zRs?(VhqR>$Kg|FEUp+7o4taL-li!znf$)qAccVRdA6(5*59)_CPx$A{ZjdEwU;e#& zJ;_ezDJMx&Fr&%iah&u__vOxG>f>#B8f`Zrz4XxUzgS6v*g{|lJpz%2qMiO2+0Jkp z*)ZSpQMW`$%c?4f+1#XxA@VweDkIYQE!2^*v)>A|+tm@$m`~$6AiQ*!92Tf!_O)@$ z;bUpa??MVSrJGg<8x#{oU^z#&6anuVVMTXw$`I~G^v!E*sULZu1c&jaaCQ3i}D2e{} z)PqN-%EXg5`nDo|imf;vZd~D!RrgR@)#g^@iUb$^slX>sesg|UI0SoKz1DUN`vYNT z8Ln`a8k8Z|Au5KnTT-29l($vU8#_t?5mSxOMzlMnIS5liMXAKN*o;LP4etC9!*e7| zbjPD`^P;v48}BCsrM)aIwdWt?3j`}NqvI!eHv$3W8-bZ6LH2Ei6a)GDw-O=;D7YA;w z3W-Km2L$*twQ-OZvCT?sJb4rH32)+u8E${oeM7MGLtn`wc1GC_l?3H%nJP0I+6|x>~DkTIs+V?QAw8izFno8ePt&Pn70)P=sPP z;2&EZ*Fb1#YUQI!g~t{3MO{_*Amp3Jx%8OM@obq0>?n|)=@TSQzD3qj;G&hv!_Zsd z>|~aFFr`;wyiGwj-^?ui9qCn$?y$(=ZkMqWu&^A^{1h{EXZ1O+Hd0}$tgQiW&HW3-zWC~La)e`-MF5gvmiYsW(brRv&0ZvXI@t&S~nSp5j~n}JWv zNS6vJyJ9Qr8(nc~B`t}>N|Nl-AetP22<6OMR0ObXBpdPtp~}PtYSS%%BlX; zm)@h4*%Rc~;m(-S63~3XVx8el&7Cjbz`;UnE%Z=OqnCSBA4(&WSyxL0W0>1Vdvb)7 z8x}IMpq+7HxsW@ij?Kc>1JoYF5Dp!enncWYP0bZkH$C0jNXU2PuG%96Nw+wHi zee1pEF*Hp_bzUQ{Zx!9V&?;_v((m|ueMP!((17K!GebM6F^gC*LR#!1Qb_Z%m&<75 zsm^fLTtMBdsM`4rwn(|48_?!owOI62&qo9AUWqgy^bFVH+(S{@JG0hv@#m9VJ4{}x z;(ytIUDR}i>+Pb*?>08WG+4UV|2GW4>|iKj(-CW=t&VtZ@>ztq+PU_@%?Os9Om?`R zX9J<`TmHd~x9BugiCjDYe@%8b9C+ENntTH zY!)E1>|7f8bm9rprKwwV(gCUI@Ay#izD2m@AQ94KO*ddl6Kjl=RT;#Y=%W7#dZBS% zj91kTCS6JLcHT7t2)hRhkUC&_^jITtv3fv9^n*tL^BNMG*XDxQDf^#QA7gIEQfwt^ zrX2gcb@4Pt;PaEOFN3g%mGD&|8o0GBnQ*>{iBbMxkxar9#fJ~;n%jKg6Hagm5=(X2 zyab3@+FRq{q(O_C8^wQHI_h(KLgoM&Gxi~Je-KIodxsZ23 z%NDm#cmkG(klpj{mOtg6{k*(sLP%iK>M8NPq8nG=Pv7+m)gZy=ZI7d+$ivHXT 
z<@?k0>TNMq9&W9et_h2UwhmW^T?)K!zcbWGMuzj#x?c=yqki1ha)O*XtKb0R^pm+w zRU}6Jy_2>dP3^44UAllBnP`27dmV_MP275lAqb;`Z|v$(xrol|=s$Dgh`Q%wGe-Pt zq*w|ffZgM#m`%~ZGa7?L&xOMx4aC#FEj&^za=z1#zG}}A+*yU5p=v!Y2uO>2f!%&U z*_NVY;hM5W-)EuZ&VtGAPi^-Tx^pBIUZ8qv=|wO1r&2tNNQE|kq8mSa=6F0F5kzbA zHhf$NdzWR}Syy$e0jdSgCrs~jn#j~l0^ES_KL!$jQhKuONk^hY*s?&5L#x#l^11D_WLx&x|n_P^N$2ysQ94je8mLE`(WKJ0VM|;Qed4 zB!O}iiRddu+*)QqvytmE7@7>39o*rG%o}FH@hn-$|HET~1F%SO3MX5qM>nI>U0xCO zYaHe$Ji=$StRV%JJmB5C<%?HrMgE(MLfvK8e@sivQu2B3yBqxsl8-c85cRhY#qrF- zsj-*Q9tIX1-`0+NYkF-87QA9s;{gN9;;=(E;&5af+LuB0iWK&=(U>-ih<ls$d@Z zNxA=Ehk*l)^w*tQZ6J=J613i4$Z&4}57=6aTl*hSmnlTz?kx+nNj`&{#5xul6zF}8 zw@B74vWf0SgxyC-SN9d%<(TzMBkbcs(ycvXL1BI9jS9gchuGOy#w2wypjA0TLTYAb za3OPE&9*!4%a7s|G;$A<_@-Y^As8bl)nVRluU<^lS5t^QSKfBV32E}$!d_YoqFA>` zOQmYAEbHBV_@0VUrR0;}p-~+nr*cbbzllRR6`ibRR}vn#(1)a)K>F{Gb4C`1j}@JG z(xQ)bn)M%-5DDN=vLF7?w?Y*hg*6AcTe*#;f0E??s=N<-C-f#j(PrH0hI!kp%N_1D z)0hIhmVPS!PU?(02l>N_$GbBAo};XE4}{)~5@X8;%=yGEW`<>v(@O$%A|QnEa8~~L zdN_nfBgaEp5Fki2TjdRN^wbT5yLHV1dm(WyU(7=T*vvO6_As6k@&WUkTVdm`i4Ek4 z^$zPbe|{`ey-q-VOt86=%cXf-~Yta4Y| zK$AE7V)N9Pi!&p*e0!JFXsY>1E-`yN_V)BgOrCc#6T>HG7)exQ*eqc_zI;=uM<%3Kh&wJ?=P9M1oC0IP1>Ss{%Ye z-skUN=T8m#CTNoiN92c2`?-;HiN1{o`p=rtBz+m3;U>hpeP8lLpe7#SpRjy&B@UnP z?Ec+i6n@>7_EqQQvQrw>lidB9H53G*P~KpzZ%S z)oFP`<~aVtCqp`0DQ4)z9A~mV=DDYU8fAhFil&hukxCkz(@Xvw( z*?02c1KhWKj45VYxEW!|Gm&+EFF*Zj>rqU~mg`GH5y~W5Y_7V)uG(rt{%EtIaZTlp zAHR=(Vs!3Nzl#Qn@_h+Uw0o zF!Llpp6{b7L2;uy#5q|`X8jabn51S=TQ|PmUra8$pY?+-FC0@vZD<0Am(;#4e~KjI zWJY(r6?_oQ;8C){E68d?PO=kc{A+hOQb{uk5&=&c)9`U!`(Z>$G9huBC2n~{*hlqE zqd82KhSI=lzs?)X)K`B>^Smpc8a)lPze!X273)L!@`AfK#um?uAq^vK8hS$T;LvT z4x6RXBk02~@MFy*u~in#v26J^s&*3z2P(>QTSj4bN1d)|h~UL;$w11R?i@S3cFX27 zA(m2V=+9>=?DuW1i;}~CSRrzV7``m!ZVk~K-oLt$>J5%KcBKz3ub(O4Iyk)SAz3?` zyIPI~CAI-TG1$g_ykhXC#=}+v+c_u$69KWGvpRX^gDL*j%l<9W;rmA;)1p|eXb3AC zD;)EBleeWu%>l|J^Y3bSWqE1~~feaGzd1*g@rn#ngE~8Yi zxjziim#*URkP7LpbCJRdL~b&bb>!RJaq|etHpv?~JBXs?K>s*BvYjEY7p#ta6a!J9 zeR`HIZjtME9-e6S)^!>pQD7I2S6fxe`)2=op7NL3&{90t5aow0WIRUD+~<@gwD>C6 zbL^e($ntKwiKle3IRO%o{?Cu)4XJ>U zwRx+iK1s3FYi^$}wklxXL=MoCF8P72wkkXzmuw2^)69GA7zmG}7TZ6G@cF~3u|JoK z7ZJI^QAd@)gTVxAAcb{Mj%d}{x&QdMdw8=YzuoCcvJhNiw&VLdL8p~MTNEj?jmhzA zU7e^r&G3;e87Dcm9KdEw<)dZXcJL9?L}avnF#9#3BBg;d$UWsA+Y}BtO6{`lcF+DI zGV9x5ar>dsohS-L#i9+v*N_&{xcAOBpGu_^b3v;3^Qh4+dvcQJSvUAT{lMLnhZiX# ze&l>8cYB*dCf!Yu!Jg1WIfk2P5&V(UyKmNXE;H99fb`0%Eo_8@`2K#_-q-JOQun+Q zkZGLq=nrwN=HhmNkFCz$d``T}j%MI~XE4+N@!Gi_-m{*ktC+!O^G9g#hzV~Q-O`w@1wB-BxE@?JOXK3z~w-^!y5OToK+z9>y8C@~95yhL@h-_vsyMMC2LB&t2K zHJdKh)2U09vUCMRM+Pti;M&LE6lOvj{=N1}<@W>O({3k~5QH-@FOc@V=wx7n*vl+Q z`Y}9=yI0hc{kUqYaeauY?Q$@nGzD^B=(CVq@^U6L`6@-%jX6n7um1QJaa6R76QlT| zMt{Ya06K!$X(`o={Dm8}duwODW|VC{g4${7vSVK!VSs({RZSkDTXg)J_(5g(`aUVl zb|!~d`0E3S$%eBM&W^Sk;}gT!+6PuNkz`$*mK@FVB(TP8cM_bbFwMXCt8Wyp96KUs zmuJNrNsdg@ZqP*=4c`daa;8yyuubTY*EOw&BeOrOz`C-6t8Ptg94NGNW8Yq?U?j)B zBe+#L|5dZVLh~1yIuLQTs?j>VS!^|;Ka@`qb-{K9vCsLqPU$bzh%4Nr?vII=6$b9M zH6p9?B`S-oqDaIe6pszdHwNDvY~-9sW$OEov_6~1A<-eK+7A-9SJ2gZb=yp8W&?G7tC2rHCXP8$E(EU#imet0afd;MXB#SlrRpn0rAThjs~diWksX2m?8gf z5NV^_ZUYa3@#fOg6T+^S?Wqd6Iyx5S0|(E#%!;-gWvBg=)Ad|HM5M*u;%!}Vy&ei*8yzN4N?D-5OUF(Iqt@*U|7h#QN>*$&CG?~Z z3~#%yA8iGgp_Y7QO@JHnKMw%Nm@j_xFdp!iuq;FEJhZ=sMQpOL{CU>u=|V#KmLk3e z2lyDZD1GGUzJ+RxrR}U+L}4l6vRCm$Qy@YMzdng(Q7*w)C2~;?*o->Ll#Ck8Ba{#X zO1agmO}&&4FrJ@m^9L-jVF1C)EF8!~24iH(54?tXq;5??&M1!~71>*W7xDUT|6?A5 zOHj{CH0$tgJz+BuIaS%+deSsVc5mHOP%ZhXTKB-WUhjeWYGGW+c0~$cY=Azw9bGpq z{PC8o-UHhPRFr&ew2dDl6xk;mDIif>sd>qI9oaP&QbBIrheL!HeyJm>S6M);9vDgM zYOZUQ+?XjLJTJg@qylEHe~rV^pS?Wa8BgW)Ze?p0ao>2E_FjB4S}ikMU5Meh=g1F4 
zwr(AdmZo^cLP9NjZ`EDD@!@lx+g0q@NfJ!0;p2l0LJ4RUNal%GrSS!Yqqn+#pYV}W zc-Ls#^U^){L7ITpRZdS3^?nvUkSrk!#FW~W`-}$#KUwW|z71M5-#9wBwRQfnB~9vL z)lI1K#o!)Ao_QO_Ytgqka6A+x&^{Zs)cmUP@SALV@tYexkCN6=^9C6B{DiaT(?@SE zKF@YAb+1Mzy9V2W`#HOxMDHg3`$icmTjAOD+<2DIQhCEZ-j~K5CJzo$z#;b~_WyAe ziVnJ;j&&%)P-DxdnhaZr`dw^^o&g_@mz3Cb+K)@T&i~qJbBp zFz&aY98__Ve`4l&`xnTZDp}SA+Ihx3jw_D7E2e3p&7T=9HtUYft>MXImI&gDLV+p$ zEpJ!6vmwcZKB$X+qc$N^x8Q@f!B9aY=6S0If-i)jb{?=U=M0#0aRWlp3f%%paYbgP;L;P@R88~Au@oK+-{vAW05nzZ?4CU1d zCuag@)A{ag;DGx4Rr3SbQM2}yKK~*t%sL6EB;ppeI0 z1OX#3>$F{LKYvQviK8YwY`Wb5Sm0zc_>qJtB8R8fj0NTj=ywai4gDoMQx0kxr&AO< zJXx?~aVUpEMaMN=;VvVW&wKxrYTz643DH~m4eSOvos3>+!Ng~GuJH^VX#XD?oeRCM z+DcGyF5p@u>-){dN`z$7&zs|`^W6XVj#_Z{K+uOB8V+oK^7gIlmeR_vv0G z^_Is=oqb9-+Lmvj>KEy95aMedX`2C`(yrH}y<;3JCi32G`!aFvN3c??T6~v#I^v7B zLc+r_3JB6JkM*fbiG7q7vovKy)8_$tMi)6p381? zI5X!}lEEuypJ%XGNbtJUyzbET>NEb`0S6yRgnmn%=AY*=`oq8`P63~ONgXaFkLU}y zQ0bP^1f|VrmDlMS2#|jNE;$R38Fn&yzvT{QNwLg#?rpWeF)j~xTD?oh8}mXVdj@=7 zig7jW6XvFlQ(>+bt|!tG^NINB=`PO z^7`_&`eHgFcOIvkkFlel1K?`Mm|N4{M!J;>qTT(}&Y1*SftdkIAFrOK8n~21swXPe zhBv(;CG8YCwwTeq<4s;JSG^3xTxd-(hAQ$yuL%PQk&;deZ~kg&`B^M1vv|*lbkY!!^?0#(DsA4Ht@x96<677#|l4JmhD(tzTd0L_+TMHv;Tj5 zuZF|Oc|}X+KU`GGAp7@nI_L~5t9Z|zsA9>MLX^&xshzIMm|K3>P2wJz?cTUbgo$0TAA!h zFGaGhI3`i4;r`}UNN!Fs=sK_-b%|@Q;WOkNr4xkKzvN5QAm+I_Uv*thPlKobXy})~15w49t(tagUKTGMSg+QQ3M1F@x^$R8Q6bl^^lPY78Ds6CeuOR;f{t4g+Q@7vc3|XwK42OjEAR$3*nd2vs#1hxK zt(aTS-hUYLzrJHp!albL^9CNb31FS{!@1LCysrsr*reLPpyGuA=~~SS>s%atqmQ=t zi+HznZAx4V*UI3SI4G)(rUQPG8}NIZVYDqb8#4B7&Avs?@>O4J2H0E2K^TL_%ikR@ z`c2MVOY87*(Ovq{)=}OhR1E$lXFMQgcWQa7-zrD zD@)5I+~1K^ZWk*%1bY4?Yr5aC+GaL{Q%TkCN^w^;EAfSFfRUKVmf; zRyzKgs5V=LBOWemLwNehvJcvg!iQ>SiHDJCBrDXe{7XqEf)?4N}b&^-YiL00dPTg8SK zdT*UnWs~84Nr&j4lFxdZip}>luAY|XCft7M9B1-benRYyCleN}nCjvAgFtbiLa~O~ zVTe7%t!_)U(oc8w7K<%T$ku6{`So%s0T_kcn4m-&>Y9Fyh7+79dGCixxkyD;q2((z$vaQ2Cr_{ zghvCheg>+D4vY}#w^?~aoH$lN`s(Bh0BR5r?4q&HCgqP1jE-9KPPz@xMzN!pS_Unm z2zmwO5o$iDN1DC01SXuByRa(?C1c_(9*rdjGI`(S6x_X(nunJFcwWFyZL$x6D!Y;M zM^sZJ<0M3{`I^v5H0QOndMh>EBW?6-sec$l1U+2+=nZ^mP)ah`;YUHEBEgV;Q(3Y@ z?kHPNUb_2hUu$y^iRD>gU9}37I07iuaF$grSSNob)M!xQo8)1A|wrS~LHNX;eQw|Vj zXIk%V&}*ZY!7ohG%JZyn<*_5OCTpM*IuE}3b5x($AhjGToFMi4PUcy455v`0nyLEf z{eB)QP~O{`Hi-cIuOty+myTJm4R=Ab`}I9Uiak0m_*?f!fZ|lenO%_wj(Nfkar=vd z_Tc({qEd`%C7RhXJM)Pa^+XmFDM`g)V@lZI5@}y|!W1+|R1jzVNmEr-pf}2(MC>P+ zMk?)}gW&xUm0+N^XcI}NTZn9 z&c1j}th^{zLl8aD%{GBjyAn>Ly$FRS^f=qAz)|X793_13{|}B5w*D?ioej;=_>t~F z84|U$kLn~treW$6AwK#w>1eK0mu+N?2U!DQUW<6`4U=j^jdLX`3*I$sEX}8zoyUuG zoaon_ldJ1FK9?o`WOrsB{z_BHMMejo3@0OP=8nH=941sW$IVv?et1rtS|y{P6~L@P5DCnap8?C@xW^0P%gp(Ii> z$|w(qJPTLmM+K1$Klsgxeq?tbWX5Ah%tuFjG^t-}P>R?ysm}h4os@IN8b{EG_~Wffw+A5uO3ODWhyPRr z@O6LHFI!$Kc~IDqWE}xXm!*zUFjPSJC2}D(FTi>u=*i!JIrv^a%f^saq|Id}DNF1( z&X9QD2Rcm6iN1+KdpbTtXs`I1gdeb=cOMUS*{`O)V_5u(G3~9|mDEOzvk|{+nfNZk z>8z3gaFni$u!L%n8ZzS$Eksut<$skK8*?;GG$&`a+&0&$?2_FT>GRIlG#?V;K=1iX z8eU--ZHk5O^?SB9|-3OqWsn6a43JAR$bmOA!%Y@+Ze52P0gDbKsls6$*>YhTz2@5Ox8zeh z(wJ|;!EAfIPlTeDL3GIrjR02t7O={-67t?8U32M*h0K&T{ZlGIN(VfCltdJDEzGc@ z|Ch!3&Y^d5C$TG;z#_qlq}(O)pe8tyNVo)aJ90q+0ss1EN5~pg4F$pw?%rsjh8m*s zl{_?a*Pn_%!6O#VsHpoDpq zbx3Wqv>$#GchSgFm-zl4?!+Huz;f}A#F8?y8`)Fora4vPJ~DOO6Yq-&p7NaBpt7~+ zVigSp4XnhyJ^?Wt{ka&pn%s%*<9R*pvmCe<;J^$4&YErW&RMHl|Gqu1p4_Xl^jg_k zIWmG{M!}aj_%AQL{3-@k1>-0yQ*4pDCbl)EU#_EL;6+-p1;N$%E!p3lu{ZzYNeLUe z{;t=x993-f4{Gt+UKbeZ())&Qk2MBFWyP-L`v0yS3sUn?W97cU1SyBgsMJ#HG-0=i zV=p|J$!4)KaNC(by@{XmeY~~bq=nB*!_&z0+lsqe6jrm%(Y%t0#{O(Gz?%8bm#4y< z=oI`tdnCuuktS$cv-;}A@LJQ^?!mVV3?!K^f=`x+K6PsgbS0?)G2V^z14E9GYN%z6 
ztp^aPX#P*6(nSCBSuV*Xez#^aZWTm{xkMUM6UDPN^Q#2X`Rra{@~g`M%%Vj4-Px`_o%ZT8Ay5AK>(P>u0TbrV4 z^_BDAl?wF+ox8}bcQSyA^t3O0H|gT%vjk*hI@l{*=TjQQTi|1rYN}f`0e>z|h6d#; zE8BiiJ-x_Vtw_BdsBw@XB5Fm@Vp%kqopyne&$Mn7gT9y+>Z+#wEF{5%=ZpI{pnCS! z=Q-?QABnEw82-<_JuVYL;p;>1+GpuY`uK7(56McKVc2Eqn-dfc6&cKE_d$vE*gs!^ z_q8=LGOQ~yVd(`88$ryFWxsjtTMESzz%ZkcSg)$Q7r}y(GqpbWcgTTx0_zEzM2N8f zCy9lli^=^C0A~z5V6UlTa#4q}<@m$IX zzB0a(0oIuu_e-naSD`(YzJ*JF0)L%+32J+?(&aO|?mk_84Jv+!^K2|$>bC=u`&(?t z%|393{enz?7kqhg9XWK*bVN5eN$rV)5Z1wT|H=Yu^>v)J<@e_Z(ACP5{O2hkt~tJD z^jD2tLDrJU6qfW<_Z)M@w2aL=kDJ$GzdJ$OAI=9~U0)3*^dm}?T_Kv+#Fh=s`ev1M zW|`E_Q`%Z8%b8W-a+vV{(9ZvsYgN!$Efc6k^krZz)_l%2mY)YWG4!%Aci5L!L0B0C zJ{f!$Zf0k|0e>0na-uSN1b~s4S)CokfPqE=jIr z7jg#eP|P^77|W$hk_;1~fs)AX!7={KI^jI8nkDx4lhZC;p3Xc>fl$?ECc`li6U8Ix z^`vWD4<|KA))-<|qwV#1g~(udpa1&~HJt?mYD^qLD|ZK;_^$mMC{>6`SGk2cFu`-x zW-3B4J<*qll)0AMubJ$ju7$pR%7~7%{3Ln-j0@tM3P3Xy-T98Qo&q=Vf;<{k=E?2= z`M*bu+W(3@zynY&HXryWippx6Y|Ur>YFW6ljk#(ku{aD;$>N*$go&1mA`}ABmG5R= zjA@Y)niZ#AZUe=!4KO*?T0uCDHi^tJ@b@y~Ra5WF36qE#Hx|Av5Ewn==j;Rhql9Jk zhc8AFf>M-hy3BnS!-_7gj*(O=?>SN&&-#U?$^kGmDddK+x#m)3@Wtkw&(YF%ock`; zs_b^tcIg(sEOFnHiLM2$@>reps>mdn2`k^Vm9jL`$l~TIGz|o5lVHEjBIPOcCv5!F zP4+pL+JSv}+}hEe+Ct;fB)t;q@qES4(b5u&L}Iff20vNR{iC#wAIlr(P8IO><^ulY zbWS)Jz3)k^b8WzI35Y6L$tj2Y zICFl%@v`6fk^WrYxf0ZEtJpF;-TC@j?VqU++@3bs5K8e~M1}?SRkeIMoaR|9X-4d% z(R_3zg$ZLfDvz5q-CZ7Ff1lJt=M{T4Ugek}<3N<~ZT{lNylYJ#M?pOd>rXIs&HVM? zy~jaw_%0{o_KQ-vzPO&ahlres6 zOnM*R(msKEM@2fogARalUVf@;T|)YngB_^ozOk*bH#~eUidN6mAcILM9Pc`QqG*}b z|C*xoJ`dV}fh3TuS-&KO338;TTlzaXy7+s*2ZZ8;vYxj!H+u>S7Gye1HzjMvdQA1E znYl%!y}wz3U%45oODOh0h}`ri_n0|PurFySC=}|?i8q0O+5#UIi8}UimZmq5NG(aU zGOL#=d?%hPB*Axd3g1?@m|uo}3UTRizP}5b$s*U0){UZd!fzbDo5Jv1uqGMy?!~BD z?&mfa%rAK7=coI4x7CTc9e;>G@tT^z4doUP3>{jL)kJj6eg{N~%etDP>E%qSvsCOy zA6tI6ZUUS+L)-1f4gTvaB=p^ATiVTc%2wa}ZN9Dd<>`cLe={9({>7nJ$z>7M^s$$l zm{|7QeSV~F`{o_Gw}bWPym!o2-h})H{A9L8y2bMtTlqZN7k>nuFOi zrX+))U!)uYFGB;AkRrHM*k?>Z_8LsjkSx-Zc!W9tdqHzUxiR8@cr`(51dTIH(NH8JQSA%9*e>=pi;mnnSZMAC9rNxbiWC)usGA%4fWA2<5MnLn#n^ zDU}tnRgRJ*o9k+5lzazKanaNn3rmG#m}y)TV_fl!H{;E+ZMz4!3+Kb;FAp1)0{MKm zdtbcq)Xlfa%bShea>ji!sox^+f^L5BQzvLQc`DULWaZYo2gJC}L`FN_bXk8ZDjY8P zUa_4|Vu3{U6V#73JJzuV(Z+FY#TREpHV6g8onj$`VQn6SNfS1)}m`|4yUc!C|xSL?cI3c4wT9>vrje3bM0 z34>$I|7;f=o3lU)nu$ISBa1#wuj)^3j1@~+SWlrIOgcQzqpMItY;?fB|3a#Rwmh$5 z2xc70UiJ)hKlFdxPLNjZNHv|f0g>~0E2c=lnC8E1bFqzwTcY$B71yj2RU^xvdL%^d)qi@xACn z)0=2q>oCENeIJCC|3+bUQXHRb{p|!lX0*gU4?oz1q+t)bF+7vz-S#7_4Y&0Dc5((9 z#R>Pg<*_jMq;#3o=LD#1FvHFK>~a!nAXfxbVF+ou}pr;NgJe+Iy^>OT)htc{GR-*%1B zI(F_G{D_rxSFhj?3Adm$0o+7RV9q!_d+4O1r~9rE*&3X`G|bh+bTfX>H9p0ZD~JDu z_|*&BG(oxSXoZ(9Errd_ZfRx{C1alkr%lp#AC!OJB?{LdG{9Syu&C^R4m>f{KdX&I zvPNh!8tR#=MI*ylP|6YWv5)`~AjkxQ%1L?GiS9?4m8kR0mv&7;tg>BnkDFHP`rRpR z3#|)zzOczFM1T8lZ2nBLX#Z-i?1*|PXSWQ&F*>jv$(SF=pRWMDTg0iE zt>@WbL>jn<$n)8(KeOkZfL1}2@Cc>7R0>EeTlb#_A9T)NdH&cY?0HSk1RV^OI!qFN z`ST^2mYHG}+Bwoj8qWT~iC>p1z?h@5AJ7@;7~IROOM=X}JxRB5l`9MA;d`!pm$NR{!7FTDjo?d$*jkz{MDpN&Vt*4KN%TnyR-jL_oYF~Pu4oK)ZrcQ3;^-NH#b88ST z*Uzm~t%{U&-0(;`PUfuLc=1Du*c1_(e-m%(MTlzvi}%Ys`i#-%1TRl%*@e^HJaC1x z{nj$J3Y;6xlJ-kJTR(qLoOwrN=%j<{sO5T#(_e!CpCw2DpTMIEM9s zx5ZDw^v+I=AvUxOj@3!O(f1DtCw!}{uR}ThnZH9J=|?XeQB3TWOpheJKBVu_hBcel zkoVDrVLoqs3x6eLfOz)?c!Kk+qM&Q3t{M4|8K3U7M*`a%Q14rfF6v674>v*U+2-vD zvmvP?I)2Qa#GXu*-2#>hbfP5MK7QZhFWk?XWG$c23R+`}t6cqre5}F#Dzm|5*AC1= znw!&vd2%x~xcChJKgQlWp6WOJA1@)YSIElV9FCF9luaRfALlsADkC#QMr6-(?CdR0 zWMr>!kWGn%qhymkez)GA&-eHH{quYL{;EfRbng3g-}iMrujh4L&rAOK;$#{OJJs}! 
zKrm=z@+*3~I3+xE-><3>=hf~Y0maA)?mpb8&)85YB59Hocb#;gcY0yUIqe_9ZtmfK zD9u#KtkE$|yZ)J3juNSuTj)4DWPeWnKW^$8-3QX{?3s#EIZ0qa!i3FZKCeY^aJbu| zDs&z_y9%pgW9b05t`3Mq#$4tpb}LzqRm@YueVgMd{VxZ&4mmZ?UhgQ^@~^Y#6i6EI zfrMchTrU=L_xb(o>EVVxRjwN}RSmxf-)V?``q7c!(OGmQq+)Af*ybCdvA^ z*4fE`gO1E7hmSz!a}aq({{sAw6-G$#U{7he$>aH{4JZVCy%T+7`fFD9=Hk)Hpc%C3 z8(JQ_hF(d{O!jJaT(6b(^Qib+zH$L;cM?KHixnCLo62C533j_8Xe4i+F}$^!8RB9} zEGReZ>DT5_K{F=wFw)%mvJ!@EyN4P#-*X*_Gfi}2dC=|@C03ar>}(-&B}LG_Z~8e( zZ$QX!Ln{YuCweW7Hu1J{jnAmgzPN)aIkcDpO76Y~|xmd*Dlc=~2^ZQ6{Id@8DJ2V^GUxIkb8o~sRibpfUiMGoN;>?iz z?O1UyR46ktrSve=%qZDu%?=;)RkMTw!;I8l7!PM`FOPF_^KPZZGB;lYqXn$jl(?ke zD%M)URpw)#6U4}Od&vGc-bOi+(Ypm)CG0VQOi{w4go7!u&ot4Zx42P-Bbjh;d)A{V-JsTs*P(+vH@={crb)C7}D_6psw~WS1e&Sh~Xv z`B8e^yjsQNQj*m(L4?LpAD{V=v6*i=IDG9&PS|FW^|HNZEQ*S*wgJCIf&^bBit(v_ zLq<==33eH$PpLPf^aJ8xYRs0j+Yk~NuppdgRh_C~qY8PA59NW9LseSG!(}1^^c*n- z1&z7Z;}rKP4)%Y3`*JVxs$t7C`q|7UyWOB`CyW=%Td?k@(6M^oWCC}BC<2Ta*l6oc zzhFLJqF=cYR!2uin_B$LHkcW%S&z9^Ap(G6_WNty3#QGW;^T?oAQzm+)a}{>zD&1w z$=TS_W4UX+3(*K&3YN=LRJ|)x$te~iFr#ivjN~K2Dckec+8UYj$90zHTEDVU6;@N< zkH-&2dpi*$nQ@d@!)3@dThxbNJ0yCQkD`U_(#$H%KT6?Yw%Z#_MxWkTc51h{`tN$& z#Nv0v=W1_MQ^z(q!V=cRC7|!=$fAl1XJpIPULD&7x;%-S)UJupNSVDxWs$nV>zGVx zpCXZ16e!6!eqZhKs`L%mI?_qJ5hNLqi#FX5CB}~Sk=Aeg8qJeHDhbdeS9m)Wh`~k6 zG(AHQWxwbPM7U6fdnAL*&IK*EG_!7+eDj{t9({%;*Dt5uVR10ky~`vM-B@QM0>6A? zJ;5{VGd&FO{_Sb1BH)mwvHCXwUh2%Q-4{k;HYjJRgD}Xv+qN``$KZg(vB;clT{WU zYN4K^&y6ZTbB2SL`l5ACgIVDvD$c1Cu?V3Vq;)Ph{skH_b?#N2cK1R>2czVW4M0ZL zJnbW81dBt%v)~zU9fugr&13xo=lZJ@kCl7TBx!OUKldtzg^*!W*8;!3{5h+_^N03w zMF}aAy4H5f!ifP`h7aFBc|f5yPLfzWkwgcjsGUUfPO@qd6S!d6|1wL)y)aFx^92nS zOb(eZrp^OjMKM6mI_rdEzeMJ=^Y6Vx-bct6Z)~E(mhu+My&)5=N7)7<|?{1?_^HCN}%6k4NZS;qg8R~-HOwu+9^DRL);F5#sC6`2 z6))>oHCnW*5U>Zl7Rc1Cq(gd(09s_*O4gM>hML$FPY{dU-O>$#4chnEJz90KPFZx0@D-5r#6 zWF?}HJtH{s4#^y2d@JvV$!P#&x%&zzpL?$U-0Zx-rf_^P3;$ij(Y;@#!NS8jh=ten zBje|%crWc09FKOE8*A60Jq)~%UTWVpAp#$AcNDhq=PPF9njIQ@qmR5+sQu;Xq3bfl zy-p3X2yws59gW6E%3!InRXpQHrB?qwOWB-@-TBgyR@5Jo%WHR+UGS~Rdi(^BCc3A& zfRFww{{@&>ZzH%F`M^#pepOpQJUxkaki5KxB0Y&^P=I(eZRL)0#Jk%l6))ohlo;oX8qg}CzB6J?fH6OKHNELgJxey9K+sVlcXgReJZaa$d(fNq1u z)afrl;G%gcbO{yAILju_ZDw5=tS9OZ1HApypXNpFjh7!2UBAPks%;8sY7!E~c>6Sr zti+`G`Xx4mRT{oLG<8n#FvU(jXPN=doFFf!TyxDA4H2ymf+I)sH5LTm$&2B?)`L+F`wPU%*e#`?V&$kk zRJ#^G5fYDz)A2ayIx8Q;kW=mtvfqJPDMBNp&!-G1vC?zioFdY>%{-TObp_%`xd=h$ z{YoVqjWwpebsI&O>@U_%KvZi%-lqx?j0T3yUU{haI`tBS6EzXZ2#OEbkl~1Hii5zo z47Q=(@uMQ@=iJx0b`C2J9zl?WXL-9xLs@m_FG4<8X_GTWftPj@JKHHHN;!=E;-Xph z*?7G(`my&A81e)U`YO2(PvEpm|dT>&cA2Y_ai5<&M(% z;E~a6toYcYC^4Qi^{2$zVGoBQBCT_ZopkrbwRFj$`lfm_4jteXwhM(Ho#8tGRJPg~ zWi!|8Cr!S)-?hbm+)beed)$19h@uRJtg)xV*|D@d7d8rVx@W*(YV>`a#4rJWKPZKv>Oof&uPuUqb*Rke|4#Jo5x8yc?#!08wK@5+hk} z>F)`aB4`pSKn@1kcrE9u-hf=@17JBQI&-ZpVnG4+(@HXeT5KXRplav*w2(sK@Uk>- z2!kK+wuwf6#NtXlL*i+(%lErH+so(`UO4uu&nd)j-)Vn=1}zOd`{lK^bZ?KkN_}-2 z-Wy{V#-z{_+!Vh>T7KfNLE^=wK26fj%<4u8vHGAW(+7LZ#@`-|dvyb#I4y)@ zNuQgX=PU0dGu7<{;Xk>ros1epD;1s~-Bh+rP zH0{i3jAL@+;x9fOHZ!EQbEsa~?R@&;@9ISPs;T4@lYI4+eq&0yo>%vu=1}@%ISY<{ z1D+ZV3`gZV8=TZ@Kgr)#K|Yl_@s-+q8OSYdWn)px#w7H~W~-uf#^05CpsXixG^!=K z9DD?Vb&(5wLfg~BZMPQzK($(BfX}GAKUwgCvwbiD7+hpdv)yld;aamHn9+4)!@F`# zsWMHev?FD_cI-7Z33r3OupxZjw$CtCs9?D1WxO#xK!k%C?g>h`+trfR_h*>xwtimzhH3Nw-=(T?kst^}!I+zl1gGaa_t*&pa^BiIMG>|Ok%SThw zLF}(+GrKleQ1zVhIh3uvmnxxX(IT)ts$J{#vTohg-!|3IMbri0GrD!O<`PXewvYe* zVN}x*b(?en`4v?AeUXpJ97&dCdNQP(?8pE>%{YDi@cU!ynKPEM{EpJ)5x18Y07>UE zgyZ8^W|OsqnHk{-_uuy*n65>Gmai8$Q1_w6 zMgwiJ!OAIGLU6R<#H|H~H0#!&s{+1Y!u__~YwrWIPz2L|2V)djQ(Wj=Q~CV?i(??+ zGoBm%>3?sEXPEX-aqODHJv%#=If)dJ<(GS#<-F6pW0=b$l?B~=v*tM_WjLItEW-)7aWVC|OX2`gMB)pk{+qEz6#vi~^ 
zkVfiKeLN%KjJ$@yXa%fA*p}^|yIf71Tb!dTL2n!bBrCfz_P9}c9i+FIm|WMBzeTYO z%eB(NE89(uLgYfHrT*s?d{?7AwFv&=$niniW!9u6=egRO;c{m9y@6ugfvY0NAYGJp zpaJsAZz}F=rDeiu0KE*=qb2$IYUWhBJ|x)>hv0kacJI8M?qutdgP<{|l%t!33adYGqt7UL z&7aspKNOJ&gq{1SRw36dy8phV|MB#JPlCMTiwWtQSb+Z9myPEm7VR8t<_$BNsK1UV=4vPjz5fn(rG?yX-1FU+D2qH+F_yf0i#X^Y4p73jREhO=G%kPLex5Zo+Y`?auFwLzVBbXFfq)0bz-7Mxbk2u{I%}E-(+K}5da_B*W#SuQDVuWY)2*`Wz4Uiqk3M0diE;u$6^}n-oa6cl-w0QF+ z#o*?x0R$2&PK_GD#`lo=u;Df~TnzhEg}u0*qd+m}o)-HIH5P@XJ@W-hvFdQvRT(UB z&e;v8zj>z=WCQkXiEw!CbsI+q=mA1?NSPU_|0;v~qz#rv_C{H5ND1nA)_qt7^zUZ^ z_XeTx@Dc<#Nc1;-sh=+En%tfHg>A|c!1(cHv7)cF99wyV~ zq=^8uhFkK)L-ZHNlxp+XBkY=nT%G5)WL+M2e)_MM#w_fKx~GRt%suY`Jw|i(nVhxd zxIwDeLE$oHBwq;PwQ@~Ex8X?DePZRBGtHBH_?`pdK27obp7@+x|o0m<<+A})ym1_(+(!rLf5-J+4~#AiOOQY)ED2?&Y+vvUkgvi3dPz?RL8=2 zUxK1k=nr;gS@%Z%a0HOf&vfFbG>2xynq-2@xr!g2DeL93ru|r&kdqR8dzJ*}OOQzA z&oUWBwDL{=Gg_j{}>UH910I|rFA!Fe(UMShyG+2S-0%l|`a20p3D5YXMX53w{ z6%$D3yy8Xgp3XB;5B0inp#XAwL7u59>ne>sVc0b3n{s40t2>HdJ1%YJ!&4fydTB5* zn%&dtqYKMxs<;2(X%E95@v| z!KZH5en1^;n%2Y{PAdEL?(~Dw-8N$6L+&UUM+mFD@7~%Brj|l%n$L>Rqtamsah?szE|ZawJwDi1(IbvKT)*5J1KE`J84< zU8A2VS(x7e;3!y}z|3gRGM-I?TS^q!WvHpY+=8EGx-XuC7DID#G5Mj_?8a-Dk-BVv z`uWwVEyBL+x0oIoF4{djKCFR&S#! zup){q-k~cy5Se-ZadG_BK8!~XJr<3N?+4&BW z&P?KluJM|lyFL0y?CU@eajgv-6NkqxvMVl7b4J=aTxG3iVlI-chdB|C3`MF9fZnCE zd7tkl6D+?pU8r}3If1-=z`V!E@R`^o8#*6c*2iI8zNJ%}Dy;jh$Vh}wu~ z==Ax3P?xCFj zgN%E|RoXdR?eMN8u4Y?&HbiFA(|o-iZu$&SHYX@+RwxF~t@qna+|Ee8LspZ?8S&4U z5-#stb!CZgHRw}q)P&r>2cf^KRYC`tK#O`DL(j zjX3@O*w6Li82e!v-#r$>iLI}W-%>=a+jANk>=rsw`(>SGr$2aaC)`kj{BuCf(d*MST*gGrIS> zzUDo447Gfo&?c0Lef1|sL|zk4m@lq36WMEY3%XxZiU2kWxak)}?b=zl(;D`j3&_{z zFN!X#i~=f5$JFco-oAl+iZpA@MLY7LR08oa|8dZ`8ni`SXlrS6xgkr25~jGJLVh=Y zIh-v3wfA&pfZ6@OHGs&b>*?FzX6Dcu_Q-(hE-T#XeWyAJDEmytDlx!KfYt)9BXi%kiLO)e3OE4oS`0h=yNkPUigQq-x=F5BU z;IVQIzxLZstR%-*&53E4Dg!4<(CL;3h{GCcY;d7W7oef?UC_!1W~@G_DItoTi=fTs?mt{mCp=3ARKRfVKE zhfo`fh-sZ7oP~|2Cr>Lw$h*&(L;Ovab-%LLq%N-im)zjz6~>H9a_83&ilc*q8WsMU z>}mM}>cf(#5BJv!ua+5D$cG^HvcFU3{EaV3J~A(suN@0tEXodOtg&&KId*v9Q2lkr z=aWNw)XJyK3ev7zmAC(oMcG#{BW8A-0=W3egKvLD z>W%;VOc{fjpOfFNYhAMF|6h#eFj&z4o6-EK-(kq3T$7%jUgj*QQoVk5vY%{?;F*fI z>CsmccHgcPLQ$}tYqeJt8R+a%A`tRHz@co~Ees0^c2=X?}n#qi& z?@A45_cNyi$+_dS>w21Q$5{9mN^I=e5MKtqoy-mWVx1-fvoIq(=zw6vCrnZrgfo2^ zl9ys?8|ujoJY&$2N$~Y5+OuOvWd}4fa702}er3uvCno)Zr9Km(HkGQvTI`$C+l@Wo znC+95cH8mR_?GK(Yx%?Cam`3hvlhtks0~N`f%`(q0QSkCv zc*=7yC!oe~)0UTL+T97dn z6B%sA`XyeN9>^C%zGK?jDCnz8zHtx0X9k-ZA(w*{C6wQueA;R8)8&G=62 zwa4RbAt1}}_{bJ~&3;%)ryLffEXK=CF{OF~w;SFfk*|!gRV7C~J(5_itEat+r?8eR z&r||vPU7Oapi%i+k&V<%tgxB_gY1iT8pn^R1iRPSvYP&)yZ?%Sk9rfk6Vf0VfgC6g zqrf`2+zGc7N;G{2vzX52P84{_l8F7aj_QlDdqOYd$a7ug^2kKla)Opz*_&mUk?h-_ z_0weVF`w-0#Iv<-qwK^5qZw;w=jI-ggJT)NhEp#9E|D(bEYt?~rPB;@z!LDYBTD3y zv@}SOdf*&D!eNW^y+p@s7Dd16+bEfEZL@mhV!PKLCGbN+s6UbSXfHt#Gm^}fnFoY0 zqe{oyn#$MkLE=J`r~b;jlmYe`B4mNs0E~x=Df~z&m>FORGck|=Ww0EGb7~oEeBtOf z;K@VPI1C{D3M7@leX%8v2-oU=?sG{IUZ#I8oxl^00&WQ7T^U}1fS}x(dou$<_hhi@ zb}Dvd#2`GoOtT%(6x%IB$|#`0J_j4$?_j^t=E;GcGH;l=vSvf^;MF-i{fwKy|P((}YNA3MZ` zuec0dJcKzZitEhtYG!r6FAP|R>UPRB6PrQ(XgAx*s)mB_$3S@G3m{%YSo$99H z?N1CeZFzYVLLAzs^SE%&dzzK^LS`1^?Ha19@7v!M4N#H8-n$!E|)I~ z|646H&FaI;yTh+#XB6t;X2r)eSZ+6`D{u*7h1#P!wMVLo6Li$;j=^Mv#vZ&+aj#TGL65WH<2q zJMY95(RCS5YS`)I2`_`Aev)#{pMOT}@>Wforvyyumn}0RU)Ug%ua+eYmL^0aTjG=e zz{x}BX-awU+?h}VR54)ss6pzx$4V7qY`#}UQ6=ek>&WvAWRXlDoDsWI&o2Qr36eUY z?~xIx5YXmPL_?fr!E6lAylmaO^I8wlk*MTfyT+4#N9Y25qTnH zN*BCLb6+i@IN|%;2ze;zoB`oYN~gB^KD|n7WqVr)!ZDHHfANaIJ2LU3zJ6FtR-fFV z@h!|~vZPG}H0NxlsHrSnHhqEp(hdWq8Uv5w<CB7s;BTox9%T|whxj$py(2bB2 zf44uJA>+Q4$v~lS?LuuK(1b&h&sT!x&+rT)|HdtW= 
zE&GhYX|877`P))33<(&5|MJfbzAlZw3-#h`PCs+T+PLnm>mG73=U;fWnH(^Oe|ylw z8E05#v^@;y&~Mc!2miJ9i@dnsZGp&84Fmk^*;3A5DUVii9>ocg9=9#^CGzT|b=fFI zTug-xUC17(CfZ7G+mHD8N7gFt84x5dTLh3W2Qf>43%Z1$=Y< zwUl7L-@^Ry*`{KXz&6|Yd+q-I;eM^=>V4$nciwQKXII&~qKVS^Qi}D+^*C;Xyw>)3 z@PIOyPyN9I-RsOlL(gv}zrMqV*Fi)s4q*wRD)OJS*^SCcOjbXAx+M;dH&Ca)Cc=M3cqq* zcXfi^4Ak0#p1d;4-K{5SuD?;6#Jc^M#tG%E!?#s$fI^bvcYd9$}1R zD2eg#+si#4_B(yfz)!UfXn6n1F>~F@FrqWJz=O}#h5fx4F_ipq_{PIo(S}4J?`c?g z^Sbjh&K+Aku1{TM-yZp|K91&l+4XGIvhShzMgl{c*j(L$u?G+wEH|%fipZ zDsCqN+D9t>>SIJvLeXs9e1PAwPp|A8Op2Y@H$rhIKKAo0_xR$CNv-$u0Vmb8@=I~| z^IfapF)~Q~tFvOuw0TPi<#_(r_6CH@MSfh`iBA%D0Va(zFM1NxmGN??um87h*wS|k zkoi0H{N~IX0H&6>(@XLTS&hXS9?F(Eb=iq!^fiM-oOTvaFfmKWbnRiRaq}uT+bdGc z{9Ijx;1G0sIh5tUKeJQPJM~8*5v%urfay(@Y9CF>N6;q2|02N+FFEb5>%4H?w6ft^ z?UdE=F6j7J0wV!4%p~LUbrK&@3ID}vH;)R=fhN@aCjIVBAiZC}XjCT>{ejt_nyv*K zCduY+x#^npxUIdaALo`92m-;eEU9~v1 zD813Bb;ZujG#KxhCi*vT)1ZWIqK%?Y>e5EB8Atehttg1%o7QTyI((a&81nSx?95wU z`g}kB@tPtf;-`#utl=woti05-$fC)nLZ>~E@llW&D=jS;>5Lz)4nYTnN#L1XeN3mg z<={wU_xbJ9+V?1zg}*oEBLMdc}{{>M1NHj+hP8!3aHqKh+}#(}-$1 zG*y1`Iwm}b=h$XD`=!>+dF6pQHKcfp1ACes(T@0u*DHKwOv#9Q>oZLJ9ryHFL8_Gi z&l~xo#}72t)qA)LzePw)QF9BL9)4nS2GhB;Qbm$u@y+rNUWw4hUMqGV-O9(K9HrHF zsqZ4@&A6TQQ~s?=#88$cnK~ovF0Ggh9PB1yQoki-15!6Vp%vRgA%w(&JDxMsM41=IK-R;f5i<9OC zRMz{_^arRhzF)=mXw_+L4=ZZ6pq$ZI;r0@z&CB*faSk6V>2`Ho1o+ zxAA9Ios1aXlJV8>B?!#67?GUp$jcuW%1)0Hsnp@p5Hb4$jBGvd%riB1-l4CQd9rxA z*bVi6YLVohdPIO?`>wG_8%K46JF35=tKN5~W(eCFH5P(Lkz-0L=18J}mcPrz*5{_K z^MF8F6BW^VtOUAO7AR;k)kxypQtAeESg75(DYu?Flk7X{aE3Ky{f6=ME$g-L(^_fIlC^)DC$rdGd8bmP7}Hy)PMO>|g%b&dRUsENf~ z@Pe1{hsVJt`iK;PZmF~U9Y4dn4hfkK*ERk6ehn8r-BZqD>C~QP4sz_JMlNB#n#q!X z`igdV_lh>+ia^5G%8qcZsi}M^*v9q>A3F4G3J)pjB>Zo7wBuA|bgI*E4U#3Qj2)Xp z4o_g|SUc(Xoyt!-p)hd^2f8k|`UORL1JnXdw%hm}i&uzYuAY_d86gu6*bf*&Mv3yZPm67zO^<0xi_jAATFbCifPX zMo`_3#@hCYN+0#E+40<}UAh7*bbc!(6!*yC5wAvxs$NkcJGR(3RfV+cfB~1lt(Bk@ zb)Q4wtAMDvIAp&??Vc4JeGp#9Prn4rLX(u5&+61ij7Z&rEmXOYCkIxA00q zsg{*{>k8~)UVg7;6aleX&{{n2-|Pi1onvO4q1}w$uw)5ObR=~#)|Z>bx1`8t>iJ3c zTYA6MPq)&PTv+!(ui&90HPUDDTst8Xbc2pb__8CCbCcuW+(O2QCN4-~=bqRxc-PiG zZHy+39^Kijgbl%p-3T;4T4Ytj6Ua=PEj~1?Rr90sU}0GrQe9l;Cq2JY2?@+LglwdW zZYDZWC`>3|H-ey?ZB>7~1_q^@Jjsc7N$-G466#Km%s~F`Ll?%|5X4&`8o5V=!NX|V z6#Q7Aw@s;6iWO33o2sKpV{%1+l9)$&FcXR+TPJB(bdLxfM-HzTDc{>(=&uUh#f!qhR0 zrY|RzwyFNw?e!28RZBQA#xp-aEzAxNMnrH#8LH8YV67{E(q_`JwsZHxmSjpnGajE| zs(peZ{FftDhPg+owihw9F7dGV>(B3IbU=*})-!6aiZV^`l98#N<8G9|yD1Kd%t(8H znt2+&`Il93zwOe7H}mH5k-&IndE1X^a4r=NNgRN_6N0H4?_se!Q7mK#<$goIN{(Z4 zxgiLw@&;)`^zWuh7bEqd&Jfc)Qfl1xnf;;VfWx}>jXmPskhCr;uIU$3mh@gq*JV#D z30NuZrHnglEo`4|HJXSXlzL46Hm1TYsIt8XTTy=DsIK=J0K>~O#jLU>PR-hSu;+gy z!u&UDnGm98Wm2}kor1n*+weXSBQ`jR_QF~j|P5wHBQ5G67mPzLLk;BLbfXx zY(Gbo;WKKQ{FXGGZ{lzCy%zn$ipr+Bzfy!DV0 zEJXYg5KB`tPIMbx2F=(9HgWZ)CvJY7nwP$7Aj)El>Ejip$xOIl6KWVBBgx({TJ!4!Ub^92*^N6hS9B7 zQ+8|?J@**;t+Q%Ub{3?Fi^!Q=1I6?P;`q~S%IpZs0qfynoH0uMlNOjRxUEZ+e4S$D z_f@-&z4zryazK4&i@8#XhVd@qZc&WD;4#&ICzXB`HuNF&qf(C9mjTJsO2hQ9n#n}f zd(o6J)PNY~fW?^IiGY6;=Z3L**Xo%*{Ly%Jub&bbO2H=b{Xe-%c}(d*paj+~Au|~} z0y!aMRQz692J1svBp+D1@wL>NLdN9*}bJfgORI- z?+4yO9C_s@y6z9HUZplLN*i=6e66o5n^aV08x)7%nMw~lW{cbMsxW<~?^3AZ8Z}NW z+h=ZJokC&fOXBdc`~kITKG3pU_$Sqew$&G-x{0w(@i3>AAbF)h#K+OY!AzAH7U{H_ z&_%*2V9BoFEAZ%^wg(-n7XF7%==UOLNg*<;7W=uVEA4t=Vry&F_cU#4kaBd<+}P^I z2Lkn`kxh9ccznG4=v8fY)BPJo8TmX(hP9M=BcygHpcpL!nNa6m@=o z8r~w(!Q4~OoFwp8PMQ)HW6lG!EmAIR(+DxOvl_Wt3L*1ygU#M4p)oWFVeI`ZM=rpT zh;lxNv+H~nq;b#!Oc-MN=;3;U#WfYE;%VhQ zzT86zufJqxJ|8rsHQ_@=x!{t8Mpp$>XQj$p6uF z;NC4ZYG5LQO-kfB$!H?AUgRQ%ZXq9EIjDGDn17QDCX(o+Q>FKzz7B{Bk5vtZLekmW zQVf4WU`Adth{J`g`z&9i=wbUK)W5jnUTYqR6?^Bon~ig@h5t;x7oO$eQ>6~xjpF_q 
z$#Co5&RMLYye5h*;8+Iwj2wui-sAu(Z&OWk$ycZp6{<(5>*CAH7rXC%6H4@I5{xsh zFr(w07NO!HK3n_Dxv8*{Avus*rEoBt&FUH=;gxYbku7TV@+7r|O9*E?e~R-T5RJ#B z3<~)RP#VMa?NXf@H^a3L_n#~_gzHYfAe3r+leWQq^F38e(8E~LuZchC>Lb;n?iHvK zW4H@lbx22lF~CWbZl-D^=pj=?#kkoA{g_SlptP_Q)t6pZ-7W54v;VX_!`I<8`o2y$ zd(?%+x+{;MC$8yjW#5@=)!?!8T_uMejS!zb+LQKi_EJiLJI9A36BJ#>d3#xWrHc7_ z4Ce3gjmN!MiW@C>_n(XpY$rs&OxCDttnmd3_F;@Q6c~-NuiLFWht{@?+jjBbqrY5`@Zr9*-#Z-7lS*l3$#m>MK z=p?9`(s5SY8DnCpRM?b*i9Ol6W^64S{a(F@%LGUcXyD2N`PqO3Ew52Uf(<7n?)N=r zgJ}5T;-MEqcQaKfx|Zjj`;W1k_R@(%DLau#<%&e4or(RBg)NB=lf6|t*u$PokA^+5 zi!4j;Hr-`3`_O7aXV~k0{u|*GNz~pq-%+_HWEJh@w##HB_a4@( z-mm3K1-~b*ZGJtzuhlS6p6YWf7jC`23Xy-dtH)Ec)e&b-uOU)D8Ayb=s0@>N6E+hl z7r_7W6CxeC6O1q~S?i1vwikBZhxH8ECJag(#)g<~q+f#-YKzv%$-|5y$o)$vYKhGT zq81OzCLz!ZrP!HvC=-NgC^b)=CXonpQ(O-=Di~tQqh6!%u0!V=ik&I&$O95(_fv~RWa{2S4*&!Bp6f^R>mgjw<-D9FN3A4S#E-s##4m~RAD2|@aE=yd$gSK~& z=TKPKmqpuw^IaW{{3~41RocQxkx~$OpWg@a#+109SkulZ$Yb#_yBY)WnULA5L)y(w z0a7tFQx&k5RWg^lJ{n|lXnUCOqerM~k4e01)ZfTUb@>~Qnl~n5?m+JU5i*YXEva>F z{yQboovEBBVf3TLx2V>acQ3UVyQiv}3G7|!W=QBSrE%;Pm$0Ibg~BP`ve!m^*D}4* z=u(+E$S>BRcdtpn_jjXi{Y#gX7#VGH%m#N;yPKvth6u|re$Rl4*=TWY+D0OSEKiAI zoTAK+aBEL&d!vqWR72j3i8k8t%#06MQHC%o zqxjMBvJYE?x`1}Fyl}Nbce^gN4T0E*j|XluA(|-U@7v6K{i>kX9AbAWh7`H4SD5v zEORrxAcKXm!Zy~0k}FV{3fc#qK*H%21U_Jw(Co%SiQJ!N$4X#TQ1orL{wcPwz1%@F zZTe6Owk&L159ywLIp6tFe(UpQz(r)+e0Kekymgb(65i$Q0E0Gu2lw>@v!!@Mp^fbe#ch(FpxFhXC<)xWo6SCb9 zUTL5o_P;ZQZwi}7Gh5Hd89y>B%lM9xmj6#%>=VKZ?LLAVmGiO$VrpLN)z>o6VQBd} z3f*aOd7z90=8F8-8+a19au?DEG{lIYWbSB$wQD!suB~~=|3eR{0sY^&5;FG+!%BvQ zQ!h!ZY1o*F84NOADAYR%vRSw|0 zg7@`LGOAQJcy@h9(TH#}p(r;Zpc54DBnUi9;di&BZ;D2~5 zYTn~?)m;M1j$NyM=PWpJdF27hX3nF+T@r*B`_C?r$>MJvFKmD26fk|}-=Ee%%45|3 zper06-j?A4P1NIcSW|N6Xpe`)OJ?CSOP4j?bo|>$Wt3#%t@m!vUe6V@o7=bs1&9X< z%ud#vG7w{BTF%C<2ONEYx*JiH1^7*5#y&(JejIMu3l$7JTOtvC_Df$fgn`~Y3`||* z)%DXniuJuT&vsX#U93znLZcZ4?i4Y_dU6SDZ3P%#H$E`!hDMA*tOL*fPFeXmPeP>0 zz#jg|khw2k*8 z?}=%U)bBKcLi%E!d3Q1??4pOUJ|%I|>Ge+d{4d`M*Li>YHplZx?4G{y@CQST$%1|6 zi^=ALIaz;SgM}#QkP??${lT1H>x%p)dZuFj{D|%RV!!o{;=-e>)*X5Z1^LD6M`QOz zPQLzi#K+_nittZxi)Ut# z>*R1THt>)6yjge}k3&X7_mJ%7X?3i>6KApEw&DBX)eK6UY58-;EbwN>iazp;aUZXi zQ{pK6Pg1NEk7xVCvt%}L;hu`?_D{IP8-Ppj{&|#2bLafTtM%{L{Ctj`qa%u@_&Gm% z;I}c}LhQ6S*DFXb$Q=-hved~rBmmRT2=145%^} z!Oxd-kv@QHL+(g@5lCGgNKKbzmY-;AAP{Dh&GoSw`Xzsu%^W>%a6o^6tPJh!gRHk=GxMFCu22wVqvSeywErMHW#|x$pGxHhozDsw4qb6|%D^f2DIAZ)Qh)x_gx*1^ z-8osq$iq?hm0G+aNRtOkg7oQz zLh(n&Gr6q<=9bfbHu9!lF&~9nZqcZ!Z3g}f)%3wdp3HBvO=&fJk5D|{UhVY@Ir3@^ zd=Xo9;Z#(0x)hpft--_a`)0WH_Q>Yk{_LiehV9Jx$_qzCaIWITC>!U!t1&I0B8lMq zguYTE^!st!*fW`(((vr_%q?k``e*5)Rx2ADJ7u-QH(_(Td+7{jS~$;(mQ&-Z#^>Mf zv%HuQ5Q{vB-<)m4ZPP!P`}^5F;S5&-9@<6fjvirnQH9`XKAxj5p%C6!-aD!Ie#JLJ zS+goWr@D)r=H>&+_w z7)T*4Ui8X0Wq#TlCYOIY2!CXFTE!Z$w%Kwh93RkY<@p*l6Y3*5KlR24!4DrK72h7I z71FhtgqaxB)g8Og)+y;ifrOePA_sIJe!SZ!g!BfKccSl#b9OtKvP#$jCOQLqC=%k7 z)Yj<|qLKIoQynt1D*E#T4F9f&M%Kk0f<&vww0RGy5xuM<4bEvK$_5cF=4LBXd_RJq zi|Aw?C?{Jc{KbkS-J$1M6S#WOH~%ZCjlT6zuhYM5@Fw@!Z*AVj8GP);8XY@l`322 z<~a?!zCioBx)IV-kLaT&)tnC8q?*mL&J_3(9km7LR>1mE-CfXkijja{Y{a2nx%W}>+|t0%M_@;>K#znpk2 z_!^?BIlh1US8+?eqIwlt`Ssi0Qksazeol8fk6O1vMSmF_e{ys`66|Os(at}pAEGb9=@G#J&DSmjh#QYRy*68nLo2SZIHfDl@otDB5ow~au_~L z4?90A3(P6$kxGm}FqC+s)nEOL44DpM^PXF`uc)8rjtEg1*m#gYGGUk7@Z^(R9i!@~-Tvg;@Eg9#52 zHfJSkv(!jhl|^zIB;JkN&EQ(DiNJYk-rSqcP`Og0g85DOG$BD^QP=jWSV!L%Zdx$ViYA&=lU+s=HK>C!9!ytZ zjh0GhHRM4*m0wPJyl0PBIed?yM4cl$$sxxvL4NBM40Wu}m-~EBo}b#eqV3Ao!q(Eb zk3Xtz&et)jnpO9a(vNqt4W8#mD`Oe<3PXvLnl=#=K2*r>g;){hgjV+_=XTUUmYO$O zRm_9L+XN<@VSbk=7n6vpgGb%nNhpwt(?xR|L*P9oz*|Ul2%7!kg=sHBa4iDW$!y)G4^2oHq 
z%HE9JDa(_+P32dQT5zn}Z~mOGoN{<6+WdCb5Gt!-r+@`4q3PZG-T77V)Fqp%x8TFg zAAZj9Hia~R4O`eiX;tZh!loh=3_Te_cmbYx*t!E`O$7kuG+#xDyoG$&3M9Ej4fbJD z`QG#EAofk?8g-DsrpJ$o(n-85g`ER9*qi0QaFlA*a+$r-i|=Ah1^sbbn8rTU1gti#-v%S;-< z{AqqF57P0opEm>rzaY_GjhsM#BeQr&nco+&GyBPp6($t*PCwu$P4*kROqfT`2l)TR z*IP$b*|lxIf`Ei{hjiznK}tn>AxKF`EMftINJ*y>(hUm`P$}tHbm~HoSfF%BNJ~pM z?8$vU@Ap3Y+hc70bl}iy#&ykkp1B6l!f*Rzht7NP~9s3M9-pv>D_0@wBKj< zbVR>~3C{Pp>A5_VbVy@q>Q5wtX9c&UWr`^MHcc1Q?iCBa>p7diXZmFe{_d>N zv3m1Qo1#0R6k1L{H$HtGrt17lYVlsv8cKu9*VqSo>c%l9A?XFjOjq+vfbPkGp`iZZ zAG&PC#YL!CWYwNV?|=JV8(8=N`rI!iqiI#pr1zU*&2VvfwX2lcXbW)Qd2LSG{pG{A z7U^i*AEjPMi>gC$BGp_|OjxUAw4|{%clHIafII78x^k}hzSIswF+8F96MT`nVIc^u z;Ql18j=)P>6A0@yH2VHQab?}e5HAa#Erq|>@z6X67$Dsy{%jPg(Px~Kr#Dc^D3oWM z@oVg%CS8mbu|jbi_6wLeBqQRhNNim>Wn-kRG5O`^byDu$Sv?OolcdB3bbA>xz>ugZY9km`kmgXSXWM|viC!i3n zPKiRxKWF|e{EMgb+}0tRLc3Dyef=I?eOOh9cf*&B@D)~>>j(-e1*&eViR$^zO7ict zZFw%o%NvRk`}0REL?!{a_c(zm|TnyWmKBPWG$p zDdN?U_~n;7@?SC%e#Wcn5~boq91z0$vQ_t;i$!3_Ja!z6IWBv@LqajAjYyTIw%Zn| z3d7g2{4|!y*wYvUxM_zEvZ3dUT+S7;-3R1t1*uPWkrjrgnJSXpumRibX`Tn$yQCcE zAcSt7$`peo`Z3B+DGiuQo-bae+M(MVddM%`Z8%X?}y zWM$6y{iMxn*YfS#r@vZ1vBo21t!y-LMJw+%bnWUhFO%aZFa@i=lD}J?-wb)CPC;<{ zeH)4^WlTgGT9GQHY@Giz0XmU}(;?KV$*B_E_04l0H!y)s7(@{TJ1z(PI#F+A&yR#< zx}MpQ3g?4v3h_aDRla6k9XCiD<|H_9p0o1Hc2=->IZv)#Ekk5bYwh!^>~)yzNS=YV z^i*szzs@a7TA4c3_oMVRJmj9&gRO-@+zkpOrZE1fpf#>Ia~C;>KbGBrT2YI`?3r53 zM$Yilx`GbQ$)~G6Op8dmgm=B3iJvCgt)JjA_ejeW7d-LXOOW3s*>WH9$_<4j7JDy} zth7ui)l|D1T@$kn;hXj)w6s(m8%=*LD$J(q`Xw$P{M>dkF|!Hu-*eL(bVH2Dk5BUK z8-R%Qv6ko4M{7|nECFYfWwEL=%>|}tU2E6j4l?IvP|?K@hH~W4!eTYOh;gVqclvQh z&druuNF;t`rF1V?hvLB`sNZ{GS|7VUIyFQ+kJEikX*p-p-n8-0jyZVJKB9nF*qZKYIl?E}JuU2Db?yl7PFd-!{j1UFm%q5;V16w9Oq5{wmdB(jnm3>_5}Z znAtEaw1{0Bwz^qm^C-HQL9^mXf+5}r+fdY>Vy3C(RY$a*=elyaghyl{SQ%giBQmtbud=R52=^v`ayLAg@FmO^f$oiV?w#8F(-SDI_NH|30 zwSDAQ-tQA01_oFSYk2$4IkW;dzsassG?vUSU(U%=Go2K6@mrw70Wpk!8=M*_<)>_c zcPMuJOTwi|Qyrn?fgNn&ln03Nsf{yoI=kt$aR|BjEpLQ!4*#rm{kZJZ@8>BDezw$~ zhAU3AIE_bOq#r-SuzRH>BEMS>JdD2kU1aue`Ff8MB1TV_Yz6u_gPk{Rs%e=U#<_(k zBY#&|RV_B)9z@gV9fJ6-e*SR+?fOtb?tLdm0HoA(IU89TfB=dtg7N^w4$RE|_TzO2QQt*0K1Ymm4x^ zC=8YnZ(tC_?M>)5YpTtb-=Gj9Rk>nzjhlXH?xIcACgYhbP;~f zce&zT6GQ(KOpBSVHuX_9w9!2|z2E7dNLMaw$%~!}O-k#3N%jBWfB_0{z7e>NR}rDR ziDe3k-aT!e{jERuL4hJo%|i?=pZm<^EHu{%71Ah$T%PJoRa<|d>qQ1TN-#iD+Ka^< zqZo9uXe9v_kjXjQnn)v-C+$e6EFucQ!xDt<(iNz%p>1cJGdsA(PrrAihmOl%PKS*_ zbWdn1AG1O@3QpeJYIRqMDO;1+=Tdq4f4(K0KM3%zOdq{@d79hv7#T z96NVqZs#O>u%0H@dDLH)cP%gFwTX8_se86@6?xQQ0882uAwZe9{9QC)-rOi z*gQi?)*qb_eSq(5t9ctTse3$8Ko1MT3(q~m-FL3T)k%ltIwLg@&H4A3(H0rrbaRhb z=#6>ZlkMc8U>%CGq8HE68?5M|ydM_g@X2KopLlWHyca`A$^6vuA^7<6?HKUS5FJeL z9QM438xZ(s5sIZEuvHKpdbPE|3C;yM1mQZxXsO$ZT?;)thy3Xfojka<1I)6Ad(o!T z>PrMVjgl5c%hl17uy2nmT1~y~bk~-FLL+HVn2V@92EzYkIX&Prom^}Ov@W$xEPGVk z#=yYV11c%eYZw)=tXg;s~bIb(0()3ykuKgf8PN2A6cB80Wno z2xdO}yDRAz_c+jS=jxvw$P@0*!E1PKoAs^kkrbZKt?-cJJ&bnsk2Egl% zOD`Y?g1lewvG(rC->4l6_n53OWt5<~>HN?8W&xEQ7P~zsb12@+eS{J%pWm>*mPe~~ z&eh_1B=L^l9=`j*osxjQesQ5ORsOU4P{bDYR%x;ePTfwT$vsz`ROI`3=_TIH*zlao z3rf$?As=K|TXcI+!zZ`CkgTa7`pOA9ykW$`WCnMDM@^rxK=>O;ry^t)@F#c;D(l*mNq;rCk%OH-WRC9pqMli9&;DgBJQD2x2Qv_M9%se z#rntyF6-0iKw4+4T*9?+&2MMNtb&J3P;e^w`mS)MnUA9BcH1Yi)Wo4Oq^eBGt%D_~ z&=a38z#m)quBaAi{ZMO>$q%V1jShHUFHiTkhsKjIVK8=-~+a2NAAh{{Bc_iY)hn|gM5ZWZPIBIGd+(3>#RtEN{>Do41LI zPr~b4-Pj3`#bem>6oBB1Nt-Ay#}f!f&}yt|`!l%C3vu8U4ct>iDEuumxmYz5w3QRg z)~~DW*AL=X9ng6>wRj?6rML}@3W&Z_N%0+-?*!Z-9AxLxZd#t*TiIT}$yS%&#c<)H zgXcO}1jO@F*dDpW`GE6)S#`|q=y!NcyDk^qJ37% z28|tqg$?~eu!QfDLt>@$jsF0O($V%*rT>i`C5H>8g#7DC(4$^?^Z#m+$?h*`5Vd>s z_y4%sUKVshCH z!jE?9pAVEF#VNQrY}F%Y;>Ek@+p@Nf 
z{a%xJU-yKuK-vyjTwDn~ZS%6INr-gK&md0QM`UWM0%Z!YCv|Y^h?L0Ihj^ldfp=xp z@~5`N@!Vv6DZw3ZjdAbU7F=01H3I8VoMSES;B)3a&pUpdN+)A?0NOVrr>i%c5PWlC zY?6BzMNZ*<2wqo=RH1#<*m%Wj`tSCVxVrh|t?%owp*KJymay=Xk3{{{GtI=m7jZLA zax+#FOC|$Wt-}rZMI(%_6! zDgAz_?b=t3G-cW_>xz0=fHAV@DI( z=a7xjLC9NdYWNw+ie;aC^=_i(&@Bl~fRzR3lD{~ITr+7Kg|IBVgF~`{AepIOT(Lh( z>DBxQgOq$kn2?7qn)d5k;fdDP9q`W72QRUg_9wE;o2?WGzvJu(Xa_afu9)Wb$Aagb zrqVS*r)TDK?GKM(V;V?Ka*{_Px=#|28ykJj9K3HPb`Eiov)hh!{kU5R{CTWlZ?t2- zhLK9>yz>=cv(m!~lBPKf^OxtUy2FBQEl~52rAg~Nw6nkSqU$kdEUApLxS9YY;tiQ( zei44OF-`DYn1RLPPRO1Qo@nJdw60ZT)fpImuSS-6?LD6TkkO^$kPJ!NwdQBkIGhQ6 zV)+P`3{jzHBiE+CKlE1~-Rj77n68#zw%&U?l{^c=oio4pR{U1#kl@;0xO?LpSEBki zX&gM>>P#l{NoUa~LsQF+v(1c;PypImpfQy>I7@a#@2$M>+27sGCY)5Z(^+)I+?%`l z73d3mhDsSvnvZ&XZ7|U)aWin~CuP#({I=ukvbN&!sy>xaShKA1%+R}1T_TQc>HdK? ztEI7F+q{1$I7`PS`n+A&^KW$C`rQ%M3F@ z$h4m7wzOYt5`R*8**-saec8w~P#Q+BUNs%HR+PpEDK;ArS+O1a`mAAt1N1F#lmLb{ zwkABALI#tA5YcBn?(GNYp@0HLIBS;ihY)G)Ln}{v&POrI z>O7YYd(MOsN5+L%V4pZ4a=1~liwxaSA&h<4Y6=h}ldv8=YKPK za^jq%ZUjY+@TzXyu$B7f2HdC?ob9ug46 zj~TAj!~v&vMnr*RF?{$U!t>9ZSfHH%ufv#mLvvO&GR4)9TEJiL=Wh~Gt+PK#mr>Nt zjk2riAm9%C$TM{&`BF`N?0txODnf8xfen(3{u@Jg^zOg^6I|oIuRViSD}yvV9n zx+WaL6sNvsmw7M^g>rM9R? zF*r!6LYk#&%i+e&VE_i>@HM!Sn~Df<(ycW4vZ3AH0bT>1XsS`g3qGg-_f2x_USIyy zZxg541AhAeTFt^aIsLSAjk@kGv>F+d-p9)cHc@+7DVde%cTDP0$|v zH*P|3ZXga7(#}^DomS88>7J$*OZ!;8dxGFGWeAOT1a-%ezMkL@Y(qxt8+L`*U4rRU zx}NCT7|1KX8@$8)b>C~lkVfq#X#y)GvD;c|A3OcaZ9$7Lgte~7{)tk@ri^6`F=&** z6SKt8&+$&EzOkPlg2P?vrKfn0Ld)5U3~QW;+iB73>Hj@4c45N$7~v=v{d>pb-CJi4 zACpE8vg?CAca;&VAjS;f3u%Y1*HRxsuO9lXrTg3LG2P%2V9>{Qd!Ren|c9q(Ha@pnfxlLjd)4x}U^c0xY zl~E9i((D`AfD}8mI&3tq=my9-eD7^sXg+T4TV~VK+MU1pp1e-h(P!&Xnj}g#>VNXb z%3xG%@yU1xiG$Bp1$&&&F2L(c@n)2TCg;0yiM}+UrN!#R&>&yD;T+$2<#QT_E!g-; zj_WohX;sU12$pjIXpGCFr)yo$#LLng&tS<2j3e$SL6qN?`1swETS*T%L4MV(8~TIh zcX`7?WQ^Z1PMe^gYLkHuaP>VtO<(Pl5Z=t>f>?ck0;yMD>5k*YZ^~yal^$hZp3evO z30-HI_7>uNT?*!_s0lu~yRBqam6<_=d^iq~;ZtVjA^*dX6u6z(v9=Fd?2Eu5=p#$- z624->>IHSR_^ba$=SVphIWm1{VSsf3NuPA^U+O?D8Y0a(IR?2-fPDY$OT%CFl1lt2 z8GEq%?^gZHO5l(>+_@1-2g|;puHXpQ(R;_69(zLyG6#9$adIi$x`FOV9ow(AhL@&7 z3E}Zagmf{I#*_Y@*{L46w{|P<)lec(^n@NUD>@BC{^7~6bJm)CbHHh@=+~X_{P&nqN{*NrQq06`5_T@MR@p`@VI|2AO^0&~N50 zo_Y4JfKaZravOS~$)`MN_{LBX()a@V8w!JNh2-P+&eZ)l7A~Dzi8x7Ro#^?VWsYC} zMy&sXHjx5xT=TXiS8Jpfd6MJ%be~Cz7YsHPCLMmG^pE<(zZOj4^xZD6T%2zJfH)hpQ8k>)F(Vg4FaDH;6lVP{9b+UnngoD235QnxnC zG*iJzCz+{n&1{Ino-J^i7=04eVI#Bcb)3kdJ2jGCXj=$@BJR{_QmvUo%M@ejq<`^n zam*`6>slsC;c>2&D|LaWwiNsfrV;CfWJJM#HbAj#(?3b;|AKpl9k9OBGmC+?6_P@8 z_daNKQJR2-GJ+)}JrQj5;R=v(_oUsgv%JOiexdn6hbySScBTu-USB+I2aI2up!qEB z{$0Q=?OXk5`+K1wG_4kBOq{`%F+efH%x z@wd6UX;%%3ZJ>CR(BrrQ^v;(*?6W{TL8ulXc2}(@H;y5X7~>pmE064?GErR{0l`RXltdMg2vV#9B+i5$4khc|6Hxhc$yX0`=cqp3OrjMg7&+$-IUdbP#h!g^IO z#T8T5lLegNzlDcFi=WZb5b4X3n)kmuVAlH?;r`td7{v1YT9rR;)Y0a9n_OJwsyg%O zj{O2i{(ftzxAH`0Ye}0^kOx(&K0|=+98l*w$u6Sp8EPK-x>u>fipNP9~;&k`cA+zsPphACC~S+y|2$V z7e}hwPXoKAloVXfxAq=FeT5^1B}~F4FE-kF*N5saucHnYIwbFT=UFqZ_nLZYI`IUR zUdX<(40t_dKlckx-}7AX@7v?HXG?i1hycQU(D}D|lUuJSvh0i+q>m!~z#({N{HfHj z0kc&LQR@`Qc-h34kv6g@J&C0K) zbgN5wV+RuF2tdvpXr(_Zzl}mOf`od+(-80C8!As0in#6QT{^ePZ*{{glCIENk-=`L z&2I_-XQa18KeE^6SL{+zIC~0Z3$X@LVsd4LcZE_BA0M;N@+EdVe0Z1?ziw@*+YDh{ z>mN0BA5>yh`@AQndhhkf(*e5nXoqjQ%9UfW_z%3iRAq?9#CznoYKARy{AON{&qt1E zv-aPMOlHZq`7`z1^yYrMov!h}s%`p~KB#{;lmCg2i)4uZWS##7EG?W6X|&rYtu1cIi%(xm z+I9)?P~qm{eHRMXtH*Pf+qDW;!Z^))c}Yzn-u<1_1hTFjghuR8nD1e>@6}O3Cd5#> z<-{Dyi2b@@;lo=-(<)SJ7a2TZNOorawGL}*QZ->!;Ga7RU*?tCE)IrSIUWS%71{0!8c1OzMA2= z%KWrRFvE;U-R7fk@cQKo-}YIl+{jDQXG7C7finxCY5jzB2-bSvLVI(x^pv@J)L(0i zxhy}Q>S)?`8UHtDYeT+5vLbcKN;+DngH%SG2Ow7^VFv#2tkKfHs5dyAiR&CO84{?A 
z2)FW!Kc3w7woL93QaG2zl`bk1K7%AHL&rCeSdAi{#du9DGY-MteQFrz3O+TelthEh z)V8V?#%9|`p@Qc&L8`QUK6iaZQYNx(+TyhffpvXRM2OY3 zUFwdmFY`H;I(WQTibSv{mmv8OPj5`rFj|vEi%>AAzHo)S$RcvwZFt$^X*6~GorE?f z;b1Cwv>vu{BRP>6YXI4O-emwWr`MxCs7a_Nh4=4&S`dZwD1Y2=ao!8Qk`_fp6VEl3 z`~J?U&vJgmDg-4{GzG2lPQ6#U0cbEyW$%X7bOHj(cy2?ee>FQ>J+&mFg|a+w%U zv!aQx!yexD+rg%09wlR2ADJmoI zZ^>TuV}dC7a)8e_()z$P4bL36*z3{N?DLXW0sFDK?LK|03Oj!XhP9~8nk_rC%&(hH zd<9$nrsl@A9Zpy!LG${|(>Wad{^ffiV~jGX6A!gopT|~zd>?RHXm)fcOwBX^-8={e zS=)IZ9QjiG5y8J7x4;rLMe-N{a()&BI7*40?bCjPu5z_H&CJ!0VW){2g1cD;Wheq% z&US^r!NdTJ$K6l*(aybh$8(lXb^~H)@i^MhRrR$uNMjCzi2CqPQQ@lysKs1-B#PEl zLhvQsQtL3Jxl~^%-TiY&2Cw^u#{`%|m`b_8ZHsRKh+@*YneH+Ut6R@^t3M5iIe8dy z=oi|??Rxw8mq&3_tJj4@@r_)48MU%UzjH1yU;$GA**MNSVX{5z1G+d0r^o%8HOkN_U}RY((hPNW;83G%OJJQSxv;@HAiQU}`b2~2b%2TgN`RJ-}*v$9X6t`?l8?W3#jbwf@IwEWdAX!NE7 z>I1I!=jTs&IgauiXU?v!MP>@$b%|pCNw1iDrLdOOIzu2Y4r2@m0ZpYe*4eU4f~eEO zM2?alGCLnP%J@Li_d50; zo}7f1EV%;u*>%#2`i7K^f24@$NOvFv7mEYMNrTAlUiQpmLOQTBi(lqHLfC#?dlt+g z;e+-)egv99q9%&@aBzv+#F{Z7F zKRO5nSgxp?QaRenNx<=_Tr6^80FbVuol33Fzw?;Fyj4Q9F${&q10IQdO&2g`^pC&s zw)i1cR}=Qi$jF!HISn)Vn@fwJ!SHR!Wx7rMHg^810tlB@sjYy1<*<;SiOCZd)UO;g zpZJu;iq=03mbIEG>%kF2BZu%o-Aas?*C==rYB1R$okz(jeTffa{F4JF4EiB7-GpGc ze|C4Yf|HQm(fw&qKN1U?p@Jvs>qvme%12P{PYBdUk1B;j=8igr@bA9Wp?;fhYj8K( z)ah;^7ua@VN?cIM?WywD#}&ZC`dJkV6DOKt&C{kx0qPZkfszeJT>m@h|G%)k(U1~@ zfY9WUnZ*S{96h#RWO$6zSaDsaqZGI5WKz_)ilz(GWlC(V4IH5kQ5vSnh!oLm<%;Qw zSI+d-N&H-yY^^!WIKX?0Lx(k`ko~T@z#dZ=zb$8)5(4i#neKg3=O|f|VikoXOvGbS zBpy&h+{y7<>GEZKj3_gtDnGfY5eui0OxrSjgm%YWzCO^>uub5wQpXid>@W-6io~L* z?Cg!^VKo1tvd&2gNBRhNq?Vx-5m6Pas+I>19c6ZFgmg}}(7(c8UggTgqNXvQQaZi4&2c0J)I+Cb0ul;1DrFYs7hO&n%=!v9)hWXjrW*{Wb#C~0&LonH8(QZDdi;GtQi*3CYZ*77NU9@@) zF!J0Z2-7(NT(UIYsJv=oHc99DCDQj zyJW3{X2J;+i&q6;f&=^zt9ZP4L#&|Bg&TI^$}Yw-P$zC zaWMliJ!ohkTS#EvLYJ#YXc*m0<@Jhj49fbnTHR8};+P_|QMJR#{^RcG^pvizq|y^! 
z0ZyvGDGUjdsMP!=RdfS?+qwrGieHiK0jiTrH^<9IaJG9afI@dnKe%S- zn(zmj24!mUxEHQ(Qgp|6+5@+~d@H1;OZVD{^bJXW_0rbLlx!oV}GSDM}(vATlclS$I@<-XZfoiY@$I}YKPKivYeG?v(O4Bsr7KkH%HZn%`; zn0)?k5vl-IUfWBVRC%MWBS2h||N793)RTa5b}9k7@SLkqsRQb*=;He0zBc^t2T~u( zy66vs8}2~QlF`d9Gu_}=9B%3tg9;U-Mu3zO5f&`!aIL~<+H4~ti;wAN8u?FS>eGc-p}e*h;IrWOLEXT=(xXlAIZPQNjJ z#B$;N*-BB;O~)ha*tm^sTgP3@U|pgJsei}WYQQ)TtEauZ*EHm2oC`D&IresTb`yUQ z>9|j?As)6of68Hk9Byq=B!ozwL+*57qXCL$>DoXdqYRcs*ju0UjPIB$b|6@Fvf!)U zFLkA!XhxH&>6oSOCG`;_+|L&trTwr)Io`WhR~j&$11Z^FXeHV;Ay#^kcb=cQ*P0f+ zv_-+;EOh6^dxV(=PQirIkh*X=qgjXJY?gQ4trw6xM#H?0&&3n}SyUJp;5G>ys5TX=XPo#pU zbbAAV1IXeo9|3~pXaZOCw|Q4!AHtqIxb0(I%A>E&d^Z<6dxVQ*B5VwzRRRn1HK<^m z;a?AbW*$SVrNgYTNk)yY6@lG*Bc`97Xi?(v+HycyCm6k!zaE4*yv-BCMln7J!WLiL zDz&XkvJSy1At9Ff9h4WnqLQS=infdP#RJmEmiY&d`ajK`;+*RZz7VOls1<(O_pH?L z;pPC{Ojyh?{=aI-Dc{NQ7@tX)dv|MVfk>#HWHy{cM+7^$ERV16FIvrZmjK{uv;%z$@Qg^Dk+ zqo8`PsW(2oP5@q1nim(3|B+o0YIZy?{@q}GKAE60^(wtB?QDFz7x+kf{MoEM-&7P} z2!*_?to&BU28@c!Gr$S0(u*4m`>u^OE)|tG>K3SI|iG-IvH5ey|R^~0y z^YaM9+$BqYjo%NU6*HVP&Xh^(;f4t+iqltdk|MWxsOr|xf$(4b_PcKeF0*LKi4 z(r#E)Ntk+XtcW|r{w;Q;&6GtrN{v4~2#I<1EXy0d!#`M9I5|MKV$7L^1hrdA@D0rf zlV+d1(WN~9KaEe`4H3bGFhjjg!Drws~r_k6p=bcRA$J&6~B9_`G~Y#ZEZ)R+cY- zCNag`av|@T9_Pze8h<;nB+x%tB=u3hDOA_^Bj{m_x9C-0_|aj`hhl!B_AK5MwUzwE zp^-XqY`rr6qYzUM7+eWp6HAEL{?NxpR9ILS%ikgU;08aIcv)!vb6E6zrz>O=mpUdU zaPxE8CD2M{1w8af6|vl?;rDY{5$OR_CSToLMls>VO$ShxA6O@qWhwTFeTQj5iU~?7 zlC&s4r#lu|&O%-6k4aZ%Yp0zCS%#0plwMi?xzqZ|*kmGV(3h?g?1+Ef2+9aaNt7cY zwO07`p3rq-$5Tg;4?u>60*-j!aT6A7ZzX<)K9XO>BO$)9X2vLcE5a0TGLQ6~Yrp7g z2X&~cjdXK`9DMEm`!;ofdt8AcsU+0})^jWnH^&i7bin-Eoq*%!$%k$_m_9=C;*v1b z-CVs%7)kc@>*N^G)%NC(6co0C?dCx4DOza0%#o?)aRgIFG*+@4TlT%nyaFAIg#!n9 z*t{!ye?g=QVT)17BvOw+v#Ao4lm0zY!Rm%ij)eRpwrT=%1{^c&f_1*o*Qq95ej)Vr zaHQf%?-(R}@33RDhLkem#9~gv{Z>^M2=TT{A(qIJ%l;J70DE7>=~*zUpCJ>=YBMU6eDi zzBh(5!1|x3l6PP-GohE#;AED!mrz8Nn$}HN5+HGwcJejSp0J|tFnU?s(;3+W)ew#f z-6`k6J?g++YSnJvxpOn#jB>+vAd*u*l?>dM{MOHiw7fhys&6+PNWp9S2w^(yR;fu) zy}0Oq6(G7{lpeQ!S2OFwt`jd|zGszxYq!~%(>fV_3dc|jUUncTlc;Sbo5W}P|7PHg zgGR3Zb;w0GPNO=DG49#x$hV$fZOvVkJwbE_Uabb+IO@Or#%)$XuKRmrC0>!JNcN@6 z!ep>gRRrU*OHcFcp`gC7Hsl24`j!NI6SYe2F$1aH+S!Acc@H&t8*_auIM?$3aMHJ( zbHmCVAIA0S%WPJ@eTEizggK*Hr7OTHdb(F}eO5uVoa-e1UVYZSFe@>xnB2aXvn^Jx zf-B=78mS)yD|u!5$kLY^AWf}D3sFL2rL6WX;#G+GtB&Yt2Ub@gc})9&EBdlfxhO!; zpLE5CRICZBxbv5u&>O{_*y%jIsg|Im*;>NL!x4dSg;j;{Tp)zIja0-4|JV}~{$;SSYvjM&Z&8}Q#rptpP&t7F0#a(RSY~^2Ybig_1?(4&povWqoJD?+NXJ_~m=uRq6s|RMgpNdCa49@!QZ+6nxIVb;UG1GuI zlKz;$i&`c0HRQSXS}c9EJVvj#q9|>a0T(Ha{Ykp{F**L@Q$P_;Yz^td45U6*&M52A z(cY8Ore8Q^qSRuI8xh=mtAmd8Qn$WtZahFXh{gK?Q$d0m$JG6nHwDz$p=@#!>5Q}MSz)-abYBKVE} znua;qjv7b2;vOyc4?7Ih0{h=Mw_^SLEzV4@W4w^Y>ii7yBAn;3XadDueOOj+-Y9Uz zT>g3TA%ydJh&aAew6nU^$D#v!bz+mrb>F0HmpXh%#FY)+TmRpKC&w+Tazh(`TOE?R zjFMdblvfVO{tQGD;`wSGV7#)V)R)^>QvbZtip{#L9U?9CW?VM|l!`lXIYoXDkrj@% z7?6!55C3VFP{4D$LPm^yPTPFXYteT)_PYmMgmM@|CfkiZdiB8lZ!L42&x#*E`k^6# zdVbT07VT+^92R1DEl&@>kP+s&)9cT}i#Z~bXs#rgN70s9c|hoFsUk2wAO z*dHnBs@K1;(|n*4Q0W4n=gxUa%VE^mM-?l%3CkCARXzI}sWyQ%Ha?>9iYUYzG@oWc z0|ra5afxi9cG(ZuIwj_RQ{|+LxdoHkwbl$DJWSZ*`4lymxHz~8Dd8(`U?tG(lqorS zoYALhnI@BW^i>#0{`-n)=o+X5NFP{nFC+>y6rre}ve0rd23Ep-N7yrcTKpcaDLgq9 zLdnJxrxb9p#(r-lnTl8ph44K{cOxc&8_cm>plk%9FV!WqA(8?q)rAG3?mgf3pxi-S z`u6i*?my59mUTXcp*CaJ3rlm0+?xGf1D95)tOV<x0o!jBL@-UT_A?e_%wKrm%YzDJ>}=02xa-*dLfRx)>dCqC+;`kYz`a#wJ` zM=3NaEUW<5wl;U27UC>DoTk(Wc%k(_8v8MNnFcb6%SH_s<$7TsJLY zdcPIXoQFgzi$$f1shC$r%Ma9*^_tf5g*5@)sfbGqq}}_tPl?ErfuKT1-u*4(G>jQa zPLE<3J%+%DZeyvxk~FPzG~&6v3@Nh6*7$;cvaHq<0ny_9k%#&FwmroKFHTqJ{0mx6 zwBtt}4L;3-iOy%O{KIP8A9s|}-F};RwO9rlB|CJ*6|DmXAsJBYjKkY> 
z@TA8$%DE&$lLbl{YDaG#Dw=dmv!N?m&ccuVw%+&;JijW-nXc==o4e{I4j2HEn6nam?RwS@`Q!t$ie22}Zt+&Ok$kfM*=UcYQl5=%g`T z+?O$u#dE=kE*$djzz+m!RXxv1`MH8s?M{mo$$O{jd)QR?qzO5pC}iV9j0z%3pyabx zPeIL{Et(`p2gGEj7~%e#$H0$3IhKRZiQ0;G|N3NeJkVoDks#wAa}7dK6hq1$@Vruf z#?PF{nNCNG;jtdayIc_{XKeHM{uJCHJN&R`p=_6&&8&J{Pe`K$SS6q&Js!TeTm2&W z0CW%ad06B}G6K3dY;*kUobN5?i#HPg*-Uj#InZi}3mW@8@g;Md@=7rgk6-$(gl>_->toAaAGboBHCfT% zsNk)NP>k~xmOFICRvzs$>UzGYHLd}q6lZ=n7c+h17VGc4fB=U0_sxR`}Q(b`$z#wl_ zMBQY@D$Ktc;?K6G($f9U5ZjZ(hhV!QWPGG}qPV#sw0Kp&+1z{c6(bQRa>sH36ls;y^9r4Fk611V_>>2w z4?1cug7HLWfi>#8$mR_^7WPEaSUH0J`GXLnGpPhtYui?ClgUY-j>L|Nxy4zOUA7l9 zToJq%O~p+3QBypr818D0`obnq%-yU zO>0|qs~}z~nTk8Pz&Kn**Cvs2rq#hdB}qAOpe!8Ejb9vSLYcIBl{_fLFDK)dX%DUE z8DSnmSvfduW>`BPPJp-BP&4NhkWBrx+gI;+UfN(Ax7A*M17N=g2sQzJJT^KAcs!*6 zBN^qChq`pvr=T?u8)g(ZR?Qn@s)*|*+G7?Ekfl?H@o8xN`@gO2{zFA6?cVP5WZ;{A z^YUMx*(oq7fY#BSrWL-|@mYaw312!-nhVij?-c%)OQJ)`U7y>OE5nmL? zONk6jxjeszY8Lm_oaMo`~fsj%Mfjdc9m4(i;)-}Q__`x zCh2H`=9&IgcjVZ|<~XvIUMlNQ!522dMrktWXOto@jSu9b1&g2USfx%AfR_?K)vev~ zGk#TaLgzD@Y@^-R0-Oa;6T8anQH=>q+X{OCM1<;(tYB3&7Bl}Qov ziF#JV7+H9>Jq5pO>X4SVFRxUM9F#c2mRh*=Es&)Qe{R4iG#xT+93&D$t<8?rLf z=gL2BF&a2wBHsnSAzg@?^O=?E&o=V@E4KtYYbIvHH>*2)?P<-(ky*_i7L`7Jx=DF! z(8dl34>tSE9(ROF@W|lsdTSh9(L8~3^tl%d*HOg9jXQ!|BY1GIZjvXBJz5gfO zS`k{U8Tjo6=Y*S717>}r!d8ovaDSzZSTxOr9|~1Mt2^Q1h!&2s_fTPSpmFY6QG>nd z4Tl6=98Ip0oBjOHi;^d`kFqYMXH39kl%Kz##ae@>>*E7Sxz`6%^sf%cg>D|9Fl2c# zS@CTBaa#fL762`!=^z-K%LZE_ICbQ;p{4u9UYyyb6$TO>gQf4N4KRN4_J!pN&Mv`5 zou1G2lcT0b1;$~dD~}Rv9i;`UZ#;fECwqVX#{?EG`LM%rLUJk`5Q@OHjt;)EEl()i zGdnE~(5$Lyqj_`zYeOi*6E^^;WDro?JVG`QaDw+qOcCsAjZ{VZo(lc6+!9tMT#9}2 zW4DR?^(l<9>aZl$Gb4qH$LQWvLN~v3?go&8FDn0fT=(GLs-oK?MBiDHd*9F3XB=$@ z=r)nlxG+%CMJ58`aE>`7UBKrVI1N$XR3$($0(cN9Zy>C#`oz$n)D|!5aLeI9^O0!S z{HbYXk!CpOsbjH|1gq}&9VWj@nsOn#i_X6fS0VOdl=VsZurXD5L)Mu?`ny%*{J!cYkcdqZh5pWc0csc8%o{$_AO9Np7@ zX4~cUuQeBXzD`H`!pKXnWSTGpGAtTbz3i&5edAGQ;4%#C%`sxBZf;amph7E;h3h8e z=5}@%=)AHv%+@38R%~||b(s;uOv3y{49!{_-%l1&v?su^D#83q_Jx4MlasVG<>oWT z{V*;wB+IYlA9T?UUNuH_nZ&J*|0#=sx41`+zp{R>hbh$+sL8@f_rwB_?HA?<<$8YQ zmcB*BUmuZ{|6I$#7W@D3sjkP^G)5zRL7`7YfEDmz^(b)M5<>&iGY#Cqm__1NsfUPk@a>ZIY&8$iQk`j&W*#2$L zg>%vl(ho7moXS-})M}Ek_d{UtVod@8?HgkEpVbM*X&GVl_-qhN5dkwA*&H;vjAE8^ zg9QIuos*D<35GCwmR0|kqoQ&m!S=R72rY0{BeE?Q2Y@h#sEI`mv2UVnFQsjPnFJ-h z%H#&HrauhR-ambd{WX;UF2cS2647SelgJ3G%RR%m!(c(Ah(OlDQ0P>Xh2F9E;U1KB z6Yufd{#o{#9T!Wco)HI&D;e2FL9nAuL5tM45Ry2{l$S5{fF>vcY^yL@-Q__Ww5ioXWo3e$o!5M@I{= zwaUJDahLEfu$3C(GRXS@v2=`l^p>Sdsxs_ry7VP4%2A7FY}~jHMhY+E2$mbG<=ASk zESQ2e*s%T2Oo}#If4}FMM`%`KH+WO>I{)ej@?R`J`;oH+TN}ADq=;0c8@+;#1xw)v z&-*1RFTli5vRt5rWyd+GJ(qx=mgYDj9zqL@`|6Q;b(R1nCLsi-{}`y~Kwx%ybBsZL zCxS_Y8i#lz%{oePc4yPqQvEY`VQ4C;-A9?jj{I%koLR!S&MmkwUb^AB)qH;+B0Zvd z0QwalU?gi3ugGx|8^E0w*0Q(G#2C08stHU|J87W!{O&ZlFB2PGwwD#F{Z3tnOWaOc z-8Gv$ZV8KmP6lwhc5VMnT2^bm+3XQ5U~VjL<*o7 znkcq}kaPcwkEvppdRoO1+R+_?wGG6{Ks-Sph}vv^D_Z|Dzn~NQ-j|#B5HHh*CDBv% z)H9=cxx~wxfAoVrww^A7D}4;fvl*8kPFzBlNJTB<{Rv&l>l}r%Qwfzo#U&yxKa-0t zb~H|wWXn9E$*a~wXCkyD`&(ccwl`rFH(GE0idG7D0-&u}D=ez>TseMw$q|EBl5M*& z^o15-)UTcXP-pWwBKsx&AHLr5FUqj(8YQJcDUnnapne#f%IM!OnTE7t??RdBryW&K)EFFNs3J^TA ztzR>iTkwx3et;^Qj5IEL!n)P5zG$jxzZP8;xw6AA>FECN(OgZk@6;C7(b4x%e^LE$ zP-5QmaQFPrMX_s#LB!X=Fn6QfqzrzoeG`u(&Q2-Zza!_xF-v>(zSE}TRwT%7JdUPC zKs8SCxxsvkBGT$Z{||U|m%Az~68D-yn1W7&&z9;{RwRyNp}*PP$uAr04uKChW`@Gq zvG3h}y&@7Z^gIa#MbDcjGYOnd4sNhg4Qwa-x?Y^|*wYN}k%0|g;Ne>Ek{!xtXOsFsdq1Yky*CoyKPA`(34idk(2h_tPAhz zgRd2yM!U{sb?v(kCboZWcLp7dSwQGy6PBW*nQXl6eglM>{|5E|1Z@`LiUz6K(P=+U z#6#b0IUBUC39&T$>A2~W-))@`>qL;lhK7{(uRuYN2kto-Npc}jYtHN{5Z6@u5LD2d6VQ_a`6-)bAPn{!v@mJBueK?6F&4T 
ztb;=8qcF&^8Ro}}WgV(!n>sydGUaCQ$!9uwQ69m^4is7So7QzEZquyBup=SI*SCG={9t%UQF(wz#Rs76S;e7#%cAQ~5fI^xN3HSihaHIVta4iFP9qYH3?~njw#Oix zom`3m2ki@){u*Gq;G=8CoDFIXUHhITcP^t zSWo4M_zmc_5*oGk13(2VI4WhBE0$12dQi8u{>9`T4ecd<8~u`6>N#S$8O3$=l;qB{ z$UtmRTK)iAV+sg0>ecA}SFF?a-QV{1Pa^lB*;Db6VhU0)?tV9kirxBPEUu5jPf_tV zcBJ047KRCxfcINZ2*JIh@6Cdx!1cp|V~3bO88Y1Xls=OifTN>@cUXl|+5RO>3*r353L~ff8S!%-EK3oBw6xB-KI$L)rQBGSEuli2 zJ~$D6-s{TXUvCe@9&TAn9uDHOPWO`mPe-qo+Z{-}o3|PkQt3I(BPq1t?=DG?`rO!U zCBH8h6v;Lsi>~_u_BE{l(4bFd9gq4<7WrZ!C!IuUv^)&3UGLJnX!I>#jcg?0pJWM@T*e#mb1#y8+ftx%Nm&OgV)4?)MtIPBH@rRbV4em`t5__HtB_pK+i1snymo^GSNNp} z5nu=(|7H+NEs$%9sY3DwwBMgqj4*H#gz(%h&V>kY3^U>D<85#InEmT=6D>17sipTE zvX@U)bd4pEd%D4=b%-$7W!@{f&D=%!|Kqx{8blfnzVM1P|7 z4v$^m_Pp% zfZ9DR`r9ONbgN%~(?FsR8T*~xb0I{`hrjgH?UX*>lK6?ISe!6RhWtjF^ebgoa&xAa zf=X+@eEYRV=ymiX`274tPn8Xc9}R96hFej zvdlGeNCw?(8JJ9oAtr~Zt*;I%+NNa=Fvugi8mRAm){X~5mtS`@>-l)sEuh!rJM2G! z7b@+@+(qMT00EH))zRu(`#{8&spgqAmr9-pztX0tR8(mnjnnT}s5k`g(!Y>6o4CRH;n$G%!SH#e*wk#o%}JWDS@P5-0iim<<{4j%gf!^#*dj% z!6TZrzK5mDw7`%88{JhLMU!$denQtj@*ZmIO%4^!XyR57El1#)T2v|d6@`JT@0A-~ zV;C9_Jz^tS5*7C)t=P)Hu}~Tp3Uh?olL7)bQ6elhqhnL}?1!3+o;>;Z$eDtdJfkid zl!DHigEmq|HaF46NWCM~Ht{pPriW#JX(ZF(<>afrzb=dN@ zg8tkade0!|9siA)K@q#c)0{5-a>S!2TgYmP`eQjCXgb&IoM`A&9D= zY2gElv|18vQ;>LMDvv|Kwub{xVtG83?2Xn!vsML{t<(DEUtb3Wdq2kR0DI-PD;R`< z*7B0=ii3?q{CFjQ?`&GE5{u{La9QY-oWHNaGoMZB^n%541uP2n(fcFOOxi>B-m4j= zMRL{l@S3qlpkd*3Fu!ewB$qutzg`NUQ z5Sf7MW|*c{4icAEvC(`ZHua3$`pTH#K7nt8I}_-O8EwQI4|11O9+wR1+xO!rnIN9nGeXKYiDZJi6jAg! z-#Qb%tYu_f?gPyHQt%PJgvYKadc&EeM6^Ue~mtt+zq6g)s~9)wcn&2i5D zFHQ{|8-xeJoCcc~|IPiLS;D}EP;be?I#ghM2O`8#So@+L_+Ip!UtvdZFu1PeCIIhs z4D3gX$wugo@?Vq_JBIWkKp#YBL#$_y|F=INVWjP6KeMl7*z+V){XF(O7JW9lX{Rx9-T~tXXw>&cPINqaH9Vlm#ZeV9E$lo>07i-ba^pZx7+|br%6ez$T{^~crtp8 z$B37HdOmX`9i-DrHCZ`Q##G!JOo&3Yx0dvUY^Hn>4cYmJvjI=?J>wG%D~76QWWE+B z@*TA9M;*3anJqRVC$eSB`zzRi{PI{@#6ByMQ;|v?3t7fJURs^Q&4j!VSFfLpkVx$U z`3a5%jE%BS8H(OdhrR7&1jz~VV<8yA4H4!`6)Y>~IwUzw#h=>TLU1OyNI`<*dwpl=aP2}f%V|k>HY`@-+ zvD(iepscEeY)<5jTZ(Z5mM8Rw(}JIzjIqZZr=!)}iylzi$x#$X92vEZwNS!#75A>+ z@{coji6QbVhChZdQbLq?2zZ(2vNzBm_9C9Jv-8wu?SiZ^y`m#)7&<}`{@bNZyI3Ka z5W!?}dF|?Tb{Vfyh6Ot-W1av-eBL!qM&Y|1?ceD@P~qaY9&e8}@qJhHpNm+Q&mL=F zzRo|?(D;XjWC}IXYsphgW8Xk~67;92G;%nYJ)fQHRF$cGi9nO{(?o|kQAr0C8e~m- zyRAU+H>#lY9#m*KdfD1IYWEA%&4ZY?j9w zt;^K@=QtxMYe0$}x~s$q&cvW0ke~I>jy(aDzPtRN2F1Z_w~qHu7NWnjys*GR7O?57 zMKOY!Sg6H?|8c&fn97n)V{;o$e9B1-^(Gl*1qHtt_yA@u){@NG&FvTn2~M4sT&x}@ z;3EZ%VK*Z`>dB2?hF>Ad_b~uhPD6;t9k>l4k&O;XKhI5?&H2K*f&<3$DOE zD-qWgXwut$&<7`l3#YJ=iRs7;`s-Ct`9{_lb=ZT4iPpaP&HnGf)JwGSy5$u@r5eF= z5yzZ%z3_dS*Max9)7ylFr7lJ1vmG;B#8e2)!ZSWp`B?U;;zlD}1S8mRUGaPT;E(b59;csuXpA#Zu0tXe{v6q4;x z5D)(*LJ#kKA5h62HX7RdXhu4&h6AI;j@BD|E=dY}+fJrCbVu0cL?<*h8AO=k?J{)M z670lcjS>PrKID(A2!JSrs;j2EYxDm$duIH&%kt6kgoWEj!0#ctcD~wet=0=4P~vIc_@2seQdrUPlWb?J|1Q~1+R ziess9^!bK|?$GhE2puTC3v8$ROT{C#KFGJ95tE_VqIbn;n(n-r*cAY64yB^i6i2-0 z9^(2rnH~bC_GV^#$_3~UMo3|84rfQT6Ur0TLKJn^@-;zBlWC~zeR626LSX6nxxES5 zrsrO@UN4VPYOruLHAY+F#msehowEN$Ui4X(ZLMhKLQ{s`LkpGxo# z{NAXh;pZ;fAgJBM5UA^B{`+F#@)Jb$z^S(lkCe9<9j!h`M8 zt*TA#!o_({jOPIpjb*&W@elLbfv0U>s9+_p^Wg|pQHM2izqx5=LU$7?kky0}^{=(D zb0D-;1}*wNBn1zaYmi^iuZ{^^TvauZbGpcH7#i&`3##rjzMpRpj~({|9t>JMi@ORb zskq%=2FARyfXQixwu5=w+qHkO1r-9%fS!9Bj-MF%ugWrD^n95tegRJvu{)) z{to`!r@p?)fgN*8)%kqIJcjcv3#m;ivh+H?_2(|B2Bh|cJw*Onq_V#gb&U0x0xfoG zSu(S)>ly2qcwls>HNhxQ(kOPxCRPRgTLx2c_2=)pSs$!$y*A^BS^J+2=~0fL>ew$$ z?>JdQm6`O=5UJlMA>50G{w0R0LtLnKhcAd4IA&tlbZxQ`mrBwyNSu%*yXXiQ)zB20 z?}SVWeZJor@s%#++Uz|+cE-qO10Or&bC}Ht>E)`l$c*?Wzj(%vvB)Thjr*i1C2*svBs0>l#IS+FiwO|WucTmzd>b05VYeam 
zh3^3@U%{Ym$>JwH1_B!Sx%}}Gg@wdwWe#*MDhOjDOA{&55(1@^ZLhXbYVrrw3cTE} z=g&cXY6t4PGub9z{Q5?D!_a{^34_R|jzQyTsTSx+J3!^Fm(;d4IoREf@<+McY+AIG zveYbYKX$LVqJ3w8L;R}me))4(fw*Vi{{5HB-xE4$*wg+^3Rp-Z78TJmQhVqh<~myN z5~rGcGdl%eoA}JjSiZb@`iPkK15;9m1r^jiKh_3LT{qr#z2XS>|8<#=G25cQZsg;d z8lK*l?GlAC!MC2l|qNmor_3fcXRi5^vIn8 zcP~1~Vj|{@rY}H_?{R!!tb8wiz;K^>9Gbn4EPeOkKs5Z9l>pDG7RiRXc;EG;2~^&r z63uAq8?NH%c8XYz-`u)lQ;!2%BrJ`g z8{mP6k|Ii}H!RGzYr);vAFFqN6q(_3e@poNoM6RpKvIL%$bWsf6Qu@`!_8To(9Lc8 z*}0fwIG?B}awX9?2NkjO{)>Kp=aTkZOtB<}0BN-P6w1r@;@9q6P$?rhr-e~N9~n44 zLyb+?8&Osy`>ldB0hkcBxx>0?S3w@jkp@d_a980gwaWhX4dQ$%UW@Rq`s4NWZu!TahGmz9 znRb^B{}`*VXi;w_KQNjr0M_`s8_4^)6UD!V6uucptg%t_AH){DjAR(y2!G};Cppb9 z%?VLveu1Fe{ZSG5z3l8wuO4K6wVD!?HVC2OlL5+xJ^NcVAkpi6HP%f&Tov}4&F^-? z!TZz|TS$nC(@$->;1qGh_#Q1+ZcpvKf+}u~n`&p}{XfH(v7f{kbk~#mEJJ^9=@&_R z$UBLD{!>aq%GovJChzqNcJ*ak8eWFu%p;BYFEOvYDIwW`Ja0BpeocQpP#B}+H#BET z*6+K%Y}8LfTA7dmhN@)#CWdc>+4ihJN}bu5@alq-oH(J1Z02CaL9L%qCvRRH#=MNK zhz$x9zF5dG`fWOYmqC&0vLGG$^Mn10v2jPlpJP?EV8mJ$e^dg?nIq~+?X&t_rtwSD z3^i`84L)zVf;s9;x+KZV{JKRHB)b^>Ps!G`sA7{8=$hiDVX=qhZ$f3f<*87g*x1au zK)K zIu2M)H{KUG;(-N}`UM~b6wctKDY#);kP>xhu|gvMt!{UZgbuw5s$X!=WR}bDh*18d zX0)pFoT&&IWj3r>IWHqKyIgUev1@soC_W^13&lKmDJ~^pjkQwKQau!s{99S%c=VHc zi)xTgW=T(OP<$rY@xs%{_lUEqKPdU*qsh1*v0H`IRwS5?4oY{~lMzr;*=Z??`W6?Z={lJSSofbO~2A4BbpEUF)uvlS;h9sfV{Be3hGn>BF9wGj{#{=t+ll9emULdbZ z4DnT7AbyYfx~DNDK+F@y70QJPvFq=?ZnmwcQ>{w(!8|9CI6WyVGIe=E!ywqx-07&t zQFZip)>+MpP{10c#(0*%j@oFN`|;~;kLRI7fyR*21%d=T)WYP1@N_r#c?|f2%?DaC zH(j>PSnw9kC_MIXf2CNbu!%506+Tq)0dS>81GUfavRC?n=`aC8-070SZkTNjHn$~- z0!&vx$~CHSz|76p^IZ`g)uBT0bN~z-^mE_NHX%H!=DIor2rDmOprd`^nF)}`L^L@m zQ_1&=dR!)Pzl7W8sDfl%<#u$Wff*EOX08K<<9u;=KzxnSWeF93cW zzh@0gu~^<7tq2yB6}r5xUl7+2&w5yV@cSjiBoZ|ygUVh>cikiloR`y95QB4`vcNS% z14>Vn0-Cq$aQ~;)+I1VZ#Lrj0>HU6W>2?CM`5Tst$x!z=8<^L(eLn8}em(*`o}se_ zhYaE)8R@I1v5l1pcVcv|NxH^y6ls@b=tr;Wi&3d1~Z48@|n`guqJ!-Z%a^um33`8e>&|xWFFR2IU|M zM7`G)eixN~r@Ho+O=CXjt(y0|HqZ?Jx0d{y{#*QZO5kCsW*6Rg;E}p=umP&+uof~~ z3=0to_1Q?UlZPN8vU7|HLAe{$*Bhy?5sF=k)-d}f%eeh+ZnqwG&my_=O^FAg?p^mQ z*8`&2yBdsFxr-=#6y*!NProjH7-xA7 z!fFb`GlHK3f^o?d5&CGBkQXTyYhHcj$gyeJpl7`Bh^o41mKHl-u+MRk@7&R7uJRM= zt6fRj>njO>KB)g&(9(2&z1whX6QHco&=^iXqqO^4`=*Rv`H**Rv~`g;9e{iPyu)vf z>@|-u53AwVXm2H7SEk)?R(XQ_zbhf+EAxGWQafPii^VSQ3ae&RH*Fe5EU*_SApwb7 z;;7Iu-nHfQID6^$ti0xN_Cz&T3-kmhR^rgzA;CYobUl>p4i6;+Bi$n(t1bQKG-U@ivp~h+Qv>@)`t)U|!_`Jl^HWT$vaJ8W1t zU4N_dn50E^DbR*CU^H54KF|kZzDY7_EHIn+{?CAqd(ig%0g$ua)@o5-{hur4vEn(1 z==4-4zAdLTOx)Svu@Y`4!Neg z`h}e}F_Y_YVla8B&*~<9SPR!ea1uJ=U@`m&MY3|XGw|LnH?R80Zv?k<50t$7*x#xF zhJ@@poi|CP>hnCmkc`ZS?g?NbRhCcATH3GRBXqv2OIR-OwUNaOrA!IWy)Op}537Sj z_ovU`k9_S?HQbJ+6UZeKJa%R{;0uQu-vV^4CnngK3$NLcMc1j6)6J6z6|zR{T-#^y z3Pgo{mg9*A(&^vpg)jc^t_W!=bhC)wKeWDZ0@%-VjcNtPmsBtu@PGUy^+N`D`XO1i zU;;oUgFpI`;*7Ve3+049-mdh9C95r0e98AgCmE#%d`2R%oC;SQX{Y2O;mf{gzi8X> zOY1_K=2eJ`0~RKYMS!t1gR!n{W>6B%7C#1ZPsPO|`Nb>Q*I<_d()}tE9O#05L)iWj z_p&W!kH)1!+!a%(8wSb~pREO8Q#^FgJ5>D3~p770}e zt>{sQD{DB|07c@W@nl4z<65Z?E8T51%=k?#3Co**7B@TMME<_J4L;#2fr^HVtVM~% z6bk4Ni3B$smQy0sgToCy(tkh{X(Qw~OC7wA<&=2+#ZUqd9g>3SRbsG*20J*g(W zFom$DOerA*>PgztX+DwfW!D$axs;rrHq>Vg)_RZm?)=7r#R}jAGP^gl=Ww!R#f}cvB5O zmMol-Ivq4^P6wt}T60HWZ7lt+RbkR&f?jVI80^6Yi=;UsdIda^Ft;CB7_*p46|Ys2 zqsdG$W!ZZVHZyze1&XLj}C!PN8 z#Rzm*C=~fnm>cB*bnL$UOvx09|8waLe%Pi&W{arnZ#X4{q`=BBVbCp|eN%z50k$~^ zjT$$gwQ^E*u?9}ulnW#+V8#Pvz>kX|N{bv@S2WJX(3AeBlmeLNEGiuCcp&rs{U?|d-1hqph5Rd&rf1N&jN92(Q2a2{xS#P0GIhd5$NkyUoU!Mgg@)_L-o&j~ z2s1zRX;PlB)}Zo7{zf*sftb4=dn5y5dhlgMxQQcvugU5ecXhN~We?7|a~Kqkcr%k~ zw8-r-7H%j!ko^V&h=toF_Eohnw>XvkF{Ap*Hgi^10+%@Obui5kp2_8h*dPT<3DiYM 
z#S^L#`RlM=lxOiKYGFZvtOnltAma&QoFHxNkOnkJ^n`o+X&!gt#j4hB7<-Nvb_2ZL zt3U}?mB~EgV}l_RUt*Khi?ph8CxHNCkL^6PpjUO!GbX2^slBq7cD3a28W{>DI6xth*BJVFBSW6QT`zUZw(d{J*# zu|>N7w{D2H;fCp=)uV*$GOK`%=ECwk+zzR|0PFupi=+{8Sx>N&EN)`rar;j}DORH+ zVg~ND4R#83YC_$|vQ4Uu#1B3@OjL_xa#Yee+(~xKJ9WML=B|5N^Hc2?Vkv3O)vw7- zq);@`OC(aD9;+)_BTcY>u-Pa14+`vz%z3cGb8Kp^>(>9_Xy)N6aK>w8ciDY~&U@S) z?01!U=;I48F?VQ)?ZNM-h|4u;59YZuve6G3_MQ;-_vGYDXcChu*{RoIItbh>g06Na z@!O{`TRy8RY)BMgQe$ktv{LmQ`xYMa(B&>ii(%`f^A|M5no2Js=#Aczb!L0&z1J&{ zNac^XpD0aKOlt-b7SFr>kiSuI-`eb!%#$*)mwJ}?UKZt2Mq#cTvm*yiZI7mdiTj@M z17-R=^PI4v0|&3q)NB{Rh>?3vTl#H`O(K3Jr>oqJ-jlJyMh)zQea^5rHnkoNHnk4N zLUR12L=7$r%y>K86;7@^Ij8}@W4CJEs5P+1YnW_CdHwSYIs#TvA;LQ_i!7%Bwy3E^ z62B(&nvJ;d*UhqHTn}Lac1Dn`i5`Peim$_`=FK#J#wH?0Dzx+eI%)vIAj<6TfE% zt|wMny9Cn53w0W`2iA7kk=qg`IF)VXi8g~@(O=~cr#=0I3I*PEcduZ&D_xA;d<9y-4%o3ok4Z05H|#eJ5~K0Of*3@5uCIdgj4v5?7V? zZ)G4S4FOu zGQEiFf|fOZPp_UYdb3iy1Hk;k@Kgt|=h-_wM}>s1 z0Ss}do<=rcjCHO~aFQD>>U5^1v*a;2w3u_hN7InT1NF4g6@DmhU5!8Isv)7e1LOgQ zjNyLnD#Y>1@=v1b0X!FhpT=`8=yF@=RULY@sN$P>^Kg&6Xr$Ahp+(NyhX;l33}pMi z?zBVa<8pql3`x()v4AYGfj$I&mLHc&4*`iWFI7?)!Xt8oxhi6yZm{m z#Dn1Qo$$u7fl8~hcY#G(@+N$OsHi6cD?y12_sK|e0 zzP9PBq`vQ116p2Y(VBc>e4=mC=IgQ(0-f!UjmyFIzLLweuvQ{?h5N#JDhICleIVk+ zz(8Cf?~0z^Pp{ck0h?@8gtkFY7A5f@B~uxudH(__xc;6V)#ahO`urpkt%g@eYIgi5 zU-CIllKdF^q>g--@Ts)zd%T9@9RILZRK$MV7qS3~H$>dA9)^Sn=;Pa)LqsHdWk5%vItt!~<{3K1^@LbG7PloLq}(~&aj zZL7?a^i4OtMit?0#%O4(0&%5A7r1Yo+0SHb$Yw!w`?{1$cqOp_G4SCwls=lXZ2h<6 zoFN4PhrONpov<(O_-GsKCysmqB1V_}EI}@o@{EhP#cQ4L7x4vya680MaL2$61!$)(T{L zXUoed@8!V~N0JgI1pi@|ZktQQ!y|W>tDY}EeHiiCI9%%j-H1qB7u2EFuzeLa%tyOM$APg=7a#E)rLP&5Q7&9hGkKJKmK^!TyBbr%=W2pjBKsJ{2?9FBn$)~|6Tl4g89W=!vL4CY=V=u~3 zAAb3e+1V_L;vgzs0zP|L=wWBN=0eN_prs1LI>$r^U79P%0g^{bS(He~O8%f|SW}}* zr-r8h)srXLi?vT_D9cYIm6*P5HFR@kbG_ze9@~qKfuQq41I(N3q?R!s5Ds3jRV&{X|I;$=Hr~RkG zGF;kZvmUv%br`=CzaryOH~#XCh)y__$%<+!Xmp4U4(Q{XX%Mz2qU<)gH~oZOlPlsC z<4Qf@%Kr(5?g!_wnwF|!Yw+4r@SX85D2H%q$jNuSLO5bpVpne$ScFCvVEGaB3sD$Q zLZAY^yN%pAZn?(HA$H-M09bfoK7+0gpb+b{5FiF1bnaXJZE|KD&D9b*5k{PtihBEb z-OYAnhWA|u>dBu6sO%uxO&Hzv?0+aMrDT>&kAXV%FBJqZbIO5ZXz?I#zQgt6n8vB? 
zAeXyS%K|e0y(4*mezfW{-4>&qM&*7*F#-w&d1fMg82ep~JQ31fn@bm6U=;azn_pJH zbenC@kn70=HhLL;)NEH{iI;W>rV__lBM{_UD>r3x{DvgPQ{-9e@9v3}G(e^#qhE!n|*u*k^rFYGfe z0&*J+B>*QK|46;OGU#7@J;o6bBaHpsVRYUMwMHCtuZx!6XJi(q)TZ|_8(LCp8rPUh zT4&1;_MRWpi2Te8kPH>h_6H=sN6A%3s*6;&a)Fh_l2+9aYQIP7upA7${8tlN*n_1$ z8ZfNELthsAsnUPZp;Wx($UsFlJxTy7XUeyvT|$`W7{J<{M^?C+O@j=i;Nq8LGK@~J zxT3#ToHn0#Pcmrom=q-ImPG%|m+Ww;Go25OA4Dw$GzG}}7e=wF@fX-I3T4+=#))Z| zM7r=+dlycT&#gCw!3{~avoo7s3K8oAl-1s$YL}vX*ItDn6Z4!9(}F^Quv3}l!Q<4e zv=dQ{E5Me^U+TcA0}q4E!g*2h-;|@n5AL&k>of>zPy#G5vJi`Gz@AR(!kxg>bPpsttEWRDhedoC2bMs`T$C&ru&$p`67E1 zz8*X3XE8AzDYit=H?W17g8PJXOf{*$6t$JgbXtG&56m^pBn*ETFfacR{?c(;5{hHZG0$JnCaL6~?&MUOgP3P~iB>@O>mM z#(1A!YpMo1jOvVkM!DE>dWXW!Wmo*`H8ZfX7}25Hb1yVj``;7yvTCHI;3b9?+)#mm z6`%f|QWN?Dn1Bw{4ktbL`2F%1Vm>Z0oI2rdPz>QJ&S9;bw40KmYrbaS(BAKFcHKT^ zukP~p=DR^!%U zNcUHL$#*Q}utDOFal5knuldL0cDS|mG5J0J1QJRU<-+MzKkS6x6eZt#-gdQi>Jy*#7V|HVQr zM{{h(6Ob(h0eHAZ^BPp7wm%$}iKjt5J5s^gt#S&`qa1}L^{$s}bhU^gY7!B3fzX4&50{ySFL#)NIo z_oIOww4AA6WIed>^hC<9-D+d2PbR#*uuEz|t4nN)cGb8P4n zz$QS*osu!2VCBmI9Dvw7R#r64I!?)kn$0yMj=%^VVS$0jP0OpMh_%i8h7nJF{n42j zw_dTj4g(D~XO0ETSmMC|WQ6RCQi4d{;-(!{%^LhsQe*Y!<zS8O#I9gB)zDzzh4#HR72LYB-1j+bVGfHdb&)&5ClvFN{D>qiK&z1Kpxl>wCZtu1iiiYGi_lm# zazoM{X@5O}V=Ib0n>YFnR~ZV$Mi+DC5q;^CJv41RVazGhZNoy*Uj^+nRNK`Da~r_r zKeSPhrd_b2ia8f`E)qshf;P1A5Uw)Rx#Ua~%7|0ZJKMa@-YePF!q}eW6#=BRuRWNM z1xn${dul<&=RZJRU>2QW5AcaXOf8Dl(5+;Aa*#pNvBz|sx&biBX01!1zLT7G6et7jga*lx@Fb;xMU*)4XQp@g z+i5IN&)$&CAcYBtdZ0d z)7C`QY-$FE3{wIK8DZA6 z$!wE*kLIyZdl%T;Ad~5U05B1tU$h7)A+4)_9Q|f^WT5lh&=K;UBWTF~hl_nYb?x8d zO>jUv%ToxVgX&zsj;5mLav`{sAFK?J13?QFZ^JDO zoRY>QX!CJux=oWy2=_l2-|pIqffuofF7}dTvI-La_(%=Q!nGL z>@OIsGjMAtecNo!O!c_L|NkRR0ZF?FcI?h^o`Bhk#6{DH5bKSe*>XHTVmJfJ)DXAf z5YBas`^B`joGf^idd9@Jtp=k2HC+)-Z~3&E2_VEJt|mM7+_u!x1cX6{!u_$Yf`V5v zV@w#{AB$uhP-%t)gYTwbzpAd(M?CU-gqzz;MKQ=0S`Bam4Q@0G?$0Mu zgPojHZ2VsA>UBSq8iio7o+1-TUzFZIwWq92_EU9?grYcfAo`Lu@hgWMiX`Fq!b z_a~-BR)k!r{`Z71(*A$X65M?pv~a7@d~F&sGzRKx%yG_HHW6g0|>vAtJ;!>x0SE8#8t4Kyp1mJY{ZBr5C|nX&!>dsnMep;oVijE zM~V#pk~=6T0Q_oAJWqxMH+AwUpBsMmk4H~8vkLCoCY=6o&U(2Azuoq1_n8r-yS!Rc zS-gQ0y4|MSwD+I%Mc=320N@?%N0SG^3r7*K%gc2RldA!x=xI~hD5Di!J@MK}-bc{ajaR0~TNvu8R-=#N@fAgrGFq7%O*Rguo0b0LstiY7Zod!s@^-c+!iY%hRP|nT@KooppL3w*=YW6SH6X`TsToZ=BTY`! 
z#QV%Un3bceGORH`B!2n#a{Uk&&iTa3T=>ix8^Jc7o}_2JNuC0P$*K`CzmO8NEFJjz zTu6}rx7wKS=&P?QI?xe%2Xd{o05+zPd26oO&A6l=h!Kg0Tg~$FOPG1oSg1l;v6Wv2 zJlW7GEFnDJiV8+hKFt~QD(ck+pAKGt7TViIsd0xac|Qua9lh0BpW;#}TN(j->2;wW zTDDbq!;}L%`!$bx9WFNDQ2`8Lq4j9oH)@SnY z2)`?$%n+ev`r)eZ`Bb9bqCq@(_{KC;#K`zQ#~2pLTp=+?wW&0LJldFEWeUQ(gxN}x zgbkQNS-GmFZf7_mn*>psK}36Uy4dz?9{*SKKUH=2C4g`z0B1t7xolZq-`g3)VbWl# zkBbERa5bX}h&N@bS=CX)@K(UIiGn#_;D2811}8Y_geIB)t-4in zM+E*BS-w&ZjPc~$%*=b;y-f*ZFc2;{^A1ScqQ04rEUlf}{`Ni1>Yy1VrJj*Z5a*k5 za)CLgsAgE5{|llub%v5s%N0mDIU}#D<3CN4)J4M^d?(w@9arnPq5Vyl16D0UR5zrL zayV&|kp@@AlK6j)(??}enN6J1DmHC&yP5V6~-Ai@w%{m03Lm+C;yPw(*^^TZ4* zGq(w5krqSIBKHY~3YcR&JO_n0qw@{$hw&5w9U%^(1bg2G+zzsxzK@H8lMh$CQA4$+ zP)yV;!_vF@R=~b&4m>VvXagOtT)CU)X^~dsuxHBHmH3cvb;9v(uI*5bWVHn+?3ax4 zRjo7iJoUw!o28ifz=XIU87?lF}>|Qt6*;{ocpitbYNcT6o!@?&J}^ z`G_Oemlia3W_hU_4)g_l5LGK#dSbMhD-VFVkzxpSTh`T(RLRLri9bcQPXnSF=Nr1P zwqFU&kHkIKO!1mK+T~d5l(zlVm_I(npZc{`9;QvL8k8n=17Jc6(a_oL{RDtz($?sy62fDAy@oc z6u`?2Km$>-H;Uxyo8;?xWV-C)KE0N2Q{( z_xYEk4)X;SoPT;wk_;T)cT`FDhC(s6Hi)HdxwECw5vCNc>~9({SP>z9dRCh(#_P*V0QuJH45)~{g%X7MtiDENp=EJORP|Ubvq936^rnWS( zUq*dxo@SycP092V2qv&psB!+aRCSeXa6|GL);~6CKDK`grAx$0|680(osZR7Zc4~_ zqu@r)l8mo%OJ6m>JlD3z-A@Y%{90NK#Q_HD`~bNURKbilNes~hwCQ|qKVN)RX4ww) z5tHk04+0c7h@V3#3DtVx-aSav)5}|>%*kJXsd1ke76PK&eil2fOL4#iN0?sILLg+| zxLPYTC>8`V8&TC!#qjq69~~;?i-Ay_IJ`SI_kqP^x5Hbh^~L7r4hz7s*ZuTdSi^ADBBrVDbhB`8E=G1KPb*b)&PFi~@V3O-X`~yZMCoSe29<7UDH&>LDd|=L>4u@DyE~+j8oFD$JET1u zzrT3jbDj761L8Ha=h@GFueCnwpAk>`|KE0-nVLxj`D;EL`W`qVk9VG60sURF8*diH5O*M^QRKuSiyPW!}Iv(x^JZQcWh8Wn)q!1hl2o$jcB_~ zv1tTvM1M`>;;Zr0tDLz+vl2*Z^BK?aYzHwrHOrvqZS&U))#n&$=2yu zw&rhyg0EtSSlv22&g!Gy2mI1h_A}-61QnKn;hgedSC&mOq{0Abf$lml`l%9g0PH?& z<~B;er7<=BI-p4gA0}81zap7%#gXuKaGnM^@kbx@-L*BJ}6_RD|n zGiJ?j+(}tgk!9=pI&<6n+m@>67}k_pAFCZMjRZ@z%gG{_~pbLiI9K6$JFwN%%D z@S(3aPB!KJnVx3xFffw3_`KN&nH@#n7f_m@jJ zr`Me30uLQj_Z{D~-92dJOVKls=o^fmXZ~h1$DW~oZ$UDswx-?03>@`gL!Yc{!So3z z@X`m6Ad#O81MO{3p-_c+M?S}mE~-a+_-ca* zWSCI6%b7)si6tv%PqDA%ofp z_N2m5a)t_K$WI$7uDv5BkhzI{R&Kg2BKIf}YkmXj`nJ`pwZ1aiFVan8Vz0^~>F`rg za56EVOYyjT%F*(0DsGGHNlzbaJtau}uEAo0H6cZ);{zYI1fo;$kOxvgj1-U4ra5!u zl~UjOr0*RhYGBwcT*Bq@3ns4Ct7zEmx_(k|dk<~rdxrNs=<3J3YDa*#zF7(vKdfW=8DJR+@nMAClBtIdrH z_5~_vy+2QdWO+^^6EQP94kn}YIiy;y*aL&ANSNDPo}^JH#&b#ji*8o$WNe-9LLb7* z4(5Pn(}x~25ZoV+Vcr_lcw;{uHC_-`PgPZ4G(PuysTNm=&>*@(H+5d-Cpp=UxOQ$m z3}@fxFEbKg^MHpN6=GLFlr_|oL%~7iA$S7F|3W)2u`yhTi}?2O$__7{@pS(1DMad4 z`();?Mt>R`xj+2!x}2zsV#iO&#lUHRDLo=qpR@5$pdL!$y68aM`da{;zLUTmF!=vA zO+)acsf@4ljD9}(x;eRN|GK7ut?0j9(~VTOwOEq1W5%k4APdEmohlz-ej(Okk)d?f zm=ig^$`a-a${wWjxLVt*3Z>Q9de7lmT1#QeAu}hc{7i$ADK^hO!k3y?OfPNMHjY&04DMK%2T6zJMK11 zI{r#gpB3-lyeT}@Jl-qTKu_w5ukW2a>$Je{j^2YYfye80VA*+OWB{%J~A($jKaO_fu4F~!X!=1R@u?24U+}& z5xqOGts|JM4hx zbiXYqiBO7!ZRTiGz?daGK`_u&9xs3xjjh<@-)?j3*UmWAJ~Zt%ETmwqttHxKg+!&O zzb`#tf_HCz2Il6jBV8tSRJMJM_kIv5I=fhbM#(T@Xs3vC?4-sMRLUdPjLvYGx8xH{ zL0v}|zA0r%y4^PRHG-aRG>Z6JACd8Jrz@N*D+KzYAKE&PXVmoWcC;xl=QaY_bver1m=nt_9h9GCx6r-T8h+ z>4lV_=EN@8W7g(32L=GCsoQN`yv$iG#;~^u$E^#jT&AHQAb62!i)E7tT> zg>(J>sH-msu-pfff|D=_cWNvV!`1xuunl+Sh15*5&&r6?j8lPfSI?l7LeG7@Hfi|m ze<8>WtFola0FD`p4mPN%JuJBBtD4gawdfQjzq~3L_QA7!OOyWApX*~9`v9Dyp;L_S zV=-;=8tzB()AxP$ht>!z#YMMP%+Z42t8=#|s7Zsc-@;9r6Iu$#c+v%|MO_`tj#XEr z`io+jo^(Wk_HIC@bq(iNaZ=;=US`dYpTE;ZFc4m-O6bPSNw3Yo!ES`rSZ6BmZ6av& z647Kb9ciy-B^{y<0BJDE&lf}S!)%rrZ;E=AGe>tbz^ej6@h?AN0iSLP{lB9$=hKO} zme^KY$K?j)JgRw16!hPC4M|@14rc&P-cOkmO8GUP7vJc|KmipH+b}+l#p$-7B$%|Q zJZk$b7{$PhK<~Lu^o;hTb>8(mC3Z#w4CqjxjL4kqd5tOcAu9Y9p@C%Tfr%jF12o!+ zFckm`(7wQyGV|ym(nbm*V*=)}Pl2b~1h%yMJB#?@k}IQ>`|5@_&#Fx%mvCnjf|Prb 
z-?9nC!Gp3d?8$`p&a{o=iq*kd2+CZ-fYXHV8U4i+;m7>bjmG)+@?qC~eO|K~fH0Uw zp}LO4_7bk`HceqxKbmix+dDt6JrzIHt>A<;h?g&vjObN8*b*Z3i!S*cTesxbL_fMD zTUsBib_$UfH(>zA4O)~~9o{%UeiCOh2NQ+CZ@o9Ob_23DT0lpE-ZdOdD9R9U8Rv+i z{&k7`dwVYoDkh{)DKXNZooaeq-1k7t!X*d3YJ-&m%DKU9=sB+tl|JT+IZM zQNYQHYqU!mPM+phYoLG@ds&Piz{Ua31U5zn|G<$eSsg5XQ%+cSpj-+qzsN z351iLM6I}>!xSUDO@AEUTNUHefYU=6=2d&dSUGtFm_uT21ORdty^)jDyzJ9Yp4e_) zGY6{ggbqp6x9>nkD8NwqNZ1dwOKKHM;VJE7*|~Ma!0HIvP_Ud-Py1dhtc)FP!Z|?& z^Gy>soB`(WKX_<1iA|`fUa6{leYu9Ekr@7@4EB@J8cZ2*+!Mw+ z5pA)>1D#&XM1DU#HM}kPswhG)swhx`Y{6w)NGE~lRtqU42u^;Znfes*)`Ih_z@M14 zi5|`|KT0E$AO0!fddW)l@UD;4d}WkSAdYt?zyD zSD5(MPJE2v;w56`&egh%?)qfK}k>(f~ z>)A1rpLsT_0Kq`u?&4E#!iMR|Tc{d?Evsa)%-EHGB%zf__<3Ehg$#a8Z^FFuCykYj zb#AuDh)GlL5r+k`Ij1KZW54ElZ9@%80U+g0_WVtt-=%#RIZhyay#b2pDU_{79)9_} z)9)xCnL$*N4y%a1bcAYyygB_nC5PAAT1aHM@_$A$j0xU_i#_Us*)8(r5`4s2o3gov zR2M?_HOoEXdkeke*$ovx%e+KdvJ|exlHTHR#6PYV3v6C~IC2eo6)#TbJ{)c3cT^!5 zoRq?@{b4lClob`?iC&fVo+XCq<7F*SoSj%PN1Y}&vM9{E)OEo2eYFOqBdz8Y|3|%K z-nIh;*E9X335CGD(784w2KDq@m;0zWzWS^Wz=>&81`iF5l_8#gvnji%%H?#YM-Jt5 zvNfUa@`Sn`%totMja9vgb=yUN2|hug;UVRjxfG3c;qJx9MnG&2@x0dXe$Q#;EE@2O zNj@V4`iMZSCs#VI$KSq53X}TLX6x#v*xSMd5wh+3vv- zskeYUsI$GbOig_ps2H)N5~iqlnGgiDNPodv*5kvlZqOOP%agukGvFcs^QGhm?zOi} zDORQJt1k(ptbW%n%nWpGFKVT@SFnL%z9PWw>tz+0*$~hWG*WEn&;w?Y7Qv5o-<{u8 zlUiNdVndD4AQ&LjpcvggwpkuW`5Zq|qX&#S6SX5eFA??*M|=UzL;YlOx(S~s?u5Yh zgP?B()G%T*(qLW3rXay?DG^<_^l-v8a}8oYQtuMXhymclc-5F0UHrv9xicrI1`e-D zy?U?y1<7IMSpLONn##PFk!Trps;I_z7c4Wu0}01&1H;cgmC7il#P;T*JJ!tNLZzm@ z$#jg@Zq*Bdz2-IK$s8af_Hpe-vX{b%71Ghl6B9o9f$cW?o)v;|A6~Q6Ug}+8Oo^RV zf0}mxbq&(Ow`fVWUT*k%R@>6gIU_m=0vKrqfQp@B`rFaSY0hL&iU($D8l#kz1KY&;aX+^xf#obfFbMOT)Fb1PWz7ybzm@FHH*I{YLsa z?@^`A^eMtH{B>55Q${f!$~s**DZ$xDkxA2E*M4g;oEB>C5+G~7K+UroJxo_wJE2ck zt%HM~sait$^XsxiV*Wd4n70iE`I1nAjxRpjTGX=tEu+yPSN`Dz@d3}wp_&yBLjfY> z-XODZqn;v#E%s||XiZwWM}BgQQ&HYy@@Q@(u?o}Xpuyg)$VfLxRz@6V2*u_T(xRUFRitZI;moQ11vEkFb zjHwavBw7*Ea?sIKI6-fuCZ8|aL2#WI5i#1Pc;!LxLfOb&2oeP zKG}N^r4a8>BSAcr(F^|ixPHawO3MsS@~3*>yE>B2Bm>i? zJF{x6RY;PB%N$#Pu7G#wg(9%ES{AnJ0h4+H8Q9lDR z-jjX#ah`S@=KRihIjRgj?V@!T`znpIh7V^cn>F0s+!?uVb@u=Yhk? zCml=b_My?jIrBR*Y-cV0OC!g~Vt0Y!r-xY-ol^~LANKbyLT}Vr5S+%8%_K&s{duyn zS$K~yeH;(3Qx#cdiFG7%o3Oz1Vb_@^#-PbU!k!ee)X=)5v4S5Po}bJ zidHmvEvWRgkrfu4yZ^m5gOTboomJKsq5C;+DLcuwta+_;a9Br?m%CO%Qr^>-xJ8i= zds6M3d+@C|K5sMd1>fKAmKKo0|jyd8TjJumxnObLMU3&fDgLvd^R! 
zo!!*dU2cki_FmW%@dMBr|nJseKUzcnkbBJ#{1OAW&VlNG=l=0!{9mN zVajI-U8pcZpaip{z>QCO*v8rn=z(Y)+YsuVJOK_yhmC~?pl%Ri&yitSM5FLu6rufq zx2Y9{jrQ!le#by%@CTHrJBo1inHU+C&YrI(1rFpE0rJ+qhQ=|rV@-=14NhZ_>LlZB z_Ws6~XPR3VJmJs*75qqawVY|>t*o?x!1SIp19zsH=S2JmNmxPBteTe50k`#)>aJwA zG#Hkz0{d6Lh#06(9`X;gEs5ppSRis=+THN@%{k)BkP)?i1VG91ko@@l-PP^5u%2(M zp0*l_93^iAq;zb;bt|;8INjt*k8Px$3;_X8stYAlu=D2xVte2QMHm}a^;}|`^CC$H z47hc{Y$^c{PAMiAgXG{CC>C&sbm{*F2`dc|ei{*_Ti}%1VpR97U!I#B<|Hb<+bR*5 zyDeUS#DKm*7CDL5?Ltb6>_=Mvahr*T&QjZ;odEnLj@yamIRo+Q37Nq3E57Va@juPS zj^*lI2fU>2 za9uQVyS^n$Wkhz~y<`U&lgvup!nPHV7oNfgUwxiTL6+f1hfNIB4#)h|>NJFI8cV+| zKDfjgnaP@}A!a@NQ|x^xQ=mi+x(nenLW+U-1c6% zgV$}nS+?6)7y1oSUkn!+v)k=%k%3GBE9r73kRF8WE?uE<(88MdD#qU)aM6&n_;(a#cxQ_5O_!)Y=u3Ux4)c^yWx~*AD5O z-pByTnSN<-;Q=BQr>1Z%i?}s5hC~X@ULKhl*Sm0lS~LN?AmCueDH1v~a)m7sf5?4LNlKGLKmJEJ11K$LeMe(=@ zoU0U&f4$df`UGxWZ?%IrF*g&&UHB&hWcO!X<<-T4-EFnoZT*&d6*u{n6;PRoV7nX^ z!uCQ{LeQWP3+7{XhJ`zlL(gGc;Xh}}6dc4|Zg)y=J%pY?f%J-1>2^xmzcD(13FH|@*^@6qA`uD7HF;QWMIT;^z<(Vh-+%>U zrxCo}$tJ=^EEXmBH|AoXKxFBs$RxK4M~TMwn4EaTlacDV6eBI3mEPwD3lzZP3zZ4%G;}oM86j?ct@JSj2?<@ z#QY3tV^-UwNV-N{=aIG3b}Accq)76npt1O;+3K>P3c#H=1CbyB{C)D zks&0h)@7Hi^->1KD|Fm12uOz7XOnp!pQI~bDye#RTdQWY?nUcx!rFE4*F_qxv}%gI zt{j+dV?zH^M{BOKcbE^dx0xDa0K@<;Len(ht9u+^i{te>HD07l>tu4Ko-;t z4g0=C%;JQUU~7?s8e}51{nf#B?%@gaQ@Df9IMH%<0W;OhX;dIF&yo7k609~srttVL zrcH0_1g~ zMBAbP8F(2{sY;tzi4q$&S%7rtdO1RBD8M!FU-15_(Z9j_E=ME-G<=+#{dL}d!7Nix+9Xn}t$&_Y*6U>#3vKhl$w!`qc5t@%^i} zeXrgwus76a3v~MnLeHaImuOPWKTRm{JA;=C6#3ciQaE$p2}Up`p~-yoo(C{MjX{E1 zb2PCwe?DCzvCS~eeK7iKd8DM=0}q-9|3EyK1}+}|mKYilxoN-qTL%4GhKn$lf3N+) zp@*nh2;xyh(bD^4Y9raYtiLcxT}8kCt-*RoYRJfs!*11Y_`X08&LlIl?F;$MOk?~J z*(W*pp$#h)!J~$C3WLUZyE~>i2}gDt_M!8WZwI4iqc4O%Csl`;!J)HFa0CA>;JKJJ zD6Th;Q39@Jq*7gfy^Oyt%7DNK>OZDe0NC?4X>is46yZJu(HBCBDMsR2_6?5bU01wk zaJg&ar&|sDlHTE!Yt1Z>K@A_z5}VsVfHJ$FwELPmY-1dCmUZU32=P3b7gnp?=(Tp` zT3|Gc0`VQ=`d?(@zm2XX75;P7Bu4Rvqa5ZM(W6zOfJqNA_CWYIYwW@QHsM}C*cBFl zVCC<5YmQ`|bE)+1Q$BV`7uu&zs_S;tHOr%&7|`hEbo)!m&SrP9CV`dlpOR=yd}1{N z?9PKDl$UEd6kcBYTyIwc1aCHC=}aybyRd~Z>$}z660?4shTFx-{{EEw*m^T$hf*LW zME~>PYBjj!*!$QC$8h{u?75hEs)O`Lii^>KF_2*yvj%o&wny|rIn+)y)^0bg0T~xI zWR>M{mo87GWhDjXB!IsBfQR7(!xQf_mnT^0cmtaAn$*9 zDJ@SPfvrG|e!ERA;}b*JYBZ%xbKz$RolF-67!s*;Rq_u-{lB|z6HMBZVb|6PWsS)< z8UzpwoMyz87DKoh4-CcapW%vP7nwM#uZ@h|ynxtyCr(^I?vNN>9+)vYo~tIbN*N@H z${=8Kx`|fSoq@FIv0wVC{&lv5X21rH2D8XGm~;@8;&z7v_Byjb0-Cw8=fl);e>RBd zNI&57%%D9?Qr-x;%nL0LvP!4a5D9IwpO+!9_!!Zhx)u^}!vj&x?Oy!O-XbD|#>H6t zomOS_B>x9%7Z^%gJ0Mx{wzjbE+jbM@2XNt~M8E{&IPHfi*~kg2#{70zdTRdRkP|6u z(zLUm&T^pPyUF@_qnlchDNtWLo_#&GCRM_64R#L!gm1h7;4 zx=dyt;fDu5(9bP@2Us5pB+;TUgRaHgK|Z)Gz?GUvi6gqV)|ig4!{H`(cV z?%gQ`#BTaku<{CTzHkNAJmM>9UB#UtK> zGCu3!BsX_;DXk{}ar^Xs{`Viw^v54tf0}Nd62yndu$d2@rGnj0n_)zTUgEgY4BgmQ z*|p2@o-Bi8WHDnJs!%n%v9xx^MbY9Xi$_8M@+*nV3yeAGorkw?;1!OCqeSy? 
zi54_YzHmNIl>Q=$0Ljttcdr`U{C@O>T1>iOH;5p;xih6jx)|+nfTuo3AZds@9<1Ra zI0f9+=?4`2l0H-_r$!i8V8PIhWIsaodTP$hZeI-9Z_dl0Y-3%4ofYXqD@#8zN(W3c zs%iF-+`x@w(f$X->Y;@G1*<0a!<2vY`?rX=bm$qjv;yw?@oT;ZEnK zjkuHgXM`TtM*RhwU*{M>6DSJy>xXW43oGfU;gX+Y9bL9gEqN}jKy=>!UrtG0H)FW7 zCM6}ntMS(nSg)CwRc**O27J6$TOn>o>yhewVVzH4_lq8^LA zV&21884DlUZFIq`hR@%xE31Ig|2_1vLpS(MG zq#T|<4F(Iafzbj7&)m^r{!{PYwE- zIZ#ku-st7y?>6y#s{c-67Cn8~>>?L~a-HxH^N%t*qA-&9rGg~6ZRL@*Gd*1$lt0AC zMd9u}jH#k&I1?yzjT=l}fyHb>e>9GOoKvMb2LCcP*_nt1It;;d!S`x`Kl!$z;}dxJ zSxnnkbBD8cd)*xP#WpcZgO4#hZUu^tYl*)B082XsR<*Q3jeC~n_6tb+!5SwSgc!>- z1&G|`Dy(!p5yr&!lZ9o{NfzCIul_lVoI!)-y|n2ML%KVt#t|MV`IE%|TpJS;rSts{ z-Fj|Ns_)~CC$J&M_;7;og;XVok;1+m{xcqS76yP0)pfWH?$;OH|Mdmuik17z*%QnT zD|hZdG926x(zeW&AM-53ToV%$h7FkE4buP+m)C`-mKR-mxo-_7={f`GWaN zRWOW>b(8HUybI+!lF@mRA7shss_%79@{+%YX9?<5mmP+k4?Ae!d zNwEhXeJnU<1t(PSfnqJZbx0Cz4*`hWIp9iks>|{$u^`XqoG>R|lvbTXfrRW74O|0; z++qMCUW|wBGO3yKo-+s>|6V2fw@|90>&E+X3qN&_?#~MI&+G8a=>!7z71F8SlO~X0 zdY1De!J4Y(eMEk{Y-pS1e_#CN?Kg+-H_M|iv_L*)u?JkPttQTPy379$NGplb5AYL$ zBv6Ni3}CeK0L;o}xvRQp;8src*kb~#C~vxB*>rkXS^VScv&EhcY$+cBih`V`mWvY2 z>}3F4>HJyC^;)Q1<0f%6DzSqI_YPSLmLuOg!Ll#;A7I~{f$Gn*?UzZ}6PVDQ{YJ$K z&D^q`b-_jsR7a`!5gb8R{(S4eEqJ+^KjcIBUYuO z$*05yN?@*mQ&l7t2O=nrUlWlgWLQYWz6F?XS&bKJNQ9A+x2I$h_L#*m=MPrDYqzn= zb$e>IN}c}Z;=T!SrKY6MP!z&?8?O6aPl79;b2+33jnk*c;H&+J= ziunV*mF#dSG#JDHZa1X=>+3T2WtRn8zAhFd1vsZ6ALj5FV3s~GmG6CH(>@_d;6@uB zBe?pNAhn7BeD!Q%2oP@%N6S-HF_~MuHL4cBu}1~hwoEF6*6QMbFB+vrHv)}rO6eu4 z_>OH!FB>Yvvaczf=!eaBGZkShyCFsy9)(~t8cX&a<}kt+w4t~$M4*Mtb~cDmE(&P- znVI-x_)h3XF|etE0$=-|%eKdvPp**dW%;8;$AdBAYXSL?KP$J>^~}`qol7VXj#^N~ zJE7{1YfJd^oU&~{K>=oHt2TFE79ZHdlV^>;dkV*&p{Q+urx1=NxT+I;hI9fDXf#MmdVyXUK3_`dr8yFUOAU*Qwg9 zW=Nvkl6XBMW%?2; zd)m?p5k?`cl;6S_bvuwv_xf{Y@)ypo5KT$x(XTw(gQjm=pM7$O?|C|7Qod_=#GUKc z468Mga5atXX!}1@D-_) zN(9fl5%?^*C2Y+X?U7$0T`9iiMB^XpV`~O?Lg#P$jM(ya*vWQBDJz9ZFt_WW-7PL; z%Y&i5_qF~}3J8TpJ@^mJFV8wHyguzyI1Y_%b{-8?B26C;R`J5~~q^qOF zLB0s>I>G+$y2@}0sUn{ufM^2tESCk^|5j06qRK=y1HVkYbt3pw|C%s|jE0`to-!#= zxwcXeTxRb6=6PSvD48AVn>I@nojzGXLJ+|qwN4DwdM{?i-~AI{-2EAQ*H6ls6PSQk zr7Cvk2X>0MdplUvUtlgpUvvMG6l%KtVoQiFMa~5fhTV9s4~*(yWB%@*PiqO1hux*w z>y8XDF45O_Y4wjiLP}Lh`f3^xB3R;ajt=HMKl~&~ex?yk|Fl`wcv;U%MuzBy1%@Dy z@q5n0>A~b5>beQ6`RrPURkIK;FT&h0_s9ZXKAQBs7V(y7r>O7Mk(pu2$32J&_OFr4 zSxsfL9_U6Y7yyaac%{c*N%Ab*mRwD9(W`#K==mL6u+tUgb!N1H5!W3L+tw&PGdz6+aJC47>;Syv{cC$+29l-jVY1rN#5j~VZ|@yb zv8a@FSnUHl~IuG@FoLv6c=q?+FQL_)KHqW$mG6X zqC($TaQDY3RId|RQMVdn=0>Zli1Y=)OHgseTm!C2`aS-3R#E^~3uLY4)bNd24?iY~ zstbX(?bxG}(__-gJRBZ-rGzQ>qrl8!0J@rt-@!-ofPuU%mr zA-cO1@$3HJ#AVEUXNG-vi0HS~MS8`D=zxWS&+uR5xc4f^yY~b(jZ&Nxdn==Ae_RB5 zltGO=l_=nw5J0`N&*Qex#kR^RoT}Y*DjM|K&K!o?58yzaWyXT6ul)pt8;~T@d7^GI zLFgfSIGa|wTv-~4=9zv;rHNHNAimAIYy-8%CzyP|B@ZoPQ5Iq@mxX#|6gk3{!A#V6 zCw%p$Ep~9~m)RQPSK*5~;#|5wsG}=pZm0U&sz6=zJ6~ilq~xQ&AN@e*J)R83*x&8p z=780LCfJE1g#F!KH=#Lskeh4*#GSej?D!CIiGldY?|?V z(-UBN(1R%skwZSD09#La)U6qVOPzN8KM_B!QPP!fn5ksUpV)!=+rVNim&O=E0cY}o zoGMkqM8C=SVJ6NA%AHhbE?u_0wyux$v9k@x2Gqdb-Q%hs#!*mICp|-?6$B@pDB95Z zF+*y)=a^G8jVC6JV-O%ICQhYN1H=vVSbp5?iZ+RB5{^R~;1Pn7U^ zmSO;_$lC71G+|X-1rn6}>0TP0T9STdIap(=D9Iyucfo@bv_IQ5=kFd2fQ|od0~K?u z8MsMp?Oq&xz~;WQt_d*}=eFU)mWF;BBLzbwK1>e6L!PW^srTmc;6o@B1{&aI`959& z2;I#orgjM2DW8Oh5mo@}6 zVLmKaYj`OpmeY*o`*|`h$S^45dQ`ctF6ktiT3pop#mm~IV^>GzRp+HZYt(S zjC`E-C9F-VDWL^cjSFj{Q>t6fa?Ry0X7F1kmW2C4^1OHKba+;^hd*bKO9{-6)v>tN z>b`f9z-N$LJi=m1S)8`_zgn#Jt8v)gUF=Gwp8?{~z=;BE-pBp_qOY5l#b+sG>evY) z-fqesRLU)N*(gQ^CcTf~A0TfHO(0DJK;Wh_sV|?$+9c=Fp4{h>^Q$+U( zhSjASy(Y1O`pbIIq>csD@xfc-y#2~%>$bwF9Tk_D9kM>3GhXqZd$3T5XueR1=Emu> z1E`oX5u)-!pz=2eKop;8Sfm;*NNvw6j~!s$2WIZvAcnKZ4%q<2IoT&l#1r`b#BGCH 
zjU&;d=)*AIme2<#@g)d zwV#7!q_+kIQ|1if*(F6!>czt;r$?$c@OJ4p344iZQIcjf-q zHC`g(h50{Cmw?f{Hb~I(0oZ>}J_-I5agAe6umeWdCix8U(t2mst6a^iy zjVhI+4dV^72qveW6EPW1i{xMNa`Mx8a40z5Re`!USFK#q?m1c$zW>jz-yPUKJViaR zVLkmc7>C~-X^$9_8XrorV9>@o&e&B^n6S%1AJd%0TtDU4Kp-On)GfwPf~DP+^Bo!E zZDHOx=fhHCHI{;F^46~hKPo9TbecYcUGXhQM#LV<)(p+I05n79_|LaM!&#)RDgNtN zR@0h^H3$1LA&B|0RuNDoUB;(jlQ1Yfp&V}-eZp3THIg_AL z0oi{Tt9-d8{}|m@v1ARurS@jd{5Ehz&WouwDJCw(U^T8xj?UJ%+PtB@#nr!e5#}P!zDB44v)5Se7Y(ohv(d2s(2ey&yLsv zmel2ReJhbDwl`+3&^}#T06%^?##;J72P&0K)Yj~0YFny5*S&&y#=+WoVV|>Y$aopB zEG^O6&f?1#H3bW=8b8}4vAz%d&D9E2Uq?m%>$3w2qgR>oQ^&DQVv1B9MAoZ_2pcJ( zn={K@F3&}>S7r^Y8R$m+8@=USX5BpCW}{NSROT8<4?a#i7$R}Y{7?i{M-%{dp?lrr zH~+&f^k**PAqly|)?VJ_Np%$W9+^45-9i-iy$|-Jn)MGCcUs}!FK+v@s)Fq^le>=! zu`l+WSr<=B%0hge7ot8^h6(FNSe-!HdV+srXGGqS!b1je69J00RlC>SVLiX!+%uz# zc^80j#>G1+j^W-8kNUR`bBJg{fYg#d2QZV7Kz|4T1jwt$yW=)sk*q`gNs&qAjKTMe z;fW9LEc_qU>^gN@Bq;F8eZ(`p{Iz4}etAneAd?M`Gp6?Z4AQ!Jif;NGp(u^4{aRmm zg7bG`IGyz~DE!=M1Zsex=SnxU_X>u$Y7FV}8&r?0RB8l200nIsPzt?G2lBf}AdL;b zCY)j&NoP871UGP1E}o_LQo2&``=V?REvt(RmMa5Y>+*X*{rdittl&;rE`uK3P#>kx z`g(Q7gN@F@=>w%%^yA{T7(dRq!ZBfgSWys_PXh?OgW^Z)!||VoHWt%I4&R$Wrq*ko zy$Fes_6hBuT+?bIfW*v*M83CW|*3HSfhY;U^PiNT@qCf(Sgy-7C?;6PjUxVn-+y3^sS&pl4tLd5?HcK=}7(W-_nPRMLn*H`R-Phc#+3{|4f}B`%IQ+~5egI#E~~ z0;Cc+jo6=h!+kEz_%ob`(j4|BfLm*z-qAzzB_ek~Iw___BOsZrn?PGu1IgF;#wO4A zG@UtQUbvf%>PEuaY%F~D6Q4=}>il(5)7Q3vz3m9#*v;@Eh&ti1IsdIsV~hyJ5(%Ul z3+eEi>^r`kG33yFg(d@!7HUBii~M6R>a+xx%3~+9Rp_*vfV|xXKWZ-K%fs7hp$FF= z`v%fF>BBeG&ju#$X>jI&&2MZ6b8OrdfX6ULs6+szgygmtM|_JT_mdeTa~*PAtFwRk zg#yTxki%TpEk9?$wxM3S>*3}}AD-_9Xo$5YBSPZY!x|gL6u7>V=xgLVVaZt$B{Ikd zhVECh+VW^#ClN5tHV*J=;_La~2S@Te*Lh$bl3?PsGMX0yZom^3ST~NDq$fGMn)B5< zP&{iM-9&S=63t27u;-HdEWCOwlI;4pCFwvE(M3W{KQGgYPm7_S%xaA<`!)@5psPiDXXdE=A*2ca8$`b7&^NE^>a0M}SXv9LjS z)30fTHbA6Ih>5$2feq8O=IlM2+Al`NmOlHPsJC76dOSC~u(4_Suov{f&~)hyh~~S6 zs0M$QKT+$M;(|izvlS;%QI^Z_&Q&>O;X|H45VPusZhN3oeut>oAbzLN4dva$uMU?f zN1IQG^?wc%J2UZ-p)CLU-7t%K*CJUnSgziyXG+4;pC0@Exx4X)HsS|;pAil^8T{Ck$zH|y&Bh2#Jq{eqi zZcT0Q5Vlu9eGpLh;FlCLSF7e{XjJqutI)bj`F`NdT-#!o*LJl?3F3L((>3X7D_v6c zw3WWOF68jW0_}@TW5Sjvc0`7DmA-_ip`Tn7fn&OaG9GnBq$#_q-Cv*jMeI(htMHxx zk$*$GZ&QW2z1!GP_Ij}pu~L>9r4jh*^hp7$iWTDSaR3!n{65qhH&gi?bS( zUG490^MgAS(*xz`@>qqm*7XsgJ@!ux8Ds$?d83%)K4ygc8>6Y+qf1WJQ7|KGCDCVx zEcV3*i-92kvKmU%0F51%uCYRjp!aj+mcyFMVy%uw)g<)cX)lS`Oj++PMegM5 zY0|ZBA8y+k(7Dn4>J4Gda+>T%BlE*Inh4N2fHu#i4R4Eco@4@QPr?Rx z4N+h4*7FH!E%Ay+{EV#V$orU_Y?|@~om8o$O*c+hCa?YAr1UF49x5NjMQ(vWGnRAD z2iyy_)gcRl?%epPx#DF+vTFhn1b!dqAbaJt9D9OFTSaQVM zaMu0<{k2Ag?(f1ybw)|}{!Rs~4nNf^87T}rvdacvHjW>ET5E8onTHWVYq99`10-0) zv;~4i!h;H0buV5CQS0*{yoFZ;#k_UMOK{HA)t$oDXAS@4G1-W3-NJ+wYeV%$SBwm= zMV+GX9jkV@p`iV@7OURprF^whOrU%oS#oG;SZ`W-3(tLYqjW7O#nenA{iDf?y`=m^ zh3KjQdTA#U{ezToO2BWpiB%<;Wy>f50F_{YKF7d~95B~X-fpH{=GjC_v^}AgN+!PR zJuZco4Zql;62EmixxYOA)8`JhMuEI8&Yq;IR1N-;{}`a>+pY9NOIJQify*-x3qkQW zsNP?&NYH?Lo@a(XnK;T+y*SvjvX>CLgo|4XcgUHEl-*;yO%_DRNsPIn(_ijW7t_@3 zMh^N?X)MTsK`}k~#i)C)q@-4<3b8DNzY73HEqFr7pB_qo(GE7y%D`dHPx6t0PQf5m zU_w$egXBFwP2p?zU*0CecP(9VL;AW77RU!2Ne$b~Ghfjf$mnZ{CGZ7Q{I7!%I8}eK zW8Fv*aLV$}Z`YXSjpa35=Z|Gs6Erlk@<^(h*dP-HsQ4urNW@*~oTG!(aU5|+V&<4Y z_2e<236eRQ3(UOyalg2YCdBgue5_W+kBmIpxd~xsAh)O+PG<7ql>BxIL}fb`Ze1l7 z-BCQymomTxy)cl8w?;rkv&ag5Qqrq6BbroJgllcF9fI2=>G=F~j?j^vTjC$p!aRE~ z6$&tMLzJl9txkkaGI<^Sl9uRY9OeHJ#}!BE1cb)l>nSQ#5mb47{=CaZR}#`AV;$h+ zZ2rpTeI4Xau!|gI8-K-u950xaI=sNcd8S|=kDVDeGzG9G1)h!R-CCi;N{ETeW9$?mu2yRnIMYDRA^*IM73>Uk6GDA!KT-7b8Z5+;Tw)z5c`CR=F zil~8N#>YVu3b$ITXhi7z4e)S~wc~^N(0dPmeEWasdJCYa;x=xS5Rg_<8l-z+=@3z* zL%JK524QKWluqfT1*E$>q+@9iNy$Z!lp~c%dBrqBB$!|!pK_9^04x6F}$Yn3$9vPPG$S5$*^(U}$QBu$a 
z;N|@Gu}S!?a`>nSx?feH8ImdVr@E*H!m5@J{eh_?3PRz2eMG|Za`FrbJe!I)6Geuy z^KxXBP^mg@dv64BSfyzogD8&2YdFrRI&&j zMcHmP4(pl;V8M;xdbI4OYR66w`<=6wHZ$u$k3IthYC@aqi=O-QlQ-EoS(wcopI98kNovq`*a^~jND-C@v<4q0 zOk@Mrf`du+D)`NjQQ()hmj*2ZNVPqUd6g81mp6v>&*}=kKKUAkDh8}~3H^~ndAAy` zdYLM39x&kbX zs)$9}0SyinGQBGHd|`56y%oTQEl$eQwN{q=MuEuB?Pd7_UWuY7M_SP;*-6ACljyaJ z$FIotJTB=LinmR~hpn16ESfZsh`%u6Xke@cxpXiLIWntAmgOUBQwcnzt zALkPT?}^}J;-~aE>1&}v_B&BzFqDPL&<4%l8z{RSA|ocSQTNo^_3&yxNyjBgX`y`i zNd()fJ8w#66>VwA>Qj}azk#|jFYp#~(wmNYJ}zD3hA%g!)PHosoz2P1zbBTv!FOr{ z0IzhVBEDqgjo%uiC?^JMQM{$k*;4srf0YPvevC+=JFnA>IQU(fx+egbOZO(D)>Z=f zPOI#B*Bq@gY~eQXm3##GbRGb0i?>Zy$D^GRwoN`;fsQB^ch@ybi1Tj78)kC*ozj^V zlbY%qoH`qKmTADA&$Ic@`PIVsIPIduSz%lFRKe!SBOyWHvGhA^I@JI2Q+FQ!_LA;F zI&G6b-QtAO@vp0VXlkQ!c>!X6Pp?It#!an7yLOkc3kMaxK!?RyZT%hHaSbns2xJvD zdrT%Ds{T>pxOeY<7_%K+cB76p(tGj`T2=}wH00Svs)6i_e!_i#e>a2^B2G zH^&!HJRtyL7e}_gz=c4e_D^BBCs@Bez%WbovPzfsyb?SyBi9?th~EdHLm=;eizcO> z3#;>jC*`w10z{kjX3**r4!CU1*9oy07yG9G;|qdm`(<&7tEq4N$nD};Lp{HSex3R5Q% zA(R`AQ$U8?2shV?GkD>EtoC24dKOzsI3;^|X-o`DIXTTq@A&{*@uytQDCf$#U)oXe zE(^i0)MZHDof<5s-uZ~|7pFWQbl$cuVoc3c5POIdZ(~!xN1y-n(XK<7T|rD22#F8? zncK&z<9XAqv8HN9dtXchcP+7G4BO+eMHUOZ9Oiju8~yP9E75~lU)?^D>SIWmJ)N8u zCSQPV@Js!2Sxg*gH#S}KjQDEMgqMmR2^P|8dJTBKHv$+#eRKG0}aWKt$f3%^! zz2o#|Uc>sLZcw-}GSJmF-3cg*EQKWmh5G9F)o2#YLQ=|QdONnNLhq?$2h?-60;&@I9!3MHo>{XH2?p6!!N z9K?WATrz+TvQm2|H{yU`2&M#@Zrn66Vj#Hah;};_BP7^Hqe5eg2}HN!wvno9ugv|~ zANbRT@p+(q_z^~3uNn0H#GVAwn;}GpMv-PQP?6OO7wyU*$H#|RMMbTFlM=r*hghWL zs+GK$rWlMwx3`UCDRKqS4*u|Nwoz{PZnva3%(^R)amqa!BED$qED550yCp>8g4c(> znWc{{3(Xa%v%PyiWbj7V(Nrg8Nu_p>U6z%@mcIdvhEH+LKHW#-Op9B&67b0ZNi@6G zV~;wiOni>m?|`1@wp0-BXq$y;y_vk>O+xWkb9uP|9jA&-U8%upwRtj0m$V+`3()5t zqP#an`t?%jwZ|uml&7;rqCa&jYZIkpS{Q1f;WXj*zqH+O+?0*!$0B-6u1wE=H2l$n zBotR^EtvsAmOx=ux!I?%v&HD{Vz@MsXZJH3*;aD752RYN&1T4x0w5KIvbzE)X~qd8 zL@$D|DMeOeVh&G(E+!7D=`nYhA6*HUELt-_d}e3mag{V9A}Y2=>Zi$M{k zOd>ppzVX%m=>&Ifc|V(vMf`p?IdhG1p+%`9piP)<;CTWqj)aKN6D`h<@OzSf){BDX316qt;)ZV(h5hXu#l7`gBi1c0 zTYA}9`mEM<k#R?v9yB9!ov{#@hLm z!@LiGbj!f$p=9H-ZA5`xj4v&mWj}C{jIf6ZR4zC4cxplR?VgpoXpE46>pSf?;E)I2YApba->F`J;l1HUkGP{{kR zu&=(xJc>JiN}z9q;xHMf{I({)yPCo~0a<}%fj-}#x zT5!%EKT*Rm^G`4Vx|>+kkD>+g=3q#GJ^lzoxqf4Zk+VI$4RrEcUZ`LZu(ZUy^%R@S6jwDCSchdCix%B z=5&ig14@4@?T~e&6R2iw517Nn{xr0KbYZlcCkfcd{&^$!g&XJHl)9MK2unUXJC5W> zV%Fv5>>-=NMzh>AV-Kc+gFzH$lvIuSDE}7|<;kKOiiEw~Eu>mxo=4(;0NF=X?!|ui zgg7|@3G8D(RheYVQ%i^qI%zOR>VinZ&+hAHN|EFPjJdGk6rmABPR(yU+M-#erF}cYvwe0b+H63 zcVLLJJy~p`I{N+o<@XHt)z|M{jhURrd724s`$2%i`;U>-f4x{sacLN0R*$WH08Enp zoslU51@Sw1ClwlogbDfsRGeg-+vu$@3v4R_J_WpZ@lD*0CdrNjE3{A^)C?vB2-e4XqGCY^Ru|f zFFg@qg%iw?y|quyt!EG6uWu%1d^reP$^h~GBdGgNjPq!YT!ZNY+hFlvO$Ee*Lfx^} zslCWB^CVnYIPz0Y6{Q+U^+y6U4j=|EP6-DlT*BHs<~%ZAJreAv2rM_%f|{K;6^Jef z6o`&A!9RjZ1ZS-hFwelwJw$ zY%1H>z0msR8fq~KD@YCVYQoFHV>gSe><7@~9j$oB0a0hD_*CK$N~!Q7V10Q7kQ#3#4=@O6yX-lo zkWvt(ZuNr>P9py4xxttfhmfQB|9fyYYu2O7>g7zQ%I%gfKtwvzdbl=z_(R&ZidOk? 
zi@km0xb1wwP6ANwINS4AgLb^%-a)R~HmD8Sw8Dx)@B8efe-Nc<2YdS&=Ss0z*)gn)`2(1OkVUH5W5p+liU}n}J4HTfvW>lXV z8k9yDHYr}|A4DuN4#m-l#CY$i|Or~O_A%I!?r#=iBPC7kwA{l2H{ z$v=@HxmeZCx4|-la?&U_vySN5|38?W*Zt92w)4Zj#O*4;1zi7?7c1Uj8JL>W3*Z)l ze?Ok2|FZ9H=U0*|H`;j-n$z}#+bzaw`kGCMk zP!NX_r$v62qfGPU-+IW)Qa|068xN6tPJKMlQ^At8?2r6vFHia-*HI}c)reyU(`@IPl=`< z>tZ?5*t^R>{ExJ$nyi%EUPB5cUkymQFzkw3)e!oEL{p8fg>3C{8tD9wuxDQx%Zs4_ zs2(qgXwwO;UD!)2>@Qs@hVEo4fyRI-fBjxh3hA68$*rYkuZ%nZOuE{=730qW>?h+3 zf8C!uJv2OTd0tN7HyLTPx$y{kdI5xI#oERoTgl;#Fp$nlc4{b2|$W7WRQ(d8N?bX;4p%^fK4G}vz}cInfN4 zD>iKu0Hw$YRPlOVMSgRU7kJD0|8X+4PxB~z+X=D-V!Y`7U)C;IFrc5i+!(+89hVIR zy#gW2WgX$&MhkEMo$AB)zxDppF=^LoUCkue-v2*m^bgUCs zB85`=aRs1Mll6LG?34LVsYpdy%}`~V$BHL^$Hw?>?|&-e-+Npg>#JDTH9m-ZL+HXr z=Y@|J(NOn6rxh5rw(mPE>GDs=dX`9bRgUWDS~F-8syRywy;HS(#YsjyuqOHQ z#S8YeqPJ^&4~l%mnIu!5Kc?2xhCao9(j{#IU{i4obL_V@`*T(GjAx9Z(bk+?Pgl#H zuX2Wbal&yY<_LO-ysKg(>O_Ey6Qv4vKPlSNSdPZy9Gc>R0n;pk+yheqN(>ml zbQ@lqs06lC@ox$kkWBRDzlTPyZs(xd?;Tq!oi{)5{r>_8**;)#c6~2#m2HO#v7j)a zE_qv=Ghz6gHKM3e@#6TyA_E28`;*<}+xObRe7npb;?aeZJ@!IWoIu@_0rAu4Q6e*n z;`D?M#j9kh!QeZ28P=+U`k5@CqlxBFTnkmh>jxosdy2emSu)d(%!^mz;Uc>}wnx#% z4#6@$o~gwToF^nDpyhY*1*UUWXe~foco4B-4WXu*9lv1KLcsOQU6fw}KhiC=(s>ZL zcR=K>iSm|rE}FT@kj!+7m;$<&!1*sO&Wcoyt~uMq)BY`{5UckNSS)eT){OjUzoV^@ zpn3B?MZ2{~8gzwOxVrZu9tnF6xbOiQ|2FiEKAy=r?|Pc;9I=hADD?c2`|4Z+I<{%E z6Z4o+;_E4sw%MK}^?GHKh7uNP@$H4=H)W?+;d9$IDB$Y9rGI{sbSZM=!TNh0$OQL$ z#n@f}#Epjh{%Ftgh$#X#FC8Tcu=v!&bVddBkIwrT^tB01 zq(^}5Fg}!w2(U~Qs%~&B9)m0SK$4b~Y8s;fu;=4s_R1X~d02Aa+R>(YIN>&jWDWz< zP$US#fdPb5yU5Yhjp_C^#R-_8zZmx{R`~mNSnl>&K8eN&(TpiNe(FyXQs$@QsstlC z-{oE1Zwt+?w^&;SGYlE(Ws@E%kPy-&1xI;E;qgQEI06H39h0m#bwvImTsISV5 zbrDB}mqRN1YC50AW`iA8!5hMH{eRjPk%5)>EALDuO8O_h69s(@5RzDr*h<-@kF(<4 zMr`#cuZ)gmlDxVKoayUA=a*^`Ih@N3gwk6AOt(KoFb`XK&Z=zdE;ZQqm-krNqn$FI z%{ndAzNeFbl$y?8T5xJea*;JN26p_uQyfs1Thevr^P54xp8!74Ko9e0eV#-CN4^K! 
zrJu~|()ym$w51h`Hu(Aj-siOK2+Dn_R{dh~<0^G8s@L?`YO;4MUc|3eSrm;~rNK7< zHdXs!n6=P?A`JHHB%JQtL(aAnc zrZ@CqSFc-Bb$2FQ{+?oUHJKDX8KEsw#OK4Ld(4)`_dEK0%riP6vCZy#IUJjw+)Q7r zVGKvyoxP7OLX>RQlD@tr7N1G#EcG&^kCM~ZRG6-y|8IUh^asTTcXP(3u)CjxDis*8ksuuzAU2R#rX64; z{1|Dp`Z=RDXezWplr*b5vW7x&?!u6DpNZzV^J3Bj8T@36CD0uuajKPbe%01<z*OJ(^ZNnOkW>>5tO9xs)ySc(&y$YaV`Pw#I;YiUAA* z)eRWTTMEudDM%Y|V2G0_pzAbChSJRnLhYQwp4Gk$52PRGtQeU@N-k;(uabm)4ZaZX zLa=A)_9a~IMPe3Q=ZK%z{dvQsN^X^5#_*D@b5`Pn=(A?+lL<-8*Vqm^n-dbofEfiS zR=*;&n9YPFeDC-9yryNd`X@r#pF=F+Cd~OIMcA+u3(0{`+QC*rU9Rl>>(1!!59@zj zYqm|6C(+3=BJ<~H93F2j0t(i1Pux9;*gW3iRtBL zyNh*UNr23>wDq};LZtaXt z$gfa}Ua7I(L_B}isJuw1?X{$s%Iu;-`uZKzhqdMjQ>;8k`7st66c&z}S<*$o&n%fgX6_?vW>S55x8aaOaIhP|>{q%x}~b<(8E*`rfp6kQp;CJgQ0ItZLT@QvLOy!b;to;zWU zTASEK5clU}k60h^#9)-w*prqxG?eqp-zm@{HKs_2vdC!>@`(CCd8;52exPt3?c~|O zfW4-DD(;QfX~qa9$R5Xqqu+}3k8M|(#wa$U&Z9&{UIn7W^R^AwuzdVu zX90jI-f``tMk;*@c6xCm%Ikmzd)!h|Ex3+WiQUKtX8wk|7#$*Eg9`r2-t=zkGFjHA6$m%?B&pgIk%Qhe_BgrFs;{+8CG}tS?WTu;8$9Mw;8MK zrUYME!dO1i*tBE8NFjm5Fb>j5O$G)Z2Znl8`r>)(KDi$;U?T5~h3&9{W%V-2!x;vB zCol&VWfKZ$q*zRgyhxFvs|-y(Rz|(;k3u1jUCT_kvwp0#AhRytYu#W&o|fFFNmVNn01xYo#cZBusyvo-d=h6 zAk&t}9pwy8)*n85SR7aZCpsArW~RO zn(uiYqO4wz3g)eB89djV^Aa?4azY;?^afoN-O$6hdi}0^||nG~;n2Qu;(ss=&mUD66w0)_Q{JiV*R z282sQXItE)KbXk76sJP2io!CfR|;&ZOH;&r6Td`5k|9p>)(h|g&R&7&LQ-kS3c^gzEY2vtBic9LCh40 zutzIE7?>|nL(FqIfW*t9lsr?W|9VTiIs!U}-}nkWOBaCXY!~oGqLc91bH>`vzTG2j zWx0RIZf^~UV`x6V>s)_$2t48igTcS-AdjyV{shR}?MjU{#K&V4&Ks4w-yXsP=F~Z) ze<;h-u(VWEx;RyA+@`FTSDlQDlT0G}CB&=@#G{CZbBv7gYh=lX`X2n|3iCdhep*JZ zCquNtsiyfeWHY>zV8?Q`T?fhd0;OFQ?5LY~E~~sWTD9@X?iQWDPLXDS6HMa4<7ik@ zWBP)U`kZZZ&XmABMigyNB*KP6*zlL<%HJwMi7%hKXkBO*Z-wU|wE{ycfXXr{Z?~#L zmyQMOM1jD5KWC=dMbE}gPLnp0A7rVXA5p+O{m zsxgxo02^p22m^vS>;x4q=#KtOEjQlZlqQv0+`;qNjLzVAFR1!dGSiR8_!l_{T$s)E ziwH!K3^n1ppV8XMBc3$AFEx=acEeix4l*}4mI!@$Uf-qYb&!%gG0=1LuFj3jzYnIVq7 zS^)h-Ec7cDY=dZoKF}J~h&!}3h10wV5LW)DJvo0XkQMEZuxSr%&bMR^@H$io@M@Hg zY%KEto$TnGH`pt|m4{LMZ`3Y(!DL$mc!Gi00|$n}(gDqZ^G_Dq#JoX~j?QNc2F9sT zX0ba&*^quA!8shQ>ad2b8n~sQk*r*un(Pe-1v*T~ZJcxx(xpfU8)nrXZzc=OZ% zt^8-JUa!H4WRly#7x)|32a6I zq$%S+d4_;sF(5AV{3kgZIiv=Nk^fuj9HN`yM|RXZ^;Q5>yO0+V8`3;(>S$wQvf|UV z)EzJkrwZDugh7&1u1cSGw z(+dz>a&1+G6Eg||zL~OshOwa(zRD9Y%6+Og<%9xvb(Yu6^-7s|iA0&A(ZEI#$AhD(akMM>bEW7f;RKm_AY z^R4P#{vN;MhkT+&jNVly!}pxtXc;*_s8puEi*ifA<^6pP2||FQl$a|vMSebHal;G? 
z?1Tpo`BgXle1xfMuiFL0w2cC3E!*heN)Ofc1~Xf}=gpJZM{t>&ko>T|5-!Ti-(@rz zJJ@Q8kou#rKHazTdH*V!uvtZ>|NQ% zSD{OFpFugz-ML$}ua95+(JID*fs^L`&W?Q`zhsoG0wEQ+CwT>Uy$_D3{ZV&KK5aNW z9*j%|Ki4-0zhI z!yrVJMS~L5og>$JYub8Deq-jf4)u&(VlN;ttX_n)Ya2&Jxno$i0!dWCfJ|%aUlo;p zlG^%3+y%;a1H<&jg|8ZC#sA1l%|B}$G{l2j`Wm)a^159!cAUajc(0;D0JHDf1R#k) z90iu@qsR}pZVqT}`;Wv)f&_4PA+(-fpQ%QJb8_Osae?=LAL(JNOh`r);~ZW+idBMT z#6K}jFAX#BujB`snqfQxxM~1D%gGogs%}Lm_r#!eRzRv88NVLOT(v~a(~-+gAx!#5 zzf2DEhbIgN^#$*yKEG1p_$fS{sx3$veegaWHf4Pl1S z)m&A+z~cx!l8^6z2PXlcGni3p;&iiH#PLfcZj#`dAk{FfKH+2Obvk-QQmHoD8el|mm#I83?jaFiHN zMn@zAFLVStqMeIJ@ZHE>dukQ$@My<8@LO?QIDcH$ZYp*>eU4=|G9+95(F_+Fg$L;V zxaVg^0)z+TwdCiHk+&3gSa;MYqa~(Xml-n8M}o9M_3wVXkvEEr?WUJU{5}bnI=dN2fAsEvYe%ZIYFVKFtTt zuEuYJi!0|1OVkl{)@yno4ju{n7OhplBj>N&JNx>c2peRujK6XQp^3MJn-l}R51Vyh zn46YiYkZ-n^i$SvOHEJUv>WtshsaqJ`E?yriLL2@SKX0*A6EbT`kMiJ_%rhB+A+rL z-#IZ(buD!*O-)VJ0DNaBbKt^lhyLR`M?vW{S6Ipq z@ZGR5eCsrlzAL4pF4HeA+*&sa2YDNd6ACU`-rga;jPYp8t6h!bEEj+IE@%C!X&e0u zJ(d!_vvZBCaSp5B55_88oyHwmyi>Y5$J!^kNb75S=19InfS?Tx{?~)_qUECfkTbgV zkF18l*7qK)&iF!$TU;|6(p|q)6G(XW72In0WHUj9BzvKLPub+viNoPd-p#?yS*yg# z_FeYF?g@7r%Q4OEQp){Z+=F&o=f=2S9;}`Hzt7db2h1T}X;)lr?eBlQOwnN4%E=KP zjg;)~a!q5Vxj)5EJbIaSDI$TNU<1lXfN4vOID?njL?qjI*LNHl)VVYX8rKV5&V2i- zLC73mAz_08e<~39GT_H^GVE3>6sV}frGzUjLng@|9;1X z3Ht06m<+5G*syO7_cx)hs3iRFM1P%J1djVUpWx{0 z>FJTfC}5AD?%UOmcBPD%GwFTXt`yL^P61G80$Q8L-`ybr2uhN!PuzR#9a5#O0P{d0JU{rTVmk<4l-vK!EwE=A#tD$$T3)E=A0B@aBfmJ$P3p`~|bb5mCq zDzFAPeZ07#y{c<$)4og;?A)%2mAHe9dq2-S*;f(0XwSa-H2An$EYsq`zXdku=jR7{ ziNeJ+g)q|Ty*i(6auR9)j;q)+fs0QB|HB{ymg$BrVJ+K-`#Z%Nuc3mRUT3#54LevP zyGZyaQulQWTwJNEJAwzm)M$#@ykSD<3K`!sfV1~yNH;Me*px<($5xk>mk_E2le zDNiFp0{-+7|KluE*Z#{uKm+KIjCNT!JA7}>DS;ZrM{9!f^X8tZUpM;-Ruaw8r#Vo6 z$21vt`&7ahQL_wi!1;mAk3atoy~AeSYK1j;7*lFU@r?zGOZjwVMh|7_9&v)bKlRxY znxqu_Hfdl9J>$I?qI^Tc46JMWVT=);6->+-A3jk>-kDQu<^q<#RV#&@_s@>078P0)^GaXkJ|FkdnhX&Y6`W*u*@i(5T&^Ty!aym;+z%)M7JRb>I|5Ghagl7ljsRRfQ-v8F%Rs9LV zSk5q#DEfVKRo~w$G*09a+wkE(9^^D4NBI%LRu3|ge;YgfJB|iAj)>PzI&RODYHa_z z8@XTR`+B3uyv`Uk-EDrGTAM8ffQG!_0(8*yG9+Y19*^(&rE|Qxiuf!K7fNf6^Vi5) zy}RCqRD61UTRKq|)rkXpcV*b45vp3blE3pXyxX;h(q)#X=#gAROrJ4c93l)L%5SK7 z#dX<2K|PPVON|P8^CajG5e1CQoX}f-QJZWVAUz2bnr6W)I?Dj-3q5yHP-yhka7gd{ ztA}VOONrXS1f!D}C%?m6nT)#-g;(b@xKO+Qp27W4_O0B+%MVXB@$Xmh4*<>9;d=yg zmgoAz{jUf8uHK8LjhNPL@x$g|MOs0OX3Sp*fyTI4N8%O2PaW_b`~@`s3lIOsJ0Gg# z{+u5P;haH>9B?HkZ*@ce)DZ*p+t=zcEe8TF?qOwVFE@9%5A^0*erQ&>B;i5_H3z}(+tF_)T$X$-Svi^JbGA?r05A=p<>LFuK%U%LjRTzH(_{WSc$v|+ zOhaMB5d-q7y~_E{@Z{Sz!+r4K{RcC96ZeBen7R7@TsOjf!_gN>cqRE~Rx#6cz;i@D z*-@!#f_@7qh9-BCe*E(XL8oH1+DldS4~w93ENMZtxqD|9strV=r0 z>wFzc{7ZILGw%66sZY00K(!8VYNDW!=9nZP{mGk*6TonB!T7@&>{j?Ej&q>KIzAA7 zud1u65KVj?@OtbSB=$`4TLCW(&Lctp6G0XK>*;p{M%tHe1Dn>lJ(@@6S5rK}gZ_7O z{=qB{m$46*z09vv6*q@e{Axz}jE^I_<+VE=f6Sm3+Bd7!!`9Vc6f^yTLLw?2gHbZK+t6SI`M4h{Z;lu z^G~=2OoCLn3EPr2-Kdo$xH@_8*JbptFVgBwpq-seKGg%yP|>A8O)KK~zo6jW()gsU zTHY{?yxQ?sK5&AHmNB;q^}W-&?lY5}?NTHmG9-b$(>yFZuz zY^hPwh*uFHA3bDyja#aDwG?MWzWnXE)ytB_{f`` zI(cbF*<&ic#R%cu%&{H)EcN%Xy0Wk(Iy{f30KWUu=yS6tPpS#^o?tb{1p+=e%LU zJ(*4}NhK0e3%tgXFH>SWaw=-d$JMA93hRd-w?)khff0V1p1ny62NXsJot+cnm9&%m zUKQ#WepgG5>v-L^Ev`a0tIGK-u9P9+HB~v0Y{8HCz#bx_B*sC4@bl;H7B(N)b!4Zx zEL)n=sET*-q*SAO{AQ`>TWY2T6v;Q_D$!8%$aR&}bvI-_5t~p)Ra=Q|kaY(b2dclw z2yri&d%}WFbNUtIYPaAB9S_qkl{cclOEtE&o-QtHN@jwJ2g`27>+w6s{FL5I#5T_(1U)^okWu2at z9Lk&W%5pINc5MHOw$y%Ks#5+Ew|{eOp(o>Yu)>gmawox4iCoKgB$F7;2FZ7Pi3PB9 z3si)?c}&Rs7lALg&%z#wd@?)~sz2fv7+N2<+5G%1SOU;jKoU$61_Oe;PG&3p+)Tmm z2xXAhqu^CVLzZzAJV{Y(Lr<10_86Lhdg7y+p))>}m~iaf~-q{7rD2ubo(1`IRr2LsI~1ZNmN9gOr!F zfUAIkQiq%Q#6`6rW>5)<2pMe270p6TqT@5&EZ30d%xCYGpzv`lkj#}aS|F+-{+^81 
zQp^Nr0~xtso|x8Nz#)U+Th2VqahS~08%_=X=VFM`z_{1NB5oI0ApCwEX^0mP5b#Cp zupX!vu-PXWD6-K%6O=5Z|GI_PSfndQ*@|n(wThPRU9Yv*?9%bJ{64akPRyswWU0ag z#fnx`w9K_15ZlhSwYp2|e!!9@3i=*E)3*n3 zj&})IcoX+Bpks1$&&PI`>YSQ z=46pF28cdbAeBS*_z1Pv>H$T&@}}Nnd_Ee#peAE4Oe7TFk@4n4LI;*2pMLAp&>B&s zN*@pK;_ELkEC#Xud;vT3=)CE3HF)<(V|wvIBbdZF;?X`OvxoN?wVlR!HyaM_c!(5hA zFd+YYgN+;^Fyshy@sEwXFaoHT15=4hUvC10zeZLUNDt3!NHH**M*R`ap;75xUn+@W z#+J(36n@jr^u~FZxyqApVtDD)_di#^EwSavnne#o#Fw&l718}Vou0A4RO+G57hlK9 zza~lb(P3b#;egN~v#DmE!|N?*C4zfp)fMJ~aM7zUu6kLT!gA+E~9rTDeKc zm+At9Ta=EA7O|d=>0-lm$yj?Xk-!J}5gG3JquVy_T0a!J6lo6ZR+GQ|G5yI$^S&^$ zFbVC89dIA^PbF#Lvf)0<9~5T+mAav;(Gn7a^-Vrg)i-CDyC~}JYp)ui!8Pae*V@$} zq-Kz1w&%X%}ko1cseX`1;s*sc7o5K@aKgzvJ9%XAw zebg01>47O#WPO{`(yzH>thu?w;NzFa3hD`PQP=ogT4yOEzce?;m|(vbN-3WdEp0vf zLHl=pi6TKoD-q$3bM2EIUJzb9r!$J^IIkNXYj7Gut$DTv<&L>zNK5CbhSv8otIxoM zF-BN0JXweKdSx^))l}eP$))v5e^(SW`$LO7&h)9;l-fLu$WOpa}OqxyJrE$E86S9hsDf*e0rt4zL`O0;mJjFBgMY{1Dvo*@6ii=GCin7Gg z^ZXcye2S!L?ljE=jW6_N+;6a}-9w}^aQY~w2|!jUWh(V~M?}d`p`hvt+z4lO&G&)B zCd_>siI}5>Xfy9P1Cp!$;s^ygfspH^Luh4$$kgOz{+KYUR}sQ^fG=lR^3!?dP1^bh z7|1cDY1897%D>M>!Putt7K(VnKYD47gdhFHekrr&O9cngw5uDZnF?vC8;=FQvV%^h zykZl7oC3jyH0Tv+u0eaMyzZe6GUEFSdXSJIYtODRK&l(w(I`thAvPb#ci(vz-BV-m zg+p+PqwdE~Pp$djHF3h8nvYsOjuuq%u-?%ZIHD{dZZ`wNBxLn{#Zy(Bu>o^Ybke_Z z1$)(hJih7Lx0C4d(lC@1B*kadEa=2Pm?%2Zz)n)%enS}5oKzCjWq9B!XQf&AoFPH7 z*{Hea%P?iVhVFZrB_y@lfV$_=9(sNltnRE97W}VEe63 zzi0#+XnCz$(gZ);iZ7BiP@Gd>S(TTvVyWyy`ir=>SFvcvMuL6zCM$F{CR{m4WvQ;C zv7_|9T8hV?hb#%krz_8@G~w#J9jZHJiXX($**og|C3!pX0;|7uv8PHkN2egdp}UIR zClO8ZCWNYgn@G97n{QqCa2dtf9hQEP+`_ffQ=(e-w4ZCF)#EU1qneaFwt7`L6Tu7Z z;bD5!EZJ2l7s4nOJDs*@BQNf=iK!9jRaOYEx4<48`f=jPr zc?7dpi<}c%aXj(mH5Y-N78>o4Cmury4@b1 zUu9JB#y9oGt$m!1YjhpKiX5#|_)jT0O}(YYX+IG4oup*PC&|)&M;=<34eETj>9j&+ zswxa7PFRsJXy(C#V>P;q>hZ7h1)FV#EyvWb;5L;84bimApq{K?AeLF6^yB(gli7GLqL5TRw zB|zBcj6cQ-Z#CDHi4UH5HN#0QBM{)ZRWEl=X9sgYGGR46GrRGq*8iLkBH32KKWRlr z`;kKd^#uCs`1_pyX4_$g|M%80QKTs2wg>u`omcHA5;wC>h(ly<&rwSn|CYfXb~h6w z7f6tK^hfOBc67+P>dmyo-8A3>g{ZbnD~uJ4K}Qgq&bqq>rjC|G>&xu98}YSHOa!2@ zDl9yX@u>(Bv?=)KcJs@1EC~@S1a79L)STW5BhN`94?h-Ka^F#-AU;;GB>$-=uvezl zPYHkhFKzu1DeR%d1)CyEn&QDClaaLnOhu(bI#RA-2*$Ugttpc7G<3kM?2A4XEWv z-~_!JTV4#bAc3Px(q(=@u=*$)2KB>V?uT_F!L~Kr6s(YyeAg?(1DBMK(x&OYQu8MkroWWWF!fe~>qFfY5ac{7FF|^U8cDUl z+~Sx@di66izb>M19wVvVI(KwgVZCXdlEDw?x0A)dvb}I0v8WtNtf#XI?+6<3<&~hY zLR-85T^h?ejUEDJyS=@5ue0tc7@d7Z-CF_Q>h9t99{a}wG?gR|03$zH)=8;R)&u)l z>C9(on5L<3gCBYK8TLBK;3xY#5~3m5u{m4DPV56)i#PF@tsYy&;$IZZL$W04SvuVX zTy95RUQao#J;D|}lXEZ6=u6+l#ehbm8Az%QofU6fC;hJr{dWr*V)Y&z0LbXE;{0JT zA`0wty=6ry>>zD!D+;JXJqAaynx$zu|(F9yK%WB%MM}z$9LWRlqoB2Ra z38|dQABr)2oqQG-)qXQiF)SL$FmOW;UV_5r$l*dt0jDhix^ZQc2_~;>+ZjmHP_I&K za0^K@7=`=|HVvvGSR*y{Su@8A?85~eHKe|I)wUOyZDyonQAl^Ae*yKVn)c&#bIM@D zI**qRYlmO$a~@_&cO2(EDDB@SIr)3Y1PcGQBf@-pV$vnYpfgd{NPgpednI18P%mQLYX%Mj zSPpTJD?gO*Heliv$M%&Y_3P+_Hl?z6)cWqRE1BLA-g?SQtHjpC^3c|gs|mc=&lIxO z9Pqb1Z-{hHEzyWFUzXaO^`!m~5?2d4AyH)=(!pX4|9B~>%Vs;~I{9jTk~8CSwFSjm z&~!XCpVGBxAZED5fP14fKXFCQS;tJn8abcpwCUB~83I~qybgj7rX?G0eO8#)hA5Ez zuV@CplSr(XZKVG&buWl@=CBqTscwf`)6`ygxMoEV1 zmUN3Px;Dy=y&GPC%{`|VAfSSpyh&O(+z$)f%|95T9B9b!`eYH+&IgHN;Su&?jP1v`#-{8V{uAuJ!;p;5}qKvw(VY;Qe8|k6DQyS?I7-9q&Kp81fLP}bCXpoX_ zkj9~52$4={k&=)Wz6=0wbx#Yr)o6+OAuABIbD1zMR&E0 z)7JOy_Q-#Eged5SkH-cu>S?L_$N!2;+b6{w#DW0Q%@1;d27r-ru>|rj8Xz(0FZdng z4@+0Z06Gr-yntF&`;RY5G-lvhjP)v?1FZdP8as;pFaiqxSiiCtnFzOLIa; z{XI=azM(<%30j?sXXksYJqNzqzS%PxqnvZGQ`g!b#z2+{x$SoI}a%;s5(w z&ssC)FH3secYBigNoy1P9w?cJ$7dAA9~zzOj;Qve9%tJhlF=YRhFV9%s4_(;KQm}w z;vtplYQNBs8u;JaQ%}$6yoGL84n*^B)bk2* 
zJ`Vq{_%;S&kIEjs$!y|gBxux1?-t}VnXkdAxbw)>W$55mL_~M(07m1^6v`8hOhm?8=5Uc#*&4ft1>0AFS^~GOY z3p{hBL}w8}jj^}0`|Nh@@&Q=LWQr$2zNK0ikBeMxhu<@45(TB#+9EzEyK`RK*(e^z zu%oqPQ!=BDMW~<^zkxKgeDnzr@eEEaz(mFeURm9pDCsvk_|aZXdjzJvxk_OIxNIa7 zAQ8vkzwt~sNmu(5Q?n~ET-U_cWruoeQa<*?a<1dVYC+Q>WOn^}ScO*|)pI?bc>^4= z9PpAcG&QkhY$Rem2~K@r|M{*Ag@uyz886B1vGj_5^l)Kl;Nw?W2j{^Sgt*wPp8VI9 zjWLy|^#Odm7u~0p-`a;!&@6V8tuv6pHir1@^!4wR;{7i_TvR=sclIgprmRZd{U8N0 zdsabX#wH16>xOcJ5)5=D_+!)ItolbB)y)KEK#O(=C5SH63t!>03L0eo?jHD4%|?&aQ~ZQ*h7DM< zgnvthwY|Pc@lSbo?QuG~ZewtZUaB(K@eSc=Fgo@+2Oa++x%z~`f5}h9j*!%?XF3HF zz`~&+Ia!@)1_%x25ela+su}tW#9J?mz^`dRHcSfW+wdo3WDG`Ds1@%8Fjxn@QIIX6 z#_r!BZC;V-u^5Msp(g!-v6XM1!s+BR$~9;Kb$XHhyNO`I%HG9RzML_?l3=L?GorZe z{;9+nXA-+z=HM{6)ROZ1tT#tb*!!2>RYh0zy4Q-?HBDm3q7B_`Q8Ky%bM(KR*-9M? z_$cBMVFI4N9h0Bq1o9F0jT`taxqhtV3Kw+^PDefG?5@x>y2NFwlOEeXs0~l=Gfd~r z1x&e(>+Y5xTzYTv*!i8cmUoUa);rEgW7f(5q@<02tf9C{@M~?WMuq{hz(D?v%9T>C;8K!@53E|F=D{7 z&>t#E_DIA6NSTC&#MNTH3kqu3zxntGc`hxK3-D9M!Hnpqrq1c+N!{tmH->;H-5*tG zeNwn7kDYA%Dlz=sB#OS!<;S4;AH<+UQ&Ku_;=$gG6yHNuKBsdilb?&7JkWtFO}HAC zKN_*UxVZ9oTYl#?G>~)so2pC6ZYOAdLhLk-i$}klIjP2}B};~aNiRo>bDh#uHe9wz z%Mdu}lDwb1MWSR!K2{1r!t||J97EpTbQ6qV$`b3sC_M0xh8f%?nmwIm#6eUWE{}g` ze2rr>dLlU->T7^yX=F0P{DC8{Gue&)wNne&3F=Jj^UeB1%Gj<+QGK?=M>Y;1TSdFl z@L)Y#2Xu54NB3F`%N82#`%uGvNvbHDqT7w7TE_MyhJ6xim-zXrZ^l*NxIcq5@4A2J zy?eH)sy(*MZeBazyGGX1@0IRfctXimJz5i=#bZ>wPwS7F#xolN;ZMf6VgLlMd}GyV z#2k#q82r)A+mwt#*4R$3X~LMzoT0`t-wCM4iv0 zy#MxiukwWdL((e!K+x2y)}*+bI>K&6Q%nbcqEn}<**>c4H2U{}q-7HZu!64z#Hcy$IELdoIAbvAFg)_Ig;Pvhh2JfL zro-7X5j4UnP8B=&yc4%0P#J42--}2YXZyoWG$fQSG}qBjeqydu(|gz>sl*v_soqpKB5{jz}E?ulKOZ&WHm^(Skk%h z76o~XnpBnr$_XHt@}M%#gt z=VDlCY(~OCXKh-B%SuJrFEJn>===*_2-Kl&Y{eh5rDS~u6RDkTrI6d`fQ60?pI*-D zz9zA=p=~gwObwcELmvJWowcKeCtDx9FuO^yy`t#)5^a0C-W~uXrL6w|r$_$}8|)<~ zAo2|eYk3O;RHq(PxUN(&Eu?-P4QPV{_VEy$m8s&e)XRLPXS*yYfO7|>Mk03676{Zw zGR7W6gP!>+YL@BNFq+)qJsrpu^ zwk0T?3hQXZexdepJk@jZ{l&Cb6yBOKekOCbC9g5C+`O}CaO=dnHPKJE0n7(r8x3eB ztUq6CG4-Rx0r}I7xkzb2PJ4uh#O3U4o}Vb9kkBG- zPntHHjkUFU$@*aUgMT07{a=ittcV z4t~ajrznzr2jC0MN+I`~;I55n`>3EJ)LKfk#Cc1RT!+aa=NY`HxWIE&&Ae8itFbkp z)8nxrAYq)dZDqv25J$r0rU0=|nb3C6D*o5>xZ6xN(j#Hs{xH9nQ_b&?pAuCqzvlXE z&r6^TO6Hz! 
zB?Nr$_C?d5UI+}i0c@E6p~-1~av}iTWW9g`Zi?^vH>fP*(g4f&q|fxcDKK~e7($o@ zgQLQ9>UJptFp!?B1psEJ^ID|2f$_=A+PZiJ@S6X~9)18&V!9Z(Aras(jtWCReUN4L z7Da;%fl=Eq;pMU-s96(EZ}}~~^LEciy3d3KSi*d)+#ey)|9>cT(ExDxK&g{?9RkzJ zDX~wCO)pKG-|06zCjzGK_rK&_HT`@1e_w%U@W9=t?Mw9rJhrDDr9SD`L;u`DR^e?7 zsmQ9>$mSk~<>`XvuFnq`_KfA+3C6B+&gPi>&Aw{g?iRfvyBBMbH7lm-f+&l`x-ut= za2f%&>Go~?Z#AwH+xtJZG{ccJgGaYxN6fr7SSF~E{1oOA%a{EU|F{{)x8GiBQ86Y3 zl`Dsx32v1{m}l}AC6N|Oi=F~GLt<2t!jFqd5(XW*gCv&K%#xhS|cqa*DpGfj@6f{*-YDdXBf?YwJ|RI@>blRVakD znbbqR5A}3iw#00ku5{l*go+9NZE{?YJy00el?TS8S>Mu2yMY6!gsjD{Eu6B-?a)t^ z4E;J|{mP!_FbSv)^JIeh)kpkpdJ(z3yHv}5q_e)iS*2){Q3XZ5?uNRI0=9Bfl-gZ*_1^H*@q4D=AB$67L`4y2!Z7RIuCOS|)_ zFi{lNga)EN>_AQ0d&OH&SoD7E%nSceeW#j%E&Bs?5}D5m$t=8U4@RD?piVL@1^4)6_Y&yLW#%Bdq2X-`MBf(WEf)b+q^8Ph)>ko6cKUyhd)|ENAPm>#cV!*Tn}(IWhs!wwUzS6WrYQ2&HQ=`Xpy;!RjRQCZ zGuQ!^({_h)Kq~NmyB)1h0AJir)wo3~hJcKJW_IO-v>wwG8-4Q8dq15K;N>QdvvvQI+2>@Zt-;@YJ|df5_pxHE%r5%*;^P$K_U)05NZgW#L*8+XUnL=4`W^j zdD59EA_A4Z2^D!evw9JucCVdS&`YH~#CVPyyZ3p5Q9RtR51-EIyj$3&5-g3nFM9^HpOS8*dNVJOe(TIVK(gEFLtnNQWHlS zA&Ty);TEl&?Z$Se&yJ@wV0QhQcbIPuri$b#a-Q$m8rda{)5D7lN0(~Zd9=u0nN!db zh4ZVW&dJDaweDxk*aJ*g3cmF+l+!PM*t5T8ELA^O4!$l4`2|HoiZR+VDqwH>)fIh9 zp&*MzM>t4!P{Lyy0HJ6+fTfdlZU)y4x&p46MN8morbZVqtaiz)*KoF52~Ed`lU41& zRs_-fMle!F7~6sX_$l;QJ4gy??f-C{wgcGD`O)e)`{LA;N)M>qq3Ap){DIvWe1&Z8 zIEp`PJDNSZ^Ub{oyel8?`u(X_;lS(Y7GMKEyWPt5?a!3IUbR0@g;&glh{j*QPrZ*f z7xHi3v=8Urp6TB=sJ%AI<#)8V*-~p$;dotDmsZtXMe0%T*_o%`N{dLitAg6KVe;o! zwi#jXfq4{eP%1z|O-C8sc;GhL++WX{LA>VO9vyPHU5jn)v@LB3{DXm`_LWO*4{cBZ zVvMyA7Nn^TZ6-y_3_@2@TJkcIOEG=Q@<~h!_gs=xpB~gl&7BY#!;eC~)*XJ>1WqbXUFc!w(l4GN<}e)jIk(%##Jp zHB!iA&;(HlL3{afe|H7c!H!Yl=+aV+CutWB;~jd+P%az60H2K$u%(%~&Im9ID60IL z)d@$hHwzl5ICCOnvc_m&Sz zuL4M*hrOvp^OrvU(2%!n*zi-HnlsAa@Y~_1(VHiY{wmEHVV%+pIX?ry_I8vYR0)poahvm z=Z&zSiV#x}kyxnlr;Qlu`xsD8^SbT$;?m~Kw2AHdC()9T2CE(HHuy+#1JOxj ze99x4O0Vl8nj-LTWK0qbj=HkZdIz6#i5gGKR?Cn1g~2lA`P3d`==gVdjeUHul;U33 zLLf>9{%9M?KfdHhYsk}Ge6ZNFvW*7W2&#vb@tL|4h>)8Kv6DC5nvm1m?HxB1m-&5Q zUaZ;M0GKJ;hNWl}RSe~f*Ax()#SnS!G=TrOVgJI$<{?J;zVutj?DzW5)hvjGRQ?cJfXrolLm6NMdJvqtdYz9J-e<1JIJ<~KR{u))&t^5vu z(tWP)m1@~qThY+^G}91fF2Ff|{5gyNOU^Ge*ZN23 zC2=?}2$8*K(&9sYkNg<>O=(=r{Ggw0n54+Q9NM-Nl&F%I1=s%Avlcb{B^34g(0GQ;UZW;x1~hB26-6?(W89O4j+Do>19e#Nq=BpeCU$@7yA144 zU~$8_#HOm?mN6S5FbX2ojmu&INWo^Co_LdOGlXy=@2|^OnfsaU7x<;7Z$A+QUJDA| z?yCj*oYt@SAEyVNURWLXPN2L;i`1F}EVi^=C@s^b=B>n-US%XcGX913DbV)>P#KeQ z4H@D+Q%C!5`k^Q##@pvbqTBSbiEy27Ow$ z(1&2IflvhTeI~%6LtUukDlTP3=7h;gwsS!Y7W`oRyqWXXBY8|tzI@F3C$1kpBkZiD z-fB;XF1HPHC_fjI#Kd``?Py$WxRF?BFEHvSCv_iCITcX$r{m_IZ_? 
zhF_$AoF;}Sqj)_E&%Jww_11iENR^*>pQ^i9V-Lzdze(3x5Wm5XvXyJW6yz=kAFqe$3BlHSi9*8h#uI`1riIM+Gyc&3M2X z#>;Zm>myZS*l0d`Mzs=np1$V(vq6@7sR|98XsMTI#yutMPCl$+tu`8q5k%CYsvc(L zx{cK?#(uT37U)%;)?#Q5Mx#4zWcx8Xs%=>jx&3m9P;L5?3`5Y#i?Lsg^-?fmDPQ}~ zCs7mjIi11V&lrZuMowROVQtOJVijo5Z(E_TXpht-28_~%k(MJQB^t-b1_nV^&O=TG zRUM8yBEc;Q;yUA?v_vUhLe-uupMmE(bv>!kpuMv!u=pYyaz+v@a(c=8r6`gGxnB#O zb}h;PeZN-tLOH2V7sSYVN4Ty#eC82Kw5Co5`Hs0P{CyAje}UE~(;h&b-ha!*m3?~A zyj-gmnqF1$C5gL$npsGC%|4}fYyFvIP>{qzb&Vu=QWjCOPf)*A665c-oicZ@^O*hx zDx=m{8G)J>B3-qJ%^Fs4zc=WklgZ%MXMFDNiMqibN5n}*9=m0@I&gVmCdMO2e`q}& zbw2qDnK$8ip((R&9Fz7!XpLG&tJqj7GJ-HNS4Dq_2lKo;JG}#;U2*`{WXdi!)~Hbxsx7Zn5UE)N_M1zi)sUvK59^EfFt{45* z68;&ht7=&#dF~?7Zta@Ua_FXuq*pi@raEfB|jw_aLJ*4jmn?c`9_( zAWS@rSJ28^5QXiQT2e>SlUU!_VF5Ozlu63axAV8X?yxMB6f`VSUpB1s4s+tL99ajy zz5H@w$P`to$=9`1YawYaNADJ>ddlVg$5*Ob7y%(mS$GbUS8sQgVTlY{MPz<3I!Db2A0Hb+b?4 zL^zU@{Zlo#r`ul%GKJ0$n{=7J_-ZwZK{8)qUUn}{U zEQQ+=>j;Bkx^pg%hlLT)a(y&PVUFm`7M*CcGtYAJl<-j%l|+V$6BY6|I&iXcUM~w% zOry$P=+$=UybTJO>U|M6#lPL(6FOf{iBm7M&6~6>Yc|Us=s_ROhO&tOJ<-4`v78Vx z;Qr&epcV9}rC`JXVJ!qJvF$7EpFmuf8yCdIQq+54L3Q8-h)P3@5lc>5$a1!Z4#l0X zAp@nk2dviV1DfBhKqzRV6^4)v?CTG;nAVb$7pPnv^O(e|?4!K(na4+qp(T{ruPGx$ zv{%4(Jlx5)IgzpI1+`ZqNL=GD$-q!3FY2Zcl!mlMuaAcg9JegfBr7qb4u{16Q+2KY zRfiI*;T;aF%D#yb7FJUR+&>WhfIUErFi_edE0W>OTa-T>h(t@$Qvc5~s*?)7QQgco zt{<)4{C|3)aveMVIZ5lvdhJfX89|SBMOBGr7_%9Q{voR;Uw%6fnqJ6m%VV|vC_5{P zok^sYjBQ8OcuteFIGMPKh7Iabd{T(Z(wb26cgYN0n^S8{JpqNE)Ap}f1}8G}tC+++ z3ez_4jctH8AgEcZ7H~0^8`bW9anSYV=h{WUT6yxg)!G?v^FdPN&@n>v;|MkLm9*r9UaBYw=p}KOeq1)#15V z&j{lh$$uCAhLVO*P&j5jaB!Jz*?S@26c=sm62rQvGJ-W31RA%kMFo$|YrD3@hQHbw z!>v~fUCQfhr=LEYq!%JsQx;4;IH4il{&%G6oe@jUfN~6d;y#%wrM>{b=vS(!V<|~{ z7Y^ECepgWk!v#EM;bx-9$3*5oJj10fKvs@YQ-$EW$)j4r5?ft(<0X;DP096lju86S z1T9JX%66Vr1Rmrn?%lo&+MGCv%Bt^sN7fs|gYMYeLbbV2^Hq1A*!8H$u9?$1n!^V< z0^$JmH0Jx16F?lZ$VT(-q}Tt6op5r{Z?2qJ+Y|5@SE9H&TueE@+&ep&&Ffgf(Lfx$LSS=oJigKYXJFQBUB7Lc*S8L?5O~jFwt5@FFWnRK z325{8*ay|V=BMt58YPiFQAGJ=D**kV@+}Him!Fy76?S&D(X0}r`NW47BK-220%p{$ z{z=bzixHCSV2x??LKbU4!N}K_1oE4A*GWe3puMI4gWw?=CTHA^#T6A#TV;u%6 zXsjK;N>Z;(UJyB+6XtVG@3kV_xRRO;0n!&o3N?&ld68DU{v61$pwVh;2L8Gr)$oj+ z{KxH3n&|DvUh}-ERyHLDqq|>s$;W-^l1=*FSgk9I*~lynPvso+ZwC zE9b;Y!6AWGjCA(EA4O>A;wQG0g)IFe>r^-}2CRPHXNGKw#Ow+9p`?S+9Y z;QH_nLF+81L7rVl?~amC#&^t3epGtA<4q7yczwXwID`h~wBn*i=)5j=)YJh4+Uelp z0dE3JyP85liv`~*oob}RgBxh(ztK~T)dyXk#`ymdGWdXVh7PU3qy4%hT{o4T=FLm8 zox7beu%lRE=Gltow+D1Ipp;;#n`=j5vqB;&QKOk=2WjC?&+2+_tS9O%w0& z{bNKJRwJ_jUqTTd#`6N`?FQPF+NWbG`XK(N~Egl~iKV`UHsq z;sbi7&I>Rq4};c^YZizMGx7@ef%)M7I@lr`Bj+(R^hywyFPD6q@T(v(r1QnEayja) z1aqO6H=IapEY{JZXImE{Cqkm z91gt@k0xB8_pvo)is^?4MLvdJ5yIh)JB&a#a`XWmrG6^ZK1u@-Pb?<|0=>A<>%m*1 z?MUupLydvopBxFn>Z%Tz*`j)M;nd?BX6#6Lm!J|+-)Er@B!L)8(sB|662KOGb6EL` zzDh77gINam4eT96L+9mAbm2Y%^z}#%(O3r5z0j-R4QH%osgZ@BW)-+FvhC~-z}o`! 
z@}0Nad$$DjCY;tf-Ln(hzC-F?E98TK{@Mm|i-V3B0-Bzsfn-TNhpWJ0;lO++*k*dy zj0_SRuz1j{dv7h;$cL>PBl`4kk7W#^4EJJm6WNNdn&i-Q`EV>7poYjQkyPl12Hh9_ zA%&nvufV@RP-ukW#)o)Z;BU>`sjES!9^hhpWK(PlCZgiw)Ciu3B#lX54~>@yb2HUM zXbZ3p`hl4E?;jbU9g#H6(GJI(*1Vk&%MdO=HJ)GYOQyakyG>oXql({<`j*{SZE=J( zCtwpUGIW>yy_&)gO15pgbP;`YxFYxW<`|eE<-`8l3*%eC!4V-u^4YZCJzo6=ZF<-k z5kRju3p64N9+-%iJ2#KA-aLmp#HEvMU+EXO&^+e*Y@`>{(J*?9y4{5!pbnK<<{xbd zrdYL0UTYlK2!)*}-Tjq44$q)f`p-`IpSPyvUFjj{`xMPTySs&9&l?MHrH_))wDpOb zV!m~sF>b%yHeas8ukwd8#6JRW%a8&4dx6~_KRaL-1RvOwVraL&MR}8!==aNeM9E%; z!}*ll{7{mK4&<0Y!Ste=;Lb^PjBOxY0_+YP7*3=voDN79NOU~`-5DFn6L9EH#lT)o{ynI6afJa?VcF^f?=T{@9=d0Vq)OaNd5f;F17*MKL{Z1E@p!y zGC+YRNB75h8{>By1^O3PO}th9KND6t?^I^rZMbmda@|=RbzGkN2e3f)?Ape{YMq~V z9)5ZLitYXrRfndX4DkH_*&uJO;NcydOO5-* zIgYwYG|D+4Co^CrlE3`n1E}-sznM(R0cOCZL%UX<3(We;vCrUl|cbGY9C@iu@voB|@*fzp2`v%sA@ci0J=F!#%u)xSS90`Km$Lp`pB zmi;dxx2Z4^i=sf$$*i@H4*^P!{Rz<1DGw6!vR+lw;JcXL+8(cjonQMMzd>|8q(oLcu;zW6jJK zS%sH=7d$R{-ZZ=k7-I+zjXeR*WTg@g;AUS!3xT%a!2E{BXdtHZIDke%5)_&q?)RMd zp>X0vmfUyQ7Ci0T*tsx6Hm+Wk+#gBhl3d-nb^#f+tv?AT4u&vU>hhP5=BIJp^*Mj| zel7j@SrAVMhV=8HcyA~>JDl_Be#Ic~s+$APN9@m~CAU1O;Z-U9jmSb85gCvD!iXB`>rGbVPPR|Za@zybo3BVs zT9)qwo#|4M&#}fLSPb{{*SMuRJb2Q)y|>tKAkm>-jte;nn=7Xo>v?MJYV+ zwj6%%rH~Ohl*B2o4h!_c;UQ%excPaF|J=jnlWunl9AGO{b?sdNHxjicOSe4Z$ zcrcz2y{1fd$A>kwu%2Qf_ARXOJjB4_84sDz@DL}myr1%`;DXYwk1k<6lJj7lP+5jQs*)9+)n+ z0+>!IkDT!>1j>oP@s2nE$70MeQP+U|hy_WE(9u`X7~C~Ge7Fh)017T1HeFX79<+*tG|W%b#>3wyPls{_b4Nr-b%YmNS18yC6RzxD>wp()e2EJ) z6f#auql_u#HYE}dYUku0Q!tNrdsZ9)%X>B@Jv=-5?|vCQn834pOHbhtbN5ro>pi=! zj#%IWTW2%1n)4|vHM*uH6|u?RE2GW8-J&$cSG^>qd}TT#JLMW9 z=lk{JKw?J*_KfVQd`N!~?b127n_RPCVsLbu+;Oav+J4f|9S1IOvHH>j7KOxNEm`b2 zl~>bpgk$?S`^?k?Z{P1%E(x^%R^G|)cRAM&bVH?YKM}|RuCxtDna)F*K=UynujuUR zkBXIiYVaP5Pk)_?sB`%2mWL*j9FTdVLUD5r=?eC_*GKK780K{Q=ZBD=T;0y6lr}4WSYxb{RGr`uMtY8^>(v4aKa?8+Jl;RO( zdM{3)8_`?y@H(wA>Wbth#os!PCYzX4*368VYJ+mxbC9w#XDLl)Iq5RlaLj;1Zq`Sf z+(l^=8=>Y};S5zv@70BTuQcT=XO`8A}DhXDc5d`rxBEdruz9tfhhgaHN; zj}lwkyAvQM1tw@*b?c%fe$m+`Z1PsOlX^_Kp|+M2DZ|~wIE4Q2LFmFj_SRUFV|f~< ze7t(^$RfSIb47r@({Yiuo%e(m>9~tEA(^k`dnJu2b+a=Z9ez-Hv|A#xKy)gR3V)`I zb|WxJi+9B(Q`Pfps-vm(Ki$@+a)F6`SZSZ}mor#UBn+3amJpAv65 zyTJ5!T5P7bHK2&-t?vmleFbV->fu0P<r&WPfN@CaLg8## zEsr#(|R zhS;UyX)IZn7^w{)N2FOHlSNA`b(E+4$bb6SHr9@?RGpKKSMget9|;=PAu;KC2WXRx z_6mEXo|K^I$k<*YsAe89(5}KSOnAYnA)f9LtwdPyyUI=+sre;&fix69ZT-BCSFgYr z87%}}2i;$%^BOk5Ez?+1#WNNSD=g20*j}XI8a+% zCkaB8rbzKeRc;P_(p`9u=_h@k7!tNJ7LZtr<8Y#$7}th(z{VI|vz}BF_O}cN_N8d> zw?v_U>U%snD(uNMcA+&!ptp<{P&XlYiZSB(-C97Gc*cM|H>^4!3>umxxxmBNobWtD zNCH36JM>vwUVaT4*qj-uH*^JlXM)uLTYwj4oF&`Sou_zC*c)9|`U+EfX$r*Ph*;E< zb=c;Z%P@~|MGp|CHhPmlUMrhkQw^}uMDZ=8THgFp8g%??T94x@&}{5jphQRux-c;4 zoi?G$z}cG7atXe!tYW{RNka7hZVxH`%^VME`aAp4DD8!#>R5u}L6%UujbaXF&&$t;pOgU%7jRUlU{QBnVJ!zBdK&r=0gI%Mv2gTC!Ql?ITOShuROa=USzB;6d8I0iW< zTu$Dbw=MHCnLJ^?%$ns8r2Jcjk0U!0sG^WMA8=##>Y1+63&EmV3WdTEfp*7lqw4A< z^*o3Qq$hL#rkQaYBTsqEqeMB2$6kVE>O&$DoVi@oo8rPlIV6xzu=Qj0sCDWLytH}o zu+2BPBK2)NW!1)K4=afOr?Qyy*wqi|I1#Bgv{j|5m`V_$bVCl-b1%Z|d;iv?op_ry zH$ypum_eoJm(t}68euz51i37NlwFAU*NfLzl_!N9UJ<@e*Sq;F`bsLFci-^zhl5N6C}+n9(g&G7e<#xp(=yz8?{QPrzsq8>Zgf