MAINT: remove np.product and inf/nan aliases in favor of canonical names #25741


Merged on Mar 2, 2023 (1 commit)
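For reference, a minimal sketch of the renamings this PR applies throughout the diff; the mapping below is illustrative and not part of the change itself:

import numpy as np

assert np.prod([2, 3, 4]) == 24     # np.product          -> np.prod
assert np.isnan(np.nan)             # np.NaN              -> np.nan
assert np.isinf(np.inf)             # np.Inf, np.infty    -> np.inf
assert -np.inf == float("-inf")     # np.NINF             -> -np.inf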
2 changes: 1 addition & 1 deletion benchmarks/bench_20newsgroups.py
@@ -47,7 +47,7 @@
 print(f"X_train.shape = {X_train.shape}")
 print(f"X_train.format = {X_train.format}")
 print(f"X_train.dtype = {X_train.dtype}")
-print(f"X_train density = {X_train.nnz / np.product(X_train.shape)}")
+print(f"X_train density = {X_train.nnz / np.prod(X_train.shape)}")
 print(f"y_train {y_train.shape}")
 print(f"X_test {X_test.shape}")
 print(f"X_test.format = {X_test.format}")
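The changed line computes sparse-matrix density as stored entries over total entries; a small self-contained sketch of the same computation, with made-up data:

import numpy as np
from scipy import sparse

X = sparse.random(100, 50, density=0.1, format="csr", random_state=0)
density = X.nnz / np.prod(X.shape)  # stored (non-zero) entries / total entries
assert 0.0 <= density <= 1.0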
2 changes: 1 addition & 1 deletion doc/developers/develop.rst
@@ -533,7 +533,7 @@ general only be determined at runtime.
 The current set of estimator tags are:
 
 allow_nan (default=False)
-  whether the estimator supports data with missing values encoded as np.NaN
+  whether the estimator supports data with missing values encoded as np.nan
 
 binary_only (default=False)
   whether estimator supports binary classification but lacks multi-class
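A minimal sketch of how the allow_nan tag is declared, assuming the dict-based _more_tags/_get_tags API in use at the time of this PR:

from sklearn.base import BaseEstimator

class NaNTolerantEstimator(BaseEstimator):
    def _more_tags(self):
        # Declare support for missing values encoded as np.nan.
        return {"allow_nan": True}

assert NaNTolerantEstimator()._get_tags()["allow_nan"] is True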
4 changes: 2 additions & 2 deletions doc/modules/grid_search.rst
@@ -660,8 +660,8 @@ Robustness to failure
 Some parameter settings may result in a failure to ``fit`` one or more folds
 of the data. By default, this will cause the entire search to fail, even if
 some parameter settings could be fully evaluated. Setting ``error_score=0``
-(or `=np.NaN`) will make the procedure robust to such failure, issuing a
-warning and setting the score for that fold to 0 (or `NaN`), but completing
+(or `=np.nan`) will make the procedure robust to such failure, issuing a
+warning and setting the score for that fold to 0 (or `nan`), but completing
 the search.
 
 .. _alternative_cv:
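A minimal usage sketch of the documented error_score behaviour (the estimator and parameter grid here are illustrative):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
search = GridSearchCV(
    SVC(),
    {"C": [0.1, 1.0, 10.0]},
    error_score=np.nan,  # a failing fit scores nan and the search continues
)
search.fit(X, y)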
2 changes: 1 addition & 1 deletion sklearn/_loss/glm_distribution.py
@@ -222,7 +222,7 @@ def power(self, power):
 
 if power <= 0:
     # Extreme Stable or Normal distribution
-    self._lower_bound = DistributionBoundary(-np.Inf, inclusive=False)
+    self._lower_bound = DistributionBoundary(-np.inf, inclusive=False)
 elif 0 < power < 1:
     raise ValueError(
         "Tweedie distribution is only defined for power<=0 and power>=1."
2 changes: 1 addition & 1 deletion sklearn/decomposition/_nmf.py
@@ -155,7 +155,7 @@ def _beta_divergence(X, W, H, beta, square_root=False):
 # Itakura-Saito divergence
 elif beta == 0:
     div = X_data / WH_data
-    res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))
+    res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
 
 # beta-divergence, beta not in (0, 1, 2)
 else:
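For context, the beta == 0 branch above implements the Itakura-Saito divergence; a dense, hedged re-statement of the same formula (not the sklearn internals, which operate on flattened non-zero data):

import numpy as np

def itakura_saito(X, WH):
    # d_IS(X, WH) = sum(X/WH) - N - sum(log(X/WH)), N = number of entries;
    # requires strictly positive X and WH.
    div = X / WH
    return np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))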
@@ -294,9 +294,9 @@ def test_missing_values_support(n_bins, n_bins_non_missing, X_trans_expected):
 
 X = [
     [1, 1, 0],
-    [np.NaN, np.NaN, 0],
+    [np.nan, np.nan, 0],
     [2, 1, 0],
-    [np.NaN, 2, 1],
+    [np.nan, 2, 1],
     [3, 2, 1],
     [4, 1, 0],
 ]
4 changes: 2 additions & 2 deletions sklearn/ensemble/tests/test_bagging.py
@@ -831,7 +831,7 @@ def test_bagging_regressor_with_missing_inputs():
         [2, None, 6],
         [2, np.nan, 6],
         [2, np.inf, 6],
-        [2, np.NINF, 6],
+        [2, -np.inf, 6],
     ]
 )
 y_values = [
@@ -872,7 +872,7 @@ def test_bagging_classifier_with_missing_inputs():
         [2, None, 6],
         [2, np.nan, 6],
         [2, np.inf, 6],
-        [2, np.NINF, 6],
+        [2, -np.inf, 6],
     ]
 )
 y = np.array([3, 6, 6, 6, 6])
8 changes: 4 additions & 4 deletions sklearn/feature_selection/tests/test_from_model.py
@@ -532,8 +532,8 @@ def test_fit_accepts_nan_inf():
 model = SelectFromModel(estimator=clf)
 
 nan_data = data.copy()
-nan_data[0] = np.NaN
-nan_data[1] = np.Inf
+nan_data[0] = np.nan
+nan_data[1] = np.inf
 
 model.fit(data, y)
 
@@ -546,8 +546,8 @@ def test_transform_accepts_nan_inf():
 model = SelectFromModel(estimator=clf)
 model.fit(nan_data, y)
 
-nan_data[0] = np.NaN
-nan_data[1] = np.Inf
+nan_data[0] = np.nan
+nan_data[1] = np.inf
 
 model.transform(nan_data)
4 changes: 2 additions & 2 deletions sklearn/feature_selection/tests/test_rfe.py
@@ -505,8 +505,8 @@ def test_rfe_allow_nan_inf_in_x(cv):
 y = iris.target
 
 # add nan and inf value to X
-X[0][0] = np.NaN
-X[0][1] = np.Inf
+X[0][0] = np.nan
+X[0][1] = np.inf
 
 clf = MockClassifier()
 if cv is not None:
4 changes: 2 additions & 2 deletions sklearn/feature_selection/tests/test_variance_threshold.py
@@ -54,9 +54,9 @@ def test_zero_variance_floating_point_error():
 def test_variance_nan():
     arr = np.array(data, dtype=np.float64)
     # add single NaN and feature should still be included
-    arr[0, 0] = np.NaN
+    arr[0, 0] = np.nan
     # make all values in feature NaN and feature should be rejected
-    arr[:, 1] = np.NaN
+    arr[:, 1] = np.nan
 
     for X in [arr, csr_matrix(arr), csc_matrix(arr), bsr_matrix(arr)]:
         sel = VarianceThreshold().fit(X)
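The test comments above describe the intended semantics: a feature with isolated NaNs keeps its NaN-ignoring variance, while an all-NaN feature has no defined variance and is rejected. A rough sketch of that rule:

import numpy as np

col = np.array([np.nan, 1.0, 2.0, 3.0])
assert np.nanvar(col) > 0.0              # isolated NaN ignored, feature kept

all_nan = np.full(4, np.nan)
assert np.isnan(np.nanvar(all_nan))      # variance undefined, feature rejected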
2 changes: 1 addition & 1 deletion sklearn/mixture/tests/test_bayesian_mixture.py
@@ -244,7 +244,7 @@ def test_monotonic_likelihood():
     random_state=rng,
     tol=1e-3,
 )
-current_lower_bound = -np.infty
+current_lower_bound = -np.inf
 # Do one training iteration at a time so we can make sure that the
 # training log likelihood increases after each iteration.
 for _ in range(600):
2 changes: 1 addition & 1 deletion sklearn/mixture/tests/test_gaussian_mixture.py
@@ -986,7 +986,7 @@ def test_monotonic_likelihood():
     random_state=rng,
     tol=1e-7,
 )
-current_log_likelihood = -np.infty
+current_log_likelihood = -np.inf
 with warnings.catch_warnings():
     warnings.simplefilter("ignore", ConvergenceWarning)
     # Do one training iteration at a time so we can make sure that the
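Both mixture tests use the same pattern: seeding the running bound with -np.inf guarantees the first measured value counts as an improvement. A schematic sketch with made-up per-iteration values:

import numpy as np

current = -np.inf
for new in (-10.0, -4.2, -4.19):  # e.g. successive lower bounds
    assert new >= current         # monotone non-decreasing
    current = new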
4 changes: 2 additions & 2 deletions sklearn/model_selection/_search.py
@@ -151,7 +151,7 @@ def __iter__(self):
 
 def __len__(self):
     """Number of points on the grid."""
-    # Product function that can handle iterables (np.product can't).
+    # Product function that can handle iterables (np.prod can't).
     product = partial(reduce, operator.mul)
     return sum(
         product(len(v) for v in p.values()) if p else 1 for p in self.param_grid
@@ -184,7 +184,7 @@ def __getitem__(self, ind):
 # Reverse so most frequent cycling parameter comes first
 keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
 sizes = [len(v_list) for v_list in values_lists]
-total = np.product(sizes)
+total = np.prod(sizes)
 
 if ind >= total:
     # Try the next grid
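The comment change in the first hunk is purely textual, but the underlying point holds for both spellings: np.prod does not consume a generator, while the reduce-based helper works lazily on any iterable. A small sketch:

import operator
from functools import partial, reduce

product = partial(reduce, operator.mul)
grid = {"kernel": ["rbf", "linear"], "C": [1, 10, 100]}
assert product(len(v) for v in grid.values()) == 6  # works on a generator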
2 changes: 1 addition & 1 deletion sklearn/utils/sparsefuncs.py
@@ -452,7 +452,7 @@ def _sparse_min_or_max(X, axis, min_or_max):
     if X.nnz == 0:
         return zero
     m = min_or_max.reduce(X.data.ravel())
-    if X.nnz != np.product(X.shape):
+    if X.nnz != np.prod(X.shape):
         m = min_or_max(zero, m)
     return m
 if axis < 0:
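The guard in this hunk accounts for implicit zeros: when nnz is smaller than the total entry count, at least one entry is an unstored zero, so reducing over X.data alone is not enough. An illustrative check:

import numpy as np
from scipy import sparse

X = sparse.csr_matrix(np.array([[1.0, 0.0], [3.0, 2.0]]))
assert X.nnz != np.prod(X.shape)        # one entry is an implicit zero
assert X.data.min() == 1.0              # reduction over stored data misses it
assert min(X.data.min(), 0.0) == 0.0    # folding in zero gives the true min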
2 changes: 1 addition & 1 deletion sklearn/utils/tests/test_pprint.py
@@ -278,7 +278,7 @@ def test_changed_only():
 expected = """SimpleImputer(missing_values=0)"""
 assert imputer.__repr__() == expected
 
-# Defaults to np.NaN, trying with float('NaN')
+# Defaults to np.nan, trying with float('NaN')
 imputer = SimpleImputer(missing_values=float("NaN"))
 expected = """SimpleImputer()"""
 assert imputer.__repr__() == expected
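The comment fixed above touches a subtlety this test exercises: NaN never compares equal to anything, including itself, so detecting "parameter equals its np.nan default" needs an is-NaN check rather than ==. A short illustration:

import numpy as np

assert float("NaN") != np.nan                        # NaN never compares equal
assert np.isnan(float("NaN")) and np.isnan(np.nan)   # but both are NaN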