TST replace assert_raises by pytest.raises in test_least_angle, test_omp, test_theil_sen #19406

Merged: 9 commits, merged on Apr 9, 2021
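The conversion applied across all three test files follows the same mechanical pattern: a helper call assert_raises(Error, func, *args, **kwargs) from sklearn.utils._testing becomes a pytest.raises context manager wrapping an ordinary call. A minimal sketch of the two styles, using a hypothetical divide helper that is not part of the PR:

import pytest


def divide(a, b):
    # Hypothetical helper, only used to illustrate the two styles.
    return a / b


def test_divide_by_zero():
    # Old style (assertion helper from sklearn.utils._testing):
    #     assert_raises(ZeroDivisionError, divide, 1, 0)
    # New style: the failing call is written out explicitly inside the
    # context manager, which fails the test if no exception is raised.
    with pytest.raises(ZeroDivisionError):
        divide(1, 0)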
sklearn/linear_model/tests/test_least_angle.py (10 changes: 5 additions & 5 deletions)
@@ -3,12 +3,10 @@
import numpy as np
import pytest
from scipy import linalg

from sklearn.base import clone
from sklearn.model_selection import train_test_split
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_almost_equal
-from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import TempMemmap
from sklearn.utils.fixes import np_version, parse_version
@@ -96,8 +94,8 @@ def test_lars_path_gram_equivalent(method, return_path):
def test_x_none_gram_none_raises_value_error():
    # Test that lars_path with no X and Gram raises exception
    Xy = np.dot(X.T, y)
-    assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None,
-                  Xy=Xy)
+    with pytest.raises(ValueError):
+        linear_model.lars_path(None, y, Gram=None, Xy=Xy)


def test_all_precomputed():
@@ -489,7 +487,9 @@ def test_lasso_lars_ic():

    # test error on unknown IC
    lars_broken = linear_model.LassoLarsIC('<unknown>')
-    assert_raises(ValueError, lars_broken.fit, X, y)
+
+    with pytest.raises(ValueError):
+        lars_broken.fit(X, y)


def test_lars_path_readonly_data():
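Beyond asserting the exception type, the context-manager form can also check the error message, either through the match argument or through the ExceptionInfo object it returns; this PR does not use either, but they are common follow-ups. A small sketch, assuming a hypothetical set_criterion helper rather than the real LassoLarsIC validation:

import pytest


def set_criterion(criterion):
    # Hypothetical stand-in for the criterion validation in LassoLarsIC.
    if criterion not in ("aic", "bic"):
        raise ValueError(f"criterion should be 'aic' or 'bic', got {criterion!r}")


def test_unknown_criterion_message():
    # match= is interpreted as a regular expression searched in str(error).
    with pytest.raises(ValueError, match="criterion should be"):
        set_criterion("<unknown>")

    # The context manager also yields an ExceptionInfo for finer-grained checks.
    with pytest.raises(ValueError) as exc_info:
        set_criterion("<unknown>")
    assert "<unknown>" in str(exc_info.value)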
sklearn/linear_model/tests/test_omp.py (26 changes: 12 additions & 14 deletions)
@@ -4,7 +4,6 @@
import numpy as np
import pytest

-from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
@@ -33,16 +32,16 @@

def test_correct_shapes():
    assert (orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape ==
-            (n_features,))
+            (n_features,))
    assert (orthogonal_mp(X, y, n_nonzero_coefs=5).shape ==
-            (n_features, 3))
+            (n_features, 3))


def test_correct_shapes_gram():
    assert (orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape ==
-            (n_features,))
+            (n_features,))
    assert (orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape ==
-            (n_features, 3))
+            (n_features, 3))
Member:

It's ok for this time, but next time please try to refrain from changing the formatting of lines that are far away from the real changes of your PR, to keep the review focused on the main topic (and to keep the git history meaningful).

Contributor Author:

I apologize, I have a lot to learn and I appreciate your time and effort. Thank you for the valuable feedback.


def test_n_nonzero_coefs():
@@ -88,15 +87,14 @@ def test_unreachable_accuracy():
n_nonzero_coefs=n_features))


-def test_bad_input():
-    assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
-    assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
-    assert_raises(ValueError, orthogonal_mp, X, y,
-                  n_nonzero_coefs=n_features + 1)
-    assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
-    assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
-    assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
-                  n_nonzero_coefs=n_features + 1)
+@pytest.mark.parametrize("positional_params", [(X, y), (G, Xy)])
+@pytest.mark.parametrize(
+    "keyword_params",
+    [{"tol": -1}, {"n_nonzero_coefs": -1}, {"n_nonzero_coefs": n_features + 1}]
+)
+def test_bad_input(positional_params, keyword_params):
+    with pytest.raises(ValueError):
+        orthogonal_mp(*positional_params, **keyword_params)


def test_perfect_signal_recovery():
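The rewritten test_bad_input stacks two parametrize decorators, so pytest collects their cross product: 2 positional tuples times 3 keyword dictionaries gives 6 independent test cases, each expected to raise ValueError. A minimal, self-contained sketch of the same mechanism, using a hypothetical validate helper rather than scikit-learn's orthogonal_mp:

import pytest


def validate(x, *, tol=None, n_nonzero_coefs=None):
    # Hypothetical stand-in for the parameter checks in orthogonal_mp.
    if tol is not None and tol < 0:
        raise ValueError("tol must be non-negative")
    if n_nonzero_coefs is not None and not 0 < n_nonzero_coefs <= len(x):
        raise ValueError("n_nonzero_coefs is out of range")


# Stacked decorators: every value of data is combined with every kwargs dict,
# so pytest collects 2 * 3 = 6 test cases from this single function.
@pytest.mark.parametrize("data", [[1.0, 2.0], [3.0, 4.0, 5.0]])
@pytest.mark.parametrize(
    "kwargs",
    [{"tol": -1}, {"n_nonzero_coefs": -1}, {"n_nonzero_coefs": 10}],
)
def test_validate_bad_input(data, kwargs):
    with pytest.raises(ValueError):
        validate(data, **kwargs)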
sklearn/linear_model/tests/test_theil_sen.py (15 changes: 10 additions & 5 deletions)
@@ -17,7 +17,7 @@
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model._theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model._theil_sen import _modified_weiszfeld_step
-from sklearn.utils._testing import assert_almost_equal, assert_raises
+from sklearn.utils._testing import assert_almost_equal


@contextmanager
@@ -209,19 +209,23 @@ def test_calc_breakdown_point():
def test_checksubparams_negative_subpopulation():
    X, y, w, c = gen_toy_problem_1d()
    theil_sen = TheilSenRegressor(max_subpopulation=-1, random_state=0)
-    assert_raises(ValueError, theil_sen.fit, X, y)
+
+    with pytest.raises(ValueError):
+        theil_sen.fit(X, y)


def test_checksubparams_too_few_subsamples():
    X, y, w, c = gen_toy_problem_1d()
    theil_sen = TheilSenRegressor(n_subsamples=1, random_state=0)
-    assert_raises(ValueError, theil_sen.fit, X, y)
+    with pytest.raises(ValueError):
+        theil_sen.fit(X, y)


def test_checksubparams_too_many_subsamples():
    X, y, w, c = gen_toy_problem_1d()
    theil_sen = TheilSenRegressor(n_subsamples=101, random_state=0)
-    assert_raises(ValueError, theil_sen.fit, X, y)
+    with pytest.raises(ValueError):
+        theil_sen.fit(X, y)


def test_checksubparams_n_subsamples_if_less_samples_than_features():
@@ -230,7 +234,8 @@ def test_checksubparams_n_subsamples_if_less_samples_than_features():
    X = random_state.normal(size=(n_samples, n_features))
    y = random_state.normal(size=n_samples)
    theil_sen = TheilSenRegressor(n_subsamples=9, random_state=0)
-    assert_raises(ValueError, theil_sen.fit, X, y)
+    with pytest.raises(ValueError):
+        theil_sen.fit(X, y)


def test_subpopulation():
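The three test_checksubparams_* conversions above are nearly identical. As an illustration only (not part of this PR), the same parametrize pattern used for test_bad_input in test_omp.py could collapse them into a single test; the toy data below is generated inline so the sketch stays self-contained instead of relying on the module's gen_toy_problem_1d helper:

import numpy as np
import pytest

from sklearn.linear_model import TheilSenRegressor


@pytest.mark.parametrize(
    "params",
    [
        {"max_subpopulation": -1},  # negative subpopulation
        {"n_subsamples": 1},        # too few subsamples
        {"n_subsamples": 101},      # more subsamples than samples
    ],
)
def test_checksubparams_invalid(params):
    rng = np.random.RandomState(0)
    X = rng.normal(size=(50, 1))
    y = rng.normal(size=50)
    theil_sen = TheilSenRegressor(random_state=0, **params)
    with pytest.raises(ValueError):
        theil_sen.fit(X, y)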