Commit cc13313

TST remove assert_warns in feature_extraction/tests/ module (#19439)
1 parent 66f67dd commit cc13313

File tree

1 file changed: +13 −12 lines changed

sklearn/feature_extraction/tests/test_text.py

@@ -29,8 +29,7 @@
 from numpy.testing import assert_array_equal
 from sklearn.utils import IS_PYPY
 from sklearn.utils._testing import (assert_almost_equal,
-                                    assert_warns_message, assert_raise_message,
-                                    assert_no_warnings,
+                                    assert_raise_message,
                                     fails_if_pypy,
                                     assert_allclose_dense_sparse,
                                     skip_if_32bit)
@@ -386,8 +385,8 @@ def test_countvectorizer_uppercase_in_vocab():
                " be matched with any documents")
 
     vectorizer = CountVectorizer(lowercase=True, vocabulary=vocabulary)
-    assert_warns_message(UserWarning, message,
-                         vectorizer.fit_transform, vocabulary)
+    with pytest.warns(UserWarning, match=message):
+        vectorizer.fit_transform(vocabulary)
 
 
 def test_tf_idf_smoothing():
@@ -429,8 +428,8 @@ def test_tfidf_no_smoothing():
     tr = TfidfTransformer(smooth_idf=False, norm='l2')
 
     in_warning_message = 'divide by zero'
-    assert_warns_message(RuntimeWarning, in_warning_message,
-                         tr.fit_transform, X).toarray()
+    with pytest.warns(RuntimeWarning, match=in_warning_message):
+        tr.fit_transform(X).toarray()
 
 
 def test_sublinear_tf():
@@ -1213,27 +1212,29 @@ def _check_stop_words_consistency(estimator):
 
 @fails_if_pypy
 def test_vectorizer_stop_words_inconsistent():
-    lstr = "['and', 'll', 've']"
+    lstr = r"\['and', 'll', 've'\]"
     message = ('Your stop_words may be inconsistent with your '
                'preprocessing. Tokenizing the stop words generated '
               'tokens %s not in stop_words.' % lstr)
     for vec in [CountVectorizer(),
                 TfidfVectorizer(), HashingVectorizer()]:
         vec.set_params(stop_words=["you've", "you", "you'll", 'AND'])
-        assert_warns_message(UserWarning, message, vec.fit_transform,
-                             ['hello world'])
+        with pytest.warns(UserWarning, match=message):
+            vec.fit_transform(['hello world'])
         # reset stop word validation
         del vec._stop_words_id
         assert _check_stop_words_consistency(vec) is False
 
         # Only one warning per stop list
-        assert_no_warnings(vec.fit_transform, ['hello world'])
+        with pytest.warns(None) as record:
+            vec.fit_transform(['hello world'])
+        assert not len(record)
         assert _check_stop_words_consistency(vec) is None
 
         # Test caching of inconsistency assessment
         vec.set_params(stop_words=["you've", "you", "you'll", 'blah', 'AND'])
-        assert_warns_message(UserWarning, message, vec.fit_transform,
-                             ['hello world'])
+        with pytest.warns(UserWarning, match=message):
+            vec.fit_transform(['hello world'])
 
 
 @skip_if_32bit
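
For readers unfamiliar with the idiom this commit adopts, here is a minimal, self-contained sketch of asserting a warning with pytest.warns. The emit_vocab_warning helper below is a hypothetical stand-in, not part of scikit-learn:

    import re
    import warnings

    import pytest


    def emit_vocab_warning(docs):
        # Hypothetical stand-in for a vectorizer's fit_transform that warns
        # about upper-case entries in the supplied vocabulary.
        warnings.warn("Upper case characters found in your vocabulary",
                      UserWarning)
        return docs


    def test_emit_vocab_warning():
        # pytest.warns asserts that the enclosed block emits the given warning
        # class; the `match` argument is treated as a regular expression, which
        # is why the diff above escapes the brackets in lstr
        # (r"\['and', 'll', 've'\]").
        with pytest.warns(UserWarning, match=re.escape("Upper case characters")):
            emit_vocab_warning(["hello world"])

The "with pytest.warns(None) as record:" block in the last hunk collects all warnings without requiring any, so the following "assert not len(record)" verifies that the call stayed warning-free, taking over the role of assert_no_warnings.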

0 commit comments
