import os
+ import re
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.utils import compute_class_weight, _IS_32BIT
- from sklearn.utils._testing import assert_raise_message
- from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_warns_message
@@ -79,24 +78,33 @@ def test_predict_2_classes():
def test_error():
    # Test for appropriate exception on errors
    msg = "Penalty term must be positive"
-     assert_raise_message(ValueError, msg,
-                          LogisticRegression(C=-1).fit, X, Y1)
-     assert_raise_message(ValueError, msg,
-                          LogisticRegression(C="test").fit, X, Y1)
+
+     with pytest.raises(ValueError, match=msg):
+         LogisticRegression(C=-1).fit(X, Y1)
+
+     with pytest.raises(ValueError, match=msg):
+         LogisticRegression(C="test").fit(X, Y1)

    msg = "is not a valid scoring value"
-     assert_raise_message(ValueError, msg,
-                          LogisticRegressionCV(scoring='bad-scorer', cv=2).fit,
-                          X, Y1)
+     with pytest.raises(ValueError, match=msg):
+         LogisticRegressionCV(scoring='bad-scorer', cv=2).fit(X, Y1)

    for LR in [LogisticRegression, LogisticRegressionCV]:
        msg = "Tolerance for stopping criteria must be positive"
-         assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
-         assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
+
+         with pytest.raises(ValueError, match=msg):
+             LR(tol=-1).fit(X, Y1)
+
+         with pytest.raises(ValueError, match=msg):
+             LR(tol="test").fit(X, Y1)

        msg = "Maximum number of iteration must be positive"
-         assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
-         assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
+
+         with pytest.raises(ValueError, match=msg):
+             LR(max_iter=-1).fit(X, Y1)
+
+         with pytest.raises(ValueError, match=msg):
+             LR(max_iter="test").fit(X, Y1)


def test_logistic_cv_mock_scorer():
@@ -196,39 +204,46 @@ def test_predict_iris():
@pytest.mark.parametrize('solver', ['lbfgs', 'newton-cg', 'sag', 'saga'])
def test_multinomial_validation(solver):
    lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
-     assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
+
+     with pytest.raises(ValueError):
+         lr.fit([[0, 1], [1, 0]], [0, 1])


@pytest.mark.parametrize('LR', [LogisticRegression, LogisticRegressionCV])
def test_check_solver_option(LR):
    X, y = iris.data, iris.target

-     msg = ("Logistic Regression supports only solvers in ['liblinear', "
-            "'newton-cg', 'lbfgs', 'sag', 'saga'], got wrong_name.")
+     msg = (r"Logistic Regression supports only solvers in \['liblinear', "
+            r"'newton-cg', 'lbfgs', 'sag', 'saga'\], got wrong_name.")
    lr = LR(solver="wrong_name", multi_class="ovr")
-     assert_raise_message(ValueError, msg, lr.fit, X, y)
+     with pytest.raises(ValueError, match=msg):
+         lr.fit(X, y)

    msg = ("multi_class should be 'multinomial', 'ovr' or 'auto'. "
           "Got wrong_name")
    lr = LR(solver='newton-cg', multi_class="wrong_name")
-     assert_raise_message(ValueError, msg, lr.fit, X, y)
+     with pytest.raises(ValueError, match=msg):
+         lr.fit(X, y)

    # only 'liblinear' solver
    msg = "Solver liblinear does not support a multinomial backend."
    lr = LR(solver='liblinear', multi_class='multinomial')
-     assert_raise_message(ValueError, msg, lr.fit, X, y)
+     with pytest.raises(ValueError, match=msg):
+         lr.fit(X, y)

    # all solvers except 'liblinear' and 'saga'
    for solver in ['newton-cg', 'lbfgs', 'sag']:
        msg = ("Solver %s supports only 'l2' or 'none' penalties," %
               solver)
        lr = LR(solver=solver, penalty='l1', multi_class='ovr')
-         assert_raise_message(ValueError, msg, lr.fit, X, y)
+         with pytest.raises(ValueError, match=msg):
+             lr.fit(X, y)
    for solver in ['newton-cg', 'lbfgs', 'sag', 'saga']:
        msg = ("Solver %s supports only dual=False, got dual=True" %
               solver)
        lr = LR(solver=solver, dual=True, multi_class='ovr')
-         assert_raise_message(ValueError, msg, lr.fit, X, y)
+         with pytest.raises(ValueError, match=msg):
+             lr.fit(X, y)

    # only saga supports elasticnet. We only test for liblinear because the
    # error is raised before for the other solvers (solver %s supports only l2
@@ -237,12 +252,14 @@ def test_check_solver_option(LR):
        msg = ("Only 'saga' solver supports elasticnet penalty, got "
               "solver={}.".format(solver))
        lr = LR(solver=solver, penalty='elasticnet')
-         assert_raise_message(ValueError, msg, lr.fit, X, y)
+         with pytest.raises(ValueError, match=msg):
+             lr.fit(X, y)

    # liblinear does not support penalty='none'
    msg = "penalty='none' is not supported for the liblinear solver"
    lr = LR(penalty='none', solver='liblinear')
-     assert_raise_message(ValueError, msg, lr.fit, X, y)
+     with pytest.raises(ValueError, match=msg):
+         lr.fit(X, y)


@pytest.mark.parametrize('solver', ['lbfgs', 'newton-cg', 'sag', 'saga'])
@@ -318,11 +335,13 @@ def test_inconsistent_input():

    # Wrong dimensions for training data
    y_wrong = y_[:-1]
-     assert_raises(ValueError, clf.fit, X, y_wrong)
+
+     with pytest.raises(ValueError):
+         clf.fit(X, y_wrong)

    # Wrong dimensions for test data
-     assert_raises(ValueError, clf.fit(X_, y_).predict,
-                   rng.random_sample((3, 12)))
+     with pytest.raises(ValueError):
+         clf.fit(X_, y_).predict(rng.random_sample((3, 12)))


def test_write_parameters():
@@ -340,7 +359,9 @@ def test_nan():
    Xnan = np.array(X, dtype=np.float64)
    Xnan[0, 1] = np.nan
    logistic = LogisticRegression(random_state=0)
-     assert_raises(ValueError, logistic.fit, Xnan, Y1)
+
+     with pytest.raises(ValueError):
+         logistic.fit(Xnan, Y1)


def test_consistency_path():
@@ -422,8 +443,8 @@ def test_liblinear_dual_random_state():
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
    # different results for different random states
    msg = "Arrays are not almost equal to 6 decimals"
-     assert_raise_message(AssertionError, msg,
-                          assert_array_almost_equal, lr1.coef_, lr3.coef_)
+     with pytest.raises(AssertionError, match=msg):
+         assert_array_almost_equal(lr1.coef_, lr3.coef_)


def test_logistic_loss_and_grad():
@@ -1042,7 +1063,8 @@ def test_logreg_intercept_scaling():
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
-         assert_raise_message(ValueError, msg, clf.fit, X, Y1)
+         with pytest.raises(ValueError, match=msg):
+             clf.fit(X, Y1)


def test_logreg_intercept_scaling_zero():
@@ -1616,14 +1638,15 @@ def test_LogisticRegressionCV_elasticnet_attribute_shapes():
@pytest.mark.parametrize('l1_ratio', (-1, 2, None, 'something_wrong'))
def test_l1_ratio_param(l1_ratio):

-     msg = "l1_ratio must be between 0 and 1; got (l1_ratio=%r)" % l1_ratio
-     assert_raise_message(ValueError, msg,
-                          LogisticRegression(penalty='elasticnet',
-                                             solver='saga',
-                                             l1_ratio=l1_ratio).fit, X, Y1)
+     msg = r"l1_ratio must be between 0 and 1; got \(l1_ratio=%r\)" % l1_ratio
+     with pytest.raises(ValueError, match=msg):
+         LogisticRegression(penalty='elasticnet', solver='saga',
+                            l1_ratio=l1_ratio).fit(X, Y1)
+
    if l1_ratio is not None:
        msg = ("l1_ratio parameter is only used when penalty is 'elasticnet'."
               " Got (penalty=l1)")
+
        assert_warns_message(UserWarning, msg,
                             LogisticRegression(penalty='l1', solver='saga',
                                                l1_ratio=l1_ratio).fit, X, Y1)
@@ -1634,11 +1657,12 @@ def test_l1_ratios_param(l1_ratios):

    msg = ("l1_ratios must be a list of numbers between 0 and 1; got "
           "(l1_ratios=%r)" % l1_ratios)
-     assert_raise_message(ValueError, msg,
-                          LogisticRegressionCV(penalty='elasticnet',
-                                               solver='saga',
-                                               l1_ratios=l1_ratios, cv=2).fit,
-                          X, Y1)
+
+     with pytest.raises(ValueError, match=re.escape(msg)):
+         LogisticRegressionCV(penalty='elasticnet',
+                              solver='saga',
+                              l1_ratios=l1_ratios, cv=2).fit(X, Y1)
+
    if l1_ratios is not None:
        msg = ("l1_ratios parameter is only used when penalty is "
               "'elasticnet'. Got (penalty=l1)")
@@ -1756,12 +1780,12 @@ def test_penalty_none(solver):
    assert_array_equal(pred_none, pred_l2_C_inf)

    lr = LogisticRegressionCV(penalty='none')
-     assert_raise_message(
-         ValueError,
+     err_msg = (
        "penalty='none' is not useful and not supported by "
-         "LogisticRegressionCV",
-         lr.fit, X, y
+         "LogisticRegressionCV"
    )
+     with pytest.raises(ValueError, match=err_msg):
+         lr.fit(X, y)


@pytest.mark.parametrize(
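
A note on the conversion pattern, outside the diff itself: `pytest.raises(..., match=msg)` checks the raised message with `re.search`, so the expected string is interpreted as a regular expression. That is why this change adds `import re` and escapes literal brackets and parentheses in the expected messages, either by hand in raw strings (the solver-list message in test_check_solver_option, the l1_ratio message) or wholesale with `re.escape` (the l1_ratios message). A minimal, self-contained sketch of the idiom; the `check_positive` helper below is hypothetical and only stands in for an estimator's parameter validation:

import re

import pytest


def check_positive(value):
    # Hypothetical validation helper, not part of the scikit-learn test file.
    if not isinstance(value, (int, float)) or value <= 0:
        raise ValueError("Penalty term must be positive; got (C=%r)" % value)


def test_check_positive_message():
    # match= is applied with re.search, so the literal parentheses in the
    # expected message must be escaped; re.escape does this in one step.
    msg = "Penalty term must be positive; got (C=%r)" % -1
    with pytest.raises(ValueError, match=re.escape(msg)):
        check_positive(-1)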