Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit 7cb6b8f

Browse files
TST add py_loss for tests in _sgd_fast.pyx (#18924)
1 parent e21319f commit 7cb6b8f
Copy full SHA for 7cb6b8f

File tree

2 files changed

+68
-55
lines changed
Filter options

2 files changed

+68
-55
lines changed

‎sklearn/linear_model/_sgd_fast.pyx

Copy file name to clipboard. Expand all lines: sklearn/linear_model/_sgd_fast.pyx
+7 lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,13 @@ cdef class LossFunction:
7373
"""
7474
return self.dloss(p, y)
7575

76+
def py_loss(self, double p, double y):
77+
"""Python version of `loss` for testing.
78+
79+
Pytest needs a python function and can't use cdef functions.
80+
"""
81+
return self.loss(p, y)
82+
7683
cdef double dloss(self, double p, double y) nogil:
7784
"""Evaluate the derivative of the loss function with respect to
7885
the prediction `p`.

‎sklearn/linear_model/tests/test_sgd.py

Copy file name to clipboard. Expand all lines: sklearn/linear_model/tests/test_sgd.py
+61 −55 lines changed: 61 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -1434,118 +1434,124 @@ def test_tol_parameter():
14341434
assert model_3.n_iter_ == 3
14351435

14361436

1437-
def _test_gradient_common(loss_function, cases):
1438-
# Test gradient of different loss functions
1437+
def _test_loss_common(loss_function, cases):
1438+
# Test the different loss functions
14391439
# cases is a list of (p, y, expected)
1440-
for p, y, expected in cases:
1441-
assert_almost_equal(loss_function.py_dloss(p, y), expected)
1440+
for p, y, expected_loss, expected_dloss in cases:
1441+
assert_almost_equal(loss_function.py_loss(p, y), expected_loss)
1442+
assert_almost_equal(loss_function.py_dloss(p, y), expected_dloss)
14421443

14431444

1444-
def test_gradient_hinge():
1445+
def test_loss_hinge():
14451446
# Test Hinge (hinge / perceptron)
14461447
# hinge
14471448
loss = sgd_fast.Hinge(1.0)
14481449
cases = [
1449-
# (p, y, expected)
1450-
(1.1, 1.0, 0.0), (-2.0, -1.0, 0.0),
1451-
(1.0, 1.0, -1.0), (-1.0, -1.0, 1.0), (0.5, 1.0, -1.0),
1452-
(2.0, -1.0, 1.0), (-0.5, -1.0, 1.0), (0.0, 1.0, -1.0)
1450+
# (p, y, expected_loss, expected_dloss)
1451+
(1.1, 1.0, 0.0, 0.0), (-2.0, -1.0, 0.0, 0.0),
1452+
(1.0, 1.0, 0.0, -1.0), (-1.0, -1.0, 0.0, 1.0), (0.5, 1.0, 0.5, -1.0),
1453+
(2.0, -1.0, 3.0, 1.0), (-0.5, -1.0, 0.5, 1.0), (0.0, 1.0, 1, -1.0)
14531454
]
1454-
_test_gradient_common(loss, cases)
1455+
_test_loss_common(loss, cases)
14551456

14561457
# perceptron
14571458
loss = sgd_fast.Hinge(0.0)
14581459
cases = [
1459-
# (p, y, expected)
1460-
(1.0, 1.0, 0.0), (-0.1, -1.0, 0.0),
1461-
(0.0, 1.0, -1.0), (0.0, -1.0, 1.0), (0.5, -1.0, 1.0),
1462-
(2.0, -1.0, 1.0), (-0.5, 1.0, -1.0), (-1.0, 1.0, -1.0),
1460+
# (p, y, expected_loss, expected_dloss)
1461+
(1.0, 1.0, 0.0, 0.0), (-0.1, -1.0, 0.0, 0.0),
1462+
(0.0, 1.0, 0.0, -1.0), (0.0, -1.0, 0.0, 1.0), (0.5, -1.0, 0.5, 1.0),
1463+
(2.0, -1.0, 2.0, 1.0), (-0.5, 1.0, 0.5, -1.0), (-1.0, 1.0, 1.0, -1.0),
14631464
]
1464-
_test_gradient_common(loss, cases)
1465+
_test_loss_common(loss, cases)
14651466

14661467

14671468
def test_gradient_squared_hinge():
14681469
# Test SquaredHinge
14691470
loss = sgd_fast.SquaredHinge(1.0)
14701471
cases = [
1471-
# (p, y, expected)
1472-
(1.0, 1.0, 0.0), (-2.0, -1.0, 0.0), (1.0, -1.0, 4.0),
1473-
(-1.0, 1.0, -4.0), (0.5, 1.0, -1.0), (0.5, -1.0, 3.0)
1472+
# (p, y, expected_loss, expected_dloss)
1473+
(1.0, 1.0, 0.0, 0.0), (-2.0, -1.0, 0.0, 0.0), (1.0, -1.0, 4.0, 4.0),
1474+
(-1.0, 1.0, 4.0, -4.0), (0.5, 1.0, 0.25, -1.0), (0.5, -1.0, 2.25, 3.0)
14741475
]
1475-
_test_gradient_common(loss, cases)
1476+
_test_loss_common(loss, cases)
14761477

14771478

1478-
def test_gradient_log():
1479+
def test_loss_log():
14791480
# Test Log (logistic loss)
14801481
loss = sgd_fast.Log()
14811482
cases = [
1482-
# (p, y, expected)
1483-
(1.0, 1.0, -1.0 / (np.exp(1.0) + 1.0)),
1484-
(1.0, -1.0, 1.0 / (np.exp(-1.0) + 1.0)),
1485-
(-1.0, -1.0, 1.0 / (np.exp(1.0) + 1.0)),
1486-
(-1.0, 1.0, -1.0 / (np.exp(-1.0) + 1.0)),
1487-
(0.0, 1.0, -0.5), (0.0, -1.0, 0.5),
1488-
(17.9, -1.0, 1.0), (-17.9, 1.0, -1.0),
1483+
# (p, y, expected_loss, expected_dloss)
1484+
(1.0, 1.0, np.log(1.0 + np.exp(-1.0)), -1.0 / (np.exp(1.0) + 1.0)),
1485+
(1.0, -1.0, np.log(1.0 + np.exp(1.0)), 1.0 / (np.exp(-1.0) + 1.0)),
1486+
(-1.0, -1.0, np.log(1.0 + np.exp(-1.0)), 1.0 / (np.exp(1.0) + 1.0)),
1487+
(-1.0, 1.0, np.log(1.0 + np.exp(1.0)), -1.0 / (np.exp(-1.0) + 1.0)),
1488+
(0.0, 1.0, np.log(2), -0.5), (0.0, -1.0, np.log(2), 0.5),
1489+
(17.9, -1.0, 17.9, 1.0), (-17.9, 1.0, 17.9, -1.0),
14891490
]
1490-
_test_gradient_common(loss, cases)
1491+
_test_loss_common(loss, cases)
14911492
assert_almost_equal(loss.py_dloss(18.1, 1.0), np.exp(-18.1) * -1.0, 16)
1493+
assert_almost_equal(loss.py_loss(18.1, 1.0), np.exp(-18.1), 16)
14921494
assert_almost_equal(loss.py_dloss(-18.1, -1.0), np.exp(-18.1) * 1.0, 16)
1495+
assert_almost_equal(loss.py_loss(-18.1, 1.0), 18.1, 16)
14931496

14941497

1495-
def test_gradient_squared_loss():
1498+
def test_loss_squared_loss():
14961499
# Test SquaredLoss
14971500
loss = sgd_fast.SquaredLoss()
14981501
cases = [
1499-
# (p, y, expected)
1500-
(0.0, 0.0, 0.0), (1.0, 1.0, 0.0), (1.0, 0.0, 1.0),
1501-
(0.5, -1.0, 1.5), (-2.5, 2.0, -4.5)
1502+
# (p, y, expected_loss, expected_dloss)
1503+
(0.0, 0.0, 0.0, 0.0), (1.0, 1.0, 0.0, 0.0), (1.0, 0.0, 0.5, 1.0),
1504+
(0.5, -1.0, 1.125, 1.5), (-2.5, 2.0, 10.125, -4.5)
15021505
]
1503-
_test_gradient_common(loss, cases)
1506+
_test_loss_common(loss, cases)
15041507

15051508

1506-
def test_gradient_huber():
1509+
def test_loss_huber():
15071510
# Test Huber
15081511
loss = sgd_fast.Huber(0.1)
15091512
cases = [
1510-
# (p, y, expected)
1511-
(0.0, 0.0, 0.0), (0.1, 0.0, 0.1), (0.0, 0.1, -0.1),
1512-
(3.95, 4.0, -0.05), (5.0, 2.0, 0.1), (-1.0, 5.0, -0.1)
1513+
# (p, y, expected_loss, expected_dloss)
1514+
(0.0, 0.0, 0.0, 0.0), (0.1, 0.0, 0.005, 0.1), (0.0, 0.1, 0.005, -0.1),
1515+
(3.95, 4.0, 0.00125, -0.05), (5.0, 2.0, 0.295, 0.1),
1516+
(-1.0, 5.0, 0.595, -0.1)
15131517
]
1514-
_test_gradient_common(loss, cases)
1518+
_test_loss_common(loss, cases)
15151519

15161520

1517-
def test_gradient_modified_huber():
1518-
# Test ModifiedHuber
1521+
def test_loss_modified_huber():
1522+
# (p, y, expected_loss, expected_dloss)
15191523
loss = sgd_fast.ModifiedHuber()
15201524
cases = [
1521-
# (p, y, expected)
1522-
(1.0, 1.0, 0.0), (-1.0, -1.0, 0.0), (2.0, 1.0, 0.0),
1523-
(0.0, 1.0, -2.0), (-1.0, 1.0, -4.0), (0.5, -1.0, 3.0),
1524-
(0.5, -1.0, 3.0), (-2.0, 1.0, -4.0), (-3.0, 1.0, -4.0)
1525+
# (p, y, expected_loss, expected_dloss)
1526+
(1.0, 1.0, 0.0, 0.0), (-1.0, -1.0, 0.0, 0.0), (2.0, 1.0, 0.0, 0.0),
1527+
(0.0, 1.0, 1.0, -2.0), (-1.0, 1.0, 4.0, -4.0), (0.5, -1.0, 2.25, 3.0),
1528+
(-2.0, 1.0, 8, -4.0), (-3.0, 1.0, 12, -4.0)
15251529
]
1526-
_test_gradient_common(loss, cases)
1530+
_test_loss_common(loss, cases)
15271531

15281532

1529-
def test_gradient_epsilon_insensitive():
1533+
def test_loss_epsilon_insensitive():
15301534
# Test EpsilonInsensitive
15311535
loss = sgd_fast.EpsilonInsensitive(0.1)
15321536
cases = [
1533-
(0.0, 0.0, 0.0), (0.1, 0.0, 0.0), (-2.05, -2.0, 0.0),
1534-
(3.05, 3.0, 0.0), (2.2, 2.0, 1.0), (2.0, -1.0, 1.0),
1535-
(2.0, 2.2, -1.0), (-2.0, 1.0, -1.0)
1537+
# (p, y, expected_loss, expected_dloss)
1538+
(0.0, 0.0, 0.0, 0.0), (0.1, 0.0, 0.0, 0.0), (-2.05, -2.0, 0.0, 0.0),
1539+
(3.05, 3.0, 0.0, 0.0), (2.2, 2.0, 0.1, 1.0), (2.0, -1.0, 2.9, 1.0),
1540+
(2.0, 2.2, 0.1, -1.0), (-2.0, 1.0, 2.9, -1.0)
15361541
]
1537-
_test_gradient_common(loss, cases)
1542+
_test_loss_common(loss, cases)
15381543

15391544

1540-
def test_gradient_squared_epsilon_insensitive():
1545+
def test_loss_squared_epsilon_insensitive():
15411546
# Test SquaredEpsilonInsensitive
15421547
loss = sgd_fast.SquaredEpsilonInsensitive(0.1)
15431548
cases = [
1544-
(0.0, 0.0, 0.0), (0.1, 0.0, 0.0), (-2.05, -2.0, 0.0),
1545-
(3.05, 3.0, 0.0), (2.2, 2.0, 0.2), (2.0, -1.0, 5.8),
1546-
(2.0, 2.2, -0.2), (-2.0, 1.0, -5.8)
1549+
# (p, y, expected_loss, expected_dloss)
1550+
(0.0, 0.0, 0.0, 0.0), (0.1, 0.0, 0.0, 0.0), (-2.05, -2.0, 0.0, 0.0),
1551+
(3.05, 3.0, 0.0, 0.0), (2.2, 2.0, 0.01, 0.2), (2.0, -1.0, 8.41, 5.8),
1552+
(2.0, 2.2, 0.01, -0.2), (-2.0, 1.0, 8.41, -5.8)
15471553
]
1548-
_test_gradient_common(loss, cases)
1554+
_test_loss_common(loss, cases)
15491555

15501556

15511557
def test_multi_thread_multi_class_and_early_stopping():

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.