From 16717070c534942a037c38f4a1b4c305db361f4c Mon Sep 17 00:00:00 2001
From: nuffe
Date: Sat, 15 Oct 2016 17:29:56 +0200
Subject: [PATCH 01/13] Raise ValueError if l1_ratio=0

---
 sklearn/linear_model/coordinate_descent.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/sklearn/linear_model/coordinate_descent.py b/sklearn/linear_model/coordinate_descent.py
index 5002a8af44408..2f06039f5fc79 100644
--- a/sklearn/linear_model/coordinate_descent.py
+++ b/sklearn/linear_model/coordinate_descent.py
@@ -49,10 +49,10 @@ def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
         Xy = np.dot(X.T, y) that can be precomputed.
 
     l1_ratio : float
-        The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
-        For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
-        l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
-        1``, the penalty is a combination of L1 and L2.
+        The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
+        For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
+        supported) ``For l1_ratio = 1`` it is an L1 penalty. For
+        ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.
 
     eps : float, optional
         Length of the path. ``eps=1e-3`` means that
@@ -77,6 +77,11 @@ def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
     copy_X : boolean, optional, default True
         If ``True``, X will be copied; else, it may be overwritten.
     """
+    if l1_ratio == 0:
+        raise ValueError("Automatic alpha grid generation is not supported for"
+                         " l1_ratio=0. Please supply a grid by providing "
+                         "your estimator with the appropriate `alphas=` "
+                         "argument.")
     n_samples = len(y)
 
     sparse_center = False

From b6b78c49ff91df736c09cf0e7691de8e5aba8648 Mon Sep 17 00:00:00 2001
From: nuffe
Date: Sun, 16 Oct 2016 11:28:50 +0200
Subject: [PATCH 02/13] Add test

---
 .../tests/test_coordinate_descent.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index 41d687b6b3830..7110eb6eefaeb 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -17,6 +17,7 @@
 from sklearn.utils.testing import assert_greater
 from sklearn.utils.testing import assert_raises
 from sklearn.utils.testing import assert_raises_regex
+from sklearn.utils.testing import assert_raise_message
 from sklearn.utils.testing import assert_warns
 from sklearn.utils.testing import assert_warns_message
 from sklearn.utils.testing import ignore_warnings
@@ -712,3 +713,18 @@ def test_enet_float_precision():
         assert_array_almost_equal(intercept[np.float32],
                                   intercept[np.float64],
                                   decimal=4)
+
+
+def test_enet_l1_ratio():
+    # Test that an error message is raised if an estimator that
+    # uses _alpha_grid is called with l1_ratio=0
+    msg = ("Automatic alpha grid generation is not supported for l1_ratio=0. "
+           "Please supply a grid by providing your estimator with the "
+           "appropriate `alphas=` argument.")
+    X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T
+    y = np.array([12, 10, 11, 21, 5])
+
+    assert_raise_message(ValueError, msg,
+                         ElasticNetCV(l1_ratio=0).fit, X, y)
+    assert_raise_message(ValueError, msg,
+                         MultiTaskElasticNetCV(l1_ratio=0).fit, X, X)

From 3fa201b3a8493826e336a36e2496c1481b0167fd Mon Sep 17 00:00:00 2001
From: nuffe
Date: Sun, 16 Oct 2016 11:42:10 +0200
Subject: [PATCH 03/13] Add test - fix typo

---
 sklearn/linear_model/tests/test_coordinate_descent.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index 7110eb6eefaeb..f0f5512a097fa 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -718,7 +718,7 @@ def test_enet_float_precision():
 def test_enet_l1_ratio():
     # Test that an error message is raised if an estimator that
     # uses _alpha_grid is called with l1_ratio=0
-    msg = ("Automatic alpha grid generation is not supported for l1_ratio=0. "
+    msg = ("Automatic alpha grid generation is not supported for l1_ratio=0. "
            "Please supply a grid by providing your estimator with the "
            "appropriate `alphas=` argument.")
     X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T

From 02f0d4d2e1306d8bd44b7a39029a83a7161f69ee Mon Sep 17 00:00:00 2001
From: nuffe
Date: Mon, 17 Oct 2016 15:49:58 +0200
Subject: [PATCH 04/13] Extend test

---
 .../linear_model/tests/test_coordinate_descent.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index f0f5512a097fa..d25de8538b9ae 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -728,3 +728,17 @@ def test_enet_l1_ratio():
                          ElasticNetCV(l1_ratio=0).fit, X, y)
     assert_raise_message(ValueError, msg,
                          MultiTaskElasticNetCV(l1_ratio=0).fit, X, X)
+
+    # Test that l1_ratio=0 is allowed if we supply a grid manually
+    alphas = [0.1, 10]
+    est_desired = ElasticNetCV(l1_ratio=0.00001, alphas=alphas)
+    est = ElasticNetCV(l1_ratio=0, alphas=alphas)
+    est_desired.fit(X, y)
+    est.fit(X, y)
+    assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
+
+    est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, alphas=alphas)
+    est = MultiTaskElasticNetCV(l1_ratio=0, alphas=alphas)
+    est.fit(X, X)
+    est_desired.fit(X, X)
+    assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
From 6b818303381bb49df6cc6f7fd91dee35769b2217 Mon Sep 17 00:00:00 2001
From: nuffe
Date: Mon, 17 Oct 2016 18:15:13 +0200
Subject: [PATCH 05/13] Blank commit to restart AppVeyor

---
 sklearn/linear_model/tests/test_coordinate_descent.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index d25de8538b9ae..5efd8cd6f3e1a 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -729,7 +729,7 @@ def test_enet_l1_ratio():
     assert_raise_message(ValueError, msg,
                          MultiTaskElasticNetCV(l1_ratio=0).fit, X, X)
 
-    # Test that l1_ratio=0 is allowed if we supply a grid manually
+    # Test that l1_ratio=0 is allowed if we supply a grid manually.
     alphas = [0.1, 10]
     est_desired = ElasticNetCV(l1_ratio=0.00001, alphas=alphas)
     est = ElasticNetCV(l1_ratio=0, alphas=alphas)

From 8bf6966c802950ecf060d28e79f0f3b0ebb6042a Mon Sep 17 00:00:00 2001
From: nuffe
Date: Mon, 17 Oct 2016 20:59:25 +0200
Subject: [PATCH 06/13] Same as test passing commit #3fa201b with diff commmented out

---
 .../tests/test_coordinate_descent.py | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index 5efd8cd6f3e1a..a16469f1d11e3 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -729,16 +729,16 @@ def test_enet_l1_ratio():
     assert_raise_message(ValueError, msg,
                          MultiTaskElasticNetCV(l1_ratio=0).fit, X, X)
 
-    # Test that l1_ratio=0 is allowed if we supply a grid manually.
-    alphas = [0.1, 10]
-    est_desired = ElasticNetCV(l1_ratio=0.00001, alphas=alphas)
-    est = ElasticNetCV(l1_ratio=0, alphas=alphas)
-    est_desired.fit(X, y)
-    est.fit(X, y)
-    assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
-
-    est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, alphas=alphas)
-    est = MultiTaskElasticNetCV(l1_ratio=0, alphas=alphas)
-    est.fit(X, X)
-    est_desired.fit(X, X)
-    assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
+    # Test that l1_ratio=0 is allowed if we supply a grid manually
+    # alphas = [0.1, 10]
+    # est_desired = ElasticNetCV(l1_ratio=0.00001, alphas=alphas)
+    # est = ElasticNetCV(l1_ratio=0, alphas=alphas)
+    # est_desired.fit(X, y)
+    # est.fit(X, y)
+    # assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
+    #
+    # est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, alphas=alphas)
+    # est = MultiTaskElasticNetCV(l1_ratio=0, alphas=alphas)
+    # est.fit(X, X)
+    # est_desired.fit(X, X)
+    # assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
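The behaviour exercised by the test above can also be reproduced directly. The automatic grid in `_alpha_grid` is built from a largest alpha of roughly `max|X.T y| / (n_samples * l1_ratio)` (a simplified restatement of the library's formula), so `l1_ratio=0` leaves no finite starting point, which is why the first patch raises instead. The sketch below is illustrative only: it assumes a scikit-learn build that already contains this patch and reuses the toy dataset from the test; it is not part of the patch series itself.

```python
import numpy as np
from sklearn.linear_model import ElasticNetCV

X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]], dtype=float).T
y = np.array([12, 10, 11, 21, 5], dtype=float)

# With the patch applied, asking for an automatic grid at l1_ratio=0 fails,
# because the grid's alpha_max would require dividing by l1_ratio.
try:
    ElasticNetCV(l1_ratio=0).fit(X, y)
except ValueError as exc:
    print(exc)

# The workaround suggested by the error message: pass the grid explicitly.
# l1_ratio=0 then behaves like a pure L2 (ridge-style) penalty; convergence
# warnings on such a tiny dataset are expected and harmless here.
ridge_like = ElasticNetCV(l1_ratio=0, alphas=[0.1, 1.0, 10.0]).fit(X, y)
print(ridge_like.alpha_, ridge_like.coef_)
```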
From 51719329f471af954a484d01274c2e1d5c5f15b8 Mon Sep 17 00:00:00 2001
From: nuffe
Date: Mon, 17 Oct 2016 21:50:53 +0200
Subject: [PATCH 07/13] Recommit extended test (with warnings supressed)

---
 .../tests/test_coordinate_descent.py | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index a16469f1d11e3..1e624a6f0054d 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -730,15 +730,17 @@ def test_enet_l1_ratio():
                          MultiTaskElasticNetCV(l1_ratio=0).fit, X, X)
 
     # Test that l1_ratio=0 is allowed if we supply a grid manually
-    # alphas = [0.1, 10]
-    # est_desired = ElasticNetCV(l1_ratio=0.00001, alphas=alphas)
-    # est = ElasticNetCV(l1_ratio=0, alphas=alphas)
-    # est_desired.fit(X, y)
-    # est.fit(X, y)
-    # assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
-    #
-    # est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, alphas=alphas)
-    # est = MultiTaskElasticNetCV(l1_ratio=0, alphas=alphas)
-    # est.fit(X, X)
-    # est_desired.fit(X, X)
-    # assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
+    alphas = [0.1, 10]
+    est_desired = ElasticNetCV(l1_ratio=0.00001, alphas=alphas)
+    est = ElasticNetCV(l1_ratio=0, alphas=alphas)
+    with ignore_warnings():
+        est_desired.fit(X, y)
+        est.fit(X, y)
+    assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
+
+    est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, alphas=alphas)
+    est = MultiTaskElasticNetCV(l1_ratio=0, alphas=alphas)
+    with ignore_warnings():
+        est.fit(X, X)
+        est_desired.fit(X, X)
+    assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)

From 043e6a61ea28c59de35fa8f57782066e14632418 Mon Sep 17 00:00:00 2001
From: nuffe
Date: Mon, 17 Oct 2016 22:33:36 +0200
Subject: [PATCH 08/13] See what happens with different assert_almost_equal function

---
 sklearn/linear_model/tests/test_coordinate_descent.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index 1e624a6f0054d..87fb6ff0483e2 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -736,11 +736,11 @@ def test_enet_l1_ratio():
     with ignore_warnings():
         est_desired.fit(X, y)
         est.fit(X, y)
-    assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
+    assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
 
     est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, alphas=alphas)
     est = MultiTaskElasticNetCV(l1_ratio=0, alphas=alphas)
     with ignore_warnings():
         est.fit(X, X)
         est_desired.fit(X, X)
-    assert_almost_equal(est.coef_, est_desired.coef_, decimal=5)
+    assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)

From fc4f5700dfedd0369355521d4287459b8f772d36 Mon Sep 17 00:00:00 2001
From: nuffe
Date: Tue, 18 Oct 2016 17:26:09 +0200
Subject: [PATCH 09/13] Trigger build one more time

---
 sklearn/linear_model/tests/test_coordinate_descent.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index 87fb6ff0483e2..9924c18b54703 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -729,7 +729,7 @@ def test_enet_l1_ratio():
     assert_raise_message(ValueError, msg,
                          MultiTaskElasticNetCV(l1_ratio=0).fit, X, X)
 
-    # Test that l1_ratio=0 is allowed if we supply a grid manually
+    # Test that l1_ratio=0 is allowed if we supply a grid manually.
     alphas = [0.1, 10]
     est_desired = ElasticNetCV(l1_ratio=0.00001, alphas=alphas)
     est = ElasticNetCV(l1_ratio=0, alphas=alphas)
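Patch 07 above wraps the fits in `ignore_warnings()` because coordinate descent with a vanishing L1 term converges slowly on the five-sample toy problem and emits warnings during cross-validation. A small sketch of that pattern, under the same assumptions as before (a patched build and the toy data from the test; none of this is part of the patch series):

```python
import numpy as np
from sklearn.linear_model import ElasticNetCV
from sklearn.utils.testing import ignore_warnings

X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]], dtype=float).T
y = np.array([12, 10, 11, 21, 5], dtype=float)

est = ElasticNetCV(l1_ratio=0, alphas=[0.1, 10])
# Used as a context manager, exactly as in the test, ignore_warnings silences
# only the warnings raised inside the block; the rest of the run is unaffected.
with ignore_warnings():
    est.fit(X, y)
print(est.coef_)
```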
From 0122cc0a847b9389fb4bfe027b60f3fc56bd7db8 Mon Sep 17 00:00:00 2001
From: nuffe
Date: Tue, 18 Oct 2016 21:29:32 +0200
Subject: [PATCH 10/13] Replace weird expression

---
 sklearn/linear_model/tests/test_coordinate_descent.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index 9924c18b54703..2570158981d66 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -727,7 +727,7 @@ def test_enet_l1_ratio():
     assert_raise_message(ValueError, msg,
                          ElasticNetCV(l1_ratio=0).fit, X, y)
     assert_raise_message(ValueError, msg,
-                         MultiTaskElasticNetCV(l1_ratio=0).fit, X, X)
+                         MultiTaskElasticNetCV(l1_ratio=0).fit, X, y[:, None])
 
     # Test that l1_ratio=0 is allowed if we supply a grid manually.
     alphas = [0.1, 10]
@@ -741,6 +741,6 @@ def test_enet_l1_ratio():
     est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, alphas=alphas)
     est = MultiTaskElasticNetCV(l1_ratio=0, alphas=alphas)
     with ignore_warnings():
-        est.fit(X, X)
-        est_desired.fit(X, X)
+        est.fit(X, y[:, None])
+        est_desired.fit(X, y[:, None])
     assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)

From 28422463e4dccb0b5889771bd75f4d0001fac9de Mon Sep 17 00:00:00 2001
From: nuffe
Date: Sun, 23 Oct 2016 19:11:01 +0200
Subject: [PATCH 11/13] Try and trigger build one more time

---
 sklearn/linear_model/tests/test_coordinate_descent.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index 2570158981d66..004abb6838cd6 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -729,7 +729,7 @@ def test_enet_l1_ratio():
     assert_raise_message(ValueError, msg,
                          MultiTaskElasticNetCV(l1_ratio=0).fit, X, y[:, None])
 
-    # Test that l1_ratio=0 is allowed if we supply a grid manually.
+    # Test that l1_ratio=0 is allowed if we supply a grid manually
     alphas = [0.1, 10]
     est_desired = ElasticNetCV(l1_ratio=0.00001, alphas=alphas)
     est = ElasticNetCV(l1_ratio=0, alphas=alphas)

From 6de9d1a6b139b7c2816a6a51ca424087502b1d02 Mon Sep 17 00:00:00 2001
From: nuffe
Date: Mon, 24 Oct 2016 18:20:01 +0200
Subject: [PATCH 12/13] Add random state to test

---
 .../tests/test_coordinate_descent.py | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index 004abb6838cd6..9af4f1d123d1b 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -724,22 +724,23 @@ def test_enet_l1_ratio():
     X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T
     y = np.array([12, 10, 11, 21, 5])
 
-    assert_raise_message(ValueError, msg,
-                         ElasticNetCV(l1_ratio=0).fit, X, y)
-    assert_raise_message(ValueError, msg,
-                         MultiTaskElasticNetCV(l1_ratio=0).fit, X, y[:, None])
+    assert_raise_message(ValueError, msg, ElasticNetCV(
+        l1_ratio=0, random_state=42).fit, X, y)
+    assert_raise_message(ValueError, msg, MultiTaskElasticNetCV(
+        l1_ratio=0, random_state=42).fit, X, y[:, None])
 
     # Test that l1_ratio=0 is allowed if we supply a grid manually
     alphas = [0.1, 10]
-    est_desired = ElasticNetCV(l1_ratio=0.00001, alphas=alphas)
-    est = ElasticNetCV(l1_ratio=0, alphas=alphas)
+    estkwds = {'alphas': alphas, 'random_state': 42}
+    est_desired = ElasticNetCV(l1_ratio=0.00001, **estkwds)
+    est = ElasticNetCV(l1_ratio=0, **estkwds)
     with ignore_warnings():
         est_desired.fit(X, y)
         est.fit(X, y)
     assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
 
-    est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, alphas=alphas)
-    est = MultiTaskElasticNetCV(l1_ratio=0, alphas=alphas)
+    est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, **estkwds)
+    est = MultiTaskElasticNetCV(l1_ratio=0, **estkwds)
     with ignore_warnings():
         est.fit(X, y[:, None])
         est_desired.fit(X, y[:, None])
     assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
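Patch 12 threads a fixed `random_state` through both estimators via a shared keyword dictionary, so the two configurations being compared differ only in `l1_ratio`. A sketch of that pattern follows, with the same assumptions as the earlier examples (patched build, toy data); the printed values are shown for illustration rather than asserted:

```python
import numpy as np
from sklearn.linear_model import ElasticNetCV
from sklearn.utils.testing import ignore_warnings

X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]], dtype=float).T
y = np.array([12, 10, 11, 21, 5], dtype=float)

# Shared settings: the same alpha grid and the same seed for both estimators,
# mirroring the `estkwds` dict introduced in the test.
estkwds = {'alphas': [0.1, 10], 'random_state': 42}

with ignore_warnings():
    ridge_like = ElasticNetCV(l1_ratio=0, **estkwds).fit(X, y)
    almost_ridge = ElasticNetCV(l1_ratio=0.00001, **estkwds).fit(X, y)

# The test asserts these agree to 5 decimals; printing them shows that an
# l1_ratio of exactly 0 and a tiny positive l1_ratio behave alike once the
# grid is supplied manually.
print(ridge_like.coef_)
print(almost_ridge.coef_)
```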
From 50b1b363c99236e264a6e9abf72bdb523d914faa Mon Sep 17 00:00:00 2001
From: nuffe
Date: Mon, 24 Oct 2016 18:33:18 +0200
Subject: [PATCH 13/13] Add random state to failed test,
 test_logistic_regression_sample_weights

---
 sklearn/linear_model/tests/test_logistic.py | 30 ++++++++++++++++++++-------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/sklearn/linear_model/tests/test_logistic.py b/sklearn/linear_model/tests/test_logistic.py
index cc840335c8288..8d35bb220c958 100644
--- a/sklearn/linear_model/tests/test_logistic.py
+++ b/sklearn/linear_model/tests/test_logistic.py
@@ -588,24 +588,28 @@ def test_logistic_regression_sample_weights():
     # Test that passing sample_weight as ones is the same as
     # not passing them at all (default None)
     for solver in ['lbfgs', 'liblinear']:
-        clf_sw_none = LR(solver=solver, fit_intercept=False)
+        clf_sw_none = LR(solver=solver, fit_intercept=False,
+                         random_state=42)
         clf_sw_none.fit(X, y)
-        clf_sw_ones = LR(solver=solver, fit_intercept=False)
+        clf_sw_ones = LR(solver=solver, fit_intercept=False,
+                         random_state=42)
         clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
         assert_array_almost_equal(
             clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
 
     # Test that sample weights work the same with the lbfgs,
     # newton-cg, and 'sag' solvers
-    clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
+    clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False, random_state=42)
     clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
-    clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
+    clf_sw_n = LR(solver='newton-cg', fit_intercept=False, random_state=42)
     clf_sw_n.fit(X, y, sample_weight=sample_weight)
-    clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10)
+    clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10,
+                    random_state=42)
     # ignore convergence warning due to small dataset
     with ignore_warnings():
         clf_sw_sag.fit(X, y, sample_weight=sample_weight)
-    clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False)
+    clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False,
+                          random_state=42)
     clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
     assert_array_almost_equal(
         clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
@@ -619,9 +623,9 @@ def test_logistic_regression_sample_weights():
     # to be 2 for all instances of class 2
     for solver in ['lbfgs', 'liblinear']:
         clf_cw_12 = LR(solver=solver, fit_intercept=False,
-                       class_weight={0: 1, 1: 2})
+                       class_weight={0: 1, 1: 2}, random_state=42)
         clf_cw_12.fit(X, y)
-        clf_sw_12 = LR(solver=solver, fit_intercept=False)
+        clf_sw_12 = LR(solver=solver, fit_intercept=False, random_state=42)
         clf_sw_12.fit(X, y, sample_weight=sample_weight)
         assert_array_almost_equal(
             clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
@@ -630,19 +634,21 @@ def test_logistic_regression_sample_weights():
     # since the patched liblinear code is different.
     clf_cw = LogisticRegression(
         solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
-        penalty="l1", tol=1e-5)
+        penalty="l1", tol=1e-5, random_state=42)
     clf_cw.fit(X, y)
     clf_sw = LogisticRegression(
-        solver="liblinear", fit_intercept=False, penalty="l1", tol=1e-5)
+        solver="liblinear", fit_intercept=False, penalty="l1", tol=1e-5,
+        random_state=42)
     clf_sw.fit(X, y, sample_weight)
     assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
 
     clf_cw = LogisticRegression(
         solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
-        penalty="l2", dual=True)
+        penalty="l2", dual=True, random_state=42)
     clf_cw.fit(X, y)
     clf_sw = LogisticRegression(
-        solver="liblinear", fit_intercept=False, penalty="l2", dual=True)
+        solver="liblinear", fit_intercept=False, penalty="l2", dual=True,
+        random_state=42)
     clf_sw.fit(X, y, sample_weight)
     assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
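The logistic-regression test touched by patch 13 checks, among other things, that giving every class-1 sample a weight of 2 is equivalent to passing `class_weight={0: 1, 1: 2}`. The sketch below restates that invariance on synthetic data; the dataset, solver choice, and tolerance are assumptions chosen for illustration and are not the test's own fixtures:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = rng.randn(30, 3)
y = (rng.rand(30) > 0.5).astype(int)

# Give every class-1 sample twice the weight of a class-0 sample.
sample_weight = np.ones(y.shape[0])
sample_weight[y == 1] = 2

# Fit once with class_weight and once with the equivalent sample_weight.
clf_cw = LogisticRegression(solver='liblinear', fit_intercept=False,
                            class_weight={0: 1, 1: 2}, random_state=42)
clf_cw.fit(X, y)

clf_sw = LogisticRegression(solver='liblinear', fit_intercept=False,
                            random_state=42)
clf_sw.fit(X, y, sample_weight=sample_weight)

# The two solutions should agree up to solver tolerance.
print(clf_cw.coef_)
print(clf_sw.coef_)
print(np.allclose(clf_cw.coef_, clf_sw.coef_, atol=1e-3))
```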