@@ -974,6 +974,9 @@ def inner_solve(self, X, y, sample_weight):
             atol=eta * norm_G / (self.A_norm * self.r_norm),
             btol=self.tol,
             maxiter=max(n_samples, n_features) * n_classes,  # default is min(A.shape)
+            # default conlim = 1e8, for compatible systems 1e12 is still reasonable,
+            # see LSMR documentation
+            conlim=1e12,
             show=self.verbose >= 3,
         )
         # We store the estimated Frobenius norm of A and norm of residual r in
@@ -988,6 +991,8 @@ def inner_solve(self, X, y, sample_weight):
             conda,
             normx,
         ) = result
+        if self.verbose >= 2:
+            print(f"  Inner iterations in LSMR = {itn}")
         if self.coef.dtype == np.float32:
             self.coef_newton = self.coef_newton.astype(np.float32)
         if not self.linear_loss.base_loss.is_multiclass:
@@ -1010,7 +1015,7 @@ def inner_solve(self, X, y, sample_weight):
         if self.iteration == 1:
             return
         # Note: We could detect too large steps by comparing norm(coef_newton) = normx
-        # with norm(gradient) o with the already available condition number of A, e.g.
+        # with norm(gradient) or with the already available condition number of A, e.g.
         # conda.
         if istop == 7:
             self.use_fallback_lbfgs_solve = True
@@ -1033,7 +1038,7 @@ def inner_solve(self, X, y, sample_weight):
                 msg
                 + "It will now resort to lbfgs instead.\n"
                 "This may be caused by a singular or very ill-conditioned Hessian "
-                " matrix. "
+                "matrix. "
                 "Further options are to use another solver or to avoid such a situation "
                 "in the first place. Possible remedies are removing collinear features "
                 "of X or increasing the penalization strengths.",
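
For reference, here is a minimal standalone sketch of the scipy.sparse.linalg.lsmr call pattern used in this diff. The matrix A, right-hand side b, and tolerance values are arbitrary placeholders; only the keyword arguments (atol, btol, maxiter, conlim, show) and the 8-tuple return value mirror the code above.

    # A runnable sketch (not scikit-learn code): same lsmr keywords and return
    # unpacking as in the diff above; A, b, and the tolerances are arbitrary.
    import numpy as np
    from scipy.sparse.linalg import lsmr

    rng = np.random.default_rng(0)
    A = rng.standard_normal((100, 10))
    b = rng.standard_normal(100)

    result = lsmr(
        A,
        b,
        atol=1e-6,
        btol=1e-6,
        maxiter=max(A.shape),  # default is min(A.shape)
        # default conlim = 1e8; 1e12 still tolerates moderately ill-conditioned A
        conlim=1e12,
        show=False,
    )
    (x, istop, itn, normr, normar, norma, conda, normx) = result
    print(f"  Inner iterations in LSMR = {itn}")
    # Per the SciPy docs: istop == 3 means the estimated cond(A) exceeded
    # conlim, and istop == 7 means maxiter was reached; the latter is the
    # case in which the solver above falls back to lbfgs.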