CLN remove super rare line search checks · scikit-learn/scikit-learn@d304ce9 · GitHub
Commit d304ce9

CLN remove super rare line search checks
1 parent 2ffa621 commit d304ce9

File tree

2 files changed: 0 additions, 51 deletions

sklearn/linear_model/_glm/glm.py

0 additions, 48 deletions

@@ -241,8 +241,6 @@ def line_search(self, X, y, sample_weight):
 
         # np.sum(np.abs(self.gradient_old))
         sum_abs_grad_old = -1
-        sum_abs_grad_previous = -1  # Used to track sum|gradients| of i-1
-        has_improved_sum_abs_grad_previous = False
 
         is_verbose = self.verbose >= 2
         if is_verbose:
@@ -298,52 +296,6 @@ def line_search(self, X, y, sample_weight):
                 )
             if check:
                 break
-            # 2.2 Deal with relative gradient differences around machine precision.
-            tiny_grad = sum_abs_grad_old * eps
-            abs_grad_improvement = np.abs(sum_abs_grad - sum_abs_grad_old)
-            check = abs_grad_improvement <= tiny_grad
-            if is_verbose:
-                print(
-                    " check |sum(|gradient|) - sum(|gradient_old|)| <= eps * "
-                    "sum(|gradient_old|):"
-                    f" {abs_grad_improvement} <= {tiny_grad} {check}"
-                )
-            if check:
-                break
-            # 2.3 This is really the last resort.
-            # Check that sum(|gradient_{i-1}|) < sum(|gradient_{i-2}|)
-            # = has_improved_sum_abs_grad_previous
-            # If now sum(|gradient_{i}|) >= sum(|gradient_{i-1}|), this iteration
-            # made things worse and we should have stopped at i-1.
-            check = (
-                has_improved_sum_abs_grad_previous
-                and sum_abs_grad >= sum_abs_grad_previous
-            )
-            if is_verbose:
-                print(
-                    " check if previously "
-                    f"sum(|gradient_{i-1}|) < sum(|gradient_{i-2}|) but now "
-                    f"sum(|gradient_{i}|) >= sum(|gradient_{i-1}|) {check}"
-                )
-            if check:
-                t /= beta  # we go back to i-1
-                self.coef = self.coef_old + t * self.coef_newton
-                raw = self.raw_prediction + t * raw_prediction_newton
-                self.loss_value, self.gradient = self.linear_loss.loss_gradient(
-                    coef=self.coef,
-                    X=X,
-                    y=y,
-                    sample_weight=sample_weight,
-                    l2_reg_strength=self.l2_reg_strength,
-                    n_threads=self.n_threads,
-                    raw_prediction=raw,
-                )
-                break
-            # Calculate for the next iteration
-            has_improved_sum_abs_grad_previous = (
-                sum_abs_grad < sum_abs_grad_previous
-            )
-            sum_abs_grad_previous = sum_abs_grad
 
             t *= beta
         else:
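For context, the two deleted exits covered cases where backtracking can no longer make meaningful progress: check 2.2 stops when the change in sum(|gradient|) is within machine precision of its old value, and check 2.3 steps back to the previous step size when iteration i-1 had improved the gradient sum but iteration i makes it worse again. The sketch below restates that logic in isolation; the function name rare_gradient_checks and its default eps are illustrative placeholders (the solver scales machine epsilon itself), not part of scikit-learn's actual API.

import numpy as np

def rare_gradient_checks(
    sum_abs_grad,             # sum(|gradient|) at the current step size
    sum_abs_grad_old,         # sum(|gradient|) before the Newton step
    sum_abs_grad_previous,    # sum(|gradient|) at backtracking iteration i-1
    has_improved_previous,    # did iteration i-1 improve on i-2?
    eps=np.finfo(float).eps,  # placeholder tolerance
):
    """Illustrative restatement of the two removed line-search exits."""
    # Check 2.2: the gradient sum changed by less than machine precision
    # relative to its old value, so further backtracking cannot help.
    tiny_grad = sum_abs_grad_old * eps
    if abs(sum_abs_grad - sum_abs_grad_old) <= tiny_grad:
        return "stop: gradient change at machine precision"
    # Check 2.3: iteration i-1 improved sum(|gradient|) but iteration i made
    # it worse again, so step back to i-1 and stop.
    if has_improved_previous and sum_abs_grad >= sum_abs_grad_previous:
        return "stop: revert to the previous (better) step size"
    return "continue backtracking"

The commit judges both situations rare enough in practice that the extra bookkeeping (sum_abs_grad_previous, has_improved_sum_abs_grad_previous) is not worth keeping in the solver.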

sklearn/linear_model/_glm/tests/test_glm.py

0 additions, 3 deletions

@@ -1109,9 +1109,6 @@ def test_newton_solver_verbosity(capsys, verbose):
         "check loss improvement <= armijo term:",
         "check loss |improvement| <= eps * |loss_old|:",
         "check sum(|gradient|) < sum(|gradient_old|):",
-        "check |sum(|gradient|) - sum(|gradient_old|)| <= eps *"
-        " sum(|gradient_old|):",
-        "check if previously sum(|gradient",
     ]
     for m in msg:
         assert m in captured.out
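The matching expectations are dropped from test_newton_solver_verbosity because those messages are no longer printed. As a rough usage sketch (it assumes this branch's "newton-cholesky" solver and the verbose parameter the test exercises; which check lines actually appear depends on how the line search proceeds on the given data):

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = rng.poisson(np.exp(X @ np.array([0.3, -0.2, 0.1])))

# verbose=2 makes the Newton solver print its line-search checks to stdout;
# after this commit the removed "2.2"/"2.3" gradient messages cannot appear.
model = PoissonRegressor(solver="newton-cholesky", verbose=2, max_iter=50)
model.fit(X, y)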
