             max_iter=20, n_components=1, transform_algorithm="lasso_lars"
         )
     },
+    ElasticNetCV: {"check_sample_weight_equivalence": dict(max_iter=100, tol=1e-2)},
     FactorAnalysis: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
     FastICA: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
     FeatureAgglomeration: {"check_dict_unchanged": dict(n_clusters=1)},
     GammaRegressor: {
         "check_sample_weight_equivalence": [
-            dict(solver="newton-cholesky"),
-            dict(solver="lbfgs"),
+            dict(solver="newton-cholesky", max_iter=1_000, tol=1e-12),
+            dict(solver="lbfgs", max_iter=1_000, tol=1e-12),
         ]
     },
     GaussianMixture: {"check_dict_unchanged": dict(max_iter=5, n_init=2)},
     GaussianRandomProjection: {"check_dict_unchanged": dict(n_components=1)},
+    HuberRegressor: {
+        "check_sample_weight_equivalence": dict(tol=1e-12, max_iter=1_000)
+    },
     IncrementalPCA: {"check_dict_unchanged": dict(batch_size=10, n_components=1)},
     Isomap: {"check_dict_unchanged": dict(n_components=1)},
     KMeans: {"check_dict_unchanged": dict(max_iter=5, n_clusters=1, n_init=2)},
     KernelPCA: {"check_dict_unchanged": dict(n_components=1)},
     LassoLars: {"check_non_transformer_estimators_n_iter": dict(alpha=0.0)},
+    LassoCV: {"check_sample_weight_equivalence": dict(max_iter=100, tol=1e-2)},
     LatentDirichletAllocation: {
         "check_dict_unchanged": dict(batch_size=10, max_iter=5, n_components=1)
     },
     LinearDiscriminantAnalysis: {"check_dict_unchanged": dict(n_components=1)},
     LocallyLinearEmbedding: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
     LogisticRegression: {
         "check_sample_weight_equivalence": [
-            dict(solver="lbfgs"),
-            dict(solver="liblinear"),
-            dict(solver="newton-cg"),
-            dict(solver="newton-cholesky"),
+            dict(solver="lbfgs", max_iter=1_000, tol=1e-12),
+            # liblinear has more problems with higher regularization apparently...
+            dict(solver="liblinear", C=0.01, max_iter=1_000, tol=1e-12),
+            dict(solver="newton-cg", max_iter=1_000, tol=1e-12),
+            dict(solver="newton-cholesky", max_iter=1_000, tol=1e-12),
+        ]
+    },
+    LogisticRegressionCV: {
+        "check_sample_weight_equivalence": [
+            dict(
+                solver="newton-cholesky",
+                Cs=np.logspace(-3, 3, 5),
+                max_iter=1_000,
+                tol=1e-12,
+            ),
         ]
     },
     MDS: {"check_dict_unchanged": dict(max_iter=5, n_components=1, n_init=2)},

...

     PLSSVD: {"check_dict_unchanged": dict(n_components=1)},
     PoissonRegressor: {
         "check_sample_weight_equivalence": [
-            dict(solver="newton-cholesky"),
-            dict(solver="lbfgs"),
+            dict(solver="newton-cholesky", max_iter=100),
+            dict(solver="lbfgs", max_iter=100),
         ]
     },
     PolynomialCountSketch: {"check_dict_unchanged": dict(n_components=1)},

...

     TruncatedSVD: {"check_dict_unchanged": dict(n_components=1)},
     TweedieRegressor: {
         "check_sample_weight_equivalence": [
-            dict(solver="newton-cholesky"),
-            dict(solver="lbfgs"),
+            dict(solver="newton-cholesky", max_iter=1_000, tol=1e-12),
+            dict(solver="lbfgs", max_iter=1_000, tol=1e-12),
         ]
     },
 }
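
The edited mapping above associates estimator classes with per-check constructor overrides: for `check_sample_weight_equivalence` the solvers get a much tighter `tol` and a larger `max_iter` so the fits converge closely enough for weighted and repeated samples to yield matching models. As a rough, non-authoritative sketch of how such a mapping could be consumed, the helper below yields one configured instance per override; the function name `yield_instances_for_check`, the trimmed-down mapping, and the lookup logic are assumptions for illustration, not scikit-learn's actual test machinery.

from sklearn.base import clone
from sklearn.linear_model import LogisticRegression

# Hypothetical stand-in for the per-estimator override mapping edited in this diff.
PER_ESTIMATOR_CHECK_PARAMS = {
    LogisticRegression: {
        "check_sample_weight_equivalence": [
            dict(solver="lbfgs", max_iter=1_000, tol=1e-12),
            dict(solver="newton-cholesky", max_iter=1_000, tol=1e-12),
        ]
    },
}

def yield_instances_for_check(check_name, estimator):
    """Yield copies of ``estimator`` configured with each override registered for ``check_name``."""
    overrides = PER_ESTIMATOR_CHECK_PARAMS.get(type(estimator), {}).get(check_name)
    if overrides is None:
        # No override registered: run the check on the estimator as-is.
        yield estimator
        return
    # A single dict means one configuration; a list means several.
    if isinstance(overrides, dict):
        overrides = [overrides]
    for params in overrides:
        yield clone(estimator).set_params(**params)

for est in yield_instances_for_check(
    "check_sample_weight_equivalence", LogisticRegression()
):
    print(est)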