@@ -7,15 +7,14 @@
 
 from . import libsvm, liblinear
 from . import libsvm_sparse
-from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
+from ..base import BaseEstimator, ClassifierMixin
 from ..preprocessing import LabelEncoder
 from ..utils import check_array, check_random_state, column_or_1d
 from ..utils import ConvergenceWarning, compute_class_weight
 from ..utils.extmath import safe_sparse_dot
 from ..utils.validation import check_is_fitted
 from ..externals import six
 
-
 LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
 
 
@@ -70,7 +69,7 @@ def __init__(self, impl, kernel, degree, gamma, coef0,
                  tol, C, nu, epsilon, shrinking, probability, cache_size,
                  class_weight, verbose, max_iter, random_state):
 
-        if not impl in LIBSVM_IMPL:  # pragma: no cover
+        if impl not in LIBSVM_IMPL:  # pragma: no cover
             raise ValueError("impl should be one of %s, %s was given" % (
                 LIBSVM_IMPL, impl))
 
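The hunk above only changes spelling: `impl not in LIBSVM_IMPL` is the idiomatic form of the same membership test. A minimal standalone sketch of the guard's behavior (`check_impl` is an illustrative helper, not part of the module):

    LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']

    def check_impl(impl):
        # Same guard as the __init__ above: reject unknown LibSVM variants.
        if impl not in LIBSVM_IMPL:
            raise ValueError("impl should be one of %s, %s was given"
                             % (LIBSVM_IMPL, impl))

    check_impl('c_svc')    # accepted silently
    try:
        check_impl('svc')  # not a valid LibSVM implementation name
    except ValueError as exc:
        print(exc)         # impl should be one of [...], svc was given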
@@ -384,7 +383,7 @@ def decision_function(self, X):
 
     def _validate_for_predict(self, X):
         check_is_fitted(self, 'support_')
-
+
         X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
         if self._sparse and not sp.isspmatrix(X):
             X = sp.csr_matrix(X)
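For context on the unchanged lines: `check_array` with these arguments coerces any array-like into a C-ordered float64 ndarray and lets CSR input pass through unconverted, which is why the `_sparse` branch afterwards only has to convert non-CSR input. A quick sketch using the public utility:

    import numpy as np
    import scipy.sparse as sp
    from sklearn.utils import check_array

    # Dense array-like input: converted to C-ordered float64.
    X = check_array([[1, 2], [3, 4]], accept_sparse='csr',
                    dtype=np.float64, order='C')
    print(X.dtype, X.flags['C_CONTIGUOUS'])   # float64 True

    # CSR input is accepted as-is (no densification).
    X_sp = check_array(sp.csr_matrix([[0., 1.], [2., 0.]]),
                       accept_sparse='csr', dtype=np.float64)
    print(sp.issparse(X_sp))                  # True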
@@ -604,63 +603,63 @@ def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
     - loss
     - dual
 
-    The same number is internally by LibLinear to determine which
-    solver to use.
+    The same number is also internally used by LibLinear to determine
+    which solver to use.
     """
-
-    # nested dicts containing level 1: available loss functions,
+    # nested dicts containing level 1: available loss functions,
     # level2: available penalties for the given loss function,
     # level3: whether the dual solver is available for the specified
     # combination of loss function and penalty
     _solver_type_dict = {
         'logistic_regression': {
             'l1': {False: 6},
             'l2': {False: 0, True: 7}},
-        'hinge': {
-            'l2': {True: 3}},
+        'hinge': {
+            'l2': {True: 3}},
         'squared_hinge': {
-            'l1': {False: 5},
+            'l1': {False: 5},
             'l2': {False: 2, True: 1}},
         'epsilon_insensitive': {
             'l2': {True: 13}},
         'squared_epsilon_insensitive': {
             'l2': {False: 11, True: 12}},
         'crammer_singer': 4
     }
-
 
     if multi_class == 'crammer_singer':
         return _solver_type_dict[multi_class]
     elif multi_class != 'ovr':
         raise ValueError("`multi_class` must be one of `ovr`, "
                          "`crammer_singer`, got %r" % multi_class)
 
-    _solver_pen = _solver_type_dict.get(loss, None)
+    # FIXME loss.lower() --> loss in 0.18
+    _solver_pen = _solver_type_dict.get(loss.lower(), None)
     if _solver_pen is None:
-        error_string = ("Loss %s is not supported" % loss)
+        error_string = ("loss='%s' is not supported" % loss)
     else:
-        _solver_dual = _solver_pen.get(penalty, None)
+        # FIXME penalty.lower() --> penalty in 0.18
+        _solver_dual = _solver_pen.get(penalty.lower(), None)
         if _solver_dual is None:
             error_string = ("The combination of penalty='%s'"
                             "and loss='%s' is not supported"
-                            % (loss, penalty))
+                            % (penalty, loss))
         else:
             solver_num = _solver_dual.get(dual, None)
             if solver_num is None:
                 error_string = ("loss='%s' and penalty='%s'"
                                 "are not supported when dual=%s"
-                                % (loss, penalty, dual))
+                                % (penalty, loss, dual))
             else:
                 return solver_num
-    raise ValueError('Unsupported set of arguments: %s, '
-                     'Parameters: penalty=%r, loss=%r, dual=%r '
-                     % (error_string, penalty, loss, dual))
-    return _solver_type_dict[solver_type]
+
+    raise ValueError(('Unsupported set of arguments: %s, '
+                      'Parameters: penalty=%r, loss=%r, dual=%r')
+                     % (error_string, penalty, loss, dual))
 
 
 def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
                    penalty, dual, verbose, max_iter, tol,
-                   random_state=None, multi_class='ovr',
+                   random_state=None, multi_class='ovr',
                    loss='logistic_regression', epsilon=0.1):
     """Used by Logistic Regression (and CV) and LinearSVC.
 
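The nested dictionary is the entire dispatch table: level 1 keys are loss names, level 2 penalties, level 3 the dual flag, and each leaf is the integer solver id LibLinear expects. A standalone sketch of the happy-path lookup for multi_class='ovr' (the dict is copied from the hunk; `solver_id` is an illustrative helper, not part of the module):

    _solver_type_dict = {
        'logistic_regression': {'l1': {False: 6}, 'l2': {False: 0, True: 7}},
        'hinge': {'l2': {True: 3}},
        'squared_hinge': {'l1': {False: 5}, 'l2': {False: 2, True: 1}},
        'epsilon_insensitive': {'l2': {True: 13}},
        'squared_epsilon_insensitive': {'l2': {False: 11, True: 12}},
        'crammer_singer': 4,
    }

    def solver_id(loss, penalty, dual):
        # Mirrors _get_liblinear_solver_type when every key is present.
        return _solver_type_dict[loss][penalty][dual]

    print(solver_id('squared_hinge', 'l2', True))        # 1, LinearSVC default
    print(solver_id('logistic_regression', 'l2', False))  # 0, primal L2 logreg
    print(solver_id('epsilon_insensitive', 'l2', True))   # 13, dual SVR

Any missing key falls through the `.get(..., None)` chain in the real function and ends in the single ValueError at the bottom, which is why the rewritten version has exactly one `raise` after the unreachable trailing `return` was dropped.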
@@ -722,7 +721,7 @@ def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
         If `crammer_singer` is chosen, the options loss, penalty and dual will
         be ignored.
 
-    loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
+    loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
                  'epsilon_insensitive', 'squared_epsilon_insensitive'}
         The loss function used to fit the model.
 
@@ -743,7 +742,23 @@ def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
     n_iter_ : int
         Maximum number of iterations run across all classes.
     """
-    if loss not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
+    # FIXME Remove case insensitivity in 0.18 ---------------------
+    loss_l, penalty_l = loss.lower(), penalty.lower()
+
+    msg = ("loss='%s' has been deprecated in favor of "
+           "loss='%s' as of 0.16. Backward compatibility"
+           " for the uppercase notation will be removed in %s")
+    if (not loss.islower()) and loss_l not in ('l1', 'l2'):
+        warnings.warn(msg % (loss, loss_l, "0.18"),
+                      DeprecationWarning)
+    if not penalty.islower():
+        warnings.warn(msg.replace("loss", "penalty")
+                      % (penalty, penalty_l, "0.18"),
+                      DeprecationWarning)
+    # -------------------------------------------------------------
+
+    # FIXME loss_l --> loss in 0.18
+    if loss_l not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
         enc = LabelEncoder()
         y_ind = enc.fit_transform(y)
         classes_ = enc.classes_
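The inserted block does not change which arguments are accepted; it only canonicalizes case and emits a DeprecationWarning for the old uppercase spellings. A sketch reproducing the warning logic outside the module (`_warn_uppercase` is illustrative, not the library API):

    import warnings

    def _warn_uppercase(loss, penalty):
        # Same checks as the diff: uppercase spellings still work in
        # 0.16/0.17 but warn; removal is slated for 0.18.
        loss_l, penalty_l = loss.lower(), penalty.lower()
        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the uppercase notation will be removed in %s")
        if (not loss.islower()) and loss_l not in ('l1', 'l2'):
            warnings.warn(msg % (loss, loss_l, "0.18"), DeprecationWarning)
        if not penalty.islower():
            warnings.warn(msg.replace("loss", "penalty")
                          % (penalty, penalty_l, "0.18"), DeprecationWarning)
        return loss_l, penalty_l

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _warn_uppercase('Squared_Hinge', 'L2')
    print(len(caught))   # 2: one warning for the loss, one for the penalty

Note the `('l1', 'l2')` carve-out: lowercase 'l1'/'l2' as loss values are a separate deprecation handled elsewhere, so they must not trigger this message.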
@@ -772,7 +787,7 @@ def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
     # LibLinear wants targets as doubles, even for classification
     y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
     solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
-    raw_coef_, n_iter_ = liblinear.train_wrap(
+    raw_coef_, n_iter_ = liblinear.train_wrap(
         X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
         class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
         epsilon
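Taken together, a LinearSVC fit flows through these helpers: `_fit_liblinear` canonicalizes and validates `loss`/`penalty`, `_get_liblinear_solver_type` maps them to a solver id, and `liblinear.train_wrap` runs the actual optimization. A quick exercise of the public entry point on synthetic data:

    import numpy as np
    from sklearn.svm import LinearSVC

    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)

    # penalty='l2', loss='squared_hinge', dual=True -> solver id 1 above.
    clf = LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
                    random_state=0).fit(X, y)
    print(clf.coef_.shape)   # (1, 5) for a binary problem
    print(clf.n_iter_)       # max iterations run, from _fit_liblinear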