@@ -185,10 +185,11 @@ def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
 
 
 # XXX : could be moved to the linear_model module
-def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
-                  n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
-                  max_iter=1000, n_jobs=None, check_input=True, verbose=0,
-                  positive=False):
+@_deprecate_positional_args
+def sparse_encode(X, dictionary, *, gram=None, cov=None,
+                  algorithm='lasso_lars', n_nonzero_coefs=None, alpha=None,
+                  copy_cov=True, init=None, max_iter=1000, n_jobs=None,
+                  check_input=True, verbose=0, positive=False):
     """Sparse coding
 
     Each row of the result is the solution to a sparse coding problem.
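
With the `_deprecate_positional_args` decorator and the bare `*` in the new signature, only `X` and `dictionary` may still be passed positionally; every other argument must be spelled as a keyword, and positional use emits a `FutureWarning` during the deprecation window. A minimal usage sketch with made-up data (the array shapes and parameter values are illustrative only):

import numpy as np
from sklearn.decomposition import sparse_encode

rng = np.random.RandomState(0)
X = rng.randn(10, 8)                     # 10 signals of dimension 8
dictionary = rng.randn(5, 8)             # 5 atoms of dimension 8
dictionary /= np.linalg.norm(dictionary, axis=1, keepdims=True)

# Everything after `dictionary` is now keyword-only.
code = sparse_encode(X, dictionary, algorithm='lasso_lars', alpha=1.0)
print(code.shape)                        # (10, 5)
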
@@ -421,7 +422,8 @@ def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
     return dictionary
 
 
-def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
+@_deprecate_positional_args
+def dict_learning(X, n_components, *, alpha, max_iter=100, tol=1e-8,
                   method='lars', n_jobs=None, dict_init=None, code_init=None,
                   callback=None, verbose=False, random_state=None,
                   return_n_iter=False, positive_dict=False,
@@ -615,7 +617,8 @@ def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
615
617
return code , dictionary , errors
616
618
617
619
618
- def dict_learning_online (X , n_components = 2 , alpha = 1 , n_iter = 100 ,
620
+ @_deprecate_positional_args
621
+ def dict_learning_online (X , n_components = 2 , * , alpha = 1 , n_iter = 100 ,
619
622
return_code = True , dict_init = None , callback = None ,
620
623
batch_size = 3 , verbose = False , shuffle = True ,
621
624
n_jobs = None , method = 'lars' , iter_offset = 0 ,
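
The decorator applied in the three hunks above is the private helper scikit-learn uses to soft-deprecate positional arguments. A rough, simplified sketch of how such a decorator can be built follows; it is illustrative only and is not the library's actual implementation, which handles more edge cases:

import functools
import warnings
from inspect import Parameter, signature


def deprecate_positional_args_sketch(f):
    # Hypothetical stand-in for sklearn's private `_deprecate_positional_args`.
    sig = signature(f)
    # Parameters that may still be passed positionally (before the `*`).
    positional = [name for name, p in sig.parameters.items()
                  if p.kind in (Parameter.POSITIONAL_ONLY,
                                Parameter.POSITIONAL_OR_KEYWORD)]
    # The new keyword-only parameters (after the `*`).
    keyword_only = [name for name, p in sig.parameters.items()
                    if p.kind == Parameter.KEYWORD_ONLY]

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        n_extra = len(args) - len(positional)
        if n_extra > 0:
            # Map the overflowing positional values onto keyword-only names
            # and warn, instead of failing outright.
            extra = dict(zip(keyword_only, args[len(positional):]))
            warnings.warn(
                "Pass {} as keyword args; passing them positionally is "
                "deprecated.".format(", ".join(extra)), FutureWarning)
            kwargs.update(extra)
            args = args[:len(positional)]
        return f(*args, **kwargs)

    return wrapper
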
@@ -1230,7 +1233,7 @@ def fit(self, X, y=None):
             n_components = self.n_components
 
         V, U, E, self.n_iter_ = dict_learning(
-            X, n_components, self.alpha,
+            X, n_components, alpha=self.alpha,
             tol=self.tol, max_iter=self.max_iter,
             method=self.fit_algorithm,
             method_max_iter=self.transform_max_iter,
@@ -1434,7 +1437,7 @@ def fit(self, X, y=None):
         X = self._validate_data(X)
 
         U, (A, B), self.n_iter_ = dict_learning_online(
-            X, self.n_components, self.alpha,
+            X, self.n_components, alpha=self.alpha,
             n_iter=self.n_iter, return_code=False,
             method=self.fit_algorithm,
             method_max_iter=self.transform_max_iter,
@@ -1486,7 +1489,7 @@ def partial_fit(self, X, y=None, iter_offset=None):
         if iter_offset is None:
             iter_offset = getattr(self, 'iter_offset_', 0)
         U, (A, B) = dict_learning_online(
-            X, self.n_components, self.alpha,
+            X, self.n_components, alpha=self.alpha,
             n_iter=self.n_iter, method=self.fit_algorithm,
             method_max_iter=self.transform_max_iter,
             n_jobs=self.n_jobs, dict_init=dict_init,
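
The last three hunks update the internal call sites in `DictionaryLearning.fit`, `MiniBatchDictionaryLearning.fit`, and `partial_fit` so that `alpha` is passed by keyword, which keeps the estimators themselves from triggering the new `FutureWarning`. Code calling the functions directly needs the same adjustment; a small illustration with made-up data:

import numpy as np
from sklearn.decomposition import dict_learning

rng = np.random.RandomState(0)
X = rng.randn(20, 8)

# `alpha` is keyword-only after this change (and has no default),
# so spell it out, just as the updated call sites above do.
code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.0)
print(code.shape, dictionary.shape)      # (20, 5) (5, 8)
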