@@ -338,6 +338,23 @@ def sparse_encode(
338
338
sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer.
339
339
SparseCoder : Find a sparse representation of data from a fixed precomputed
340
340
dictionary.
341
+
342
+ Examples
343
+ --------
344
+ >>> import numpy as np
345
+ >>> from sklearn.decomposition import sparse_encode
346
+ >>> X = np.array([[-1, -1, -1], [0, 0, 3]])
347
+ >>> dictionary = np.array(
348
+ ... [[0, 1, 0],
349
+ ... [-1, -1, 2],
350
+ ... [1, 1, 1],
351
+ ... [0, 1, 1],
352
+ ... [0, 2, 1]],
353
+ ... dtype=np.float64
354
+ ... )
355
+ >>> sparse_encode(X, dictionary, alpha=1e-10)
356
+ array([[ 0., 0., -1., 0., 0.],
357
+ [ 0., 1., 1., 0., 0.]])
341
358
"""
342
359
if check_input :
343
360
if algorithm == "lasso_cd" :
@@ -804,6 +821,32 @@ def dict_learning_online(
804
821
learning algorithm.
805
822
SparsePCA : Sparse Principal Components Analysis.
806
823
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
824
+
825
+ Examples
826
+ --------
827
+ >>> import numpy as np
828
+ >>> from sklearn.datasets import make_sparse_coded_signal
829
+ >>> from sklearn.decomposition import dict_learning_online
830
+ >>> X, _, _ = make_sparse_coded_signal(
831
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
832
+ ... random_state=42,
833
+ ... )
834
+ >>> U, V = dict_learning_online(
835
+ ... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
836
+ ... )
837
+
838
+ We can check the level of sparsity of `U`:
839
+
840
+ >>> np.mean(U == 0)
841
+ 0.53...
842
+
843
+ We can compare the average squared Euclidean norm of the reconstruction
844
+ error of the sparse coded signal relative to the squared euclidean norm of
845
+ the original signal:
846
+
847
+ >>> X_hat = U @ V
848
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
849
+ 0.05...
807
850
"""
808
851
# TODO(1.6): remove in 1.6
809
852
if max_iter is None :
@@ -982,6 +1025,30 @@ def dict_learning(
982
1025
of the dictionary learning algorithm.
983
1026
SparsePCA : Sparse Principal Components Analysis.
984
1027
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1028
+
1029
+ Examples
1030
+ --------
1031
+ >>> import numpy as np
1032
+ >>> from sklearn.datasets import make_sparse_coded_signal
1033
+ >>> from sklearn.decomposition import dict_learning
1034
+ >>> X, _, _ = make_sparse_coded_signal(
1035
+ ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1036
+ ... random_state=42,
1037
+ ... )
1038
+ >>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42)
1039
+
1040
+ We can check the level of sparsity of `U`:
1041
+
1042
+ >>> np.mean(U == 0)
1043
+ 0.6...
1044
+
1045
+ We can compare the average squared Euclidean norm of the reconstruction
1046
+ error of the sparse coded signal relative to the squared euclidean norm of
1047
+ the original signal:
1048
+
1049
+ >>> X_hat = U @ V
1050
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1051
+ 0.01...
985
1052
"""
986
1053
estimator = DictionaryLearning (
987
1054
n_components = n_components ,
0 commit comments