@@ -1006,7 +1006,28 @@ class SparseCoder(SparseCodingMixin, BaseEstimator):
1006
1006
components_ : array, [n_components, n_features]
1007
1007
The unchanged dictionary atoms
1008
1008
1009
- See also
1009
+ Examples
1010
+ --------
1011
+ >>> import numpy as np
1012
+ >>> from sklearn.decomposition import SparseCoder
1013
+ >>> X = np.array([[-1, -1, -1], [0, 0, 3]])
1014
+ >>> dictionary = np.array(
1015
+ ... [[0, 1, 0],
1016
+ ... [-1, -1, 2],
1017
+ ... [1, 1, 1],
1018
+ ... [0, 1, 1],
1019
+ ... [0, 2, 1]],
1020
+ ... dtype=np.float64
1021
+ ... )
1022
+ >>> coder = SparseCoder(
1023
+ ... dictionary=dictionary, transform_algorithm='lasso_lars',
1024
+ ... transform_alpha=1e-10,
1025
+ ... )
1026
+ >>> coder.transform(X)
1027
+ array([[ 0., 0., -1., 0., 0.],
1028
+ [ 0., 1., 1., 0., 0.]])
1029
+
1030
+ See Also
1010
1031
--------
1011
1032
DictionaryLearning
1012
1033
MiniBatchDictionaryLearning
@@ -1060,7 +1081,7 @@ class DictionaryLearning(SparseCodingMixin, BaseEstimator):
1060
1081
1061
1082
Solves the optimization problem::
1062
1083
1063
- (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
1084
+ (U^*,V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
1064
1085
(U,V)
1065
1086
with || V_k ||_2 = 1 for all 0 <= k < n_components
1066
1087
@@ -1173,6 +1194,33 @@ class DictionaryLearning(SparseCodingMixin, BaseEstimator):
1173
1194
n_iter_ : int
1174
1195
Number of iterations run.
1175
1196
1197
+ Examples
1198
+ --------
1199
+ >>> import numpy as np
1200
+ >>> from sklearn.datasets import make_sparse_coded_signal
1201
+ >>> from sklearn.decomposition import DictionaryLearning
1202
+ >>> X, dictionary, code = make_sparse_coded_signal(
1203
+ ... n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10,
1204
+ ... random_state=42,
1205
+ ... )
1206
+ >>> dict_learner = DictionaryLearning(
1207
+ ... n_components=15, transform_algorithm='lasso_lars', random_state=42,
1208
+ ... )
1209
+ >>> X_transformed = dict_learner.fit_transform(X)
1210
+
1211
+ We can check the level of sparsity of `X_transformed`:
1212
+
1213
+ >>> np.mean(X_transformed == 0)
1214
+ 0.88...
1215
+
1216
+ We can compare the average squared Euclidean norm of the reconstruction
1217
+ error of the sparse coded signal relative to the squared Euclidean norm of
1218
+ the original signal:
1219
+
1220
+ >>> X_hat = X_transformed @ dict_learner.components_
1221
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1222
+ 0.07...
1223
+
1176
1224
Notes
1177
1225
-----
1178
1226
**References:**
@@ -1258,7 +1306,7 @@ class MiniBatchDictionaryLearning(SparseCodingMixin, BaseEstimator):
1258
1306
1259
1307
Solves the optimization problem::
1260
1308
1261
- (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
1309
+ (U^*,V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
1262
1310
(U,V)
1263
1311
with || V_k ||_2 = 1 for all 0 <= k < n_components
1264
1312
@@ -1378,6 +1426,32 @@ class MiniBatchDictionaryLearning(SparseCodingMixin, BaseEstimator):
1378
1426
RandomState instance that is generated either from a seed, the random
1379
1427
number generator or by `np.random`.
1380
1428
1429
+ Examples
1430
+ --------
1431
+ >>> import numpy as np
1432
+ >>> from sklearn.datasets import make_sparse_coded_signal
1433
+ >>> from sklearn.decomposition import MiniBatchDictionaryLearning
1434
+ >>> X, dictionary, code = make_sparse_coded_signal(
1435
+ ... n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10,
1436
+ ... random_state=42)
1437
+ >>> dict_learner = MiniBatchDictionaryLearning(
1438
+ ... n_components=15, transform_algorithm='lasso_lars', random_state=42,
1439
+ ... )
1440
+ >>> X_transformed = dict_learner.fit_transform(X)
1441
+
1442
+ We can check the level of sparsity of `X_transformed`:
1443
+
1444
+ >>> np.mean(X_transformed == 0)
1445
+ 0.87...
1446
+
1447
+ We can compare the average squared Euclidean norm of the reconstruction
1448
+ error of the sparse coded signal relative to the squared Euclidean norm of
1449
+ the original signal:
1450
+
1451
+ >>> X_hat = X_transformed @ dict_learner.components_
1452
+ >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1453
+ 0.10...
1454
+
1381
1455
Notes
1382
1456
-----
1383
1457
**References:**
0 commit comments