DOC add examples in docstring for decomposition (#28131) · scikit-learn/scikit-learn@65c907d · GitHub
[go: up one dir, main page]

Skip to content

Commit 65c907d

Browse files
yuanx749 and glemaitre
authored
DOC add examples in docstring for decomposition (#28131)
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
1 parent 0040de8 commit 65c907d

File tree

1 file changed

+67
-0
lines changed

1 file changed

+67
-0
lines changed

sklearn/decomposition/_dict_learning.py

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -338,6 +338,23 @@ def sparse_encode(
338338
sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer.
339339
SparseCoder : Find a sparse representation of data from a fixed precomputed
340340
dictionary.
341+
342+
Examples
343+
--------
344+
>>> import numpy as np
345+
>>> from sklearn.decomposition import sparse_encode
346+
>>> X = np.array([[-1, -1, -1], [0, 0, 3]])
347+
>>> dictionary = np.array(
348+
... [[0, 1, 0],
349+
... [-1, -1, 2],
350+
... [1, 1, 1],
351+
... [0, 1, 1],
352+
... [0, 2, 1]],
353+
... dtype=np.float64
354+
... )
355+
>>> sparse_encode(X, dictionary, alpha=1e-10)
356+
array([[ 0., 0., -1., 0., 0.],
357+
[ 0., 1., 1., 0., 0.]])
341358
"""
342359
if check_input:
343360
if algorithm == "lasso_cd":
@@ -804,6 +821,32 @@ def dict_learning_online(
804821
learning algorithm.
805822
SparsePCA : Sparse Principal Components Analysis.
806823
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
824+
825+
Examples
826+
--------
827+
>>> import numpy as np
828+
>>> from sklearn.datasets import make_sparse_coded_signal
829+
>>> from sklearn.decomposition import dict_learning_online
830+
>>> X, _, _ = make_sparse_coded_signal(
831+
... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
832+
... random_state=42,
833+
... )
834+
>>> U, V = dict_learning_online(
835+
... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
836+
... )
837+
838+
We can check the level of sparsity of `U`:
839+
840+
>>> np.mean(U == 0)
841+
0.53...
842+
843+
We can compare the average squared euclidean norm of the reconstruction
844+
error of the sparse coded signal relative to the squared euclidean norm of
845+
the original signal:
846+
847+
>>> X_hat = U @ V
848+
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
849+
0.05...
807850
"""
808851
# TODO(1.6): remove in 1.6
809852
if max_iter is None:
@@ -982,6 +1025,30 @@ def dict_learning(
9821025
of the dictionary learning algorithm.
9831026
SparsePCA : Sparse Principal Components Analysis.
9841027
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
1028+
1029+
Examples
1030+
--------
1031+
>>> import numpy as np
1032+
>>> from sklearn.datasets import make_sparse_coded_signal
1033+
>>> from sklearn.decomposition import dict_learning
1034+
>>> X, _, _ = make_sparse_coded_signal(
1035+
... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
1036+
... random_state=42,
1037+
... )
1038+
>>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42)
1039+
1040+
We can check the level of sparsity of `U`:
1041+
1042+
>>> np.mean(U == 0)
1043+
0.6...
1044+
1045+
We can compare the average squared euclidean norm of the reconstruction
1046+
error of the sparse coded signal relative to the squared euclidean norm of
1047+
the original signal:
1048+
1049+
>>> X_hat = U @ V
1050+
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
1051+
0.01...
9851052
"""
9861053
estimator = DictionaryLearning(
9871054
n_components=n_components,

0 commit comments

Comments
 (0)
0