diff --git a/doc/modules/decomposition.rst b/doc/modules/decomposition.rst
index 54ed20cc36be4..e58796fa85b46 100644
--- a/doc/modules/decomposition.rst
+++ b/doc/modules/decomposition.rst
@@ -231,7 +231,7 @@ problem solved is a PCA problem (dictionary learning) with an
 .. math::
    (U^*, V^*) = \underset{U, V}{\operatorname{arg\,min\,}} & \frac{1}{2}
                 ||X-UV||_{\text{Fro}}^2+\alpha||V||_{1,1} \\
-                \text{subject to } & ||U_k||_2 = 1 \text{ for all }
+                \text{subject to } & ||U_k||_2 \leq 1 \text{ for all }
                 0 \leq k < n_{components}
 
 :math:`||.||_{\text{Fro}}` stands for the Frobenius norm and :math:`||.||_{1,1}`
@@ -513,7 +513,7 @@ dictionary fixed, and then updating the dictionary to best fit the sparse code.
 .. math::
    (U^*, V^*) = \underset{U, V}{\operatorname{arg\,min\,}} & \frac{1}{2}
                 ||X-UV||_{\text{Fro}}^2+\alpha||U||_{1,1} \\
-                \text{subject to } & ||V_k||_2 = 1 \text{ for all }
+                \text{subject to } & ||V_k||_2 \leq 1 \text{ for all }
                 0 \leq k < n_{\mathrm{atoms}}
 
 
diff --git a/doc/whats_new/v1.0.rst b/doc/whats_new/v1.0.rst
index 17c2b845d40d9..1cc84b74198ce 100644
--- a/doc/whats_new/v1.0.rst
+++ b/doc/whats_new/v1.0.rst
@@ -2,6 +2,26 @@
 
 .. currentmodule:: sklearn
 
+.. _changes_1_0_2:
+
+Version 1.0.2
+=============
+
+**In Development**
+
+Changelog
+---------
+
+:mod:`sklearn.decomposition`
+............................
+
+- |Fix| Fixed the constraint on the objective function of
+  :class:`decomposition.DictionaryLearning`,
+  :class:`decomposition.MiniBatchDictionaryLearning`, :class:`decomposition.SparsePCA`
+  and :class:`decomposition.MiniBatchSparsePCA` to be convex and match the referenced
+  article. :pr:`19210` by :user:`Jérémie du Boisberranger <jeremiedbb>`.
+
+
 .. _changes_1_0_1:
 
 Version 1.0.1
diff --git a/sklearn/decomposition/_dict_learning.py b/sklearn/decomposition/_dict_learning.py
index e4edaf31c9c32..aee341bd88b05 100644
--- a/sklearn/decomposition/_dict_learning.py
+++ b/sklearn/decomposition/_dict_learning.py
@@ -480,8 +480,8 @@ def _update_dict(
         if positive:
             np.clip(dictionary[k], 0, None, out=dictionary[k])
 
-        # Projection on the constraint set ||V_k|| == 1
-        dictionary[k] /= linalg.norm(dictionary[k])
+        # Projection on the constraint set ||V_k|| <= 1
+        dictionary[k] /= max(linalg.norm(dictionary[k]), 1)
 
     if verbose and n_unused > 0:
         print(f"{n_unused} unused atoms resampled.")
@@ -1331,7 +1331,7 @@ class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
 
         (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                      (U,V)
-                    with || V_k ||_2 = 1 for all  0 <= k < n_components
+                    with || V_k ||_2 <= 1 for all  0 <= k < n_components
 
     ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
     the entry-wise matrix norm which is the sum of the absolute values
@@ -1608,7 +1608,7 @@ class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
 
         (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                      (U,V)
-                    with || V_k ||_2 = 1 for all  0 <= k < n_components
+                    with || V_k ||_2 <= 1 for all  0 <= k < n_components
 
     ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
     the entry-wise matrix norm which is the sum of the absolute values
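
For reviewers: a minimal standalone sketch of the geometry behind the one-line change in ``_update_dict`` (illustrative only, not part of the patch; the helper names ``project_onto_unit_ball`` and ``project_onto_unit_sphere`` are made up for this note). Dividing by ``max(linalg.norm(dictionary[k]), 1)`` is the Euclidean projection onto the convex unit ball ``{v : ||v||_2 <= 1}``, whereas the old division by ``linalg.norm(dictionary[k])`` projects onto the non-convex unit sphere ``{v : ||v||_2 == 1}``::

    import numpy as np
    from numpy import linalg


    def project_onto_unit_ball(v):
        # New behaviour: a vector already inside the ball is left untouched
        # (max(..., 1) evaluates to 1); a vector outside the ball is rescaled
        # to unit norm, which is its closest point in the ball.
        return v / max(linalg.norm(v), 1)


    def project_onto_unit_sphere(v):
        # Old behaviour: every vector is forced to unit norm, even a short
        # one, which pushes it away from the ball's interior.
        return v / linalg.norm(v)


    inside = np.full(5, 0.1)    # ||inside||_2 ~= 0.22 < 1
    outside = np.full(5, 10.0)  # ||outside||_2 ~= 22.4 > 1

    # The ball projection is the identity on a vector inside the ball ...
    assert np.allclose(project_onto_unit_ball(inside), inside)
    # ... while the sphere projection inflates it to norm 1.
    assert np.isclose(linalg.norm(project_onto_unit_sphere(inside)), 1.0)
    # Outside the ball, both projections agree.
    assert np.allclose(
        project_onto_unit_ball(outside), project_onto_unit_sphere(outside)
    )

The inequality constraint keeps the feasible set convex, matching the formulation in the referenced article. In practice the constraint is typically active at a minimum of the penalized objective (inflating an atom lets the corresponding sparse codes shrink, reducing the l1 term), so fitted atoms should still usually come out with unit norm.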