 import numpy as np
 from scipy import linalg
-from sklearn.decomposition import nmf
+from sklearn.decomposition import (NMF, ProjectedGradientNMF,
+                                   non_negative_factorization)
+from sklearn.decomposition import nmf  # For testing internals
 from scipy.sparse import csc_matrix
 
 from sklearn.utils.testing import assert_true
@@ -30,17 +32,17 @@ def test_parameter_checking():
     A = np.ones((2, 2))
     name = 'spam'
     msg = "Invalid solver parameter: got 'spam' instead of one of"
-    assert_raise_message(ValueError, msg, nmf.NMF(solver=name).fit, A)
+    assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
     msg = "Invalid init parameter: got 'spam' instead of one of"
-    assert_raise_message(ValueError, msg, nmf.NMF(init=name).fit, A)
+    assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
     msg = "Invalid sparseness parameter: got 'spam' instead of one of"
-    assert_raise_message(ValueError, msg, nmf.NMF(sparseness=name).fit, A)
+    assert_raise_message(ValueError, msg, NMF(sparseness=name).fit, A)
 
     msg = "Negative values in data passed to"
-    assert_raise_message(ValueError, msg, nmf.NMF().fit, -A)
+    assert_raise_message(ValueError, msg, NMF().fit, -A)
     assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
                          2, 'nndsvd')
-    clf = nmf.NMF(2, tol=0.1).fit(A)
+    clf = NMF(2, tol=0.1).fit(A)
     assert_raise_message(ValueError, msg, clf.transform, -A)
 
 
@@ -76,8 +78,8 @@ def test_nmf_fit_nn_output():
               5 * np.ones(5) + np.arange(1, 6)]
     for solver in ('pg', 'cd'):
         for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
-            model = nmf.NMF(n_components=2, solver=solver, init=init,
-                            random_state=0)
+            model = NMF(n_components=2, solver=solver, init=init,
+                        random_state=0)
             transf = model.fit_transform(A)
             assert_false((model.components_ < 0).any() or
                          (transf < 0).any())
@@ -87,7 +89,7 @@ def test_nmf_fit_nn_output():
 def test_nmf_fit_close():
     # Test that the fit is not too far away
     for solver in ('pg', 'cd'):
-        pnmf = nmf.NMF(5, solver=solver, init='nndsvd', random_state=0)
+        pnmf = NMF(5, solver=solver, init='nndsvd', random_state=0)
         X = np.abs(random_state.randn(6, 5))
         assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
 
@@ -112,8 +114,7 @@ def test_nmf_transform():
     # Test that NMF.transform returns close values
     A = np.abs(random_state.randn(6, 5))
     for solver in ('pg', 'cd'):
-        m = nmf.NMF(solver=solver, n_components=4, init='nndsvd',
-                    random_state=0)
+        m = NMF(solver=solver, n_components=4, init='nndsvd', random_state=0)
         ft = m.fit_transform(A)
         t = m.transform(A)
         assert_array_almost_equal(ft, t, decimal=2)
@@ -123,7 +124,7 @@ def test_nmf_transform():
 def test_n_components_greater_n_features():
     # Smoke test for the case of more components than features.
     A = np.abs(random_state.randn(30, 10))
-    nmf.NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
+    NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
 
 
 @ignore_warnings
@@ -133,14 +134,13 @@ def test_projgrad_nmf_sparseness():
     # part where they are applied.
     tol = 1e-2
     A = np.abs(random_state.randn(10, 10))
-    m = nmf.ProjectedGradientNMF(n_components=5, random_state=0,
-                                 tol=tol).fit(A)
-    data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
-                                       random_state=0,
-                                       tol=tol).fit(A).data_sparseness_
-    comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
-                                       random_state=0,
-                                       tol=tol).fit(A).comp_sparseness_
+    m = ProjectedGradientNMF(n_components=5, random_state=0, tol=tol).fit(A)
+    data_sp = ProjectedGradientNMF(n_components=5, sparseness='data',
+                                   random_state=0,
+                                   tol=tol).fit(A).data_sparseness_
+    comp_sp = ProjectedGradientNMF(n_components=5, sparseness='components',
+                                   random_state=0,
+                                   tol=tol).fit(A).comp_sparseness_
     assert_greater(data_sp, m.data_sparseness_)
     assert_greater(comp_sp, m.comp_sparseness_)
 
@@ -155,8 +155,8 @@ def test_sparse_input():
     A_sparse = csc_matrix(A)
 
     for solver in ('pg', 'cd'):
-        est1 = nmf.NMF(solver=solver, n_components=5, init='random',
-                       random_state=0, tol=1e-2)
+        est1 = NMF(solver=solver, n_components=5, init='random',
+                   random_state=0, tol=1e-2)
         est2 = clone(est1)
 
         W1 = est1.fit_transform(A)
@@ -177,8 +177,7 @@ def test_sparse_transform():
     A = csc_matrix(A)
 
     for solver in ('pg', 'cd'):
-        model = nmf.NMF(solver=solver, random_state=0, tol=1e-4,
-                        n_components=2)
+        model = NMF(solver=solver, random_state=0, tol=1e-4, n_components=2)
         A_fit_tr = model.fit_transform(A)
         A_tr = model.transform(A)
         assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@@ -192,12 +191,12 @@ def test_non_negative_factorization_consistency():
     A[:, 2 * np.arange(5)] = 0
 
     for solver in ('pg', 'cd'):
-        W_nmf, H, _ = nmf.non_negative_factorization(
+        W_nmf, H, _ = non_negative_factorization(
             A, solver=solver, random_state=1, tol=1e-2)
-        W_nmf_2, _, _ = nmf.non_negative_factorization(
+        W_nmf_2, _, _ = non_negative_factorization(
             A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
 
-        model_class = nmf.NMF(solver=solver, random_state=1, tol=1e-2)
+        model_class = NMF(solver=solver, random_state=1, tol=1e-2)
         W_cls = model_class.fit_transform(A)
         W_cls_2 = model_class.transform(A)
         assert_array_almost_equal(W_nmf, W_cls, decimal=10)
@@ -208,7 +207,7 @@ def test_non_negative_factorization_consistency():
 def test_non_negative_factorization_checking():
     A = np.ones((2, 2))
     # Test parameters checking is public function
-    nnmf = nmf.non_negative_factorization
+    nnmf = non_negative_factorization
     msg = "Number of components must be positive; got (n_components='2')"
     assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
     msg = "Negative values in data passed to NMF (input H)"