@@ -466,23 +466,21 @@ def test_kernel_pca_solvers_equivalence(n_components):
     assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred))
 
 
-@pytest.mark.parametrize("kernel",
-                         ["linear", "poly", "rbf", "sigmoid", "cosine"])
-def test_kernel_pca_inverse_transform(kernel):
-    """Check that transform + inverse transform = identity
-
-    Makes sure that whatever the solver, transforming and then inverse
-    transforming the training set leads to the train set, if the
-    number of components is large enough.
+def test_kernel_pca_inverse_transform_reconstruction():
+    """Test if the reconstruction is a good approximation.
+
+    Note that in general it is not possible to get an arbitrarily good
+    reconstruction because of kernel centering that does not
+    preserve all the information of the original data.
     """
-    X, *_ = make_blobs(n_samples=100, n_features=4, centers=[[1, 1, 1, 1]],
-                       random_state=0)
-
-    kp = KernelPCA(n_components=2, kernel=kernel, fit_inverse_transform=True,
-                   random_state=0)
-    X_trans = kp.fit_transform(X)
-    X_inv = kp.inverse_transform(X_trans)
-    assert_allclose(X, X_inv)
+    X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)
+
+    kpca = KernelPCA(
+        n_components=20, kernel='rbf', fit_inverse_transform=True, alpha=1e-3
+    )
+    X_trans = kpca.fit_transform(X)
+    X_reconst = kpca.inverse_transform(X_trans)
+    assert np.linalg.norm(X - X_reconst) / np.linalg.norm(X) < 1e-1
 
 
 def test_32_64_decomposition_shape():
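
For context, a minimal standalone sketch of the check the new test performs, using the same data and parameters as the diff above; the script and its printed error are illustrative only, not part of the patch:

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.decomposition import KernelPCA

# Same blob data as in the test: 100 samples in 4 dimensions.
X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)

# With 20 components and a small ridge penalty (alpha) on the learned
# inverse map, the reconstruction should be close to X but not exact:
# kernel centering discards some information about the original data.
kpca = KernelPCA(
    n_components=20, kernel='rbf', fit_inverse_transform=True, alpha=1e-3
)
X_reconst = kpca.inverse_transform(kpca.fit_transform(X))

# Relative Frobenius-norm error; the new test asserts this stays below 10%.
rel_err = np.linalg.norm(X - X_reconst) / np.linalg.norm(X)
print(f"relative reconstruction error: {rel_err:.3f}")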