diff --git a/sklearn/cluster/tests/test_affinity_propagation.py b/sklearn/cluster/tests/test_affinity_propagation.py index 0c79947456511..a814a5167bb0a 100644 --- a/sklearn/cluster/tests/test_affinity_propagation.py +++ b/sklearn/cluster/tests/test_affinity_propagation.py @@ -160,5 +160,5 @@ def test_equal_similarities_and_preferences(): assert_false(_equal_similarities_and_preferences(S, np.array([0, 1]))) # Same preferences - assert_true(_equal_similarities_and_preferences(S, np.array([0, 0]))) - assert_true(_equal_similarities_and_preferences(S, np.array(0))) + assert _equal_similarities_and_preferences(S, np.array([0, 0])) + assert _equal_similarities_and_preferences(S, np.array(0)) diff --git a/sklearn/cluster/tests/test_bicluster.py b/sklearn/cluster/tests/test_bicluster.py index d217d51373512..ab65b46e28332 100644 --- a/sklearn/cluster/tests/test_bicluster.py +++ b/sklearn/cluster/tests/test_bicluster.py @@ -51,7 +51,7 @@ def test_get_submatrix(): submatrix[:] = -1 if issparse(X): X = X.toarray() - assert_true(np.all(X != -1)) + assert np.all(X != -1) def _test_shape_indices(model): diff --git a/sklearn/cluster/tests/test_feature_agglomeration.py b/sklearn/cluster/tests/test_feature_agglomeration.py index 5c992109ffaba..cb61413efc22f 100644 --- a/sklearn/cluster/tests/test_feature_agglomeration.py +++ b/sklearn/cluster/tests/test_feature_agglomeration.py @@ -18,24 +18,24 @@ def test_feature_agglomeration(): pooling_func=np.median) assert_no_warnings(agglo_mean.fit, X) assert_no_warnings(agglo_median.fit, X) - assert_true(np.size(np.unique(agglo_mean.labels_)) == n_clusters) - assert_true(np.size(np.unique(agglo_median.labels_)) == n_clusters) - assert_true(np.size(agglo_mean.labels_) == X.shape[1]) - assert_true(np.size(agglo_median.labels_) == X.shape[1]) + assert np.size(np.unique(agglo_mean.labels_)) == n_clusters + assert np.size(np.unique(agglo_median.labels_)) == n_clusters + assert np.size(agglo_mean.labels_) == X.shape[1] + assert np.size(agglo_median.labels_) == X.shape[1] # Test transform Xt_mean = agglo_mean.transform(X) Xt_median = agglo_median.transform(X) - assert_true(Xt_mean.shape[1] == n_clusters) - assert_true(Xt_median.shape[1] == n_clusters) - assert_true(Xt_mean == np.array([1 / 3.])) - assert_true(Xt_median == np.array([0.])) + assert Xt_mean.shape[1] == n_clusters + assert Xt_median.shape[1] == n_clusters + assert Xt_mean == np.array([1 / 3.]) + assert Xt_median == np.array([0.]) # Test inverse transform X_full_mean = agglo_mean.inverse_transform(Xt_mean) X_full_median = agglo_median.inverse_transform(Xt_median) - assert_true(np.unique(X_full_mean[0]).size == n_clusters) - assert_true(np.unique(X_full_median[0]).size == n_clusters) + assert np.unique(X_full_mean[0]).size == n_clusters + assert np.unique(X_full_median[0]).size == n_clusters assert_array_almost_equal(agglo_mean.transform(X_full_mean), Xt_mean) diff --git a/sklearn/cluster/tests/test_hierarchical.py b/sklearn/cluster/tests/test_hierarchical.py index 2456f61c872c5..5910820ee6ce1 100644 --- a/sklearn/cluster/tests/test_hierarchical.py +++ b/sklearn/cluster/tests/test_hierarchical.py @@ -72,7 +72,7 @@ def test_structured_linkage_tree(): children, n_components, n_leaves, parent = \ tree_builder(X.T, connectivity) n_nodes = 2 * X.shape[1] - 1 - assert_true(len(children) + n_leaves == n_nodes) + assert len(children) + n_leaves == n_nodes # Check that ward_tree raises a ValueError with a connectivity matrix # of the wrong shape assert_raises(ValueError, @@ -114,7 +114,7 @@ def 
test_height_linkage_tree(): for linkage_func in _TREE_BUILDERS.values(): children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity) n_nodes = 2 * X.shape[1] - 1 - assert_true(len(children) + n_leaves == n_nodes) + assert len(children) + n_leaves == n_nodes def test_agglomerative_clustering_wrong_arg_memory(): @@ -152,7 +152,7 @@ def test_agglomerative_clustering(): linkage=linkage) clustering.fit(X) labels = clustering.labels_ - assert_true(np.size(np.unique(labels)) == 10) + assert np.size(np.unique(labels)) == 10 finally: shutil.rmtree(tempdir) # Turn caching off now @@ -166,7 +166,7 @@ def test_agglomerative_clustering(): labels), 1) clustering.connectivity = None clustering.fit(X) - assert_true(np.size(np.unique(clustering.labels_)) == 10) + assert np.size(np.unique(clustering.labels_)) == 10 # Check that we raise a TypeError on dense matrices clustering = AgglomerativeClustering( n_clusters=10, @@ -226,12 +226,12 @@ def test_ward_agglomeration(): connectivity = grid_to_graph(*mask.shape) agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity) agglo.fit(X) - assert_true(np.size(np.unique(agglo.labels_)) == 5) + assert np.size(np.unique(agglo.labels_)) == 5 X_red = agglo.transform(X) - assert_true(X_red.shape[1] == 5) + assert X_red.shape[1] == 5 X_full = agglo.inverse_transform(X_red) - assert_true(np.unique(X_full[0]).size == 5) + assert np.unique(X_full[0]).size == 5 assert_array_almost_equal(agglo.transform(X_full), X_red) # Check that fitting with no samples raises a ValueError @@ -265,7 +265,7 @@ def assess_same_labelling(cut1, cut2): ecut = np.zeros((n, k)) ecut[np.arange(n), cut] = 1 co_clust.append(np.dot(ecut, ecut.T)) - assert_true((co_clust[0] == co_clust[1]).all()) + assert (co_clust[0] == co_clust[1]).all() def test_scikit_vs_scipy(): diff --git a/sklearn/cluster/tests/test_k_means.py b/sklearn/cluster/tests/test_k_means.py index bb4623ee0986a..b7ba8c483cb5e 100644 --- a/sklearn/cluster/tests/test_k_means.py +++ b/sklearn/cluster/tests/test_k_means.py @@ -107,8 +107,8 @@ def test_labels_assignment_and_inertia(): labels_gold[dist < mindist] = center_id mindist = np.minimum(dist, mindist) inertia_gold = mindist.sum() - assert_true((mindist >= 0.0).all()) - assert_true((labels_gold != -1).all()) + assert (mindist >= 0.0).all() + assert (labels_gold != -1).all() sample_weight = None @@ -565,9 +565,9 @@ def test_k_means_non_collapsed(): assert_equal(len(np.unique(km.labels_)), 3) centers = km.cluster_centers_ - assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1) - assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1) - assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1) + assert np.linalg.norm(centers[0] - centers[1]) >= 0.1 + assert np.linalg.norm(centers[0] - centers[2]) >= 0.1 + assert np.linalg.norm(centers[1] - centers[2]) >= 0.1 @pytest.mark.parametrize('algo', ['full', 'elkan']) @@ -689,7 +689,7 @@ def test_n_init(): failure_msg = ("Inertia %r should be decreasing" " when n_init is increasing.") % list(inertia) for i in range(len(n_init_range) - 1): - assert_true(inertia[i] >= inertia[i + 1], failure_msg) + assert inertia[i] >= inertia[i + 1], failure_msg def test_k_means_function(): diff --git a/sklearn/cluster/tests/test_mean_shift.py b/sklearn/cluster/tests/test_mean_shift.py index 441f822cdbded..e75ed3451cbaa 100644 --- a/sklearn/cluster/tests/test_mean_shift.py +++ b/sklearn/cluster/tests/test_mean_shift.py @@ -31,7 +31,7 @@ def test_estimate_bandwidth(): # Test estimate_bandwidth bandwidth = estimate_bandwidth(X, 
n_samples=200) - assert_true(0.9 <= bandwidth <= 1.5) + assert 0.9 <= bandwidth <= 1.5 def test_estimate_bandwidth_1sample(): @@ -125,14 +125,14 @@ def test_bin_seeds(): ground_truth = set([(1., 1.), (2., 1.), (0., 0.)]) test_bins = get_bin_seeds(X, 1, 1) test_result = set([tuple(p) for p in test_bins]) - assert_true(len(ground_truth.symmetric_difference(test_result)) == 0) + assert len(ground_truth.symmetric_difference(test_result)) == 0 # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be # found ground_truth = set([(1., 1.), (2., 1.)]) test_bins = get_bin_seeds(X, 1, 2) test_result = set([tuple(p) for p in test_bins]) - assert_true(len(ground_truth.symmetric_difference(test_result)) == 0) + assert len(ground_truth.symmetric_difference(test_result)) == 0 # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found # we bail and use the whole data here. diff --git a/sklearn/compose/tests/test_column_transformer.py b/sklearn/compose/tests/test_column_transformer.py index 7b9afaf5b4375..10b81cd0c1f9d 100644 --- a/sklearn/compose/tests/test_column_transformer.py +++ b/sklearn/compose/tests/test_column_transformer.py @@ -227,7 +227,7 @@ def fit(self, X, y=None): return self def transform(self, X, y=None): - assert_true(isinstance(X, (pd.DataFrame, pd.Series))) + assert isinstance(X, (pd.DataFrame, pd.Series)) if isinstance(X, pd.Series): X = X.to_frame() return X @@ -309,7 +309,7 @@ def test_column_transformer_sparse_array(): ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder, sparse_threshold=0.8) - assert_true(sparse.issparse(ct.fit_transform(X_sparse))) + assert sparse.issparse(ct.fit_transform(X_sparse)) assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res) assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res) @@ -317,7 +317,7 @@ def test_column_transformer_sparse_array(): for col in [[0, 1], slice(0, 2)]: ct = ColumnTransformer([('trans', Trans(), col)], sparse_threshold=0.8) - assert_true(sparse.issparse(ct.fit_transform(X_sparse))) + assert sparse.issparse(ct.fit_transform(X_sparse)) assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both) assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both) @@ -352,7 +352,7 @@ def test_column_transformer_sparse_stacking(): sparse_threshold=0.8) col_trans.fit(X_array) X_trans = col_trans.transform(X_array) - assert_true(sparse.issparse(X_trans)) + assert sparse.issparse(X_trans) assert_equal(X_trans.shape, (X_trans.shape[0], X_trans.shape[0] + 1)) assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0])) assert len(col_trans.transformers_) == 2 @@ -597,11 +597,11 @@ def test_column_transformer_named_estimators(): ('trans2', StandardScaler(with_std=False), [1])]) assert_false(hasattr(ct, 'transformers_')) ct.fit(X_array) - assert_true(hasattr(ct, 'transformers_')) - assert_true(isinstance(ct.named_transformers_['trans1'], StandardScaler)) - assert_true(isinstance(ct.named_transformers_.trans1, StandardScaler)) - assert_true(isinstance(ct.named_transformers_['trans2'], StandardScaler)) - assert_true(isinstance(ct.named_transformers_.trans2, StandardScaler)) + assert hasattr(ct, 'transformers_') + assert isinstance(ct.named_transformers_['trans1'], StandardScaler) + assert isinstance(ct.named_transformers_.trans1, StandardScaler) + assert isinstance(ct.named_transformers_['trans2'], StandardScaler) + assert isinstance(ct.named_transformers_.trans2, StandardScaler) assert_false(ct.named_transformers_.trans2.with_std) # check it are 
fitted transformers assert_equal(ct.named_transformers_.trans1.mean_, 1.) @@ -613,12 +613,12 @@ def test_column_transformer_cloning(): ct = ColumnTransformer([('trans', StandardScaler(), [0])]) ct.fit(X_array) assert_false(hasattr(ct.transformers[0][1], 'mean_')) - assert_true(hasattr(ct.transformers_[0][1], 'mean_')) + assert hasattr(ct.transformers_[0][1], 'mean_') ct = ColumnTransformer([('trans', StandardScaler(), [0])]) ct.fit_transform(X_array) assert_false(hasattr(ct.transformers[0][1], 'mean_')) - assert_true(hasattr(ct.transformers_[0][1], 'mean_')) + assert hasattr(ct.transformers_[0][1], 'mean_') def test_column_transformer_get_feature_names(): diff --git a/sklearn/cross_decomposition/tests/test_pls.py b/sklearn/cross_decomposition/tests/test_pls.py index a9ef55a5ed3aa..7160cd704d9a3 100644 --- a/sklearn/cross_decomposition/tests/test_pls.py +++ b/sklearn/cross_decomposition/tests/test_pls.py @@ -317,7 +317,7 @@ def test_predict_transform_copy(): assert_array_equal(X_copy, X) assert_array_equal(Y_copy, Y) # also check that mean wasn't zero before (to make sure we didn't touch it) - assert_true(np.all(X.mean(axis=0) != 0)) + assert np.all(X.mean(axis=0) != 0) def test_scale_and_stability(): diff --git a/sklearn/datasets/tests/test_20news.py b/sklearn/datasets/tests/test_20news.py index b36acd09b75e3..95be2c6a7faae 100644 --- a/sklearn/datasets/tests/test_20news.py +++ b/sklearn/datasets/tests/test_20news.py @@ -67,14 +67,14 @@ def test_20news_vectorized(): # test subset = train bunch = datasets.fetch_20newsgroups_vectorized(subset="train") - assert_true(sp.isspmatrix_csr(bunch.data)) + assert sp.isspmatrix_csr(bunch.data) assert_equal(bunch.data.shape, (11314, 130107)) assert_equal(bunch.target.shape[0], 11314) assert_equal(bunch.data.dtype, np.float64) # test subset = test bunch = datasets.fetch_20newsgroups_vectorized(subset="test") - assert_true(sp.isspmatrix_csr(bunch.data)) + assert sp.isspmatrix_csr(bunch.data) assert_equal(bunch.data.shape, (7532, 130107)) assert_equal(bunch.target.shape[0], 7532) assert_equal(bunch.data.dtype, np.float64) @@ -85,7 +85,7 @@ def test_20news_vectorized(): # test subset = all bunch = datasets.fetch_20newsgroups_vectorized(subset='all') - assert_true(sp.isspmatrix_csr(bunch.data)) + assert sp.isspmatrix_csr(bunch.data) assert_equal(bunch.data.shape, (11314 + 7532, 130107)) assert_equal(bunch.target.shape[0], 11314 + 7532) assert_equal(bunch.data.dtype, np.float64) diff --git a/sklearn/datasets/tests/test_base.py b/sklearn/datasets/tests/test_base.py index bf03c4e3075a6..fbe282b057644 100644 --- a/sklearn/datasets/tests/test_base.py +++ b/sklearn/datasets/tests/test_base.py @@ -74,7 +74,7 @@ def test_data_home(data_home): # get_data_home will point to a pre-existing folder data_home = get_data_home(data_home=data_home) assert_equal(data_home, data_home) - assert_true(os.path.exists(data_home)) + assert os.path.exists(data_home) # clear_data_home will delete both the content and the folder it-self clear_data_home(data_home=data_home) @@ -82,7 +82,7 @@ def test_data_home(data_home): # if the folder is missing it will be created again data_home = get_data_home(data_home=data_home) - assert_true(os.path.exists(data_home)) + assert os.path.exists(data_home) def test_default_empty_load_files(load_files_root): @@ -126,7 +126,7 @@ def test_load_sample_images(): res = load_sample_images() assert_equal(len(res.images), 2) assert_equal(len(res.filenames), 2) - assert_true(res.DESCR) + assert res.DESCR except ImportError: warnings.warn("Could not load 
sample images, PIL is not available.") @@ -166,9 +166,9 @@ def test_load_missing_sample_image_error(): def test_load_diabetes(): res = load_diabetes() assert_equal(res.data.shape, (442, 10)) - assert_true(res.target.size, 442) + assert res.target.size == 442 assert_equal(len(res.feature_names), 10) - assert_true(res.DESCR) + assert res.DESCR # test return_X_y option check_return_X_y(res, partial(load_diabetes)) @@ -179,9 +179,9 @@ def test_load_linnerud(): assert_equal(res.data.shape, (20, 3)) assert_equal(res.target.shape, (20, 3)) assert_equal(len(res.target_names), 3) - assert_true(res.DESCR) - assert_true(os.path.exists(res.data_filename)) - assert_true(os.path.exists(res.target_filename)) + assert res.DESCR + assert os.path.exists(res.data_filename) + assert os.path.exists(res.target_filename) # test return_X_y option check_return_X_y(res, partial(load_linnerud)) @@ -192,8 +192,8 @@ def test_load_iris(): assert_equal(res.data.shape, (150, 4)) assert_equal(res.target.size, 150) assert_equal(res.target_names.size, 3) - assert_true(res.DESCR) - assert_true(os.path.exists(res.filename)) + assert res.DESCR + assert os.path.exists(res.filename) # test return_X_y option check_return_X_y(res, partial(load_iris)) @@ -204,7 +204,7 @@ def test_load_wine(): assert_equal(res.data.shape, (178, 13)) assert_equal(res.target.size, 178) assert_equal(res.target_names.size, 3) - assert_true(res.DESCR) + assert res.DESCR # test return_X_y option check_return_X_y(res, partial(load_wine)) @@ -215,8 +215,8 @@ def test_load_breast_cancer(): assert_equal(res.data.shape, (569, 30)) assert_equal(res.target.size, 569) assert_equal(res.target_names.size, 2) - assert_true(res.DESCR) - assert_true(os.path.exists(res.filename)) + assert res.DESCR + assert os.path.exists(res.filename) # test return_X_y option check_return_X_y(res, partial(load_breast_cancer)) @@ -227,8 +227,8 @@ def test_load_boston(): assert_equal(res.data.shape, (506, 13)) assert_equal(res.target.size, 506) assert_equal(res.feature_names.size, 13) - assert_true(res.DESCR) - assert_true(os.path.exists(res.filename)) + assert res.DESCR + assert os.path.exists(res.filename) # test return_X_y option check_return_X_y(res, partial(load_boston)) @@ -265,4 +265,4 @@ def test_bunch_pickle_generated_with_0_16_and_read_with_0_17(): def test_bunch_dir(): # check that dir (important for autocomplete) shows attributes data = load_iris() - assert_true("data" in dir(data)) + assert "data" in dir(data) diff --git a/sklearn/datasets/tests/test_rcv1.py b/sklearn/datasets/tests/test_rcv1.py index de16b9afbf3d7..1b1952d81e2a9 100644 --- a/sklearn/datasets/tests/test_rcv1.py +++ b/sklearn/datasets/tests/test_rcv1.py @@ -27,8 +27,8 @@ def test_fetch_rcv1(): cat_list, s1 = data1.target_names.tolist(), data1.sample_id # test sparsity - assert_true(sp.issparse(X1)) - assert_true(sp.issparse(Y1)) + assert sp.issparse(X1) + assert sp.issparse(Y1) assert_equal(60915113, X1.data.size) assert_equal(2606875, Y1.data.size) diff --git a/sklearn/datasets/tests/test_samples_generator.py b/sklearn/datasets/tests/test_samples_generator.py index 1e1f110d9c41b..8567433a16920 100644 --- a/sklearn/datasets/tests/test_samples_generator.py +++ b/sklearn/datasets/tests/test_samples_generator.py @@ -160,7 +160,7 @@ def test_make_multilabel_classification_return_sequences(): if not allow_unlabeled: assert_equal(max([max(y) for y in Y]), 2) assert_equal(min([len(y) for y in Y]), min_length) - assert_true(max([len(y) for y in Y]) <= 3) + assert max([len(y) for y in Y]) <= 3 def
test_make_multilabel_classification_return_indicator(): @@ -170,7 +170,7 @@ def test_make_multilabel_classification_return_indicator(): allow_unlabeled=allow_unlabeled) assert_equal(X.shape, (25, 20), "X shape mismatch") assert_equal(Y.shape, (25, 3), "Y shape mismatch") - assert_true(np.all(np.sum(Y, axis=0) > min_length)) + assert np.all(np.sum(Y, axis=0) > min_length) # Also test return_distributions and return_indicator with True X2, Y2, p_c, p_w_c = make_multilabel_classification( @@ -193,7 +193,7 @@ def test_make_multilabel_classification_return_indicator_sparse(): allow_unlabeled=allow_unlabeled) assert_equal(X.shape, (25, 20), "X shape mismatch") assert_equal(Y.shape, (25, 3), "Y shape mismatch") - assert_true(sp.issparse(Y)) + assert sp.issparse(Y) def test_make_hastie_10_2(): diff --git a/sklearn/decomposition/tests/test_dict_learning.py b/sklearn/decomposition/tests/test_dict_learning.py index b5852f470187d..caeb0b9afe1e4 100644 --- a/sklearn/decomposition/tests/test_dict_learning.py +++ b/sklearn/decomposition/tests/test_dict_learning.py @@ -55,7 +55,7 @@ def test_dict_learning_shapes(): def test_dict_learning_overcomplete(): n_components = 12 dico = DictionaryLearning(n_components, random_state=0).fit(X) - assert_true(dico.components_.shape == (n_components, n_features)) + assert dico.components_.shape == (n_components, n_features) # positive lars deprecated 0.22 @@ -83,13 +83,13 @@ def test_dict_learning_positivity(transform_algorithm, positive_code=positive_code, positive_dict=positive_dict).fit(X) code = dico.transform(X) if positive_dict: - assert_true((dico.components_ >= 0).all()) + assert (dico.components_ >= 0).all() else: - assert_true((dico.components_ < 0).any()) + assert (dico.components_ < 0).any() if positive_code: - assert_true((code >= 0).all()) + assert (code >= 0).all() else: - assert_true((code < 0).any()) + assert (code < 0).any() def test_dict_learning_reconstruction(): @@ -137,7 +137,7 @@ def test_dict_learning_nonzero_coefs(): dico = DictionaryLearning(n_components, transform_algorithm='lars', transform_n_nonzero_coefs=3, random_state=0) code = dico.fit(X).transform(X[np.newaxis, 1]) - assert_true(len(np.flatnonzero(code)) == 3) + assert len(np.flatnonzero(code)) == 3 dico.set_params(transform_algorithm='omp') code = dico.transform(X[np.newaxis, 1]) @@ -199,26 +199,26 @@ def test_dict_learning_online_positivity(transform_algorithm, positive_code=positive_code, positive_dict=positive_dict).fit(X) code = dico.transform(X) if positive_dict: - assert_true((dico.components_ >= 0).all()) + assert (dico.components_ >= 0).all() else: - assert_true((dico.components_ < 0).any()) + assert (dico.components_ < 0).any() if positive_code: - assert_true((code >= 0).all()) + assert (code >= 0).all() else: - assert_true((code < 0).any()) + assert (code < 0).any() code, dictionary = dict_learning_online(X, n_components=n_components, alpha=1, random_state=rng, positive_dict=positive_dict, positive_code=positive_code) if positive_dict: - assert_true((dictionary >= 0).all()) + assert (dictionary >= 0).all() else: - assert_true((dictionary < 0).any()) + assert (dictionary < 0).any() if positive_code: - assert_true((code >= 0).all()) + assert (code >= 0).all() else: - assert_true((code < 0).any()) + assert (code < 0).any() def test_dict_learning_online_verbosity(): @@ -243,21 +243,21 @@ def test_dict_learning_online_verbosity(): finally: sys.stdout = old_stdout - assert_true(dico.components_.shape == (n_components, n_features)) + assert dico.components_.shape == (n_components, 
n_features) def test_dict_learning_online_estimator_shapes(): n_components = 5 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0) dico.fit(X) - assert_true(dico.components_.shape == (n_components, n_features)) + assert dico.components_.shape == (n_components, n_features) def test_dict_learning_online_overcomplete(): n_components = 12 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0).fit(X) - assert_true(dico.components_.shape == (n_components, n_features)) + assert dico.components_.shape == (n_components, n_features) def test_dict_learning_online_initialization(): @@ -324,9 +324,9 @@ def test_sparse_encode_positivity(positive): for algo in ('lasso_lars', 'lasso_cd', 'lars', 'threshold'): code = sparse_encode(X, V, algorithm=algo, positive=positive) if positive: - assert_true((code >= 0).all()) + assert (code >= 0).all() else: - assert_true((code < 0).any()) + assert (code < 0).any() try: sparse_encode(X, V, algorithm='omp', positive=positive) @@ -353,7 +353,7 @@ def test_sparse_encode_error(): V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = sparse_encode(X, V, alpha=0.001) - assert_true(not np.all(code == 0)) + assert not np.all(code == 0) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) @@ -380,7 +380,7 @@ def test_sparse_coder_estimator(): V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars', transform_alpha=0.001).transform(X) - assert_true(not np.all(code == 0)) + assert not np.all(code == 0) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) diff --git a/sklearn/decomposition/tests/test_fastica.py b/sklearn/decomposition/tests/test_fastica.py index b237f4a15def5..08ff5737553a1 100644 --- a/sklearn/decomposition/tests/test_fastica.py +++ b/sklearn/decomposition/tests/test_fastica.py @@ -140,7 +140,7 @@ def test_fastica_nowhiten(): # test for issue #697 ica = FastICA(n_components=1, whiten=False, random_state=0) assert_warns(UserWarning, ica.fit, m) - assert_true(hasattr(ica, 'mixing_')) + assert hasattr(ica, 'mixing_') def test_fastica_convergence_fail(): diff --git a/sklearn/decomposition/tests/test_nmf.py b/sklearn/decomposition/tests/test_nmf.py index 87fb4ef8c30b2..695e101cec5dd 100644 --- a/sklearn/decomposition/tests/test_nmf.py +++ b/sklearn/decomposition/tests/test_nmf.py @@ -64,7 +64,7 @@ def test_initialize_close(): W, H = nmf._initialize_nmf(A, 10, init='nndsvd') error = linalg.norm(np.dot(W, H) - A) sdev = linalg.norm(A - A.mean()) - assert_true(error <= sdev) + assert error <= sdev def test_initialize_variants(): diff --git a/sklearn/decomposition/tests/test_online_lda.py b/sklearn/decomposition/tests/test_online_lda.py index 0abc2efe75ec2..35aa882dc943d 100644 --- a/sklearn/decomposition/tests/test_online_lda.py +++ b/sklearn/decomposition/tests/test_online_lda.py @@ -64,7 +64,7 @@ def test_lda_fit_batch(): for component in lda.components_: # Find top 3 words in each LDA component top_idx = set(component.argsort()[-3:][::-1]) - assert_true(tuple(sorted(top_idx)) in correct_idx_grps) + assert tuple(sorted(top_idx)) in correct_idx_grps def test_lda_fit_online(): @@ -80,7 +80,7 @@ def test_lda_fit_online(): for component in lda.components_: # Find top 3 words in each LDA component top_idx = set(component.argsort()[-3:][::-1]) - assert_true(tuple(sorted(top_idx)) in correct_idx_grps) + assert tuple(sorted(top_idx)) in correct_idx_grps def test_lda_partial_fit(): @@ -97,7 +97,7 @@ def 
test_lda_partial_fit(): correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) - assert_true(tuple(sorted(top_idx)) in correct_idx_grps) + assert tuple(sorted(top_idx)) in correct_idx_grps def test_lda_dense_input(): @@ -112,7 +112,7 @@ def test_lda_dense_input(): for component in lda.components_: # Find top 3 words in each LDA component top_idx = set(component.argsort()[-3:][::-1]) - assert_true(tuple(sorted(top_idx)) in correct_idx_grps) + assert tuple(sorted(top_idx)) in correct_idx_grps def test_lda_transform(): @@ -124,7 +124,7 @@ def test_lda_transform(): lda = LatentDirichletAllocation(n_components=n_components, random_state=rng) X_trans = lda.fit_transform(X) - assert_true((X_trans > 0.0).any()) + assert (X_trans > 0.0).any() assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0])) @@ -219,7 +219,7 @@ def test_lda_multi_jobs(method): correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) - assert_true(tuple(sorted(top_idx)) in correct_idx_grps) + assert tuple(sorted(top_idx)) in correct_idx_grps @if_safe_multiprocessing_with_blas @@ -236,7 +236,7 @@ def test_lda_partial_fit_multi_jobs(): correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) - assert_true(tuple(sorted(top_idx)) in correct_idx_grps) + assert tuple(sorted(top_idx)) in correct_idx_grps def test_lda_preplexity_mismatch(): diff --git a/sklearn/decomposition/tests/test_pca.py b/sklearn/decomposition/tests/test_pca.py index c852e4bed0e58..faed4f567d730 100644 --- a/sklearn/decomposition/tests/test_pca.py +++ b/sklearn/decomposition/tests/test_pca.py @@ -586,7 +586,7 @@ def test_pca_score2(): pca = PCA(n_components=2, whiten=True, svd_solver=solver) pca.fit(X) ll2 = pca.score(X) - assert_true(ll1 > ll2) + assert ll1 > ll2 def test_pca_score3(): @@ -603,7 +603,7 @@ def test_pca_score3(): pca.fit(Xl) ll[k] = pca.score(Xt) - assert_true(ll.argmax() == 1) + assert ll.argmax() == 1 def test_pca_score_with_different_solvers(): diff --git a/sklearn/decomposition/tests/test_sparse_pca.py b/sklearn/decomposition/tests/test_sparse_pca.py index e02d077f50e8b..45bef740fa7fc 100644 --- a/sklearn/decomposition/tests/test_sparse_pca.py +++ b/sklearn/decomposition/tests/test_sparse_pca.py @@ -93,7 +93,7 @@ def test_fit_transform_parallel(norm_comp): spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha, random_state=0, normalize_components=norm_comp).fit(Y) U2 = spca.transform(Y) - assert_true(not np.all(spca_lars.components_ == 0)) + assert not np.all(spca_lars.components_ == 0) assert_array_almost_equal(U1, U2) @@ -186,7 +186,7 @@ def test_mini_batch_fit_transform(norm_comp): random_state=0, normalize_components=norm_comp) U2 = spca.fit(Y).transform(Y) - assert_true(not np.all(spca_lars.components_ == 0)) + assert not np.all(spca_lars.components_ == 0) assert_array_almost_equal(U1, U2) # Test that CD gives similar results spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha, diff --git a/sklearn/ensemble/tests/test_bagging.py b/sklearn/ensemble/tests/test_bagging.py index 7ada3467a8892..b9553071be87d 100644 --- a/sklearn/ensemble/tests/test_bagging.py +++ b/sklearn/ensemble/tests/test_bagging.py @@ -548,19 +548,19 @@ def test_base_estimator(): n_jobs=3, random_state=0).fit(X_train, y_train) - assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) + assert 
isinstance(ensemble.base_estimator_, DecisionTreeClassifier) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) - assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) + assert isinstance(ensemble.base_estimator_, DecisionTreeClassifier) ensemble = BaggingClassifier(Perceptron(tol=1e-3), n_jobs=3, random_state=0).fit(X_train, y_train) - assert_true(isinstance(ensemble.base_estimator_, Perceptron)) + assert isinstance(ensemble.base_estimator_, Perceptron) # Regression X_train, X_test, y_train, y_test = train_test_split(boston.data, @@ -571,18 +571,18 @@ def test_base_estimator(): n_jobs=3, random_state=0).fit(X_train, y_train) - assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) + assert isinstance(ensemble.base_estimator_, DecisionTreeRegressor) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(X_train, y_train) - assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) + assert isinstance(ensemble.base_estimator_, DecisionTreeRegressor) ensemble = BaggingRegressor(SVR(gamma='scale'), n_jobs=3, random_state=0).fit(X_train, y_train) - assert_true(isinstance(ensemble.base_estimator_, SVR)) + assert isinstance(ensemble.base_estimator_, SVR) def test_bagging_with_pipeline(): diff --git a/sklearn/ensemble/tests/test_base.py b/sklearn/ensemble/tests/test_base.py index f2a87d8fb559f..e7a02c50e0806 100644 --- a/sklearn/ensemble/tests/test_base.py +++ b/sklearn/ensemble/tests/test_base.py @@ -40,10 +40,10 @@ def test_base(): assert_equal(3, len(ensemble)) assert_equal(3, len(ensemble.estimators_)) - assert_true(isinstance(ensemble[0], Perceptron)) + assert isinstance(ensemble[0], Perceptron) assert_equal(ensemble[0].random_state, None) - assert_true(isinstance(ensemble[1].random_state, int)) - assert_true(isinstance(ensemble[2].random_state, int)) + assert isinstance(ensemble[1].random_state, int) + assert isinstance(ensemble[2].random_state, int) assert_not_equal(ensemble[1].random_state, ensemble[2].random_state) np_int_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3), @@ -86,11 +86,11 @@ def test_set_random_states(): assert_equal(clf1.random_state, None) # check random_state is None still sets _set_random_states(clf1, None) - assert_true(isinstance(clf1.random_state, int)) + assert isinstance(clf1.random_state, int) # check random_state fixes results in consistent initialisation _set_random_states(clf1, 3) - assert_true(isinstance(clf1.random_state, int)) + assert isinstance(clf1.random_state, int) clf2 = Perceptron(tol=1e-3, random_state=None) _set_random_states(clf2, 3) assert_equal(clf1.random_state, clf2.random_state) @@ -104,8 +104,8 @@ def make_steps(): est1 = Pipeline(make_steps()) _set_random_states(est1, 3) - assert_true(isinstance(est1.steps[0][1].estimator.random_state, int)) - assert_true(isinstance(est1.steps[1][1].random_state, int)) + assert isinstance(est1.steps[0][1].estimator.random_state, int) + assert isinstance(est1.steps[1][1].random_state, int) assert_not_equal(est1.get_params()['sel__estimator__random_state'], est1.get_params()['clf__random_state']) diff --git a/sklearn/ensemble/tests/test_forest.py b/sklearn/ensemble/tests/test_forest.py index b601ba206b4d4..4735440ea81ea 100644 --- a/sklearn/ensemble/tests/test_forest.py +++ b/sklearn/ensemble/tests/test_forest.py @@ -248,7 +248,7 @@ def check_importances(name, criterion, dtype, tolerance): est = ForestEstimator(n_estimators=10, random_state=0, 
criterion=criterion) est.fit(X, y, sample_weight=sample_weight) importances = est.feature_importances_ - assert_true(np.all(importances >= 0.0)) + assert np.all(importances >= 0.0) for scale in [0.5, 100]: est = ForestEstimator(n_estimators=10, random_state=0, @@ -1163,7 +1163,7 @@ def check_warm_start_oob(name): clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15) clf_2.fit(X, y) - assert_true(hasattr(clf_2, 'oob_score_')) + assert hasattr(clf_2, 'oob_score_') assert_equal(clf.oob_score_, clf_2.oob_score_) # Test that oob_score is computed even if we don't need to train @@ -1171,7 +1171,7 @@ def check_warm_start_oob(name): clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True, random_state=1, bootstrap=True, oob_score=False) clf_3.fit(X, y) - assert_true(not(hasattr(clf_3, 'oob_score_'))) + assert not hasattr(clf_3, 'oob_score_') clf_3.set_params(oob_score=True) ignore_warnings(clf_3.fit)(X, y) diff --git a/sklearn/ensemble/tests/test_gradient_boosting.py b/sklearn/ensemble/tests/test_gradient_boosting.py index e407ca8ef2554..f237695901f59 100644 --- a/sklearn/ensemble/tests/test_gradient_boosting.py +++ b/sklearn/ensemble/tests/test_gradient_boosting.py @@ -72,7 +72,7 @@ def check_classification_toy(presort, loss): assert_equal(10, len(clf.estimators_)) deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:]) - assert_true(np.any(deviance_decrease >= 0.0)) + assert np.any(deviance_decrease >= 0.0) leaves = clf.apply(X) assert_equal(leaves.shape, (6, 10, 1)) @@ -338,7 +338,7 @@ def test_feature_importances(): min_samples_split=2, random_state=1, presort=presort) clf.fit(X, y) - assert_true(hasattr(clf, 'feature_importances_')) + assert hasattr(clf, 'feature_importances_') def test_probability_log(): @@ -352,8 +352,8 @@ def test_probability_log(): # check if probabilities are in [0, 1]. y_proba = clf.predict_proba(T) - assert_true(np.all(y_proba >= 0.0)) - assert_true(np.all(y_proba <= 1.0)) + assert np.all(y_proba >= 0.0) + assert np.all(y_proba <= 1.0) # derive predictions from probabilities y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0) @@ -449,7 +449,7 @@ def test_max_feature_regression(): max_features=2, random_state=1) gbrt.fit(X_train, y_train) deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test)) - assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance) + assert deviance < 0.5, "GB failed with deviance %.4f" % deviance @pytest.mark.network @@ -581,7 +581,7 @@ def test_staged_functions_defensive(Estimator): with warnings.catch_warnings(record=True): staged_result = list(staged_func(X)) staged_result[1][:] = 0 - assert_true(np.all(staged_result[0] != 0)) + assert np.all(staged_result[0] != 0) def test_serialization(): @@ -1158,8 +1158,8 @@ def test_probability_exponential(): # check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T) - assert_true(np.all(y_proba >= 0.0)) - assert_true(np.all(y_proba <= 1.0)) + assert np.all(y_proba >= 0.0) + assert np.all(y_proba <= 1.0) score = clf.decision_function(T).ravel() assert_array_almost_equal(y_proba[:, 1], 1.0 / (1.0 + np.exp(-2 * score))) diff --git a/sklearn/ensemble/tests/test_voting_classifier.py b/sklearn/ensemble/tests/test_voting_classifier.py index 2f386fcf4282f..c414a96dc49e1 100644 --- a/sklearn/ensemble/tests/test_voting_classifier.py +++ b/sklearn/ensemble/tests/test_voting_classifier.py @@ -307,7 +307,7 @@ def test_sample_weight_kwargs(): class MockClassifier(BaseEstimator, ClassifierMixin): """Mock Classifier to check that sample_weight is received as kwargs""" def fit(self, X, y, *args, **sample_weight): - assert_true('sample_weight' in sample_weight) + assert 'sample_weight' in sample_weight clf = MockClassifier() eclf = VotingClassifier(estimators=[('mock', clf)], voting='soft') @@ -326,13 +326,13 @@ def test_set_params(): clf3 = GaussianNB() eclf1 = VotingClassifier([('lr', clf1), ('rf', clf2)], voting='soft', weights=[1, 2]) - assert_true('lr' in eclf1.named_estimators) - assert_true(eclf1.named_estimators.lr is eclf1.estimators[0][1]) - assert_true(eclf1.named_estimators.lr is eclf1.named_estimators['lr']) + assert 'lr' in eclf1.named_estimators + assert eclf1.named_estimators.lr is eclf1.estimators[0][1] + assert eclf1.named_estimators.lr is eclf1.named_estimators['lr'] eclf1.fit(X, y) - assert_true('lr' in eclf1.named_estimators_) - assert_true(eclf1.named_estimators_.lr is eclf1.estimators_[0]) - assert_true(eclf1.named_estimators_.lr is eclf1.named_estimators_['lr']) + assert 'lr' in eclf1.named_estimators_ + assert eclf1.named_estimators_.lr is eclf1.estimators_[0] + assert eclf1.named_estimators_.lr is eclf1.named_estimators_['lr'] eclf2 = VotingClassifier([('lr', clf1), ('nb', clf3)], voting='soft', weights=[1, 2]) @@ -347,8 +347,8 @@ def test_set_params(): eclf1.set_params(lr__C=10.0) eclf2.set_params(nb__max_depth=5) - assert_true(eclf1.estimators[0][1].get_params()['C'] == 10.0) - assert_true(eclf2.estimators[1][1].get_params()['max_depth'] == 5) + assert eclf1.estimators[0][1].get_params()['C'] == 10.0 + assert eclf2.estimators[1][1].get_params()['max_depth'] == 5 assert_equal(eclf1.get_params()["lr__C"], eclf1.get_params()["lr"].get_params()['C']) @@ -372,11 +372,11 @@ def test_set_estimator_none(): eclf2.set_params(rf=None).fit(X, y) assert_array_equal(eclf1.predict(X), eclf2.predict(X)) - assert_true(dict(eclf2.estimators)["rf"] is None) - assert_true(len(eclf2.estimators_) == 2) + assert dict(eclf2.estimators)["rf"] is None + assert len(eclf2.estimators_) == 2 assert_true(all([not isinstance(est, RandomForestClassifier) for est in eclf2.estimators_])) - assert_true(eclf2.get_params()["rf"] is None) + assert eclf2.get_params()["rf"] is None eclf1.set_params(voting='soft').fit(X, y) eclf2.set_params(voting='soft').fit(X, y) diff --git a/sklearn/ensemble/tests/test_weight_boosting.py b/sklearn/ensemble/tests/test_weight_boosting.py index e6a6c9d36f442..a613e876c5de0 100755 --- a/sklearn/ensemble/tests/test_weight_boosting.py +++ b/sklearn/ensemble/tests/test_weight_boosting.py @@ -68,7 +68,7 @@ def predict_proba(self, X): samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs)) assert_array_equal(samme_proba.shape, probs.shape) - assert_true(np.isfinite(samme_proba).all()) + assert np.isfinite(samme_proba).all() # Make sure that the correct elements come out as smallest -- # `_samme_proba` should 
preserve the ordering in each example. @@ -146,7 +146,7 @@ def test_boston(): assert score > 0.85 # Check we used multiple estimators - assert_true(len(reg.estimators_) > 1) + assert len(reg.estimators_) > 1 # Check for distinct random states (see issue #7408) assert_equal(len(set(est.random_state for est in reg.estimators_)), len(reg.estimators_)) diff --git a/sklearn/feature_extraction/tests/test_dict_vectorizer.py b/sklearn/feature_extraction/tests/test_dict_vectorizer.py index 66d678421e90b..d5171cff46169 100644 --- a/sklearn/feature_extraction/tests/test_dict_vectorizer.py +++ b/sklearn/feature_extraction/tests/test_dict_vectorizer.py @@ -75,7 +75,7 @@ def test_one_of_k(): assert_equal(D_out[0], {"version=1": 1, "ham": 2}) names = v.get_feature_names() - assert_true("version=2" in names) + assert "version=2" in names assert_false("version" in names) diff --git a/sklearn/feature_extraction/tests/test_feature_hasher.py b/sklearn/feature_extraction/tests/test_feature_hasher.py index 77a21ff4364a7..3acc3cb74f335 100644 --- a/sklearn/feature_extraction/tests/test_feature_hasher.py +++ b/sklearn/feature_extraction/tests/test_feature_hasher.py @@ -39,7 +39,7 @@ def test_feature_hasher_strings(): assert_equal(X.shape[0], len(raw_X)) assert_equal(X.shape[1], n_features) - assert_true(np.all(X.data > 0)) + assert np.all(X.data > 0) assert_equal(X[0].sum(), 4) assert_equal(X[1].sum(), 3) @@ -158,13 +158,13 @@ def test_hasher_negative(): X = [{"foo": 2, "bar": -4, "baz": -1}.items()] Xt = FeatureHasher(alternate_sign=False, non_negative=False, input_type="pair").fit_transform(X) - assert_true(Xt.data.min() < 0 and Xt.data.max() > 0) + assert Xt.data.min() < 0 and Xt.data.max() > 0 Xt = FeatureHasher(alternate_sign=False, non_negative=True, input_type="pair").fit_transform(X) - assert_true(Xt.data.min() > 0) + assert Xt.data.min() > 0 Xt = FeatureHasher(alternate_sign=True, non_negative=False, input_type="pair").fit_transform(X) - assert_true(Xt.data.min() < 0 and Xt.data.max() > 0) + assert Xt.data.min() < 0 and Xt.data.max() > 0 Xt = FeatureHasher(alternate_sign=True, non_negative=True, input_type="pair").fit_transform(X) - assert_true(Xt.data.min() > 0) + assert Xt.data.min() > 0 diff --git a/sklearn/feature_extraction/tests/test_image.py b/sklearn/feature_extraction/tests/test_image.py index 516c18c2b9281..439fe7f1014af 100644 --- a/sklearn/feature_extraction/tests/test_image.py +++ b/sklearn/feature_extraction/tests/test_image.py @@ -38,22 +38,22 @@ def test_grid_to_graph(): mask[-roi_size:, -roi_size:] = True mask = mask.reshape(size ** 2) A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray) - assert_true(connected_components(A)[0] == 2) + assert connected_components(A)[0] == 2 # Checking that the function works whatever the type of mask is mask = np.ones((size, size), dtype=np.int16) A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask) - assert_true(connected_components(A)[0] == 1) + assert connected_components(A)[0] == 1 # Checking dtype of the graph mask = np.ones((size, size)) A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool) - assert_true(A.dtype == np.bool) + assert A.dtype == np.bool A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int) - assert_true(A.dtype == np.int) + assert A.dtype == np.int A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64) - assert_true(A.dtype == np.float64) + assert A.dtype == np.float64 @ignore_warnings(category=DeprecationWarning) # scipy deprecation inside face @@ 
-214,7 +214,7 @@ def test_reconstruct_patches_perfect_color(): def test_patch_extractor_fit(): faces = face_collection extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0) - assert_true(extr == extr.fit(faces)) + assert extr == extr.fit(faces) def test_patch_extractor_max_patches(): @@ -227,7 +227,7 @@ def test_patch_extractor_max_patches(): extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches, random_state=0) patches = extr.transform(faces) - assert_true(patches.shape == (expected_n_patches, p_h, p_w)) + assert patches.shape == (expected_n_patches, p_h, p_w) max_patches = 0.5 expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1) @@ -235,7 +235,7 @@ def test_patch_extractor_max_patches(): extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches, random_state=0) patches = extr.transform(faces) - assert_true(patches.shape == (expected_n_patches, p_h, p_w)) + assert patches.shape == (expected_n_patches, p_h, p_w) def test_patch_extractor_max_patches_default(): @@ -252,7 +252,7 @@ def test_patch_extractor_all_patches(): expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) patches = extr.transform(faces) - assert_true(patches.shape == (expected_n_patches, p_h, p_w)) + assert patches.shape == (expected_n_patches, p_h, p_w) def test_patch_extractor_color(): @@ -262,7 +262,7 @@ def test_patch_extractor_color(): expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) patches = extr.transform(faces) - assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3)) + assert patches.shape == (expected_n_patches, p_h, p_w, 3) def test_extract_patches_strided(): @@ -303,7 +303,7 @@ def test_extract_patches_strided(): ndim = len(image_shape) - assert_true(patches.shape[:ndim] == expected_view) + assert patches.shape[:ndim] == expected_view last_patch_slices = tuple(slice(i, i + j, None) for i, j in zip(last_patch, patch_size)) assert_true((patches[(-1, None, None) * ndim] == diff --git a/sklearn/feature_extraction/tests/test_text.py b/sklearn/feature_extraction/tests/test_text.py index d6b1b2b64b4c0..c674472d8828a 100644 --- a/sklearn/feature_extraction/tests/test_text.py +++ b/sklearn/feature_extraction/tests/test_text.py @@ -333,7 +333,7 @@ def test_tf_idf_smoothing(): [1, 0, 0]] tr = TfidfTransformer(smooth_idf=True, norm='l2') tfidf = tr.fit_transform(X).toarray() - assert_true((tfidf >= 0).all()) + assert (tfidf >= 0).all() # check normalization assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.]) @@ -344,7 +344,7 @@ def test_tf_idf_smoothing(): [1, 0, 0]] tr = TfidfTransformer(smooth_idf=True, norm='l2') tfidf = tr.fit_transform(X).toarray() - assert_true((tfidf >= 0).all()) + assert (tfidf >= 0).all() def test_tfidf_no_smoothing(): @@ -353,7 +353,7 @@ def test_tfidf_no_smoothing(): [1, 0, 0]] tr = TfidfTransformer(smooth_idf=False, norm='l2') tfidf = tr.fit_transform(X).toarray() - assert_true((tfidf >= 0).all()) + assert (tfidf >= 0).all() # check normalization assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.]) @@ -497,11 +497,11 @@ def test_tfidf_vectorizer_setters(): tv.norm = 'l1' assert_equal(tv._tfidf.norm, 'l1') tv.use_idf = True - assert_true(tv._tfidf.use_idf) + assert tv._tfidf.use_idf tv.smooth_idf = True - assert_true(tv._tfidf.smooth_idf) + assert tv._tfidf.smooth_idf tv.sublinear_tf = True - assert_true(tv._tfidf.sublinear_tf) + assert 
tv._tfidf.sublinear_tf @fails_if_pypy @@ -515,10 +515,10 @@ def test_hashing_vectorizer(): # By default the hashed values receive a random sign and l2 normalization # makes the feature values bounded - assert_true(np.min(X.data) > -1) - assert_true(np.min(X.data) < 0) - assert_true(np.max(X.data) > 0) - assert_true(np.max(X.data) < 1) + assert np.min(X.data) > -1 + assert np.min(X.data) < 0 + assert np.max(X.data) > 0 + assert np.max(X.data) < 1 # Check that the rows are normalized for i in range(X.shape[0]): @@ -532,12 +532,12 @@ def test_hashing_vectorizer(): # ngrams generate more non zeros ngrams_nnz = X.nnz - assert_true(ngrams_nnz > token_nnz) - assert_true(ngrams_nnz < 2 * token_nnz) + assert ngrams_nnz > token_nnz + assert ngrams_nnz < 2 * token_nnz # makes the feature values bounded - assert_true(np.min(X.data) > 0) - assert_true(np.max(X.data) < 1) + assert np.min(X.data) > 0 + assert np.max(X.data) < 1 # Check that the rows are normalized for i in range(X.shape[0]): @@ -573,7 +573,7 @@ def test_feature_names(): feature_names = cv.get_feature_names() assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad', 'sparkling', 'tomato', 'water'], feature_names) - assert_true(cv.fixed_vocabulary_) + assert cv.fixed_vocabulary_ for idx, name in enumerate(feature_names): assert_equal(idx, cv.vocabulary_.get(name)) @@ -622,7 +622,7 @@ def test_vectorizer_max_df(): test_data = ['abc', 'dea', 'eat'] vect = CountVectorizer(analyzer='char', max_df=1.0) vect.fit(test_data) - assert_true('a' in vect.vocabulary_.keys()) + assert 'a' in vect.vocabulary_.keys() assert_equal(len(vect.vocabulary_.keys()), 6) assert_equal(len(vect.stop_words_), 0) @@ -630,14 +630,14 @@ def test_vectorizer_max_df(): vect.fit(test_data) assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain - assert_true('a' in vect.stop_words_) + assert 'a' in vect.stop_words_ assert_equal(len(vect.stop_words_), 2) vect.max_df = 1 vect.fit(test_data) assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain - assert_true('a' in vect.stop_words_) + assert 'a' in vect.stop_words_ assert_equal(len(vect.stop_words_), 2) @@ -645,7 +645,7 @@ def test_vectorizer_min_df(): test_data = ['abc', 'dea', 'eat'] vect = CountVectorizer(analyzer='char', min_df=1) vect.fit(test_data) - assert_true('a' in vect.vocabulary_.keys()) + assert 'a' in vect.vocabulary_.keys() assert_equal(len(vect.vocabulary_.keys()), 6) assert_equal(len(vect.stop_words_), 0) @@ -653,14 +653,14 @@ def test_vectorizer_min_df(): vect.fit(test_data) assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain - assert_true('c' in vect.stop_words_) + assert 'c' in vect.stop_words_ assert_equal(len(vect.stop_words_), 4) vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4 vect.fit(test_data) assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains - assert_true('c' in vect.stop_words_) + assert 'c' in vect.stop_words_ assert_equal(len(vect.stop_words_), 5) @@ -871,7 +871,7 @@ def test_tfidf_vectorizer_with_fixed_vocabulary(): X_1 = vect.fit_transform(ALL_FOOD_DOCS) X_2 = vect.transform(ALL_FOOD_DOCS) assert_array_almost_equal(X_1.toarray(), X_2.toarray()) - assert_true(vect.fixed_vocabulary_) + assert vect.fixed_vocabulary_ def test_pickling_vectorizer(): @@ -1019,7 +1019,7 @@ def func(): def 
test_tfidfvectorizer_binary(): # Non-regression test: TfidfVectorizer used to ignore its "binary" param. v = TfidfVectorizer(binary=True, use_idf=False, norm=None) - assert_true(v.binary) + assert v.binary X = v.fit_transform(['hello world', 'hello hello']).toarray() assert_array_equal(X.ravel(), [1, 1, 1, 0]) diff --git a/sklearn/feature_selection/tests/test_feature_select.py b/sklearn/feature_selection/tests/test_feature_select.py index 14e621473090a..90052db47a63c 100644 --- a/sklearn/feature_selection/tests/test_feature_select.py +++ b/sklearn/feature_selection/tests/test_feature_select.py @@ -40,8 +40,8 @@ def test_f_oneway_vs_scipy_stats(): X2 = 1 + rng.randn(10, 3) f, pv = stats.f_oneway(X1, X2) f2, pv2 = f_oneway(X1, X2) - assert_true(np.allclose(f, f2)) - assert_true(np.allclose(pv, pv2)) + assert np.allclose(f, f2) + assert np.allclose(pv, pv2) def test_f_oneway_ints(): @@ -69,11 +69,11 @@ def test_f_classif(): F, pv = f_classif(X, y) F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y) - assert_true((F > 0).all()) - assert_true((pv > 0).all()) - assert_true((pv < 1).all()) - assert_true((pv[:5] < 0.05).all()) - assert_true((pv[5:] > 1.e-4).all()) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.e-4).all() assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) @@ -85,11 +85,11 @@ def test_f_regression(): shuffle=False, random_state=0) F, pv = f_regression(X, y) - assert_true((F > 0).all()) - assert_true((pv > 0).all()) - assert_true((pv < 1).all()) - assert_true((pv[:5] < 0.05).all()) - assert_true((pv[5:] > 1.e-4).all()) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.e-4).all() # with centering, compare with sparse F, pv = f_regression(X, y, center=True) @@ -144,11 +144,11 @@ def test_f_classif_multi_class(): class_sep=10, shuffle=False, random_state=0) F, pv = f_classif(X, y) - assert_true((F > 0).all()) - assert_true((pv > 0).all()) - assert_true((pv < 1).all()) - assert_true((pv[:5] < 0.05).all()) - assert_true((pv[5:] > 1.e-4).all()) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.e-4).all() def test_select_percentile_classif(): @@ -193,7 +193,7 @@ def test_select_percentile_classif_sparse(): assert_array_equal(support, gtruth) X_r2inv = univariate_filter.inverse_transform(X_r2) - assert_true(sparse.issparse(X_r2inv)) + assert sparse.issparse(X_r2inv) support_mask = safe_mask(X_r2inv, support) assert_equal(X_r2inv.shape, X.shape) assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray()) diff --git a/sklearn/feature_selection/tests/test_from_model.py b/sklearn/feature_selection/tests/test_from_model.py index dfe0904c57a01..868f7e5445aa4 100644 --- a/sklearn/feature_selection/tests/test_from_model.py +++ b/sklearn/feature_selection/tests/test_from_model.py @@ -42,7 +42,7 @@ def test_input_estimator_unchanged(): est = RandomForestClassifier() transformer = SelectFromModel(estimator=est) transformer.fit(data, y) - assert_true(transformer.estimator is est) + assert transformer.estimator is est @pytest.mark.parametrize( @@ -169,7 +169,7 @@ def test_feature_importances(): for threshold, func in zip(["mean", "median"], [np.mean, np.median]): transformer = SelectFromModel(estimator=est, threshold=threshold) transformer.fit(X, y) - assert_true(hasattr(transformer.estimator_, 'feature_importances_')) + assert 
hasattr(transformer.estimator_, 'feature_importances_') X_new = transformer.transform(X) assert_less(X_new.shape[1], X.shape[1]) @@ -233,7 +233,7 @@ def test_2d_coef(): threshold=threshold, norm_order=order) transformer.fit(X, y) - assert_true(hasattr(transformer.estimator_, 'coef_')) + assert hasattr(transformer.estimator_, 'coef_') X_new = transformer.transform(X) assert_less(X_new.shape[1], X.shape[1]) @@ -257,7 +257,7 @@ def test_partial_fit(): transformer.partial_fit(data, y, classes=np.unique(y)) new_model = transformer.estimator_ - assert_true(old_model is new_model) + assert old_model is new_model X_transform = transformer.transform(data) transformer.fit(np.vstack((data, data)), np.concatenate((y, y))) diff --git a/sklearn/feature_selection/tests/test_rfe.py b/sklearn/feature_selection/tests/test_rfe.py index 41b4a9e767c1b..60dfad7c24512 100644 --- a/sklearn/feature_selection/tests/test_rfe.py +++ b/sklearn/feature_selection/tests/test_rfe.py @@ -33,7 +33,7 @@ def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): - assert_true(len(X) == len(Y)) + assert len(X) == len(Y) self.coef_ = np.ones(X.shape[1], dtype=np.float64) return self diff --git a/sklearn/gaussian_process/tests/test_gpr.py b/sklearn/gaussian_process/tests/test_gpr.py index 18f82b00fb7f1..f16d480c4ea2c 100644 --- a/sklearn/gaussian_process/tests/test_gpr.py +++ b/sklearn/gaussian_process/tests/test_gpr.py @@ -349,12 +349,12 @@ def test_K_inv_reset(kernel): # Test that self._K_inv is reset after a new fit gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) - assert_true(hasattr(gpr, '_K_inv')) - assert_true(gpr._K_inv is None) + assert hasattr(gpr, '_K_inv') + assert gpr._K_inv is None gpr.predict(X, return_std=True) - assert_true(gpr._K_inv is not None) + assert gpr._K_inv is not None gpr.fit(X2, y2) - assert_true(gpr._K_inv is None) + assert gpr._K_inv is None gpr.predict(X2, return_std=True) gpr2 = GaussianProcessRegressor(kernel=kernel).fit(X2, y2) gpr2.predict(X2, return_std=True) diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py index 1001300cf643f..cba59d788185d 100644 --- a/sklearn/linear_model/tests/test_coordinate_descent.py +++ b/sklearn/linear_model/tests/test_coordinate_descent.py @@ -200,13 +200,13 @@ def test_lasso_cv_positive_constraint(): clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1) clf_unconstrained.fit(X, y) - assert_true(min(clf_unconstrained.coef_) < 0) + assert min(clf_unconstrained.coef_) < 0 # On same data, constrained fit has non-negative coefficients clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, positive=True, cv=2, n_jobs=1) clf_constrained.fit(X, y) - assert_true(min(clf_constrained.coef_) >= 0) + assert min(clf_constrained.coef_) >= 0 def test_lasso_path_return_models_vs_new_return_gives_same_coefficients(): @@ -329,11 +329,11 @@ def test_lasso_positive_constraint(): lasso = Lasso(alpha=0.1, max_iter=1000, positive=True) lasso.fit(X, y) - assert_true(min(lasso.coef_) >= 0) + assert min(lasso.coef_) >= 0 lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True) lasso.fit(X, y) - assert_true(min(lasso.coef_) >= 0) + assert min(lasso.coef_) >= 0 def test_enet_positive_constraint(): @@ -342,7 +342,7 @@ def test_enet_positive_constraint(): enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True) enet.fit(X, y) - assert_true(min(enet.coef_) >= 0) + assert min(enet.coef_) >= 0 def test_enet_cv_positive_constraint(): @@ 
-354,13 +354,13 @@ def test_enet_cv_positive_constraint(): max_iter=max_iter, cv=2, n_jobs=1) enetcv_unconstrained.fit(X, y) - assert_true(min(enetcv_unconstrained.coef_) < 0) + assert min(enetcv_unconstrained.coef_) < 0 # On same data, constrained fit has non-negative coefficients enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, positive=True, n_jobs=1) enetcv_constrained.fit(X, y) - assert_true(min(enetcv_constrained.coef_) >= 0) + assert min(enetcv_constrained.coef_) >= 0 @pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22 @@ -400,11 +400,11 @@ def test_multi_task_lasso_and_enet(): Y = np.c_[y, y] # Y_test = np.c_[y_test, y_test] clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) - assert_true(0 < clf.dual_gap_ < 1e-5) + assert 0 < clf.dual_gap_ < 1e-5 assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y) - assert_true(0 < clf.dual_gap_ < 1e-5) + assert 0 < clf.dual_gap_ < 1e-5 assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) clf = MultiTaskElasticNet(alpha=1.0, tol=1e-8, max_iter=1) @@ -430,7 +430,7 @@ def test_multi_task_lasso_readonly_data(): with TempMemmap((X, Y)) as (X, Y): Y = np.c_[y, y] clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) - assert_true(0 < clf.dual_gap_ < 1e-5) + assert 0 < clf.dual_gap_ < 1e-5 assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) @@ -643,7 +643,7 @@ def test_enet_path_positive(): # Test that the coefs returned by positive=True in enet_path are positive for path in [enet_path, lasso_path]: pos_path_coef = path(X, Y[:, 0], positive=True)[1] - assert_true(np.all(pos_path_coef >= 0)) + assert np.all(pos_path_coef >= 0) # For multi output, positive parameter is not allowed # Test that an error is raised @@ -700,7 +700,7 @@ def test_enet_copy_X_False_check_input_False(): enet.fit(X, y, check_input=False) # No copying, X is overwritten - assert_true(np.any(np.not_equal(original_X, X))) + assert np.any(np.not_equal(original_X, X)) def test_overrided_gram_matrix(): diff --git a/sklearn/linear_model/tests/test_least_angle.py b/sklearn/linear_model/tests/test_least_angle.py index 9c9a883f96383..c3c7a50ae7136 100644 --- a/sklearn/linear_model/tests/test_least_angle.py +++ b/sklearn/linear_model/tests/test_least_angle.py @@ -50,10 +50,10 @@ def test_simple(): eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: - assert_true(ocur == i + 1) + assert ocur == i + 1 else: # no more than max_pred variables can go into the active set - assert_true(ocur == X.shape[1]) + assert ocur == X.shape[1] finally: sys.stdout = old_stdout @@ -72,10 +72,10 @@ def test_simple_precomputed(): eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: - assert_true(ocur == i + 1) + assert ocur == i + 1 else: # no more than max_pred variables can go into the active set - assert_true(ocur == X.shape[1]) + assert ocur == X.shape[1] def test_all_precomputed(): @@ -123,7 +123,7 @@ def test_collinearity(): f = ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) - assert_true(not np.isnan(coef_path_).any()) + assert not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1]) - y assert_less((residual ** 2).sum(), 1.) 
# just make sure it's bounded @@ -146,7 +146,7 @@ def test_no_path(): diabetes.data, diabetes.target, method="lar", return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) - assert_true(alpha_ == alphas_[-1]) + assert alpha_ == alphas_[-1] def test_no_path_precomputed(): @@ -161,7 +161,7 @@ def test_no_path_precomputed(): return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) - assert_true(alpha_ == alphas_[-1]) + assert alpha_ == alphas_[-1] def test_no_path_all_precomputed(): @@ -178,7 +178,7 @@ def test_no_path_all_precomputed(): X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) - assert_true(alpha_ == alphas_[-1]) + assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22 @@ -304,7 +304,7 @@ def test_lasso_lars_path_length(): lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the sequence of alphas is always decreasing - assert_true(np.all(np.diff(lasso.alphas_) < 0)) + assert np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): @@ -376,7 +376,7 @@ def test_lars_add_features(): H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) - assert_true(np.all(np.isfinite(clf.coef_))) + assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): @@ -444,7 +444,7 @@ def test_lars_cv_max_iter(): X = np.c_[X, x, x] # add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) - assert_true(len(w) == 0) + assert len(w) == 0 def test_lasso_lars_ic(): @@ -507,13 +507,13 @@ def test_lars_path_positive_constraint(): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method=method, positive=False) - assert_true(coefs.min() < 0) + assert coefs.min() < 0 alpha, active, coefs = \ linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method=method, positive=True) - assert_true(coefs.min() >= 0) + assert coefs.min() >= 0 # now we gonna test the positive option for all estimator classes @@ -535,10 +535,10 @@ def test_estimatorclasses_positive_constraint(): params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(diabetes['data'], diabetes['target']) - assert_true(estimator.coef_.min() < 0) + assert estimator.coef_.min() < 0 estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(diabetes['data'], diabetes['target']) - assert_true(min(estimator.coef_) >= 0) + assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(verbose=False): diff --git a/sklearn/linear_model/tests/test_logistic.py b/sklearn/linear_model/tests/test_logistic.py index 04a857ccfff34..1a40684c56698 100644 --- a/sklearn/linear_model/tests/test_logistic.py +++ b/sklearn/linear_model/tests/test_logistic.py @@ -314,7 +314,7 @@ def test_sparsify(): pred_d_d = clf.decision_function(iris.data) clf.sparsify() - assert_true(sp.issparse(clf.coef_)) + assert sp.issparse(clf.coef_) pred_s_d = clf.decision_function(iris.data) sp_data = sp.coo_matrix(iris.data) diff --git a/sklearn/linear_model/tests/test_omp.py b/sklearn/linear_model/tests/test_omp.py index d083e745f8299..7e80e5fcb84fb 100644 --- a/sklearn/linear_model/tests/test_omp.py +++ b/sklearn/linear_model/tests/test_omp.py @@ -55,8 +55,8 @@ def test_tol(): tol = 0.5 gamma = 
orthogonal_mp(X, y[:, 0], tol=tol) gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True) - assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol) - assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol) + assert np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol + assert np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol def test_with_without_gram(): @@ -123,12 +123,12 @@ def test_estimator(): omp.fit(X, y[:, 0]) assert_equal(omp.coef_.shape, (n_features,)) assert_equal(omp.intercept_.shape, ()) - assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs) + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs omp.fit(X, y) assert_equal(omp.coef_.shape, (n_targets, n_features)) assert_equal(omp.intercept_.shape, (n_targets,)) - assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs) + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs coef_normalized = omp.coef_[0].copy() omp.set_params(fit_intercept=True, normalize=False) @@ -137,14 +137,14 @@ def test_estimator(): omp.set_params(fit_intercept=False, normalize=False) omp.fit(X, y[:, 0]) - assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs) + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs assert_equal(omp.coef_.shape, (n_features,)) assert_equal(omp.intercept_, 0) omp.fit(X, y) assert_equal(omp.coef_.shape, (n_targets, n_features)) assert_equal(omp.intercept_, 0) - assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs) + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs def test_identical_regressors(): diff --git a/sklearn/linear_model/tests/test_passive_aggressive.py b/sklearn/linear_model/tests/test_passive_aggressive.py index 77776b4c3c59d..d02169da5e3cd 100644 --- a/sklearn/linear_model/tests/test_passive_aggressive.py +++ b/sklearn/linear_model/tests/test_passive_aggressive.py @@ -82,10 +82,10 @@ def test_classifier_accuracy(): score = clf.score(data, y) assert_greater(score, 0.79) if average: - assert_true(hasattr(clf, 'average_coef_')) - assert_true(hasattr(clf, 'average_intercept_')) - assert_true(hasattr(clf, 'standard_intercept_')) - assert_true(hasattr(clf, 'standard_coef_')) + assert hasattr(clf, 'average_coef_') + assert hasattr(clf, 'average_intercept_') + assert hasattr(clf, 'standard_intercept_') + assert hasattr(clf, 'standard_coef_') # 0.23. warning about tol not having its correct default value. @@ -102,10 +102,10 @@ def test_classifier_partial_fit(): score = clf.score(data, y) assert_greater(score, 0.79) if average: - assert_true(hasattr(clf, 'average_coef_')) - assert_true(hasattr(clf, 'average_intercept_')) - assert_true(hasattr(clf, 'standard_intercept_')) - assert_true(hasattr(clf, 'standard_coef_')) + assert hasattr(clf, 'average_coef_') + assert hasattr(clf, 'average_intercept_') + assert hasattr(clf, 'standard_intercept_') + assert hasattr(clf, 'standard_coef_') # 0.23. warning about tol not having its correct default value. @@ -243,10 +243,10 @@ def test_regressor_mse(): pred = reg.predict(data) assert_less(np.mean((pred - y_bin) ** 2), 1.7) if average: - assert_true(hasattr(reg, 'average_coef_')) - assert_true(hasattr(reg, 'average_intercept_')) - assert_true(hasattr(reg, 'standard_intercept_')) - assert_true(hasattr(reg, 'standard_coef_')) + assert hasattr(reg, 'average_coef_') + assert hasattr(reg, 'average_intercept_') + assert hasattr(reg, 'standard_intercept_') + assert hasattr(reg, 'standard_coef_') # 0.23. warning about tol not having its correct default value. 
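The conversion pattern throughout this patch is mechanical: assert_true(expr) becomes a bare assert expr, and pytest's assertion rewriting then reports the operands of a failed comparison instead of the opaque "False is not true" message the unittest-style wrapper produces. A minimal sketch of the idea, using toy values rather than anything from the test suite:

    import numpy as np

    y_true = np.array([1, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 0])
    score = np.mean(y_true == y_pred)

    # old style: assert_true(score > 0.5) fails with "False is not true";
    # a bare assert is rewritten by pytest to show the value of score
    assert score > 0.5

Chained forms such as assert 0 < clf.dual_gap_ < 1e-5 above work the same way and read closer to the mathematical statement.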
@@ -265,10 +265,10 @@ def test_regressor_partial_fit(): pred = reg.predict(data) assert_less(np.mean((pred - y_bin) ** 2), 1.7) if average: - assert_true(hasattr(reg, 'average_coef_')) - assert_true(hasattr(reg, 'average_intercept_')) - assert_true(hasattr(reg, 'standard_intercept_')) - assert_true(hasattr(reg, 'standard_coef_')) + assert hasattr(reg, 'average_coef_') + assert hasattr(reg, 'average_intercept_') + assert hasattr(reg, 'standard_intercept_') + assert hasattr(reg, 'standard_coef_') # 0.23. warning about tol not having its correct default value. diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index d42e0f8743007..feee05dd35e28 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -464,7 +464,7 @@ def _test_ridge_classifiers(filter_): reg = RidgeClassifierCV(cv=cv) reg.fit(filter_(X_iris), y_iris) y_pred = reg.predict(filter_(X_iris)) - assert_true(np.mean(y_iris == y_pred) >= 0.8) + assert np.mean(y_iris == y_pred) >= 0.8 def _test_tolerance(filter_): @@ -476,7 +476,7 @@ def _test_tolerance(filter_): ridge2.fit(filter_(X_diabetes), y_diabetes) score2 = ridge2.score(filter_(X_diabetes), y_diabetes) - assert_true(score >= score2) + assert score >= score2 def check_dense_sparse(test_func): diff --git a/sklearn/linear_model/tests/test_sgd.py b/sklearn/linear_model/tests/test_sgd.py index bc826c2c087bd..a89b32a46e747 100644 --- a/sklearn/linear_model/tests/test_sgd.py +++ b/sklearn/linear_model/tests/test_sgd.py @@ -226,10 +226,10 @@ def test_plain_has_no_average_attr(self): clf = self.factory(average=True, eta0=.01) clf.fit(X, Y) - assert_true(hasattr(clf, 'average_coef_')) - assert_true(hasattr(clf, 'average_intercept_')) - assert_true(hasattr(clf, 'standard_intercept_')) - assert_true(hasattr(clf, 'standard_coef_')) + assert hasattr(clf, 'average_coef_') + assert hasattr(clf, 'average_intercept_') + assert hasattr(clf, 'standard_intercept_') + assert hasattr(clf, 'standard_coef_') clf = self.factory() clf.fit(X, Y) @@ -538,7 +538,7 @@ def test_sgd_multiclass_with_init_coef(self): clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3)) assert_equal(clf.coef_.shape, (3, 2)) - assert_true(clf.intercept_.shape, (3,)) + assert clf.intercept_.shape == (3,) pred = clf.predict(T2) assert_array_equal(pred, true_result2) @@ -609,14 +609,14 @@ def test_sgd_proba(self): clf = self.factory(loss=loss, alpha=0.01, max_iter=10) clf.fit(X, Y) p = clf.predict_proba([[3, 2]]) - assert_true(p[0, 1] > 0.5) + assert p[0, 1] > 0.5 p = clf.predict_proba([[-1, -1]]) - assert_true(p[0, 1] < 0.5) + assert p[0, 1] < 0.5 p = clf.predict_log_proba([[3, 2]]) - assert_true(p[0, 1] > p[0, 0]) + assert p[0, 1] > p[0, 0] p = clf.predict_log_proba([[-1, -1]]) - assert_true(p[0, 1] < p[0, 0]) + assert p[0, 1] < p[0, 0] # log loss multiclass probability estimates clf = self.factory(loss="log", alpha=0.01, max_iter=10).fit(X2, Y2) @@ -625,7 +625,7 @@ def test_sgd_proba(self): p = clf.predict_proba([[.1, -.1], [.3, .2]]) assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1)) assert_almost_equal(p[0].sum(), 1) - assert_true(np.all(p[0] >= 0)) + assert np.all(p[0] >= 0) p = clf.predict_proba([[-1, -1]]) d = clf.decision_function([[-1, -1]]) @@ -679,13 +679,13 @@ def test_sgd_l1(self): # test sparsify with dense inputs clf.sparsify() - assert_true(sp.issparse(clf.coef_)) + assert sp.issparse(clf.coef_) pred = clf.predict(X) assert_array_equal(pred, Y) # pickle and unpickle with sparse coef_ clf =
pickle.loads(pickle.dumps(clf)) - assert_true(sp.issparse(clf.coef_)) + assert sp.issparse(clf.coef_) pred = clf.predict(X) assert_array_equal(pred, Y) @@ -841,7 +841,7 @@ def test_partial_fit_binary(self): clf.partial_fit(X[third:], Y[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated - assert_true(id1, id2) + assert id1 == id2 y_pred = clf.predict(T) assert_array_equal(y_pred, true_result) @@ -860,7 +860,7 @@ def test_partial_fit_multiclass(self): clf.partial_fit(X2[third:], Y2[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated - assert_true(id1, id2) + assert id1 == id2 def test_partial_fit_multiclass_average(self): third = X2.shape[0] // 3 @@ -940,7 +940,7 @@ def test_multiple_fit(self): # Test multiple calls of fit w/ different shaped inputs. clf = self.factory(alpha=0.01, shuffle=False) clf.fit(X, Y) - assert_true(hasattr(clf, "coef_")) + assert hasattr(clf, "coef_") # Non-regression test: try fitting with a different label set. y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)] @@ -1093,7 +1093,7 @@ def test_sgd_epsilon_insensitive(self): fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) - assert_true(score > 0.99) + assert score > 0.99 # simple linear function with noise y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() @@ -1103,7 +1103,7 @@ def test_sgd_epsilon_insensitive(self): fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) - assert_true(score > 0.5) + assert score > 0.5 def test_sgd_huber_fit(self): xmin, xmax = -5, 5 @@ -1170,7 +1170,7 @@ def test_partial_fit(self): clf.partial_fit(X[third:], Y[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated - assert_true(id1, id2) + assert id1 == id2 def _test_partial_fit_equal_fit(self, lr): clf = self.factory(alpha=0.01, max_iter=2, eta0=0.01, @@ -1244,13 +1244,13 @@ def test_underflow_or_overlow(): X = rng.normal(size=(n_samples, n_features)) X[:, :2] *= 1e300 - assert_true(np.isfinite(X).all()) + assert np.isfinite(X).all() # Use MinMaxScaler to scale the data without introducing a numerical # instability (computing the standard deviation naively is not possible # on this data) X_scaled = MinMaxScaler().fit_transform(X) - assert_true(np.isfinite(X_scaled).all()) + assert np.isfinite(X_scaled).all() # Define a ground truth on the scaled data ground_truth = rng.normal(size=n_features) @@ -1261,7 +1261,7 @@ def test_underflow_or_overlow(): # smoke test: model is stable on scaled data model.fit(X_scaled, y) - assert_true(np.isfinite(model.coef_).all()) + assert np.isfinite(model.coef_).all() # model is numerically unstable on unscaled data msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*" @@ -1278,7 +1278,7 @@ def test_numerical_stability_large_gradient(): eta0=0.001, random_state=0, tol=None) with np.errstate(all='raise'): model.fit(iris.data, iris.target) - assert_true(np.isfinite(model.coef_).all()) + assert np.isfinite(model.coef_).all() @pytest.mark.parametrize('penalty', ['l2', 'l1', 'elasticnet']) diff --git a/sklearn/linear_model/tests/test_sparse_coordinate_descent.py b/sklearn/linear_model/tests/test_sparse_coordinate_descent.py index 6b4c09d9742e0..a869158036ad1 100644 --- a/sklearn/linear_model/tests/test_sparse_coordinate_descent.py +++ b/sklearn/linear_model/tests/test_sparse_coordinate_descent.py @@ -19,7 +19,7 @@ def test_sparse_coef(): clf = ElasticNet() clf.coef_ = [1, 2, 3] - assert_true(sp.isspmatrix(clf.sparse_coef_)) + assert sp.isspmatrix(clf.sparse_coef_)
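Two-argument calls deserve care during this conversion: in assert_true(a, b) the second argument is only a failure message, so the mechanical rewrite "assert a, b" checks nothing but the truthiness of a. That is why the intercept shape check and the three id comparisons above are spelled out with ==; the group-count check in test_leave_one_p_group_out further down needs the same treatment. A small sketch of the difference, using a plain array instead of an estimator:

    import numpy as np

    coef = np.zeros(3)
    id_before = id(coef)
    coef += 1.0                    # in-place update keeps the same object
    id_after = id(coef)

    # assert id_before, id_after   # vacuous: id_before is effectively always truthy
    assert id_before == id_after   # the intended no-reallocation check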
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_) diff --git a/sklearn/manifold/tests/test_locally_linear.py b/sklearn/manifold/tests/test_locally_linear.py index 2b7f4e1670ef3..2f5c0bd9b40a0 100644 --- a/sklearn/manifold/tests/test_locally_linear.py +++ b/sklearn/manifold/tests/test_locally_linear.py @@ -58,7 +58,7 @@ def test_lle_simple_grid(): for solver in eigen_solvers: clf.set_params(eigen_solver=solver) clf.fit(X) - assert_true(clf.embedding_.shape[1] == n_components) + assert clf.embedding_.shape[1] == n_components reconstruction_error = linalg.norm( np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2 @@ -92,7 +92,7 @@ def test_lle_manifold(): for solver in eigen_solvers: clf.set_params(eigen_solver=solver) clf.fit(X) - assert_true(clf.embedding_.shape[1] == n_components) + assert clf.embedding_.shape[1] == n_components reconstruction_error = linalg.norm( np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2 details = ("solver: %s, method: %s" % (solver, method)) diff --git a/sklearn/manifold/tests/test_spectral_embedding.py b/sklearn/manifold/tests/test_spectral_embedding.py index d236c17e5dbb5..1db82b889469f 100644 --- a/sklearn/manifold/tests/test_spectral_embedding.py +++ b/sklearn/manifold/tests/test_spectral_embedding.py @@ -102,11 +102,11 @@ def test_spectral_embedding_two_components(seed=36): # Test of internal _graph_connected_component before connection component = _graph_connected_component(affinity, 0) - assert_true(component[:n_sample].all()) - assert_true(not component[n_sample:].any()) + assert component[:n_sample].all() + assert not component[n_sample:].any() component = _graph_connected_component(affinity, -1) - assert_true(not component[:n_sample].any()) - assert_true(component[n_sample:].all()) + assert not component[:n_sample].any() + assert component[n_sample:].all() # connection affinity[0, n_sample + 1] = 1 @@ -140,7 +140,7 @@ def test_spectral_embedding_precomputed_affinity(seed=36): embed_rbf = se_rbf.fit_transform(S) assert_array_almost_equal( se_precomp.affinity_matrix_, se_rbf.affinity_matrix_) - assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05)) + assert _check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05) def test_spectral_embedding_callable_affinity(seed=36): @@ -179,7 +179,7 @@ def test_spectral_embedding_amg_solver(seed=36): random_state=np.random.RandomState(seed)) embed_amg = se_amg.fit_transform(S) embed_arpack = se_arpack.fit_transform(S) - assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05)) + assert _check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05) @pytest.mark.filterwarnings("ignore:the behavior of nmi will " diff --git a/sklearn/metrics/tests/test_pairwise.py b/sklearn/metrics/tests/test_pairwise.py index 6f1f0ac9f9f25..384e4685f92cf 100644 --- a/sklearn/metrics/tests/test_pairwise.py +++ b/sklearn/metrics/tests/test_pairwise.py @@ -136,7 +136,7 @@ def test_pairwise_boolean_distance(metric): for Z in [Y, None]: res = pairwise_distances(X, Z, metric=metric) res[np.isnan(res)] = 0 - assert_true(np.sum(res != 0) == 0) + assert np.sum(res != 0) == 0 @pytest.mark.parametrize('func', [pairwise_distances, pairwise_kernels]) @@ -156,11 +156,11 @@ def test_pairwise_precomputed(func): # Test not copied (if appropriate dtype) S = np.zeros((5, 5)) S2 = func(S, metric="precomputed") - assert_true(S is S2) + assert S is S2 # with two args S = np.zeros((5, 3)) S2 = func(S, np.zeros((3, 3)), metric="precomputed") - assert_true(S is S2) + assert S is S2 # Test always returns 
float dtype S = func(np.array([[1]], dtype='int'), metric='precomputed') @@ -168,7 +168,7 @@ def test_pairwise_precomputed(func): # Test converts list to array-like S = func([[1.]], metric='precomputed') - assert_true(isinstance(S, np.ndarray)) + assert isinstance(S, np.ndarray) def test_pairwise_precomputed_non_negative(): @@ -573,16 +573,16 @@ def test_cosine_distances(): D = cosine_distances(XA) assert_array_almost_equal(D, [[0., 0.], [0., 0.]]) # check that all elements are in [0, 2] - assert_true(np.all(D >= 0.)) - assert_true(np.all(D <= 2.)) + assert np.all(D >= 0.) + assert np.all(D <= 2.) # check that diagonal elements are equal to 0 assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.]) XB = np.vstack([x, -x]) D2 = cosine_distances(XB) # check that all elements are in [0, 2] - assert_true(np.all(D2 >= 0.)) - assert_true(np.all(D2 <= 2.)) + assert np.all(D2 >= 0.) + assert np.all(D2 <= 2.) # check that diagonal elements are equal to 0 and non diagonal to 2 assert_array_almost_equal(D2, [[0., 2.], [2., 0.]]) @@ -591,8 +591,8 @@ def test_cosine_distances(): D = cosine_distances(X) # check that diagonal elements are equal to 0 assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0]) - assert_true(np.all(D >= 0.)) - assert_true(np.all(D <= 2.)) + assert np.all(D >= 0.) + assert np.all(D <= 2.) # Paired distances @@ -632,8 +632,8 @@ def test_chi_square_kernel(): K = chi2_kernel(Y) assert_array_equal(np.diag(K), 1) # check off-diagonal is < 1 but > 0: - assert_true(np.all(K > 0)) - assert_true(np.all(K - np.diag(np.diag(K)) < 1)) + assert np.all(K > 0) + assert np.all(K - np.diag(np.diag(K)) < 1) # check that float32 is preserved X = rng.random_sample((5, 4)).astype(np.float32) Y = rng.random_sample((10, 4)).astype(np.float32) @@ -644,7 +644,7 @@ def test_chi_square_kernel(): # check that zeros are handled X = rng.random_sample((10, 4)).astype(np.int32) K = chi2_kernel(X, X) - assert_true(np.isfinite(K).all()) + assert np.isfinite(K).all() assert_equal(K.dtype, np.float) # check that kernel of similar things is greater than dissimilar ones @@ -717,8 +717,8 @@ def test_laplacian_kernel(): assert_array_almost_equal(np.diag(K), np.ones(5)) # off-diagonal elements are < 1 but > 0: - assert_true(np.all(K > 0)) - assert_true(np.all(K - np.diag(np.diag(K)) < 1)) + assert np.all(K > 0) + assert np.all(K - np.diag(np.diag(K)) < 1) @pytest.mark.parametrize('metric, pairwise_func', @@ -733,7 +733,7 @@ def test_pairwise_similarity_sparse_output(metric, pairwise_func): # should be sparse K1 = pairwise_func(Xcsr, Ycsr, dense_output=False) - assert_true(issparse(K1)) + assert issparse(K1) # should be dense, and equal to K1 K2 = pairwise_func(X, Y, dense_output=True) @@ -771,7 +771,7 @@ def test_check_dense_matrices(): # Check that if XB is None, XB is returned as reference to XA XA = np.resize(np.arange(40), (5, 8)) XA_checked, XB_checked = check_pairwise_arrays(XA, None) - assert_true(XA_checked is XB_checked) + assert XA_checked is XB_checked assert_array_equal(XA, XA_checked) @@ -823,15 +823,15 @@ def test_check_sparse_arrays(): XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse) # compare their difference because testing csr matrices for # equality with '==' does not work as expected. 
- assert_true(issparse(XA_checked)) + assert issparse(XA_checked) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) - assert_true(issparse(XB_checked)) + assert issparse(XB_checked) assert_equal(abs(XB_sparse - XB_checked).sum(), 0) XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse) - assert_true(issparse(XA_checked)) + assert issparse(XA_checked) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) - assert_true(issparse(XA_2_checked)) + assert issparse(XA_2_checked) assert_equal(abs(XA_2_checked - XA_checked).sum(), 0) diff --git a/sklearn/metrics/tests/test_score_objects.py b/sklearn/metrics/tests/test_score_objects.py index a676541743b71..1a222cd7da353 100644 --- a/sklearn/metrics/tests/test_score_objects.py +++ b/sklearn/metrics/tests/test_score_objects.py @@ -162,7 +162,7 @@ def check_scoring_validator_for_single_metric_usecases(scoring_validator): estimator = EstimatorWithFitAndScore() estimator.fit([[1]], [1]) scorer = scoring_validator(estimator) - assert_true(scorer is _passthrough_scorer) + assert scorer is _passthrough_scorer assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0) estimator = EstimatorWithFitAndPredict() @@ -176,13 +176,13 @@ def check_scoring_validator_for_single_metric_usecases(scoring_validator): estimator = EstimatorWithFit() scorer = scoring_validator(estimator, "accuracy") - assert_true(isinstance(scorer, _PredictScorer)) + assert isinstance(scorer, _PredictScorer) # Test the allow_none parameter for check_scoring alone if scoring_validator is check_scoring: estimator = EstimatorWithFit() scorer = scoring_validator(estimator, allow_none=True) - assert_true(scorer is None) + assert scorer is None def check_multimetric_scoring_single_metric_wrapper(*args, **kwargs): @@ -194,7 +194,7 @@ def check_multimetric_scoring_single_metric_wrapper(*args, **kwargs): # For all single metric use cases, it should register as not multimetric assert_false(is_multi) if args[0] is not None: - assert_true(scorers is not None) + assert scorers is not None names, scorers = zip(*scorers.items()) assert_equal(len(scorers), 1) assert_equal(names[0], 'score') @@ -220,11 +220,11 @@ def test_check_scoring_and_check_multimetric_scoring(): estimator.fit([[1], [2], [3]], [1, 1, 0]) scorers, is_multi = _check_multimetric_scoring(estimator, scoring) - assert_true(is_multi) - assert_true(isinstance(scorers, dict)) + assert is_multi + assert isinstance(scorers, dict) assert_equal(sorted(scorers.keys()), sorted(list(scoring))) - assert_true(all([isinstance(scorer, _PredictScorer) - for scorer in list(scorers.values())])) + assert all([isinstance(scorer, _PredictScorer) + for scorer in list(scorers.values())]) if 'acc' in scoring: assert_almost_equal(scorers['acc']( @@ -257,11 +257,11 @@ def test_check_scoring_gridsearchcv(): grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]}) scorer = check_scoring(grid, "f1") - assert_true(isinstance(scorer, _PredictScorer)) + assert isinstance(scorer, _PredictScorer) pipe = make_pipeline(LinearSVC()) scorer = check_scoring(pipe, "f1") - assert_true(isinstance(scorer, _PredictScorer)) + assert isinstance(scorer, _PredictScorer) # check that cross_val_score definitely calls the scorer # and doesn't make any assumptions about the estimator apart from having a diff --git a/sklearn/mixture/tests/test_gaussian_mixture.py b/sklearn/mixture/tests/test_gaussian_mixture.py index f68db77cd480a..752d3040f536c 100644 --- a/sklearn/mixture/tests/test_gaussian_mixture.py +++ b/sklearn/mixture/tests/test_gaussian_mixture.py @@ -749,8 +749,8 @@ 
def test_gaussian_mixture_aic_bic(): bic = (2 * n_samples * sgh + np.log(n_samples) * g._n_parameters()) bound = n_features / np.sqrt(n_samples) - assert_true((g.aic(X) - aic) / n_samples < bound) - assert_true((g.bic(X) - bic) / n_samples < bound) + assert (g.aic(X) - aic) / n_samples < bound + assert (g.bic(X) - bic) / n_samples < bound def test_gaussian_mixture_verbose(): @@ -920,7 +920,7 @@ def test_monotonic_likelihood(): if gmm.converged_: break - assert_true(gmm.converged_) + assert gmm.converged_ def test_regularisation(): diff --git a/sklearn/model_selection/tests/test_search.py b/sklearn/model_selection/tests/test_search.py index 27fd330e35586..dfdcb504912f1 100644 --- a/sklearn/model_selection/tests/test_search.py +++ b/sklearn/model_selection/tests/test_search.py @@ -82,7 +82,7 @@ def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): - assert_true(len(X) == len(Y)) + assert len(X) == len(Y) self.classes_ = np.unique(Y) return self @@ -146,8 +146,8 @@ def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) - assert_true(isinstance(grid1, Iterable)) - assert_true(isinstance(grid1, Sized)) + assert isinstance(grid1, Iterable) + assert isinstance(grid1, Sized) assert_equal(len(grid1), 3) assert_grid_iter_equals_getitem(grid1) @@ -284,8 +284,8 @@ def test_grid_search_score_method(): score_auc = search_auc.score(X, y) # ensure the test is sane - assert_true(score_auc < 1.0) - assert_true(score_accuracy < 1.0) + assert score_auc < 1.0 + assert score_accuracy < 1.0 assert_not_equal(score_auc, score_accuracy) assert_almost_equal(score_accuracy, score_no_scoring) @@ -357,11 +357,11 @@ def test_trivial_cv_results_attr(): clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}) grid_search.fit(X, y) - assert_true(hasattr(grid_search, "cv_results_")) + assert hasattr(grid_search, "cv_results_") random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1) random_search.fit(X, y) - assert_true(hasattr(grid_search, "cv_results_")) + assert hasattr(grid_search, "cv_results_") @pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22 @@ -487,7 +487,7 @@ def test_grid_search_sparse(): y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C - assert_true(np.mean(y_pred == y_pred2) >= .9) + assert np.mean(y_pred == y_pred2) >= .9 assert_equal(C, C2) @@ -543,7 +543,7 @@ def test_grid_search_precomputed_kernel(): cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) - assert_true(cv.best_score_ >= 0) + assert cv.best_score_ >= 0 # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) @@ -551,7 +551,7 @@ def test_grid_search_precomputed_kernel(): y_pred = cv.predict(K_test) - assert_true(np.mean(y_pred == y_test) >= 0) + assert np.mean(y_pred == y_test) >= 0 # test error is raised when the precomputed kernel is not array-like # or sparse @@ -577,7 +577,7 @@ def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): - assert_true(not hasattr(self, 'has_been_fit_')) + assert not hasattr(self, 'has_been_fit_') self.has_been_fit_ = True def predict(self, X): @@ -608,7 +608,7 @@ def test_gridsearch_nd(): clf = CheckingClassifier(check_X=check_X, check_y=check_y) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) - assert_true(hasattr(grid_search, "cv_results_")) + assert hasattr(grid_search, "cv_results_") @pytest.mark.filterwarnings('ignore: The default of the `iid`') 
# 0.22 @@ -621,7 +621,7 @@ def test_X_as_list(): cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) - assert_true(hasattr(grid_search, "cv_results_")) + assert hasattr(grid_search, "cv_results_") @pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22 @@ -634,7 +634,7 @@ def test_y_as_list(): cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) - assert_true(hasattr(grid_search, "cv_results_")) + assert hasattr(grid_search, "cv_results_") @ignore_warnings @@ -665,7 +665,7 @@ def check_series(x): grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) - assert_true(hasattr(grid_search, "cv_results_")) + assert hasattr(grid_search, "cv_results_") @pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22 @@ -722,8 +722,8 @@ def test_param_sampler(): samples = [x for x in sampler] assert_equal(len(samples), 10) for sample in samples: - assert_true(sample["kernel"] in ["rbf", "linear"]) - assert_true(0 <= sample["C"] <= 1) + assert sample["kernel"] in ["rbf", "linear"] + assert 0 <= sample["C"] <= 1 # test that repeated calls yield identical parameters param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} @@ -741,9 +741,9 @@ def test_param_sampler(): def check_cv_results_array_types(search, param_keys, score_keys): # Check if the search `cv_results`'s array are of correct types cv_results = search.cv_results_ - assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray) - for param in param_keys)) - assert_true(all(cv_results[key].dtype == object for key in param_keys)) + assert all(isinstance(cv_results[param], np.ma.MaskedArray) + for param in param_keys) + assert all(cv_results[key].dtype == object for key in param_keys) assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys)) assert_true(all(cv_results[key].dtype == np.float64 @@ -752,15 +752,15 @@ def check_cv_results_array_types(search, param_keys, score_keys): scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score'] for key in scorer_keys: - assert_true(cv_results['rank_test_%s' % key].dtype == np.int32) + assert cv_results['rank_test_%s' % key].dtype == np.int32 def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand): # Test the search.cv_results_ contains all the required results assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',))) - assert_true(all(cv_results[key].shape == (n_cand,) - for key in param_keys + score_keys)) + assert all(cv_results[key].shape == (n_cand,) + for key in param_keys + score_keys) def test_grid_search_cv_results(): @@ -791,7 +791,7 @@ def test_grid_search_cv_results(): assert_equal(iid, search.iid) cv_results = search.cv_results_ # Check if score and timing are reasonable - assert_true(all(cv_results['rank_test_score'] >= 1)) + assert all(cv_results['rank_test_score'] >= 1) assert_true(all(cv_results[k] >= 0) for k in score_keys if k is not 'rank_test_score') assert_true(all(cv_results[k] <= 1) for k in score_keys @@ -874,7 +874,7 @@ def test_search_iid_param(): return_train_score=True) for search in (grid_search, random_search): search.fit(X, y) - assert_true(search.iid or search.iid is None) + assert search.iid or search.iid is None test_cv_scores = np.array(list(search.cv_results_['split%d_test_score' % s_i][0] @@ -1008,7 +1008,7 @@ 
def compare_cv_results_multimetric_with_single( single metric cv_results from single metric grid/random search""" assert_equal(search_multi.iid, iid) - assert_true(search_multi.multimetric_) + assert search_multi.multimetric_ assert_array_equal(sorted(search_multi.scorer_), ('accuracy', 'recall')) @@ -1113,16 +1113,16 @@ def test_search_cv_timing(): for key in ['mean_fit_time', 'std_fit_time']: # NOTE The precision of time.time in windows is not high # enough for the fit/score times to be non-zero for trivial X and y - assert_true(np.all(search.cv_results_[key] >= 0)) - assert_true(np.all(search.cv_results_[key] < 1)) + assert np.all(search.cv_results_[key] >= 0) + assert np.all(search.cv_results_[key] < 1) for key in ['mean_score_time', 'std_score_time']: - assert_true(search.cv_results_[key][1] >= 0) - assert_true(search.cv_results_[key][0] == 0.0) - assert_true(np.all(search.cv_results_[key] < 1)) + assert search.cv_results_[key][1] >= 0 + assert search.cv_results_[key][0] == 0.0 + assert np.all(search.cv_results_[key] < 1) - assert_true(hasattr(search, "refit_time_")) - assert_true(isinstance(search.refit_time_, float)) + assert hasattr(search, "refit_time_") + assert isinstance(search.refit_time_, float) assert_greater_equal(search.refit_time_, 0) @@ -1142,7 +1142,7 @@ def test_grid_search_correct_score_results(): expected_keys = (("mean_test_score", "rank_test_score") + tuple("split%d_test_score" % cv_i for cv_i in range(n_splits))) - assert_true(all(np.in1d(expected_keys, result_keys))) + assert all(np.in1d(expected_keys, result_keys)) cv = StratifiedKFold(n_splits=n_splits) n_splits = grid_search.n_splits_ @@ -1382,7 +1382,7 @@ def test_parameters_sampler_replacement(): samples = list(sampler) assert_equal(len(samples), 6) for values in ParameterGrid(params): - assert_true(values in samples) + assert values in samples # test sampling without replacement in a large grid params = {'a': range(10), 'b': range(10), 'c': range(10)} diff --git a/sklearn/model_selection/tests/test_split.py b/sklearn/model_selection/tests/test_split.py index 637b4dca5537f..b7cba19688543 100644 --- a/sklearn/model_selection/tests/test_split.py +++ b/sklearn/model_selection/tests/test_split.py @@ -411,7 +411,7 @@ def test_kfold_balance(): for _, test in kf: sizes.append(len(test)) - assert_true((np.max(sizes) - np.min(sizes)) <= 1) + assert (np.max(sizes) - np.min(sizes)) <= 1 assert_equal(np.sum(sizes), i) @@ -430,7 +430,7 @@ def test_stratifiedkfold_balance(): for _, test in skf: sizes.append(len(test)) - assert_true((np.max(sizes) - np.min(sizes)) <= 1) + assert (np.max(sizes) - np.min(sizes)) <= 1 assert_equal(np.sum(sizes), i) @@ -846,7 +846,7 @@ def test_leave_one_p_group_out(): # Third test: # The number of groups in test must be equal to p_groups_out - assert_true(np.unique(groups_arr[test]).shape[0], p_groups_out) + assert np.unique(groups_arr[test]).shape[0] == p_groups_out # check get_n_splits() with dummy parameters assert_equal(logo.get_n_splits(None, None, ['a', 'b', 'c', 'b', 'c']), 3) @@ -1043,8 +1043,8 @@ def test_train_test_split(): # don't convert lists to anything else by default split = train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split - assert_true(isinstance(y_train, list)) - assert_true(isinstance(y_test, list)) + assert isinstance(y_train, list) + assert isinstance(y_test, list) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) @@ -1088,8 +1088,8 @@ def train_test_split_pandas(): # X dataframe X_df =
InputFeatureType(X) X_train, X_test = train_test_split(X_df) - assert_true(isinstance(X_train, InputFeatureType)) - assert_true(isinstance(X_test, InputFeatureType)) + assert isinstance(X_train, InputFeatureType) + assert isinstance(X_test, InputFeatureType) def train_test_split_sparse(): @@ -1100,16 +1100,16 @@ def train_test_split_sparse(): for InputFeatureType in sparse_types: X_s = InputFeatureType(X) X_train, X_test = train_test_split(X_s) - assert_true(isinstance(X_train, csr_matrix)) - assert_true(isinstance(X_test, csr_matrix)) + assert isinstance(X_train, csr_matrix) + assert isinstance(X_test, csr_matrix) def train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = train_test_split(X_df) - assert_true(isinstance(X_train, MockDataFrame)) - assert_true(isinstance(X_test, MockDataFrame)) + assert isinstance(X_train, MockDataFrame) + assert isinstance(X_test, MockDataFrame) X_train_arr, X_test_arr = train_test_split(X_df) @@ -1378,7 +1378,7 @@ def test_time_series_cv(): def _check_time_series_max_train_size(splits, check_splits, max_train_size): for (train, test), (check_train, check_test) in zip(splits, check_splits): assert_array_equal(test, check_test) - assert_true(len(check_train) <= max_train_size) + assert len(check_train) <= max_train_size suffix_start = max(len(train) - max_train_size, 0) assert_array_equal(check_train, train[suffix_start:]) diff --git a/sklearn/model_selection/tests/test_validation.py b/sklearn/model_selection/tests/test_validation.py index 22a15df1613dc..a7352972173fc 100644 --- a/sklearn/model_selection/tests/test_validation.py +++ b/sklearn/model_selection/tests/test_validation.py @@ -420,7 +420,7 @@ def check_cross_validate_single_metric(clf, X, y, scores): mse_scores_dict = cross_validate(clf, X, y, cv=5, scoring='neg_mean_squared_error', return_train_score=False) - assert_true(isinstance(mse_scores_dict, dict)) + assert isinstance(mse_scores_dict, dict) assert_equal(len(mse_scores_dict), dict_len) assert_array_almost_equal(mse_scores_dict['test_score'], test_mse_scores) @@ -435,7 +435,7 @@ def check_cross_validate_single_metric(clf, X, y, scores): else: r2_scores_dict = cross_validate(clf, X, y, cv=5, scoring=['r2'], return_train_score=False) - assert_true(isinstance(r2_scores_dict, dict)) + assert isinstance(r2_scores_dict, dict) assert_equal(len(r2_scores_dict), dict_len) assert_array_almost_equal(r2_scores_dict['test_r2'], test_r2_scores) @@ -475,7 +475,7 @@ def check_cross_validate_multi_metric(clf, X, y, scores): else: cv_results = cross_validate(clf, X, y, cv=5, scoring=scoring, return_train_score=False) - assert_true(isinstance(cv_results, dict)) + assert isinstance(cv_results, dict) assert_equal(set(cv_results.keys()), keys_with_train if return_train_score else keys_sans_train) @@ -702,8 +702,8 @@ def test_permutation_score(): score_group, _, pvalue_group = permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy", groups=np.ones(y.size), random_state=0) - assert_true(score_group == score) - assert_true(pvalue_group == pvalue) + assert score_group == score + assert pvalue_group == pvalue # check that we obtain the same results with a sparse representation svm_sparse = SVC(kernel='linear') @@ -712,8 +712,8 @@ def test_permutation_score(): svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse, scoring="accuracy", groups=np.ones(y.size), random_state=0) - assert_true(score_group == score) - assert_true(pvalue_group == pvalue) + assert score_group == score + assert pvalue_group == pvalue 
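Two truthiness traps are worth keeping in mind when dropping the wrappers. A bare assert on a NumPy array raises ValueError because the truth value of an array is ambiguous, so the np.all(...) and .all() reductions above must stay. And an un-consumed generator expression is always truthy: the assert_true(all(cv_results[k] >= 0) for k in ...) lines left untouched in test_grid_search_cv_results above appear to pass a generator as the whole argument and so can never fail; wrapping the comparison inside all() is what makes it a real check. A short, self-contained illustration:

    import numpy as np

    vals = np.array([0.2, 0.7, 0.0])

    # assert vals >= 0         # ValueError: truth value of an array is ambiguous
    assert np.all(vals >= 0)   # explicit reduction over the array

    # A generator object is truthy no matter what it would yield, so the
    # commented form below would pass even for negative data; all() must
    # consume it to perform the actual check.
    # assert (v >= 0 for v in vals)
    assert all(v >= 0 for v in vals)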
# test with custom scoring object def custom_score(y_true, y_pred): @@ -1273,7 +1273,7 @@ def test_check_is_permutation(): rng = np.random.RandomState(0) p = np.arange(100) rng.shuffle(p) - assert_true(_check_is_permutation(p, 100)) + assert _check_is_permutation(p, 100) assert_false(_check_is_permutation(np.delete(p, 23), 100)) p[0] = 23 diff --git a/sklearn/neighbors/tests/test_neighbors.py b/sklearn/neighbors/tests/test_neighbors.py index d70508b700585..23707d539b109 100644 --- a/sklearn/neighbors/tests/test_neighbors.py +++ b/sklearn/neighbors/tests/test_neighbors.py @@ -603,7 +603,7 @@ def test_kneighbors_regressor(n_samples=40, knn.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = knn.predict(X[:n_test_pts] + epsilon) - assert_true(np.all(abs(y_pred - y_target) < 0.3)) + assert np.all(abs(y_pred - y_target) < 0.3) def test_KNeighborsRegressor_multioutput_uniform_weight(): @@ -657,7 +657,7 @@ def test_kneighbors_regressor_multioutput(n_samples=40, y_pred = knn.predict(X[:n_test_pts] + epsilon) assert_equal(y_pred.shape, y_target.shape) - assert_true(np.all(np.abs(y_pred - y_target) < 0.3)) + assert np.all(np.abs(y_pred - y_target) < 0.3) def test_radius_neighbors_regressor(n_samples=40, @@ -683,7 +683,7 @@ def test_radius_neighbors_regressor(n_samples=40, neigh.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = neigh.predict(X[:n_test_pts] + epsilon) - assert_true(np.all(abs(y_pred - y_target) < radius / 2)) + assert np.all(abs(y_pred - y_target) < radius / 2) # test that nan is returned when no nearby observations for weights in ['uniform', 'distance']: @@ -698,7 +698,7 @@ def test_radius_neighbors_regressor(n_samples=40, empty_warning_msg, neigh.predict, X_test_nan) - assert_true(np.all(np.isnan(pred))) + assert np.all(np.isnan(pred)) def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight(): @@ -755,7 +755,7 @@ def test_RadiusNeighborsRegressor_multioutput(n_samples=40, y_pred = rnn.predict(X[:n_test_pts] + epsilon) assert_equal(y_pred.shape, y_target.shape) - assert_true(np.all(np.abs(y_pred - y_target) < 0.3)) + assert np.all(np.abs(y_pred - y_target) < 0.3) def test_kneighbors_regressor_sparse(n_samples=40, @@ -780,7 +780,7 @@ def test_kneighbors_regressor_sparse(n_samples=40, for sparsev in SPARSE_OR_DENSE: X2 = sparsev(X) - assert_true(np.mean(knn.predict(X2).round() == y) > 0.95) + assert np.mean(knn.predict(X2).round() == y) > 0.95 X2_pre = sparsev(pairwise_distances(X, metric='euclidean')) if issparse(sparsev(X2_pre)): @@ -803,7 +803,7 @@ def test_neighbors_iris(): clf.set_params(n_neighbors=9, algorithm=algorithm) clf.fit(iris.data, iris.target) - assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95) + assert np.mean(clf.predict(iris.data) == iris.target) > 0.95 rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm) rgs.fit(iris.data, iris.target) @@ -1354,7 +1354,7 @@ def test_dtype_convert(): def test_sparse_metric_callable(): def sparse_metric(x, y): # Metric accepting sparse matrix input (only) - assert_true(issparse(x) and issparse(y)) + assert issparse(x) and issparse(y) return x.dot(y.T).A.item() X = csr_matrix([ # Population matrix diff --git a/sklearn/neural_network/tests/test_rbm.py b/sklearn/neural_network/tests/test_rbm.py index bf171b7fd2555..6298a085786db 100644 --- a/sklearn/neural_network/tests/test_rbm.py +++ b/sklearn/neural_network/tests/test_rbm.py @@ -136,7 +136,7 @@ def test_gibbs_smoke(): X_sampled = rbm1.gibbs(X) assert_all_finite(X_sampled) X_sampled2 = rbm1.gibbs(X) - 
assert_true(np.all((X_sampled != X_sampled2).max(axis=1))) + assert np.all((X_sampled != X_sampled2).max(axis=1)) def test_score_samples(): @@ -148,7 +148,7 @@ def test_score_samples(): rbm1 = BernoulliRBM(n_components=10, batch_size=2, n_iter=10, random_state=rng) rbm1.fit(X) - assert_true((rbm1.score_samples(X) < -300).all()) + assert (rbm1.score_samples(X) < -300).all() # Sparse vs. dense should not affect the output. Also test sparse input # validation. diff --git a/sklearn/neural_network/tests/test_stochastic_optimizers.py b/sklearn/neural_network/tests/test_stochastic_optimizers.py index aad1462d484fc..1c54556521ef7 100644 --- a/sklearn/neural_network/tests/test_stochastic_optimizers.py +++ b/sklearn/neural_network/tests/test_stochastic_optimizers.py @@ -15,7 +15,7 @@ def test_base_optimizer(): for lr in [10 ** i for i in range(-3, 4)]: optimizer = BaseOptimizer(params, lr) - assert_true(optimizer.trigger_stopping('', False)) + assert optimizer.trigger_stopping('', False) def test_sgd_optimizer_no_momentum(): @@ -55,7 +55,7 @@ def test_sgd_optimizer_trigger_stopping(): optimizer = SGDOptimizer(params, lr, lr_schedule='adaptive') assert_false(optimizer.trigger_stopping('', False)) assert_equal(lr / 5, optimizer.learning_rate) - assert_true(optimizer.trigger_stopping('', False)) + assert optimizer.trigger_stopping('', False) def test_sgd_optimizer_nesterovs_momentum(): diff --git a/sklearn/preprocessing/tests/test_data.py b/sklearn/preprocessing/tests/test_data.py index d7de994f2af66..932d12b0e7269 100644 --- a/sklearn/preprocessing/tests/test_data.py +++ b/sklearn/preprocessing/tests/test_data.py @@ -440,12 +440,12 @@ def test_scaler_2d_arrays(): assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has been copied - assert_true(X_scaled is not X) + assert X_scaled is not X # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) - assert_true(X_scaled_back is not X) - assert_true(X_scaled_back is not X_scaled) + assert X_scaled_back is not X + assert X_scaled_back is not X_scaled assert_array_almost_equal(X_scaled_back, X) X_scaled = scale(X, axis=1, with_std=False) @@ -456,14 +456,14 @@ def test_scaler_2d_arrays(): assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0]) # Check that the data hasn't been modified - assert_true(X_scaled is not X) + assert X_scaled is not X X_scaled = scaler.fit(X).transform(X, copy=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied - assert_true(X_scaled is X) + assert X_scaled is X X = rng.randn(4, 5) X[:, 0] = 1.0 # first feature is a constant, non zero feature @@ -473,7 +473,7 @@ def test_scaler_2d_arrays(): assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied - assert_true(X_scaled is not X) + assert X_scaled is not X def test_handle_zeros_in_scale(): @@ -622,7 +622,7 @@ def test_standard_scaler_partial_fit_numerical_stability(): # Regardless of magnitude, they must not differ more than of 6 digits tol = 10 ** (-6) - assert_true(scaler.mean_ is not None) + assert scaler.mean_ is not None assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol) 
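Many of the scaler, normalizer and binarizer checks in test_data.py around this point assert object identity (is / is not) rather than value equality, because the property under test is copy semantics: copy=True must hand back a new array, while copy=False may write through to the input. The bare-assert form keeps these checks readable. A minimal sketch of the distinction, outside any estimator:

    import numpy as np

    X = np.arange(6.0).reshape(2, 3)
    X_copied = X * 2.0          # a new array: a fresh buffer is allocated
    X_aliased = X               # the same object, nothing copied

    assert X_copied is not X    # what a copy=True code path should satisfy
    assert X_aliased is X       # what a copy=False code path may satisfy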
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol) @@ -834,22 +834,22 @@ def test_scaler_without_centering(): assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) - assert_true(X_scaled is not X) - assert_true(X_csr_scaled is not X_csr) + assert X_scaled is not X + assert X_csr_scaled is not X_csr X_scaled_back = scaler.inverse_transform(X_scaled) - assert_true(X_scaled_back is not X) - assert_true(X_scaled_back is not X_scaled) + assert X_scaled_back is not X + assert X_scaled_back is not X_scaled assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) - assert_true(X_csr_scaled_back is not X_csr) - assert_true(X_csr_scaled_back is not X_csr_scaled) + assert X_csr_scaled_back is not X_csr + assert X_csr_scaled_back is not X_csr_scaled assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) - assert_true(X_csc_scaled_back is not X_csc) - assert_true(X_csc_scaled_back is not X_csc_scaled) + assert X_csc_scaled_back is not X_csc + assert X_csc_scaled_back is not X_csc_scaled assert_array_almost_equal(X_csc_scaled_back.toarray(), X) @@ -986,22 +986,22 @@ def test_scaler_int(): assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) - assert_true(X_scaled is not X) - assert_true(X_csr_scaled is not X_csr) + assert X_scaled is not X + assert X_csr_scaled is not X_csr X_scaled_back = scaler.inverse_transform(X_scaled) - assert_true(X_scaled_back is not X) - assert_true(X_scaled_back is not X_scaled) + assert X_scaled_back is not X + assert X_scaled_back is not X_scaled assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) - assert_true(X_csr_scaled_back is not X_csr) - assert_true(X_csr_scaled_back is not X_csr_scaled) + assert X_csr_scaled_back is not X_csr + assert X_csr_scaled_back is not X_csr_scaled assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) - assert_true(X_csc_scaled_back is not X_csc) - assert_true(X_csc_scaled_back is not X_csc_scaled) + assert X_csc_scaled_back is not X_csc + assert X_csc_scaled_back is not X_csc_scaled assert_array_almost_equal(X_csc_scaled_back.toarray(), X) @@ -1381,7 +1381,7 @@ def test_quantile_transform_subsampling(): diff = (np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_)) inf_norm = np.max(np.abs(diff)) - assert_true(inf_norm < 1e-2) + assert inf_norm < 1e-2 inf_norm_arr.append(inf_norm) # each random subsampling yield a unique approximation to the expected # linspace CDF @@ -1399,7 +1399,7 @@ def test_quantile_transform_subsampling(): diff = (np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_)) inf_norm = np.max(np.abs(diff)) - assert_true(inf_norm < 1e-1) + assert inf_norm < 1e-1 inf_norm_arr.append(inf_norm) # each random subsampling yield a unique approximation to the expected # linspace CDF @@ -1556,7 +1556,7 @@ def test_scale_function_without_centering(): [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied - assert_true(X_scaled is not X) + assert X_scaled is not X X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) @@ -1821,12 +1821,12 @@ def test_normalizer_l1(): 
normalizer = Normalizer(norm='l1', copy=True) X_norm = normalizer.transform(X) - assert_true(X_norm is not X) + assert X_norm is not X X_norm1 = toarray(X_norm) normalizer = Normalizer(norm='l1', copy=False) X_norm = normalizer.transform(X) - assert_true(X_norm is X) + assert X_norm is X X_norm2 = toarray(X_norm) for X_norm in (X_norm1, X_norm2): @@ -1840,8 +1840,8 @@ def test_normalizer_l1(): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) - assert_true(X_norm is not X) - assert_true(isinstance(X_norm, sparse.csr_matrix)) + assert X_norm is not X + assert isinstance(X_norm, sparse.csr_matrix) X_norm = toarray(X_norm) for i in range(3): @@ -1870,12 +1870,12 @@ def test_normalizer_l2(): normalizer = Normalizer(norm='l2', copy=True) X_norm1 = normalizer.transform(X) - assert_true(X_norm1 is not X) + assert X_norm1 is not X X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='l2', copy=False) X_norm2 = normalizer.transform(X) - assert_true(X_norm2 is X) + assert X_norm2 is X X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): @@ -1888,8 +1888,8 @@ def test_normalizer_l2(): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) - assert_true(X_norm is not X) - assert_true(isinstance(X_norm, sparse.csr_matrix)) + assert X_norm is not X + assert isinstance(X_norm, sparse.csr_matrix) X_norm = toarray(X_norm) for i in range(3): @@ -1918,12 +1918,12 @@ def test_normalizer_max(): normalizer = Normalizer(norm='max', copy=True) X_norm1 = normalizer.transform(X) - assert_true(X_norm1 is not X) + assert X_norm1 is not X X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='max', copy=False) X_norm2 = normalizer.transform(X) - assert_true(X_norm2 is X) + assert X_norm2 is X X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): @@ -1937,8 +1937,8 @@ def test_normalizer_max(): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) - assert_true(X_norm is not X) - assert_true(isinstance(X_norm, sparse.csr_matrix)) + assert X_norm is not X + assert isinstance(X_norm, sparse.csr_matrix) X_norm = toarray(X_norm) for i in range(3): @@ -2010,13 +2010,13 @@ def test_binarizer(): binarizer = Binarizer(copy=True).fit(X) X_bin = toarray(binarizer.transform(X)) - assert_true(X_bin is not X) + assert X_bin is not X assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=True) X_bin = binarizer.transform(X) - assert_true(X_bin is not X) + assert X_bin is not X X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) @@ -2024,13 +2024,13 @@ def test_binarizer(): binarizer = Binarizer(copy=False) X_bin = binarizer.transform(X) if init is not list: - assert_true(X_bin is X) + assert X_bin is X binarizer = Binarizer(copy=False) X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64) X_bin = binarizer.transform(X_float) if init is not list: - assert_true(X_bin is X_float) + assert X_bin is X_float X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) @@ -2086,7 +2086,7 @@ def test_cv_pipeline_precomputed(): SVR(gamma='scale'))]) # did the pipeline set the _pairwise attribute? 
- assert_true(pipeline._pairwise) + assert pipeline._pairwise # test cross-validation, score should be almost perfect # NB: this test is pretty vacuous -- it's mainly to test integration @@ -2113,21 +2113,21 @@ def test_add_dummy_feature(): def test_add_dummy_feature_coo(): X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) - assert_true(sparse.isspmatrix_coo(X), X) + assert sparse.isspmatrix_coo(X), X assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csc(): X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) - assert_true(sparse.isspmatrix_csc(X), X) + assert sparse.isspmatrix_csc(X), X assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csr(): X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) - assert_true(sparse.isspmatrix_csr(X), X) + assert sparse.isspmatrix_csr(X), X assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) diff --git a/sklearn/preprocessing/tests/test_label.py b/sklearn/preprocessing/tests/test_label.py index 56550a9816a3f..e2af179e338e9 100644 --- a/sklearn/preprocessing/tests/test_label.py +++ b/sklearn/preprocessing/tests/test_label.py @@ -53,7 +53,7 @@ def test_label_binarizer(): # For sparse case: lb = LabelBinarizer(sparse_output=True) got = lb.fit_transform(inp) - assert_true(issparse(got)) + assert issparse(got) assert_array_equal(lb.classes_, ["pos"]) assert_array_equal(expected, got.toarray()) assert_array_equal(lb.inverse_transform(got.toarray()), inp) diff --git a/sklearn/svm/tests/test_bounds.py b/sklearn/svm/tests/test_bounds.py index fffd7fc787938..a927fab72462f 100644 --- a/sklearn/svm/tests/test_bounds.py +++ b/sklearn/svm/tests/test_bounds.py @@ -56,8 +56,8 @@ def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None): clf.C = min_c clf.fit(X, y) - assert_true((np.asarray(clf.coef_) == 0).all()) - assert_true((np.asarray(clf.intercept_) == 0).all()) + assert (np.asarray(clf.coef_) == 0).all() + assert (np.asarray(clf.intercept_) == 0).all() clf.C = min_c * 1.01 clf.fit(X, y) diff --git a/sklearn/svm/tests/test_sparse.py b/sklearn/svm/tests/test_sparse.py index ce14bda1db34e..069b68a7290e1 100644 --- a/sklearn/svm/tests/test_sparse.py +++ b/sklearn/svm/tests/test_sparse.py @@ -48,14 +48,14 @@ def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test): else: X_test_dense = X_test sparse_svm.fit(X_train, y_train) - assert_true(sparse.issparse(sparse_svm.support_vectors_)) - assert_true(sparse.issparse(sparse_svm.dual_coef_)) + assert sparse.issparse(sparse_svm.support_vectors_) + assert sparse.issparse(sparse_svm.dual_coef_) assert_array_almost_equal(dense_svm.support_vectors_, sparse_svm.support_vectors_.toarray()) assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray()) if dense_svm.kernel == "linear": - assert_true(sparse.issparse(sparse_svm.coef_)) + assert sparse.issparse(sparse_svm.coef_) assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray()) assert_array_almost_equal(dense_svm.support_, sparse_svm.support_) assert_array_almost_equal(dense_svm.predict(X_test_dense), @@ -199,7 +199,7 @@ def test_linearsvc(): clf = svm.LinearSVC(random_state=0).fit(X, Y) sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y) - assert_true(sp_clf.fit_intercept) + assert sp_clf.fit_intercept assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) @@ -252,7 +252,7 
+252,7 @@ def test_weight(): clf.set_params(class_weight={0: 5}) clf.fit(X_[:180], y_[:180]) y_pred = clf.predict(X_[180:]) - assert_true(np.sum(y_pred == y_[180:]) >= 11) + assert np.sum(y_pred == y_[180:]) >= 11 def test_sample_weights(): diff --git a/sklearn/svm/tests/test_svm.py b/sklearn/svm/tests/test_svm.py index 4a8e4ef735888..86d3c8d327ce0 100644 --- a/sklearn/svm/tests/test_svm.py +++ b/sklearn/svm/tests/test_svm.py @@ -59,7 +59,7 @@ def test_libsvm_iris(): for k in ('linear', 'rbf'): clf = svm.SVC(gamma='scale', kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) - assert_true(hasattr(clf, "coef_") == (k == 'linear')) + assert hasattr(clf, "coef_") == (k == 'linear') assert_array_equal(clf.classes_, np.sort(clf.classes_)) @@ -317,8 +317,8 @@ def test_probability(): prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) - assert_true(np.mean(np.argmax(prob_predict, 1) - == clf.predict(iris.data)) > 0.9) + assert np.mean(np.argmax(prob_predict, 1) + == clf.predict(iris.data)) > 0.9 assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) @@ -423,7 +423,7 @@ def test_weight(): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) - assert_true(f1_score(y_[100:], y_pred) > .3) + assert f1_score(y_[100:], y_pred) > .3 def test_sample_weights(): @@ -464,7 +464,7 @@ def test_auto_weight(): classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) - assert_true(np.argmax(class_weights) == 2) + assert np.argmax(class_weights) == 2 for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): @@ -472,7 +472,7 @@ def test_auto_weight(): y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='balanced') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) - assert_true(metrics.f1_score(y, y_pred, average='macro') + assert (metrics.f1_score(y, y_pred, average='macro') <= metrics.f1_score(y, y_pred_balanced, average='macro')) @@ -633,7 +633,7 @@ def test_linearsvc(): clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept - assert_true(clf.fit_intercept) + assert clf.fit_intercept assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) @@ -669,7 +669,7 @@ def test_linearsvc_crammer_singer(): cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same - assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) + assert (ovr_clf.coef_ != cs_clf.coef_).all() # test decision function assert_array_equal(cs_clf.predict(iris.data), @@ -741,8 +741,8 @@ def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge', dual=False, C=4, tol=1e-7, random_state=0) - assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) - assert_true(clf.fit_intercept) + assert clf.intercept_scaling == 1, clf.intercept_scaling + assert clf.fit_intercept # when intercept_scaling is low the intercept value is highly "penalized" # by regularization @@ -928,9 +928,9 @@ def test_hasattr_predict_proba(): # `probability` param G = svm.SVC(gamma='scale', probability=True) - assert_true(hasattr(G, 'predict_proba')) + assert hasattr(G, 'predict_proba') G.fit(iris.data, iris.target) - assert_true(hasattr(G, 'predict_proba'))
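When assert_true really was called with a message, the two-argument bare form is the faithful translation: in assert clf.intercept_scaling == 1, clf.intercept_scaling above (and assert sparse.isspmatrix_coo(X), X earlier), the expression after the comma is evaluated and displayed only on failure. The one thing to avoid is parenthesizing the pair, which turns it into a tuple. A toy illustration:

    intercept_scaling = 1

    # The part after the comma is the failure message; it plays no role in
    # the condition itself.
    assert intercept_scaling == 1, intercept_scaling

    # Never wrap both in parentheses: a non-empty tuple is always truthy,
    # so the commented form below could never fail.
    # assert (intercept_scaling == 1, intercept_scaling)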
+    assert hasattr(G, 'predict_proba')

     G = svm.SVC(gamma='scale', probability=False)
     assert_false(hasattr(G, 'predict_proba'))
@@ -940,7 +940,7 @@ def test_hasattr_predict_proba():
     # Switching to `probability=True` after fitting should make
     # predict_proba available, but calling it must not work:
     G.probability = True
-    assert_true(hasattr(G, 'predict_proba'))
+    assert hasattr(G, 'predict_proba')
     msg = "predict_proba is not available when fitted with probability=False"

     assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
@@ -993,7 +993,7 @@ def test_ovr_decision_function():

     # Test if the first point has lower decision value on every quadrant
     # compared to the second point
-    assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1]))
+    assert np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])


 def test_gamma_auto():
diff --git a/sklearn/tests/test_base.py b/sklearn/tests/test_base.py
index 31c4d80967a1d..ebcb389255cd3 100644
--- a/sklearn/tests/test_base.py
+++ b/sklearn/tests/test_base.py
@@ -97,12 +97,12 @@ def test_clone():
     selector = SelectFpr(f_classif, alpha=0.1)
     new_selector = clone(selector)
-    assert_true(selector is not new_selector)
+    assert selector is not new_selector
     assert_equal(selector.get_params(), new_selector.get_params())

     selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
     new_selector = clone(selector)
-    assert_true(selector is not new_selector)
+    assert selector is not new_selector


 def test_clone_2():
@@ -151,7 +151,7 @@ def test_clone_nan():
     clf = MyEstimator(empty=np.nan)
     clf2 = clone(clf)

-    assert_true(clf.empty is clf2.empty)
+    assert clf.empty is clf2.empty


 def test_clone_sparse_matrices():
@@ -163,7 +163,7 @@ def test_clone_sparse_matrices():
         sparse_matrix = cls(np.eye(5))
         clf = MyEstimator(empty=sparse_matrix)
         clf_cloned = clone(clf)
-        assert_true(clf.empty.__class__ is clf_cloned.empty.__class__)
+        assert clf.empty.__class__ is clf_cloned.empty.__class__
         assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray())
@@ -190,19 +190,19 @@ def test_str():

 def test_get_params():
     test = T(K(), K())

-    assert_true('a__d' in test.get_params(deep=True))
-    assert_true('a__d' not in test.get_params(deep=False))
+    assert 'a__d' in test.get_params(deep=True)
+    assert 'a__d' not in test.get_params(deep=False)

     test.set_params(a__d=2)
-    assert_true(test.a.d == 2)
+    assert test.a.d == 2
     assert_raises(ValueError, test.set_params, a__a=2)


 def test_is_classifier():
     svc = SVC()
-    assert_true(is_classifier(svc))
-    assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
-    assert_true(is_classifier(Pipeline([('svc', svc)])))
+    assert is_classifier(svc)
+    assert is_classifier(GridSearchCV(svc, {'C': [0.1, 1]}))
+    assert is_classifier(Pipeline([('svc', svc)]))
     assert_true(is_classifier(Pipeline(
         [('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))])))
@@ -302,7 +302,7 @@ def transform(self, X):
     cloned_e = clone(e)

     # the test
-    assert_true((e.df == cloned_e.df).values.all())
+    assert (e.df == cloned_e.df).values.all()
     assert_equal(e.scalar_param, cloned_e.scalar_param)
@@ -310,7 +310,7 @@ def test_pickle_version_warning_is_not_raised_with_matching_version():
     iris = datasets.load_iris()
     tree = DecisionTreeClassifier().fit(iris.data, iris.target)
     tree_pickle = pickle.dumps(tree)
-    assert_true(b"version" in tree_pickle)
+    assert b"version" in tree_pickle
     tree_restored = assert_no_warnings(pickle.loads, tree_pickle)

     # test that we can predict with the restored decision tree classifier
@@ -399,7 +399,7 @@ def test_pickling_when_getstate_is_overwritten_by_mixin():
     estimator_restored = pickle.loads(serialized)
     assert_equal(estimator_restored.attribute_pickled, 5)
     assert_equal(estimator_restored._attribute_not_pickled, None)
-    assert_true(estimator_restored._restored)
+    assert estimator_restored._restored


 def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn():
@@ -417,7 +417,7 @@ def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn():
         serialized['attribute_pickled'] = 4

         estimator.__setstate__(serialized)
         assert_equal(estimator.attribute_pickled, 4)
-        assert_true(estimator._restored)
+        assert estimator._restored
     finally:
         type(estimator).__module__ = old_mod
diff --git a/sklearn/tests/test_discriminant_analysis.py b/sklearn/tests/test_discriminant_analysis.py
index d6bbf6948ff24..f11c38f28f7b7 100644
--- a/sklearn/tests/test_discriminant_analysis.py
+++ b/sklearn/tests/test_discriminant_analysis.py
@@ -79,7 +79,7 @@ def test_lda_predict():
         # Primarily test for commit 2f34950 -- "reuse" of priors
         y_pred3 = clf.fit(X, y3).predict(X)
         # LDA shouldn't be able to separate those
-        assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
+        assert np.any(y_pred3 != y3), 'solver %s' % solver

     # Test invalid shrinkages
     clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
@@ -231,12 +231,12 @@ def test_lda_store_covariance():
     # 'store_covariance' has no effect on 'lsqr' and 'eigen' solvers
     for solver in ('lsqr', 'eigen'):
         clf = LinearDiscriminantAnalysis(solver=solver).fit(X6, y6)
-        assert_true(hasattr(clf, 'covariance_'))
+        assert hasattr(clf, 'covariance_')

         # Test the actual attribute:
         clf = LinearDiscriminantAnalysis(solver=solver,
                                          store_covariance=True).fit(X6, y6)
-        assert_true(hasattr(clf, 'covariance_'))
+        assert hasattr(clf, 'covariance_')

         assert_array_almost_equal(
             clf.covariance_,
@@ -250,7 +250,7 @@ def test_lda_store_covariance():
     # Test the actual attribute:
     clf = LinearDiscriminantAnalysis(solver=solver,
                                      store_covariance=True).fit(X6, y6)
-    assert_true(hasattr(clf, 'covariance_'))
+    assert hasattr(clf, 'covariance_')

     assert_array_almost_equal(
         clf.covariance_,
@@ -278,7 +278,7 @@ def test_qda():
     y_pred3 = clf.fit(X6, y7).predict(X6)
     # QDA shouldn't be able to separate those
-    assert_true(np.any(y_pred3 != y7))
+    assert np.any(y_pred3 != y7)

     # Classes should have at least 2 elements
     assert_raises(ValueError, clf.fit, X6, y4)
@@ -304,7 +304,7 @@ def test_qda_store_covariance():

     # Test the actual attribute:
     clf = QuadraticDiscriminantAnalysis(store_covariance=True).fit(X6, y6)
-    assert_true(hasattr(clf, 'covariance_'))
+    assert hasattr(clf, 'covariance_')

     assert_array_almost_equal(
         clf.covariance_[0],
@@ -323,7 +323,7 @@ def test_qda_regularization():
     clf = QuadraticDiscriminantAnalysis()
     with ignore_warnings():
         y_pred = clf.fit(X2, y6).predict(X2)
-    assert_true(np.any(y_pred != y6))
+    assert np.any(y_pred != y6)

     # adding a little regularization fixes the problem
     clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
diff --git a/sklearn/tests/test_dummy.py b/sklearn/tests/test_dummy.py
index 805c90a7e018e..99b038bd4086f 100644
--- a/sklearn/tests/test_dummy.py
+++ b/sklearn/tests/test_dummy.py
@@ -552,7 +552,7 @@ def test_constant_strategy_sparse_target():
     clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
     clf.fit(X, y)
     y_pred = clf.predict(X)
-    assert_true(sp.issparse(y_pred))
+    assert sp.issparse(y_pred)
     assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
                                                     np.zeros((n_samples, 1))]))
@@ -593,7 +593,7 @@ def test_stratified_strategy_sparse_target():

     X = [[0]] * 500
     y_pred = clf.predict(X)
-    assert_true(sp.issparse(y_pred))
+    assert sp.issparse(y_pred)
     y_pred = y_pred.toarray()

     for k in range(y.shape[1]):
@@ -618,7 +618,7 @@ def test_most_frequent_and_prior_strategy_sparse_target():

         clf.fit(X, y)
         y_pred = clf.predict(X)
-        assert_true(sp.issparse(y_pred))
+        assert sp.issparse(y_pred)
         assert_array_equal(y_pred.toarray(), y_expected)
diff --git a/sklearn/tests/test_isotonic.py b/sklearn/tests/test_isotonic.py
index 967acb2324f19..8242728892959 100644
--- a/sklearn/tests/test_isotonic.py
+++ b/sklearn/tests/test_isotonic.py
@@ -32,7 +32,7 @@ def test_check_increasing_small_number_of_samples():
     y = [1, 1.1, 1.05]

     is_increasing = assert_no_warnings(check_increasing, x, y)
-    assert_true(is_increasing)
+    assert is_increasing


 def test_check_increasing_up():
@@ -41,7 +41,7 @@ def test_check_increasing_up():

     # Check that we got increasing=True and no warnings
     is_increasing = assert_no_warnings(check_increasing, x, y)
-    assert_true(is_increasing)
+    assert is_increasing


 def test_check_increasing_up_extreme():
@@ -50,7 +50,7 @@ def test_check_increasing_up_extreme():

     # Check that we got increasing=True and no warnings
     is_increasing = assert_no_warnings(check_increasing, x, y)
-    assert_true(is_increasing)
+    assert is_increasing


 def test_check_increasing_down():
@@ -232,7 +232,7 @@ def test_isotonic_regression_auto_increasing():

     # Check that relationship increases
     is_increasing = y_[0] < y_[-1]
-    assert_true(is_increasing)
+    assert is_increasing


 def test_assert_raises_exceptions():
@@ -371,7 +371,7 @@ def test_isotonic_duplicate_min_entry():
     ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
     ir.fit(x, y)
     all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
-    assert_true(all_predictions_finite)
+    assert all_predictions_finite


 def test_isotonic_ymin_ymax():
diff --git a/sklearn/tests/test_kernel_approximation.py b/sklearn/tests/test_kernel_approximation.py
index c2ba50f3728ff..e71554a11b79f 100644
--- a/sklearn/tests/test_kernel_approximation.py
+++ b/sklearn/tests/test_kernel_approximation.py
@@ -213,7 +213,7 @@ def test_nystroem_singular_kernel():

     K = rbf_kernel(X, gamma=gamma)
     assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
-    assert_true(np.all(np.isfinite(Y)))
+    assert np.all(np.isfinite(Y))


 def test_nystroem_poly_kernel_params():
diff --git a/sklearn/tests/test_metaestimators.py b/sklearn/tests/test_metaestimators.py
index 93e000132b4d4..e1cbe09e43a94 100644
--- a/sklearn/tests/test_metaestimators.py
+++ b/sklearn/tests/test_metaestimators.py
@@ -115,7 +115,7 @@ def score(self, X, y, *args, **kwargs):
         for method in methods:
             if method in delegator_data.skip_methods:
                 continue
-            assert_true(hasattr(delegate, method))
+            assert hasattr(delegate, method)
             assert_true(hasattr(delegator, method),
                         msg="%s does not have method %r when its delegate does"
                             % (delegator_data.name, method))
diff --git a/sklearn/tests/test_multiclass.py b/sklearn/tests/test_multiclass.py
index 560a210a33814..e472f6c3ea49a 100644
--- a/sklearn/tests/test_multiclass.py
+++ b/sklearn/tests/test_multiclass.py
@@ -170,8 +170,8 @@ def test_ovr_fit_predict_sparse():
         clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
         Y_pred_sprs = clf_sprs.predict(X_test)
-        assert_true(clf.multilabel_)
-        assert_true(sp.issparse(Y_pred_sprs))
+        assert clf.multilabel_
+        assert sp.issparse(Y_pred_sprs)
         assert_array_equal(Y_pred_sprs.toarray(), Y_pred)

         # Test predict_proba
@@ -303,7 +303,7 @@ def test_ovr_multilabel():
         clf = OneVsRestClassifier(base_clf).fit(X, y)
         y_pred = clf.predict([[0, 4, 4]])[0]
         assert_array_equal(y_pred, [0, 1, 1])
-        assert_true(clf.multilabel_)
+        assert clf.multilabel_


 def test_ovr_fit_predict_svc():
@@ -328,7 +328,7 @@ def test_ovr_multilabel_dataset():
         clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
         Y_pred = clf.predict(X_test)

-        assert_true(clf.multilabel_)
+        assert clf.multilabel_
         assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
                             prec,
                             decimal=2)
@@ -364,7 +364,7 @@ def test_ovr_multilabel_predict_proba():
         assert_false(hasattr(decision_only, 'predict_proba'))
         decision_only.fit(X_train, Y_train)
         assert_false(hasattr(decision_only, 'predict_proba'))
-        assert_true(hasattr(decision_only, 'decision_function'))
+        assert hasattr(decision_only, 'decision_function')

         # Estimator which can get predict_proba enabled after fitting
         gs = GridSearchCV(svm.SVC(gamma='scale', probability=False),
@@ -372,7 +372,7 @@
         proba_after_fit = OneVsRestClassifier(gs)
         assert_false(hasattr(proba_after_fit, 'predict_proba'))
         proba_after_fit.fit(X_train, Y_train)
-        assert_true(hasattr(proba_after_fit, 'predict_proba'))
+        assert hasattr(proba_after_fit, 'predict_proba')

         Y_pred = clf.predict(X_test)
         Y_proba = clf.predict_proba(X_test)
@@ -439,7 +439,7 @@ def test_ovr_gridsearch():
     cv = GridSearchCV(ovr, {'estimator__C': Cs})
     cv.fit(iris.data, iris.target)
     best_C = cv.best_estimator_.estimators_[0].C
-    assert_true(best_C in Cs)
+    assert best_C in Cs


 def test_ovr_pipeline():
@@ -598,7 +598,7 @@ def test_ovo_decision_function():
         # binary classifiers.
         # Therefore, sorting predictions based on votes would yield
         # mostly tied predictions:
-        assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
+        assert set(votes[:, class_idx]).issubset(set([0., 1., 2.]))

     # The OVO decision function on the other hand is able to resolve
     # most of the ties on this data as it combines both the vote counts
@@ -617,7 +617,7 @@ def test_ovo_gridsearch():
     cv = GridSearchCV(ovo, {'estimator__C': Cs})
     cv.fit(iris.data, iris.target)
     best_C = cv.best_estimator_.estimators_[0].C
-    assert_true(best_C in Cs)
+    assert best_C in Cs


 # 0.23. warning about tol not having its correct default value.
@@ -718,7 +718,7 @@ def test_ecoc_gridsearch():
     cv = GridSearchCV(ecoc, {'estimator__C': Cs})
     cv.fit(iris.data, iris.target)
     best_C = cv.best_estimator_.estimators_[0].C
-    assert_true(best_C in Cs)
+    assert best_C in Cs


 def test_ecoc_float_y():
@@ -758,7 +758,7 @@ def test_pairwise_attribute():
         assert_false(ovr_false._pairwise)

         ovr_true = MultiClassClassifier(clf_precomputed)
-        assert_true(ovr_true._pairwise)
+        assert ovr_true._pairwise


 @pytest.mark.filterwarnings('ignore: You should specify a value')  # 0.22
diff --git a/sklearn/tests/test_pipeline.py b/sklearn/tests/test_pipeline.py
index 7364a08697aab..d4193182c7405 100644
--- a/sklearn/tests/test_pipeline.py
+++ b/sklearn/tests/test_pipeline.py
@@ -259,10 +259,10 @@ def test_pipeline_fit_params():
     pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
     pipe.fit(X=None, y=None, clf__should_succeed=True)
     # classifier should return True
-    assert_true(pipe.predict(None))
+    assert pipe.predict(None)
     # and transformer params should not be changed
-    assert_true(pipe.named_steps['transf'].a is None)
-    assert_true(pipe.named_steps['transf'].b is None)
+    assert pipe.named_steps['transf'].a is None
+    assert pipe.named_steps['transf'].b is None
     # invalid parameters should raise an error message
     assert_raise_message(
         TypeError,
@@ -410,8 +410,8 @@ def test_fit_predict_with_intermediate_fit_params():
         y=None,
         transf__should_get_this=True,
         clf__should_succeed=True)
-    assert_true(pipe.named_steps['transf'].fit_params['should_get_this'])
-    assert_true(pipe.named_steps['clf'].successful)
+    assert pipe.named_steps['transf'].fit_params['should_get_this']
+    assert pipe.named_steps['clf'].successful
     assert_false('should_succeed' in pipe.named_steps['transf'].fit_params)
@@ -422,7 +422,7 @@ def test_predict_with_predict_params():
     pipe.fit(None, None)
     pipe.predict(X=None, got_attribute=True)

-    assert_true(pipe.named_steps['clf'].got_attribute)
+    assert pipe.named_steps['clf'].got_attribute


 def test_feature_union():
@@ -536,12 +536,12 @@ def test_set_pipeline_steps():
     transf1 = Transf()
     transf2 = Transf()
     pipeline = Pipeline([('mock', transf1)])
-    assert_true(pipeline.named_steps['mock'] is transf1)
+    assert pipeline.named_steps['mock'] is transf1

     # Directly setting attr
     pipeline.steps = [('mock2', transf2)]
-    assert_true('mock' not in pipeline.named_steps)
-    assert_true(pipeline.named_steps['mock2'] is transf2)
+    assert 'mock' not in pipeline.named_steps
+    assert pipeline.named_steps['mock2'] is transf2
     assert_equal([('mock2', transf2)], pipeline.steps)

     # Using set_params
@@ -564,15 +564,15 @@ def test_pipeline_named_steps():
     pipeline = Pipeline([('mock', transf), ("mult", mult2)])

     # Test access via named_steps bunch object
-    assert_true('mock' in pipeline.named_steps)
-    assert_true('mock2' not in pipeline.named_steps)
-    assert_true(pipeline.named_steps.mock is transf)
-    assert_true(pipeline.named_steps.mult is mult2)
+    assert 'mock' in pipeline.named_steps
+    assert 'mock2' not in pipeline.named_steps
+    assert pipeline.named_steps.mock is transf
+    assert pipeline.named_steps.mult is mult2

     # Test bunch with conflict attribute of dict
     pipeline = Pipeline([('values', transf), ("mult", mult2)])
-    assert_true(pipeline.named_steps.values is not transf)
-    assert_true(pipeline.named_steps.mult is mult2)
+    assert pipeline.named_steps.values is not transf
+    assert pipeline.named_steps.mult is mult2


 @pytest.mark.parametrize('passthrough', [None, 'passthrough'])
@@ -678,12 +678,12 @@ def test_make_pipeline():
     t1 = Transf()
     t2 = Transf()
     pipe = make_pipeline(t1, t2)
-    assert_true(isinstance(pipe, Pipeline))
+    assert isinstance(pipe, Pipeline)
     assert_equal(pipe.steps[0][0], "transf-1")
     assert_equal(pipe.steps[1][0], "transf-2")

     pipe = make_pipeline(t1, t2, FitParamT())
-    assert_true(isinstance(pipe, Pipeline))
+    assert isinstance(pipe, Pipeline)
     assert_equal(pipe.steps[0][0], "transf-1")
     assert_equal(pipe.steps[1][0], "transf-2")
     assert_equal(pipe.steps[2][0], "fitparamt")
@@ -781,7 +781,7 @@ def test_feature_union_feature_names():
     ft.fit(JUNK_FOOD_DOCS)
     feature_names = ft.get_feature_names()
     for feat in feature_names:
-        assert_true("chars__" in feat or "words__" in feat)
+        assert "chars__" in feat or "words__" in feat
     assert_equal(len(feature_names), 35)

     ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
@@ -1027,8 +1027,8 @@ def test_make_pipeline_memory():
     else:
         memory = Memory(location=cachedir, verbose=10)
     pipeline = make_pipeline(DummyTransf(), SVC(), memory=memory)
-    assert_true(pipeline.memory is memory)
+    assert pipeline.memory is memory
     pipeline = make_pipeline(DummyTransf(), SVC())
-    assert_true(pipeline.memory is None)
+    assert pipeline.memory is None

     shutil.rmtree(cachedir)
diff --git a/sklearn/utils/estimator_checks.py b/sklearn/utils/estimator_checks.py
index 5c226ac8ba8e7..ec5bccb3372e3 100644
--- a/sklearn/utils/estimator_checks.py
+++ b/sklearn/utils/estimator_checks.py
@@ -1230,7 +1230,7 @@ def check_estimators_pickle(name, estimator_orig):
     # pickle and unpickle!
     pickled_estimator = pickle.dumps(estimator)
     if estimator.__module__.startswith('sklearn.'):
-        assert_true(b"version" in pickled_estimator)
+        assert b"version" in pickled_estimator
     unpickled_estimator = pickle.loads(pickled_estimator)

     result = dict()
@@ -1322,7 +1322,7 @@ def check_clustering(name, clusterer_orig, readonly_memmap=False):
                                        labels_sorted[-1] + 1))

     # Labels are expected to start at 0 (no noise) or -1 (if noise)
-    assert_true(labels_sorted[0] in [0, -1])
+    assert labels_sorted[0] in [0, -1]
     # Labels should be less than n_clusters - 1
     if hasattr(clusterer, 'n_clusters'):
         n_clusters = getattr(clusterer, 'n_clusters')
@@ -1416,7 +1416,7 @@ def check_classifiers_train(name, classifier_orig, readonly_memmap=False):
         classifier.fit(X, y)
         # with lists
         classifier.fit(X.tolist(), y.tolist())
-        assert_true(hasattr(classifier, "classes_"))
+        assert hasattr(classifier, "classes_")
         y_pred = classifier.predict(X)
         assert_equal(y_pred.shape, (n_samples,))
         # training set performance
@@ -1581,7 +1581,7 @@ def check_estimators_fit_returns_self(name, estimator_orig,
         X, y = create_memmap_backed_data([X, y])

     set_random_state(estimator)
-    assert_true(estimator.fit(X, y) is estimator)
+    assert estimator.fit(X, y) is estimator


 @ignore_warnings
@@ -2008,13 +2008,13 @@ def check_sparsify_coefficients(name, estimator_orig):

     # test sparsify with dense inputs
     est.sparsify()
-    assert_true(sparse.issparse(est.coef_))
+    assert sparse.issparse(est.coef_)
     pred = est.predict(X)
     assert_array_equal(pred, pred_orig)

     # pickle and unpickle with sparse coef_
     est = pickle.loads(pickle.dumps(est))
-    assert_true(sparse.issparse(est.coef_))
+    assert sparse.issparse(est.coef_)
     pred = est.predict(X)
     assert_array_equal(pred, pred_orig)
@@ -2074,7 +2074,7 @@ def check_parameters_default_constructible(name, Estimator):
     # test __repr__
     repr(estimator)
     # test that set_params returns self
-    assert_true(estimator.set_params() is estimator)
+    assert estimator.set_params() is estimator

     # test if init does nothing but set parameters
     # this is important for grid_search etc.
@@ -2114,7 +2114,7 @@ def param_filter(p):
                                   np.float64, types.FunctionType, Memory])
         if init_param.name not in params.keys():
             # deprecated parameter, not in get_params
-            assert_true(init_param.default is None)
+            assert init_param.default is None
             continue

         if (issubclass(Estimator, BaseSGD) and
diff --git a/sklearn/utils/mocking.py b/sklearn/utils/mocking.py
index db2e2ef319361..d2835c4e9a85e 100644
--- a/sklearn/utils/mocking.py
+++ b/sklearn/utils/mocking.py
@@ -87,11 +87,11 @@ def fit(self, X, y, **fit_params):
         **fit_params : dict of string -> object
             Parameters passed to the ``fit`` method of the estimator
         """
-        assert_true(len(X) == len(y))
+        assert len(X) == len(y)
         if self.check_X is not None:
-            assert_true(self.check_X(X))
+            assert self.check_X(X)
         if self.check_y is not None:
-            assert_true(self.check_y(y))
+            assert self.check_y(y)
         self.classes_ = np.unique(check_array(y, ensure_2d=False,
                                               allow_nd=True))
         if self.expected_fit_params:
@@ -112,7 +112,7 @@ def predict(self, T):
         T : indexable, length n_samples
         """
         if self.check_X is not None:
-            assert_true(self.check_X(T))
+            assert self.check_X(T)
         return self.classes_[np.zeros(_num_samples(T), dtype=np.int)]

     def score(self, X=None, Y=None):
diff --git a/sklearn/utils/tests/test_class_weight.py b/sklearn/utils/tests/test_class_weight.py
index 3c81e2f4700f6..36309f2dccdad 100644
--- a/sklearn/utils/tests/test_class_weight.py
+++ b/sklearn/utils/tests/test_class_weight.py
@@ -24,7 +24,7 @@ def test_compute_class_weight():
     # total effect of samples is preserved
     class_counts = np.bincount(y)[2:]
     assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
-    assert_true(cw[0] < cw[1] < cw[2])
+    assert cw[0] < cw[1] < cw[2]


 def test_compute_class_weight_not_present():
diff --git a/sklearn/utils/tests/test_estimator_checks.py b/sklearn/utils/tests/test_estimator_checks.py
index bf8412b3e527d..961329ee46218 100644
--- a/sklearn/utils/tests/test_estimator_checks.py
+++ b/sklearn/utils/tests/test_estimator_checks.py
@@ -346,7 +346,7 @@ def test_check_estimator():
         pass
     finally:
         sys.stdout = old_stdout
-    assert_true(msg in string_buffer.getvalue())
+    assert msg in string_buffer.getvalue()

     # Large indices test on bad estimator
     msg = ('Estimator LargeSparseNotSupportedClassifier doesn\'t seem to '
diff --git a/sklearn/utils/tests/test_extmath.py b/sklearn/utils/tests/test_extmath.py
index 54264484d5da2..7ea33139d92f0 100644
--- a/sklearn/utils/tests/test_extmath.py
+++ b/sklearn/utils/tests/test_extmath.py
@@ -405,7 +405,7 @@ def max_loading_is_positive(u, v):
     # Without transpose
     u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
     u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
-    assert_true(u_based)
+    assert u_based
     assert_false(v_based)

     # With transpose
@@ -413,7 +413,7 @@ def max_loading_is_positive(u, v):
         mat, 3, flip_sign=True, transpose=True)
     u_based, v_based = max_loading_is_positive(
         u_flipped_with_transpose, v_flipped_with_transpose)
-    assert_true(u_based)
+    assert u_based
     assert_false(v_based)
diff --git a/sklearn/utils/tests/test_metaestimators.py b/sklearn/utils/tests/test_metaestimators.py
index 2a016ebefa565..f50dee16e04a8 100644
--- a/sklearn/utils/tests/test_metaestimators.py
+++ b/sklearn/utils/tests/test_metaestimators.py
@@ -66,7 +66,7 @@ class HasNoPredict(object):


 def test_if_delegate_has_method():
-    assert_true(hasattr(MetaEst(HasPredict()), 'predict'))
+    assert hasattr(MetaEst(HasPredict()), 'predict')
     assert_false(hasattr(MetaEst(HasNoPredict()), 'predict'))
     assert_false(
         hasattr(MetaEstTestTuple(HasNoPredict(), HasNoPredict()), 'predict'))
diff --git a/sklearn/utils/tests/test_random.py b/sklearn/utils/tests/test_random.py
index 866a2481f919d..c174500e73362 100644
--- a/sklearn/utils/tests/test_random.py
+++ b/sklearn/utils/tests/test_random.py
@@ -67,7 +67,7 @@ def check_sample_int(sample_without_replacement):
         assert_equal(len(s), n_samples)
         unique = np.unique(s)
         assert_equal(np.size(unique), n_samples)
-        assert_true(np.all(unique < n_population))
+        assert np.all(unique < n_population)

     # test edge case n_population == n_samples == 0
     assert_equal(np.size(sample_without_replacement(0, 0)), 0)
@@ -110,7 +110,7 @@ def test_random_choice_csc(n_samples=10000, random_state=24):
     got = random_choice_csc(n_samples, classes, class_probabilities,
                             random_state)
-    assert_true(sp.issparse(got))
+    assert sp.issparse(got)

     for k in range(len(classes)):
         p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
@@ -123,7 +123,7 @@ def test_random_choice_csc(n_samples=10000, random_state=24):
     got = random_choice_csc(n_samples=n_samples,
                             classes=classes,
                             random_state=random_state)
-    assert_true(sp.issparse(got))
+    assert sp.issparse(got)

     for k in range(len(classes)):
         p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
@@ -135,7 +135,7 @@ def test_random_choice_csc(n_samples=10000, random_state=24):
     got = random_choice_csc(n_samples, classes, class_probabilities,
                             random_state)
-    assert_true(sp.issparse(got))
+    assert sp.issparse(got)

     for k in range(len(classes)):
         p = np.bincount(got.getcol(k).toarray().ravel(),
@@ -149,7 +149,7 @@ def test_random_choice_csc(n_samples=10000, random_state=24):
     got = random_choice_csc(n_samples=n_samples,
                             classes=classes,
                             random_state=random_state)
-    assert_true(sp.issparse(got))
+    assert sp.issparse(got)

     for k in range(len(classes)):
         p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
diff --git a/sklearn/utils/tests/test_utils.py b/sklearn/utils/tests/test_utils.py
index ab6f8f0ff1115..0b88cead5e790 100644
--- a/sklearn/utils/tests/test_utils.py
+++ b/sklearn/utils/tests/test_utils.py
@@ -25,17 +25,17 @@ def test_make_rng():
     # Check the check_random_state utility function behavior
-    assert_true(check_random_state(None) is np.random.mtrand._rand)
-    assert_true(check_random_state(np.random) is np.random.mtrand._rand)
+    assert check_random_state(None) is np.random.mtrand._rand
+    assert check_random_state(np.random) is np.random.mtrand._rand

     rng_42 = np.random.RandomState(42)
-    assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
+    assert check_random_state(42).randint(100) == rng_42.randint(100)

     rng_42 = np.random.RandomState(42)
-    assert_true(check_random_state(rng_42) is rng_42)
+    assert check_random_state(rng_42) is rng_42

     rng_42 = np.random.RandomState(42)
-    assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
+    assert check_random_state(43).randint(100) != rng_42.randint(100)

     assert_raises(ValueError, check_random_state, "some invalid seed")
@@ -57,8 +57,8 @@ def ham():
         assert_equal(spam, "spam")  # function must remain usable

         assert_equal(len(w), 1)
-        assert_true(issubclass(w[0].category, DeprecationWarning))
-        assert_true("deprecated" in str(w[0].message).lower())
+        assert issubclass(w[0].category, DeprecationWarning)
+        assert "deprecated" in str(w[0].message).lower()

     # ... then a class.
     with warnings.catch_warnings(record=True) as w:
@@ -70,16 +70,16 @@ class Ham(object):

         ham = Ham()

-        assert_true(hasattr(ham, "SPAM"))
+        assert hasattr(ham, "SPAM")

         assert_equal(len(w), 1)
-        assert_true(issubclass(w[0].category, DeprecationWarning))
-        assert_true("deprecated" in str(w[0].message).lower())
+        assert issubclass(w[0].category, DeprecationWarning)
+        assert "deprecated" in str(w[0].message).lower()


 def test_resample():
     # Border case not worth mentioning in doctests
-    assert_true(resample() is None)
+    assert resample() is None

     # Check that invalid arguments yield ValueError
     assert_raises(ValueError, resample, [0], [0, 1])
diff --git a/sklearn/utils/tests/test_validation.py b/sklearn/utils/tests/test_validation.py
index b0349d4d41731..843479c9e6696 100644
--- a/sklearn/utils/tests/test_validation.py
+++ b/sklearn/utils/tests/test_validation.py
@@ -62,7 +62,7 @@ def test_as_float_array():
     X = X.astype(np.int64)
     X2 = as_float_array(X, copy=True)
     # Checking that the array wasn't overwritten
-    assert_true(as_float_array(X, False) is not X)
+    assert as_float_array(X, False) is not X
     assert_equal(X2.dtype, np.float64)
     # Test int dtypes <= 32bit
     tested_dtypes = [np.bool,
@@ -80,10 +80,10 @@ def test_as_float_array():

     # Here, X is of the right type, it shouldn't be modified
     X = np.ones((3, 2), dtype=np.float32)
-    assert_true(as_float_array(X, copy=False) is X)
+    assert as_float_array(X, copy=False) is X
     # Test that if X is fortran ordered it stays
     X = np.asfortranarray(X)
-    assert_true(np.isfortran(as_float_array(X, copy=True)))
+    assert np.isfortran(as_float_array(X, copy=True))

     # Test the copy parameter with some matrices
     matrices = [
@@ -141,9 +141,9 @@ def test_ordering():
     for A in X, X.T:
         for copy in (True, False):
             B = check_array(A, order='C', copy=copy)
-            assert_true(B.flags['C_CONTIGUOUS'])
+            assert B.flags['C_CONTIGUOUS']
             B = check_array(A, order='F', copy=copy)
-            assert_true(B.flags['F_CONTIGUOUS'])
+            assert B.flags['F_CONTIGUOUS']
             if copy:
                 assert_false(A is B)
@@ -228,10 +228,10 @@ def test_check_array():
         else:
             assert_equal(X_checked.dtype, X.dtype)
         if order == 'C':
-            assert_true(X_checked.flags['C_CONTIGUOUS'])
+            assert X_checked.flags['C_CONTIGUOUS']
             assert_false(X_checked.flags['F_CONTIGUOUS'])
         elif order == 'F':
-            assert_true(X_checked.flags['F_CONTIGUOUS'])
+            assert X_checked.flags['F_CONTIGUOUS']
             assert_false(X_checked.flags['C_CONTIGUOUS'])
         if copy:
             assert_false(X is X_checked)
@@ -240,7 +240,7 @@ def test_check_array():
             if (X.dtype == X_checked.dtype and
                     X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
                     and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
-                assert_true(X is X_checked)
+                assert X is X_checked

     # allowed sparse != None
     X_csc = sp.csc_matrix(X_C)
@@ -260,7 +260,7 @@ def test_check_array():
                 message = str(w[0].message)
                 messages = ["object dtype is not supported by sparse matrices",
                             "Can't check dok sparse matrix for nan or inf."]
-                assert_true(message in messages)
+                assert message in messages
             else:
                 assert_equal(len(w), 0)
         if dtype is not None:
@@ -278,19 +278,19 @@ def test_check_array():
         else:
             # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and
                    X.format == X_checked.format):
-                assert_true(X is X_checked)
+                assert X is X_checked

     # other input formats
     # convert lists to arrays
     X_dense = check_array([[1, 2], [3, 4]])
-    assert_true(isinstance(X_dense, np.ndarray))
+    assert isinstance(X_dense, np.ndarray)
     # raise on too deep lists
     assert_raises(ValueError, check_array, X_ndim.tolist())
     check_array(X_ndim.tolist(), allow_nd=True)  # doesn't raise

     # convert weird stuff to arrays
     X_no_array = NotAnArray(X_dense)
     result = check_array(X_no_array)
-    assert_true(isinstance(result, np.ndarray))
+    assert isinstance(result, np.ndarray)

     # deprecation warning if string-like array with dtype="numeric"
     expected_warn_regex = r"converted to decimal numbers if dtype='numeric'"
@@ -388,7 +388,7 @@ def test_check_array_dtype_warning():
                                        dtype=[np.float64, np.float32],
                                        accept_sparse=True)
         assert_equal(X_checked.dtype, np.float32)
-        assert_true(X_checked is X)
+        assert X_checked is X

         X_checked = assert_no_warnings(check_array, X,
                                        dtype=[np.float64, np.float32],
@@ -579,9 +579,9 @@ def test_check_array_complex_data_error():

 def test_has_fit_parameter():
     assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
-    assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
-    assert_true(has_fit_parameter(SVR, "sample_weight"))
-    assert_true(has_fit_parameter(SVR(), "sample_weight"))
+    assert has_fit_parameter(RandomForestRegressor, "sample_weight")
+    assert has_fit_parameter(SVR, "sample_weight")
+    assert has_fit_parameter(SVR(), "sample_weight")

     class TestClassWithDeprecatedFitMethod:
         @deprecated("Deprecated for the purpose of testing has_fit_parameter")