diff --git a/examples/cluster/plot_linkage_comparison.py b/examples/cluster/plot_linkage_comparison.py
index dc009d0110f7c..793fee059d797 100644
--- a/examples/cluster/plot_linkage_comparison.py
+++ b/examples/cluster/plot_linkage_comparison.py
@@ -33,28 +33,28 @@
 from sklearn import cluster, datasets
 from sklearn.preprocessing import StandardScaler
 
-np.random.seed(0)
-
 # %%
 # Generate datasets. We choose the size big enough to see the scalability
 # of the algorithms, but not too big to avoid too long running times
 
 n_samples = 1500
-noisy_circles = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)
-noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05)
-blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
-no_structure = np.random.rand(n_samples, 2), None
+noisy_circles = datasets.make_circles(
+    n_samples=n_samples, factor=0.5, noise=0.05, random_state=170
+)
+noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05, random_state=170)
+blobs = datasets.make_blobs(n_samples=n_samples, random_state=170)
+rng = np.random.RandomState(170)
+no_structure = rng.rand(n_samples, 2), None
 
 # Anisotropicly distributed data
-random_state = 170
-X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
+X, y = datasets.make_blobs(n_samples=n_samples, random_state=170)
 transformation = [[0.6, -0.6], [-0.4, 0.8]]
 X_aniso = np.dot(X, transformation)
 aniso = (X_aniso, y)
 
 # blobs with varied variances
 varied = datasets.make_blobs(
-    n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state
+    n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=170
 )
 
 # %%
diff --git a/examples/preprocessing/plot_all_scaling.py b/examples/preprocessing/plot_all_scaling.py
index 5a08976bd2653..f53c50e33875a 100644
--- a/examples/preprocessing/plot_all_scaling.py
+++ b/examples/preprocessing/plot_all_scaling.py
@@ -102,11 +102,15 @@
     ),
     (
         "Data after quantile transformation (uniform pdf)",
-        QuantileTransformer(output_distribution="uniform").fit_transform(X),
+        QuantileTransformer(
+            output_distribution="uniform", random_state=42
+        ).fit_transform(X),
     ),
     (
         "Data after quantile transformation (gaussian pdf)",
-        QuantileTransformer(output_distribution="normal").fit_transform(X),
+        QuantileTransformer(
+            output_distribution="normal", random_state=42
+        ).fit_transform(X),
     ),
     ("Data after sample-wise L2 normalizing", Normalizer().fit_transform(X)),
 ]
diff --git a/examples/preprocessing/plot_discretization_classification.py b/examples/preprocessing/plot_discretization_classification.py
index 71adf44474aa3..f3edcac0011d7 100644
--- a/examples/preprocessing/plot_discretization_classification.py
+++ b/examples/preprocessing/plot_discretization_classification.py
@@ -74,7 +74,7 @@ def get_name(estimator):
     (
         make_pipeline(
             StandardScaler(),
-            KBinsDiscretizer(encode="onehot"),
+            KBinsDiscretizer(encode="onehot", random_state=0),
             LogisticRegression(random_state=0),
         ),
         {
@@ -85,7 +85,7 @@ def get_name(estimator):
     (
         make_pipeline(
             StandardScaler(),
-            KBinsDiscretizer(encode="onehot"),
+            KBinsDiscretizer(encode="onehot", random_state=0),
             LinearSVC(random_state=0, dual="auto"),
         ),
         {