diff --git a/examples/cluster/plot_kmeans_assumptions.py b/examples/cluster/plot_kmeans_assumptions.py
index 94f8ff6c58f52..3bdfae86b4ff7 100644
--- a/examples/cluster/plot_kmeans_assumptions.py
+++ b/examples/cluster/plot_kmeans_assumptions.py
@@ -27,7 +27,7 @@
 X, y = make_blobs(n_samples=n_samples, random_state=random_state)

 # Incorrect number of clusters
-y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
+y_pred = KMeans(n_clusters=2, n_init="auto", random_state=random_state).fit_predict(X)

 plt.subplot(221)
 plt.scatter(X[:, 0], X[:, 1], c=y_pred)
@@ -36,7 +36,9 @@
 # Anisotropicly distributed data
 transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
 X_aniso = np.dot(X, transformation)
-y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
+y_pred = KMeans(n_clusters=3, n_init=10, random_state=random_state).fit_predict(
+    X_aniso
+)

 plt.subplot(222)
 plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
@@ -46,7 +48,9 @@
 X_varied, y_varied = make_blobs(
     n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state
 )
-y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
+y_pred = KMeans(n_clusters=3, n_init="auto", random_state=random_state).fit_predict(
+    X_varied
+)

 plt.subplot(223)
 plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
@@ -54,7 +58,9 @@

 # Unevenly sized blobs
 X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
-y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
+y_pred = KMeans(n_clusters=3, n_init=10, random_state=random_state).fit_predict(
+    X_filtered
+)

 plt.subplot(224)
 plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
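For context on what the patch changes, below is a minimal, standalone sketch of the same call pattern: KMeans is given n_init explicitly instead of relying on the library default. The n_samples and random_state values are placeholders standing in for the ones defined earlier in the example script, and n_init="auto" assumes a scikit-learn release that accepts it (1.2 or later).

# Minimal sketch (not part of the patch): pass n_init explicitly to KMeans.
# n_samples and random_state are placeholder values for illustration only.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

n_samples = 1500    # placeholder; the example script defines its own value
random_state = 170  # placeholder; the example script defines its own value

X, y = make_blobs(n_samples=n_samples, random_state=random_state)

# n_init="auto" defers the number of centroid initializations to scikit-learn,
# while an explicit integer such as n_init=10 pins the classic behaviour of
# 10 random restarts; either form avoids depending on the library default.
y_pred = KMeans(n_clusters=3, n_init="auto", random_state=random_state).fit_predict(X)
print(np.bincount(y_pred))

Setting n_init explicitly (either way) keeps the example quiet during the period when scikit-learn warned about the default value of n_init changing.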