diff --git a/examples/linear_model/plot_ols_3d.py b/examples/linear_model/plot_ols_3d.py
index 2c9e0c7a91bc0..222226c6b28c2 100644
--- a/examples/linear_model/plot_ols_3d.py
+++ b/examples/linear_model/plot_ols_3d.py
@@ -7,19 +7,18 @@
 Features 1 and 2 of the diabetes-dataset are fitted and
 plotted below. It illustrates that although feature 2
 has a strong coefficient on the full model, it does not
-give us much regarding `y` when compared to just feature 1
-
+give us much regarding `y` when compared to just feature 1.
 """
 
 # Code source: Gaël Varoquaux
 # Modified for documentation by Jaques Grobler
 # License: BSD 3 clause
 
-import matplotlib.pyplot as plt
-import numpy as np
-from mpl_toolkits.mplot3d import Axes3D
+# %%
+# First we load the diabetes dataset.
 
-from sklearn import datasets, linear_model
+from sklearn import datasets
+import numpy as np
 
 X, y = datasets.load_diabetes(return_X_y=True)
 indices = (0, 1)
@@ -29,16 +28,25 @@
 y_train = y[:-20]
 y_test = y[-20:]
 
+# %%
+# Next we fit a linear regression model.
+
+from sklearn import linear_model
+
 ols = linear_model.LinearRegression()
-ols.fit(X_train, y_train)
+_ = ols.fit(X_train, y_train)
+
+
+# %%
+# Finally we plot the figure from three different views.
+
+import matplotlib.pyplot as plt
 
 
-# #############################################################################
-# Plot the figure
 def plot_figs(fig_num, elev, azim, X_train, clf):
     fig = plt.figure(fig_num, figsize=(4, 3))
     plt.clf()
-    ax = Axes3D(fig, elev=elev, azim=azim)
+    ax = fig.add_subplot(111, projection="3d", elev=elev, azim=azim)
 
     ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c="k", marker="+")
     ax.plot_surface(