#!/usr/bin/env python
# coding: utf-8

# **Chapter 8 – Dimensionality Reduction**

# _This notebook contains all the sample code and solutions to the exercises in chapter 8._
# Open In Colab
# # Setup # This project requires Python 3.7 or above: # In[1]: import sys assert sys.version_info >= (3, 7) # It also requires Scikit-Learn ≥ 1.0.1: # In[2]: from packaging import version import sklearn assert version.parse(sklearn.__version__) >= version.parse("1.0.1") # As we did in previous chapters, let's define the default font sizes to make the figures prettier: # In[3]: import matplotlib.pyplot as plt plt.rc('font', size=14) plt.rc('axes', labelsize=14, titlesize=14) plt.rc('legend', fontsize=14) plt.rc('xtick', labelsize=10) plt.rc('ytick', labelsize=10) # And let's create the `images/dim_reduction` folder (if it doesn't already exist), and define the `save_fig()` function which is used through this notebook to save the figures in high-res for the book: # In[4]: from pathlib import Path IMAGES_PATH = Path() / "images" / "dim_reduction" IMAGES_PATH.mkdir(parents=True, exist_ok=True) def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = IMAGES_PATH / f"{fig_id}.{fig_extension}" if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution) # # PCA # This chapter starts with several figures to explain the concepts of PCA and Manifold Learning. Below is the code to generate these figures. You can skip directly to the [Principal Components](#Principal-Components) section below if you want. # Let's generate a small 3D dataset. It's an oval shape, rotated in 3D space, with points distributed unevenly, and with quite a lot of noise: # In[5]: # extra code import numpy as np from scipy.spatial.transform import Rotation m = 60 X = np.zeros((m, 3)) # initialize 3D dataset np.random.seed(42) angles = (np.random.rand(m) ** 3 + 0.5) * 2 * np.pi # uneven distribution X[:, 0], X[:, 1] = np.cos(angles), np.sin(angles) * 0.5 # oval X += 0.28 * np.random.randn(m, 3) # add more noise X = Rotation.from_rotvec([np.pi / 29, -np.pi / 20, np.pi / 4]).apply(X) X += [0.2, 0, 0.2] # shift a bit # Plot the 3D dataset, with the projection plane. 
# In[6]: # extra code – this cell generates and saves Figure 8–2 import matplotlib.pyplot as plt from sklearn.decomposition import PCA pca = PCA(n_components=2) X2D = pca.fit_transform(X) # dataset reduced to 2D X3D_inv = pca.inverse_transform(X2D) # 3D position of the projected samples X_centered = X - X.mean(axis=0) U, s, Vt = np.linalg.svd(X_centered) axes = [-1.4, 1.4, -1.4, 1.4, -1.1, 1.1] x1, x2 = np.meshgrid(np.linspace(axes[0], axes[1], 10), np.linspace(axes[2], axes[3], 10)) w1, w2 = np.linalg.solve(Vt[:2, :2], Vt[:2, 2]) # projection plane coefs z = w1 * (x1 - pca.mean_[0]) + w2 * (x2 - pca.mean_[1]) - pca.mean_[2] # plane X3D_above = X[X[:, 2] >= X3D_inv[:, 2]] # samples above plane X3D_below = X[X[:, 2] < X3D_inv[:, 2]] # samples below plane fig = plt.figure(figsize=(9, 9)) ax = fig.add_subplot(111, projection="3d") # plot samples and projection lines below plane first ax.plot(X3D_below[:, 0], X3D_below[:, 1], X3D_below[:, 2], "ro", alpha=0.3) for i in range(m): if X[i, 2] < X3D_inv[i, 2]: ax.plot([X[i][0], X3D_inv[i][0]], [X[i][1], X3D_inv[i][1]], [X[i][2], X3D_inv[i][2]], ":", color="#F88") ax.plot_surface(x1, x2, z, alpha=0.1, color="b") # projection plane ax.plot(X3D_inv[:, 0], X3D_inv[:, 1], X3D_inv[:, 2], "b+") # projected samples ax.plot(X3D_inv[:, 0], X3D_inv[:, 1], X3D_inv[:, 2], "b.") # now plot projection lines and samples above plane for i in range(m): if X[i, 2] >= X3D_inv[i, 2]: ax.plot([X[i][0], X3D_inv[i][0]], [X[i][1], X3D_inv[i][1]], [X[i][2], X3D_inv[i][2]], "r--") ax.plot(X3D_above[:, 0], X3D_above[:, 1], X3D_above[:, 2], "ro") def set_xyz_axes(ax, axes): ax.xaxis.set_rotate_label(False) ax.yaxis.set_rotate_label(False) ax.zaxis.set_rotate_label(False) ax.set_xlabel("$x_1$", labelpad=8, rotation=0) ax.set_ylabel("$x_2$", labelpad=8, rotation=0) ax.set_zlabel("$x_3$", labelpad=8, rotation=0) ax.set_xlim(axes[0:2]) ax.set_ylim(axes[2:4]) ax.set_zlim(axes[4:6]) set_xyz_axes(ax, axes) ax.set_zticks([-1, -0.5, 0, 0.5, 1]) save_fig("dataset_3d_plot", tight_layout=False) plt.show() # In[7]: # extra code – this cell generates and saves Figure 8–3 fig = plt.figure() ax = fig.add_subplot(1, 1, 1, aspect='equal') ax.plot(X2D[:, 0], X2D[:, 1], "b+") ax.plot(X2D[:, 0], X2D[:, 1], "b.") ax.plot([0], [0], "bo") ax.arrow(0, 0, 1, 0, head_width=0.05, length_includes_head=True, head_length=0.1, fc='b', ec='b', linewidth=4) ax.arrow(0, 0, 0, 1, head_width=0.05, length_includes_head=True, head_length=0.1, fc='b', ec='b', linewidth=1) ax.set_xlabel("$z_1$") ax.set_yticks([-0.5, 0, 0.5, 1]) ax.set_ylabel("$z_2$", rotation=0) ax.set_axisbelow(True) ax.grid(True) save_fig("dataset_2d_plot") # In[8]: from sklearn.datasets import make_swiss_roll X_swiss, t = make_swiss_roll(n_samples=1000, noise=0.2, random_state=42) # In[9]: # extra code – this cell generates and saves Figure 8–4 from matplotlib.colors import ListedColormap darker_hot = ListedColormap(plt.cm.hot(np.linspace(0, 0.8, 256))) axes = [-11.5, 14, -2, 23, -12, 15] fig = plt.figure(figsize=(6, 5)) ax = fig.add_subplot(111, projection='3d') ax.scatter(X_swiss[:, 0], X_swiss[:, 1], X_swiss[:, 2], c=t, cmap=darker_hot) ax.view_init(10, -70) set_xyz_axes(ax, axes) save_fig("swiss_roll_plot") plt.show() # In[10]: # extra code – this cell generates and saves plots for Figure 8–5 plt.figure(figsize=(10, 4)) plt.subplot(121) plt.scatter(X_swiss[:, 0], X_swiss[:, 1], c=t, cmap=darker_hot) plt.axis(axes[:4]) plt.xlabel("$x_1$") plt.ylabel("$x_2$", labelpad=10, rotation=0) plt.grid(True) plt.subplot(122) plt.scatter(t, X_swiss[:, 1], 
            c=t, cmap=darker_hot)
plt.axis([4, 14.8, axes[2], axes[3]])
plt.xlabel("$z_1$")
plt.grid(True)
save_fig("squished_swiss_roll_plot")
plt.show()


# In[11]:


# extra code – this cell generates and saves plots for Figure 8–6

axes = [-11.5, 14, -2, 23, -12, 15]
x2s = np.linspace(axes[2], axes[3], 10)
x3s = np.linspace(axes[4], axes[5], 10)
x2, x3 = np.meshgrid(x2s, x3s)

positive_class = X_swiss[:, 0] > 5
X_pos = X_swiss[positive_class]
X_neg = X_swiss[~positive_class]

fig = plt.figure(figsize=(6, 5))
ax = plt.subplot(1, 1, 1, projection='3d')
ax.view_init(10, -70)
ax.plot(X_neg[:, 0], X_neg[:, 1], X_neg[:, 2], "y^")
ax.plot_wireframe(5, x2, x3, alpha=0.5)
ax.plot(X_pos[:, 0], X_pos[:, 1], X_pos[:, 2], "gs")
set_xyz_axes(ax, axes)
save_fig("manifold_decision_boundary_plot1")
plt.show()

fig = plt.figure(figsize=(5, 4))
ax = plt.subplot(1, 1, 1)
ax.plot(t[positive_class], X_swiss[positive_class, 1], "gs")
ax.plot(t[~positive_class], X_swiss[~positive_class, 1], "y^")
ax.axis([4, 15, axes[2], axes[3]])
ax.set_xlabel("$z_1$")
ax.set_ylabel("$z_2$", rotation=0, labelpad=8)
ax.grid(True)
save_fig("manifold_decision_boundary_plot2")
plt.show()

positive_class = 2 * (t[:] - 4) > X_swiss[:, 1]
X_pos = X_swiss[positive_class]
X_neg = X_swiss[~positive_class]

fig = plt.figure(figsize=(6, 5))
ax = plt.subplot(1, 1, 1, projection='3d')
ax.view_init(10, -70)
ax.plot(X_neg[:, 0], X_neg[:, 1], X_neg[:, 2], "y^")
ax.plot(X_pos[:, 0], X_pos[:, 1], X_pos[:, 2], "gs")
ax.xaxis.set_rotate_label(False)
ax.yaxis.set_rotate_label(False)
ax.zaxis.set_rotate_label(False)
ax.set_xlabel("$x_1$", rotation=0)
ax.set_ylabel("$x_2$", rotation=0)
ax.set_zlabel("$x_3$", rotation=0)
ax.set_xlim(axes[0:2])
ax.set_ylim(axes[2:4])
ax.set_zlim(axes[4:6])
save_fig("manifold_decision_boundary_plot3")
plt.show()

fig = plt.figure(figsize=(5, 4))
ax = plt.subplot(1, 1, 1)
ax.plot(t[positive_class], X_swiss[positive_class, 1], "gs")
ax.plot(t[~positive_class], X_swiss[~positive_class, 1], "y^")
ax.plot([4, 15], [0, 22], "b-", linewidth=2)
ax.axis([4, 15, axes[2], axes[3]])
ax.set_xlabel("$z_1$")
ax.set_ylabel("$z_2$", rotation=0, labelpad=8)
ax.grid(True)
save_fig("manifold_decision_boundary_plot4")
plt.show()


# In[12]:


# extra code – this cell generates and saves Figure 8–7

angle = np.pi / 5
stretch = 5
m = 200

np.random.seed(3)
X_line = np.random.randn(m, 2) / 10
X_line = X_line @ np.array([[stretch, 0], [0, 1]])  # stretch
X_line = X_line @ [[np.cos(angle), np.sin(angle)],
                   [-np.sin(angle), np.cos(angle)]]  # rotate

u1 = np.array([np.cos(angle), np.sin(angle)])
u2 = np.array([np.cos(angle - 2 * np.pi / 6), np.sin(angle - 2 * np.pi / 6)])
u3 = np.array([np.cos(angle - np.pi / 2), np.sin(angle - np.pi / 2)])

X_proj1 = X_line @ u1.reshape(-1, 1)
X_proj2 = X_line @ u2.reshape(-1, 1)
X_proj3 = X_line @ u3.reshape(-1, 1)

plt.figure(figsize=(8, 4))
plt.subplot2grid((3, 2), (0, 0), rowspan=3)
plt.plot([-1.4, 1.4], [-1.4 * u1[1] / u1[0], 1.4 * u1[1] / u1[0]], "k-",
         linewidth=2)
plt.plot([-1.4, 1.4], [-1.4 * u2[1] / u2[0], 1.4 * u2[1] / u2[0]], "k--",
         linewidth=2)
plt.plot([-1.4, 1.4], [-1.4 * u3[1] / u3[0], 1.4 * u3[1] / u3[0]], "k:",
         linewidth=2)
plt.plot(X_line[:, 0], X_line[:, 1], "ro", alpha=0.5)
plt.arrow(0, 0, u1[0], u1[1], head_width=0.1, linewidth=4, alpha=0.9,
          length_includes_head=True, head_length=0.1, fc="b", ec="b", zorder=10)
plt.arrow(0, 0, u3[0], u3[1], head_width=0.1, linewidth=1, alpha=0.9,
          length_includes_head=True, head_length=0.1, fc="b", ec="b", zorder=10)
plt.text(u1[0] + 0.1, u1[1] - 0.05, r"$\mathbf{c_1}$", color="blue")
plt.text(u3[0] + 0.1, u3[1], r"$\mathbf{c_2}$", color="blue")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$", rotation=0)
plt.axis([-1.4, 1.4, -1.4, 1.4])
plt.grid()

plt.subplot2grid((3, 2), (0, 1))
plt.plot([-2, 2], [0, 0], "k-", linewidth=2)
plt.plot(X_proj1[:, 0], np.zeros(m), "ro", alpha=0.3)
plt.gca().get_yaxis().set_ticks([])
plt.gca().get_xaxis().set_ticklabels([])
plt.axis([-2, 2, -1, 1])
plt.grid()

plt.subplot2grid((3, 2), (1, 1))
plt.plot([-2, 2], [0, 0], "k--", linewidth=2)
plt.plot(X_proj2[:, 0], np.zeros(m), "ro", alpha=0.3)
plt.gca().get_yaxis().set_ticks([])
plt.gca().get_xaxis().set_ticklabels([])
plt.axis([-2, 2, -1, 1])
plt.grid()

plt.subplot2grid((3, 2), (2, 1))
plt.plot([-2, 2], [0, 0], "k:", linewidth=2)
plt.plot(X_proj3[:, 0], np.zeros(m), "ro", alpha=0.3)
plt.gca().get_yaxis().set_ticks([])
plt.axis([-2, 2, -1, 1])
plt.xlabel("$z_1$")
plt.grid()

save_fig("pca_best_projection_plot")
plt.show()


# ## Principal Components

# In[13]:


import numpy as np

# X = [...]  # the small 3D dataset was created earlier in this notebook
X_centered = X - X.mean(axis=0)
U, s, Vt = np.linalg.svd(X_centered)
c1 = Vt[0]
c2 = Vt[1]


# Note: in principle, the SVD factorization algorithm returns three matrices, **U**, **Σ** and **V**, such that **X** = **UΣV**⊺, where **U** is an _m_ × _m_ matrix, **Σ** is an _m_ × _n_ matrix, and **V** is an _n_ × _n_ matrix. But the `svd()` function returns **U**, **s** and **V**⊺ instead. **s** is the vector containing all the values on the main diagonal of the top _n_ rows of **Σ**. Since **Σ** is full of zeros elsewhere, you can easily reconstruct it from **s**, like this:

# In[14]:


# extra code – shows how to construct Σ from s
m, n = X.shape
Σ = np.zeros_like(X_centered)
Σ[:n, :n] = np.diag(s)
assert np.allclose(X_centered, U @ Σ @ Vt)


# ## Projecting Down to d Dimensions

# In[15]:


W2 = Vt[:2].T
X2D = X_centered @ W2


# ## Using Scikit-Learn

# With Scikit-Learn, PCA is really trivial. It even takes care of mean centering for you:

# In[16]:


from sklearn.decomposition import PCA

pca = PCA(n_components=2)
X2D = pca.fit_transform(X)


# In[17]:


pca.components_


# ## Explained Variance Ratio

# Now let's look at the explained variance ratio:

# In[18]:


pca.explained_variance_ratio_


# The first dimension explains about 76% of the variance, while the second explains about 15%.

# By projecting down to 2D, we lost about 9% of the variance:

# In[19]:


1 - pca.explained_variance_ratio_.sum()  # extra code


# ## Choosing the Right Number of Dimensions

# In[20]:


from sklearn.datasets import fetch_openml

mnist = fetch_openml('mnist_784', as_frame=False, parser="auto")
X_train, y_train = mnist.data[:60_000], mnist.target[:60_000]
X_test, y_test = mnist.data[60_000:], mnist.target[60_000:]

pca = PCA()
pca.fit(X_train)
cumsum = np.cumsum(pca.explained_variance_ratio_)
d = np.argmax(cumsum >= 0.95) + 1  # d equals 154


# Note: I added `parser="auto"` when calling `fetch_openml()` to avoid a warning about the fact that the default value for that parameter will change in the future (it's irrelevant in this case). Please see the [documentation](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_openml.html) for more details.
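# In case the `np.argmax(cumsum >= 0.95) + 1` trick above isn't obvious: on a boolean array, `argmax()` returns the index of the first `True` value, so it finds the first dimension whose cumulative explained-variance ratio reaches 95%, and adding 1 turns that 0-based index into a number of dimensions. Here is a tiny sketch (not from the book) on a made-up cumulative curve:

# In[ ]:


# extra code – `toy_cumsum` is a hypothetical cumulative explained-variance curve
toy_cumsum = np.array([0.40, 0.70, 0.90, 0.96, 0.99, 1.00])
first_idx = np.argmax(toy_cumsum >= 0.95)  # index of the first ratio >= 0.95, i.e. 3
d_toy = first_idx + 1                      # number of dimensions to keep, i.e. 4
assert d_toy == 4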
# In[21]: d # In[22]: pca = PCA(n_components=0.95) X_reduced = pca.fit_transform(X_train) # In[23]: pca.n_components_ # In[24]: pca.explained_variance_ratio_.sum() # extra code # In[25]: # extra code – this cell generates and saves Figure 8–8 plt.figure(figsize=(6, 4)) plt.plot(cumsum, linewidth=3) plt.axis([0, 400, 0, 1]) plt.xlabel("Dimensions") plt.ylabel("Explained Variance") plt.plot([d, d], [0, 0.95], "k:") plt.plot([0, d], [0.95, 0.95], "k:") plt.plot(d, 0.95, "ko") plt.annotate("Elbow", xy=(65, 0.85), xytext=(70, 0.7), arrowprops=dict(arrowstyle="->")) plt.grid(True) save_fig("explained_variance_plot") plt.show() # In[26]: from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import RandomizedSearchCV from sklearn.pipeline import make_pipeline clf = make_pipeline(PCA(random_state=42), RandomForestClassifier(random_state=42)) param_distrib = { "pca__n_components": np.arange(10, 80), "randomforestclassifier__n_estimators": np.arange(50, 500) } rnd_search = RandomizedSearchCV(clf, param_distrib, n_iter=10, cv=3, random_state=42) rnd_search.fit(X_train[:1000], y_train[:1000]) # In[27]: print(rnd_search.best_params_) # In[28]: from sklearn.linear_model import SGDClassifier from sklearn.model_selection import GridSearchCV clf = make_pipeline(PCA(random_state=42), SGDClassifier()) param_grid = {"pca__n_components": np.arange(10, 80)} grid_search = GridSearchCV(clf, param_grid, cv=3) grid_search.fit(X_train[:1000], y_train[:1000]) # In[29]: grid_search.best_params_ # ## PCA for Compression # In[30]: pca = PCA(0.95) X_reduced = pca.fit_transform(X_train, y_train) # In[31]: X_recovered = pca.inverse_transform(X_reduced) # In[32]: # extra code – this cell generates and saves Figure 8–9 plt.figure(figsize=(7, 4)) for idx, X in enumerate((X_train[::2100], X_recovered[::2100])): plt.subplot(1, 2, idx + 1) plt.title(["Original", "Compressed"][idx]) for row in range(5): for col in range(5): plt.imshow(X[row * 5 + col].reshape(28, 28), cmap="binary", vmin=0, vmax=255, extent=(row, row + 1, col, col + 1)) plt.axis([0, 5, 0, 5]) plt.axis("off") save_fig("mnist_compression_plot") # ## Randomized PCA # In[33]: rnd_pca = PCA(n_components=154, svd_solver="randomized", random_state=42) X_reduced = rnd_pca.fit_transform(X_train) # ## Incremental PCA # In[34]: from sklearn.decomposition import IncrementalPCA n_batches = 100 inc_pca = IncrementalPCA(n_components=154) for X_batch in np.array_split(X_train, n_batches): inc_pca.partial_fit(X_batch) X_reduced = inc_pca.transform(X_train) # **Using NumPy's `memmap` class – a memory-map to an array stored in a binary file on disk.** # Let's create the `memmap` instance, copy the MNIST training set into it, and call `flush()` which ensures that any data still in cache is saved to disk. This would typically be done by a first program: # In[35]: filename = "my_mnist.mmap" X_mmap = np.memmap(filename, dtype='float32', mode='write', shape=X_train.shape) X_mmap[:] = X_train # could be a loop instead, saving the data chunk by chunk X_mmap.flush() # Next, another program would load the data and use it for training: # In[36]: X_mmap = np.memmap(filename, dtype="float32", mode="readonly").reshape(-1, 784) batch_size = X_mmap.shape[0] // n_batches inc_pca = IncrementalPCA(n_components=154, batch_size=batch_size) inc_pca.fit(X_mmap) # # Random Projection # **Warning**: this sections will use close to 2.5 GB of RAM. 
If your computer runs out of memory, just reduce _m_ and _n_: # In[37]: from sklearn.random_projection import johnson_lindenstrauss_min_dim m, ε = 5_000, 0.1 d = johnson_lindenstrauss_min_dim(m, eps=ε) d # In[38]: # extra code – show the equation computed by johnson_lindenstrauss_min_dim d = int(4 * np.log(m) / (ε ** 2 / 2 - ε ** 3 / 3)) d # In[39]: n = 20_000 np.random.seed(42) P = np.random.randn(d, n) / np.sqrt(d) # std dev = square root of variance X = np.random.randn(m, n) # generate a fake dataset X_reduced = X @ P.T # In[40]: from sklearn.random_projection import GaussianRandomProjection gaussian_rnd_proj = GaussianRandomProjection(eps=ε, random_state=42) X_reduced = gaussian_rnd_proj.fit_transform(X) # same result as above # **Warning**, the following cell may take several minutes to run: # In[41]: components_pinv = np.linalg.pinv(gaussian_rnd_proj.components_) X_recovered = X_reduced @ components_pinv.T # In[42]: # extra code – performance comparison between Gaussian and Sparse RP from sklearn.random_projection import SparseRandomProjection print("GaussianRandomProjection fit") get_ipython().run_line_magic('timeit', 'GaussianRandomProjection(random_state=42).fit(X)') print("SparseRandomProjection fit") get_ipython().run_line_magic('timeit', 'SparseRandomProjection(random_state=42).fit(X)') gaussian_rnd_proj = GaussianRandomProjection(random_state=42).fit(X) sparse_rnd_proj = SparseRandomProjection(random_state=42).fit(X) print("GaussianRandomProjection transform") get_ipython().run_line_magic('timeit', 'gaussian_rnd_proj.transform(X)') print("SparseRandomProjection transform") get_ipython().run_line_magic('timeit', 'sparse_rnd_proj.transform(X)') # # LLE # In[43]: from sklearn.datasets import make_swiss_roll from sklearn.manifold import LocallyLinearEmbedding X_swiss, t = make_swiss_roll(n_samples=1000, noise=0.2, random_state=42) lle = LocallyLinearEmbedding(n_components=2, n_neighbors=10, random_state=42) X_unrolled = lle.fit_transform(X_swiss) # In[44]: # extra code – this cell generates and saves Figure 8–10 plt.scatter(X_unrolled[:, 0], X_unrolled[:, 1], c=t, cmap=darker_hot) plt.xlabel("$z_1$") plt.ylabel("$z_2$", rotation=0) plt.axis([-0.055, 0.060, -0.070, 0.090]) plt.grid(True) save_fig("lle_unrolling_plot") plt.title("Unrolled swiss roll using LLE") plt.show() # In[45]: # extra code – shows how well correlated z1 is to t: LLE worked fine plt.title("$z_1$ vs $t$") plt.scatter(X_unrolled[:, 0], t, c=t, cmap=darker_hot) plt.xlabel("$z_1$") plt.ylabel("$t$", rotation=0) plt.grid(True) plt.show() # Note: I added `normalized_stress=False` below to avoid a warning about the fact that the default value for that hyperparameter will change in the future. Please see the [documentation](https://scikit-learn.org/stable/modules/generated/sklearn.manifold.MDS.html) for more details. 
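# Before moving on to MDS and the other techniques below, here is a rough way to quantify how well LLE preserved the Swiss roll's local structure (a sketch, not from the book; the neighborhood size `k` and the overlap score are just illustrative choices): for each point, check how many of its nearest neighbors in the original 3D space are still among its nearest neighbors in the 2D embedding.

# In[ ]:


# extra code – rough neighborhood-preservation check for the LLE embedding above
from sklearn.neighbors import NearestNeighbors

k = 10  # same neighborhood size as the LLE model above
_, idx_3d = NearestNeighbors(n_neighbors=k + 1).fit(X_swiss).kneighbors(X_swiss)
_, idx_2d = NearestNeighbors(n_neighbors=k + 1).fit(X_unrolled).kneighbors(X_unrolled)
# column 0 is the point itself, so compare columns 1..k; values near 1.0 mean the
# local neighborhoods were well preserved
overlap = np.mean([len(set(idx_3d[i, 1:]) & set(idx_2d[i, 1:])) / k
                   for i in range(len(X_swiss))])
overlap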
# In[46]: from sklearn.manifold import MDS mds = MDS(n_components=2, normalized_stress=False, random_state=42) X_reduced_mds = mds.fit_transform(X_swiss) # In[47]: from sklearn.manifold import Isomap isomap = Isomap(n_components=2) X_reduced_isomap = isomap.fit_transform(X_swiss) # In[48]: from sklearn.manifold import TSNE tsne = TSNE(n_components=2, init="random", learning_rate="auto", random_state=42) X_reduced_tsne = tsne.fit_transform(X_swiss) # In[49]: # extra code – this cell generates and saves Figure 8–11 titles = ["MDS", "Isomap", "t-SNE"] plt.figure(figsize=(11, 4)) for subplot, title, X_reduced in zip((131, 132, 133), titles, (X_reduced_mds, X_reduced_isomap, X_reduced_tsne)): plt.subplot(subplot) plt.title(title) plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=t, cmap=darker_hot) plt.xlabel("$z_1$") if subplot == 131: plt.ylabel("$z_2$", rotation=0) plt.grid(True) save_fig("other_dim_reduction_plot") plt.show() # # Extra Material – Kernel PCA # In[50]: from sklearn.decomposition import KernelPCA rbf_pca = KernelPCA(n_components=2, kernel="rbf", gamma=0.04, random_state=42) X_reduced = rbf_pca.fit_transform(X_swiss) # In[51]: lin_pca = KernelPCA(kernel="linear") rbf_pca = KernelPCA(kernel="rbf", gamma=0.002) sig_pca = KernelPCA(kernel="sigmoid", gamma=0.002, coef0=1) kernel_pcas = ((lin_pca, "Linear kernel"), (rbf_pca, rf"RBF kernel, $\gamma={rbf_pca.gamma}$"), (sig_pca, rf"Sigmoid kernel, $\gamma={sig_pca.gamma}, r={sig_pca.coef0}$")) plt.figure(figsize=(11, 3.5)) for idx, (kpca, title) in enumerate(kernel_pcas): kpca.n_components = 2 kpca.random_state = 42 X_reduced = kpca.fit_transform(X_swiss) plt.subplot(1, 3, idx + 1) plt.title(title) plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=t, cmap=darker_hot) plt.xlabel("$z_1$") if idx == 0: plt.ylabel("$z_2$", rotation=0) plt.grid() plt.show() # # Exercise solutions # ## 1. to 8. # 1. The main motivations for dimensionality reduction are: # * To speed up a subsequent training algorithm (in some cases it may even remove noise and redundant features, making the training algorithm perform better) # * To visualize the data and gain insights on the most important features # * To save space (compression) # # The main drawbacks are: # * Some information is lost, possibly degrading the performance of subsequent training algorithms. # * It can be computationally intensive. # * It adds some complexity to your Machine Learning pipelines. # * Transformed features are often hard to interpret. # 2. The curse of dimensionality refers to the fact that many problems that do not exist in low-dimensional space arise in high-dimensional space. In Machine Learning, one common manifestation is the fact that randomly sampled high-dimensional vectors are generally far from one another, increasing the risk of overfitting and making it very difficult to identify patterns without having plenty of training data. # 3. Once a dataset's dimensionality has been reduced using one of the algorithms we discussed, it is almost always impossible to perfectly reverse the operation, because some information gets lost during dimensionality reduction. Moreover, while some algorithms (such as PCA) have a simple reverse transformation procedure that can reconstruct a dataset relatively similar to the original, other algorithms (such as t-SNE) do not. # 4. PCA can be used to significantly reduce the dimensionality of most datasets, even if they are highly nonlinear, because it can at least get rid of useless dimensions. 
However, if there are no useless dimensions—as in the Swiss roll dataset—then reducing dimensionality with PCA will lose too much information. You want to unroll the Swiss roll, not squash it. # 5. That's a trick question: it depends on the dataset. Let's look at two extreme examples. First, suppose the dataset is composed of points that are almost perfectly aligned. In this case, PCA can reduce the dataset down to just one dimension while still preserving 95% of the variance. Now imagine that the dataset is composed of perfectly random points, scattered all around the 1,000 dimensions. In this case roughly 950 dimensions are required to preserve 95% of the variance. So the answer is, it depends on the dataset, and it could be any number between 1 and 950. Plotting the explained variance as a function of the number of dimensions is one way to get a rough idea of the dataset's intrinsic dimensionality. # 6. Regular PCA is the default, but it works only if the dataset fits in memory. Incremental PCA is useful for large datasets that don't fit in memory, but it is slower than regular PCA, so if the dataset fits in memory you should prefer regular PCA. Incremental PCA is also useful for online tasks, when you need to apply PCA on the fly, every time a new instance arrives. Randomized PCA is useful when you want to considerably reduce dimensionality and the dataset fits in memory; in this case, it is much faster than regular PCA. Finally, Random Projection is great for very high-dimensional datasets. # 7. Intuitively, a dimensionality reduction algorithm performs well if it eliminates a lot of dimensions from the dataset without losing too much information. One way to measure this is to apply the reverse transformation and measure the reconstruction error. However, not all dimensionality reduction algorithms provide a reverse transformation. Alternatively, if you are using dimensionality reduction as a preprocessing step before another Machine Learning algorithm (e.g., a Random Forest classifier), then you can simply measure the performance of that second algorithm; if dimensionality reduction did not lose too much information, then the algorithm should perform just as well as when using the original dataset. # 8. It can absolutely make sense to chain two different dimensionality reduction algorithms. A common example is using PCA or Random Projection to quickly get rid of a large number of useless dimensions, then applying another much slower dimensionality reduction algorithm, such as LLE. This two-step approach will likely yield roughly the same performance as using LLE only, but in a fraction of the time. # ## 9. # Exercise: _Load the MNIST dataset (introduced in chapter 3) and split it into a training set and a test set (take the first 60,000 instances for training, and the remaining 10,000 for testing)._ # The MNIST dataset was loaded earlier. 
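# As a quick aside before diving into the exercise: the answer to question 7 above mentions measuring the reconstruction error. Here is a minimal sketch of that idea (not part of the exercise; the 1,000-image sample and `n_components=50` are arbitrary choices):

# In[ ]:


# extra code – minimal reconstruction-error sketch (see the answer to question 7)
sample = mnist.data[:1_000].astype(np.float64)  # small sample to keep it fast
pca_demo = PCA(n_components=50).fit(sample)     # 50 components is an arbitrary choice
reconstructed = pca_demo.inverse_transform(pca_demo.transform(sample))
reconstruction_mse = np.mean((sample - reconstructed) ** 2)
reconstruction_mse  # the lower, the less information was lost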
# In[52]: X_train = mnist.data[:60000] y_train = mnist.target[:60000] X_test = mnist.data[60000:] y_test = mnist.target[60000:] # Exercise: _Train a Random Forest classifier on the dataset and time how long it takes, then evaluate the resulting model on the test set._ # In[53]: rnd_clf = RandomForestClassifier(n_estimators=100, random_state=42) # In[54]: get_ipython().run_line_magic('time', 'rnd_clf.fit(X_train, y_train)') # In[55]: from sklearn.metrics import accuracy_score y_pred = rnd_clf.predict(X_test) accuracy_score(y_test, y_pred) # Exercise: _Next, use PCA to reduce the dataset's dimensionality, with an explained variance ratio of 95%._ # In[56]: from sklearn.decomposition import PCA pca = PCA(n_components=0.95) X_train_reduced = pca.fit_transform(X_train) # Exercise: _Train a new Random Forest classifier on the reduced dataset and see how long it takes. Was training much faster?_ # In[57]: rnd_clf_with_pca = RandomForestClassifier(n_estimators=100, random_state=42) get_ipython().run_line_magic('time', 'rnd_clf_with_pca.fit(X_train_reduced, y_train)') # Oh no! Training is actually about twice slower now! How can that be? Well, as we saw in this chapter, dimensionality reduction does not always lead to faster training time: it depends on the dataset, the model and the training algorithm. See figure 8-6 (the `manifold_decision_boundary_plot*` plots above). If you try `SGDClassifier` instead of `RandomForestClassifier`, you will find that training time is reduced by a factor of 3 when using PCA. Actually, we will do this in a second, but first let's check the precision of the new random forest classifier. # Exercise: _Next evaluate the classifier on the test set: how does it compare to the previous classifier?_ # In[58]: X_test_reduced = pca.transform(X_test) y_pred = rnd_clf_with_pca.predict(X_test_reduced) accuracy_score(y_test, y_pred) # It is common for performance to drop slightly when reducing dimensionality, because we do lose some potentially useful signal in the process. However, the performance drop is rather severe in this case. So PCA really did not help: it slowed down training *and* reduced performance. 😭 # Exercise: _Try again with an `SGDClassifier`. How much does PCA help now?_ # In[59]: from sklearn.linear_model import SGDClassifier sgd_clf = SGDClassifier(random_state=42) get_ipython().run_line_magic('time', 'sgd_clf.fit(X_train, y_train)') # In[60]: y_pred = sgd_clf.predict(X_test) accuracy_score(y_test, y_pred) # Okay, so the `SGDClassifier` takes much longer to train on this dataset than the `RandomForestClassifier`, plus it performs worse on the test set. But that's not what we are interested in right now, we want to see how much PCA can help `SGDClassifier`. Let's train it using the reduced dataset: # In[61]: sgd_clf_with_pca = SGDClassifier(random_state=42) get_ipython().run_line_magic('time', 'sgd_clf_with_pca.fit(X_train_reduced, y_train)') # Nice! Reducing dimensionality led to roughly 5× speedup. :) Let's check the model's accuracy: # In[62]: y_pred = sgd_clf_with_pca.predict(X_test_reduced) accuracy_score(y_test, y_pred) # Great! PCA not only gave us a 5× speed boost, it also improved performance slightly. # So there you have it: PCA can give you a formidable speedup, and if you're lucky a performance boost... but it's really not guaranteed: it depends on the model and the dataset! # ## 10. # Exercise: _Use t-SNE to reduce the first 5,000 images of the MNIST dataset down to two dimensions and plot the result using Matplotlib. 
You can use a scatterplot using 10 different colors to represent each image's target class._ # Let's limit ourselves to the first 5,000 images of the MNIST training set, to speed things up a lot. # In[63]: X_sample, y_sample = X_train[:5000], y_train[:5000] # Let's use t-SNE to reduce dimensionality down to 2D so we can plot the dataset: # In[64]: from sklearn.manifold import TSNE tsne = TSNE(n_components=2, init="random", learning_rate="auto", random_state=42) get_ipython().run_line_magic('time', 'X_reduced = tsne.fit_transform(X_sample)') # Now let's use Matplotlib's `scatter()` function to plot a scatterplot, using a different color for each digit: # In[65]: plt.figure(figsize=(13, 10)) plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y_sample.astype(np.int8), cmap="jet", alpha=0.5) plt.axis('off') plt.colorbar() plt.show() # Isn't this just beautiful? :) Most digits are nicely separated from the others, even though t-SNE wasn't given the targets: it just identified clusters of similar images. But there is still a bit of overlap. For example, the 3s and the 5s overlap a lot (on the right side of the plot), and so do the 4s and the 9s (in the top-right corner). # Let's focus on just the digits 4 and 9: # In[66]: plt.figure(figsize=(9, 9)) cmap = plt.cm.jet for digit in ('4', '9'): plt.scatter(X_reduced[y_sample == digit, 0], X_reduced[y_sample == digit, 1], c=[cmap(float(digit) / 9)], alpha=0.5) plt.axis('off') plt.show() # Let's see if we can produce a nicer image by running t-SNE on just these 2 digits: # In[67]: idx = (y_sample == '4') | (y_sample == '9') X_subset = X_sample[idx] y_subset = y_sample[idx] tsne_subset = TSNE(n_components=2, init="random", learning_rate="auto", random_state=42) X_subset_reduced = tsne_subset.fit_transform(X_subset) # In[68]: plt.figure(figsize=(9, 9)) for digit in ('4', '9'): plt.scatter(X_subset_reduced[y_subset == digit, 0], X_subset_reduced[y_subset == digit, 1], c=[cmap(float(digit) / 9)], alpha=0.5) plt.axis('off') plt.show() # That's much better, although there's still a bit of overlap. Perhaps some 4s really do look like 9s, and vice versa. It would be nice if we could visualize a few digits from each region of this plot, to understand what's going on. In fact, let's do that now. # Exercise: _Alternatively, you can replace each dot in the scatterplot with the corresponding instance’s class (a digit from 0 to 9), or even plot scaled-down versions of the digit images themselves (if you plot all digits, the visualization will be too cluttered, so you should either draw a random sample or plot an instance only if no other instance has already been plotted at a close distance). You should get a nice visualization with well-separated clusters of digits._ # Let's create a `plot_digits()` function that will draw a scatterplot (similar to the above scatterplots) plus write colored digits, with a minimum distance guaranteed between these digits. If the digit images are provided, they are plotted instead. This implementation was inspired from one of Scikit-Learn's excellent examples ([plot_lle_digits](https://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html), based on a different digit dataset). 
# In[69]: from sklearn.preprocessing import MinMaxScaler from matplotlib.offsetbox import AnnotationBbox, OffsetImage def plot_digits(X, y, min_distance=0.04, images=None, figsize=(13, 10)): # Let's scale the input features so that they range from 0 to 1 X_normalized = MinMaxScaler().fit_transform(X) # Now we create the list of coordinates of the digits plotted so far. # We pretend that one is already plotted far away at the start, to # avoid `if` statements in the loop below neighbors = np.array([[10., 10.]]) # The rest should be self-explanatory plt.figure(figsize=figsize) cmap = plt.cm.jet digits = np.unique(y) for digit in digits: plt.scatter(X_normalized[y == digit, 0], X_normalized[y == digit, 1], c=[cmap(float(digit) / 9)], alpha=0.5) plt.axis("off") ax = plt.gca() # get current axes for index, image_coord in enumerate(X_normalized): closest_distance = np.linalg.norm(neighbors - image_coord, axis=1).min() if closest_distance > min_distance: neighbors = np.r_[neighbors, [image_coord]] if images is None: plt.text(image_coord[0], image_coord[1], str(int(y[index])), color=cmap(float(y[index]) / 9), fontdict={"weight": "bold", "size": 16}) else: image = images[index].reshape(28, 28) imagebox = AnnotationBbox(OffsetImage(image, cmap="binary"), image_coord) ax.add_artist(imagebox) # Let's try it! First let's show colored digits (not images), for all 5,000 images: # In[70]: plot_digits(X_reduced, y_sample) # Well that's okay, but not that beautiful. Let's try with the digit images: # In[71]: plot_digits(X_reduced, y_sample, images=X_sample, figsize=(35, 25)) # That's nicer! Now let's focus on just the 3s and the 5s: # In[72]: plot_digits(X_subset_reduced, y_subset, images=X_subset, figsize=(22, 22)) # Notice how similar-looking 4s are grouped together. For example, the 4s get more and more inclined as they approach the top of the figure. The inclined 9s are also closer to the top. Some 4s really do look like 9s, and vice versa. # Exercise: _Try using other dimensionality reduction algorithms such as PCA, LLE, or MDS and compare the resulting visualizations._ # Let's start with PCA. We will also time how long it takes: # In[73]: pca = PCA(n_components=2, random_state=42) get_ipython().run_line_magic('time', 'X_pca_reduced = pca.fit_transform(X_sample)') plot_digits(X_pca_reduced, y_sample) plt.show() # Wow, PCA is blazingly fast! But although we do see a few clusters, there's way too much overlap. Let's try LLE: # In[74]: lle = LocallyLinearEmbedding(n_components=2, random_state=42) get_ipython().run_line_magic('time', 'X_lle_reduced = lle.fit_transform(X_sample)') plot_digits(X_lle_reduced, y_sample) plt.show() # That took more time, and yet the result does not look good at all. Let's see what happens if we apply PCA first, preserving 95% of the variance: # In[75]: pca_lle = make_pipeline(PCA(n_components=0.95), LocallyLinearEmbedding(n_components=2, random_state=42)) get_ipython().run_line_magic('time', 'X_pca_lle_reduced = pca_lle.fit_transform(X_sample)') plot_digits(X_pca_lle_reduced, y_sample) plt.show() # The result is more or less as bad, but this time training was a bit faster. # Let's try MDS: # **Warning**, the following cell will take about 10-30 minutes to run, depending on your hardware: # In[76]: get_ipython().run_line_magic('time', 'X_mds_reduced = MDS(n_components=2, normalized_stress=False, random_state=42).fit_transform(X_sample)') plot_digits(X_mds_reduced, y_sample) plt.show() # Meh. This does not look great, all clusters overlap too much. 
Let's try with PCA first, perhaps it will be faster? # **Warning**, the following cell will take about 10-30 minutes to run, depending on your hardware: # In[77]: pca_mds = make_pipeline( PCA(n_components=0.95, random_state=42), MDS(n_components=2, normalized_stress=False, random_state=42) ) get_ipython().run_line_magic('time', 'X_pca_mds_reduced = pca_mds.fit_transform(X_sample)') plot_digits(X_pca_mds_reduced, y_sample) plt.show() # Same result, and not faster: PCA did not help in this case. # Let's try LDA now: # In[78]: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis lda = LinearDiscriminantAnalysis(n_components=2) get_ipython().run_line_magic('time', 'X_lda_reduced = lda.fit_transform(X_sample, y_sample)') plot_digits(X_lda_reduced, y_sample, figsize=(12, 12)) plt.show() # This one is very fast, and it looks nice at first, until you realize that several clusters overlap severely. # Well, it's pretty clear that t-SNE won this little competition, wouldn't you agree? # And that's all for today, I hope you enjoyed this chapter! # In[ ]:
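# One last optional sketch, not in the book: the answer to exercise 8 suggested chaining a fast dimensionality reduction algorithm with a slower one. Applying the same idea here, we can first compress the sample with PCA (preserving 95% of the variance) and then run t-SNE on the result; the t-SNE hyperparameters below are the same ones used earlier.

# In[ ]:


# extra code – optional: chain PCA (95% variance) with t-SNE on the 5,000-image sample
pca_tsne = make_pipeline(
    PCA(n_components=0.95, random_state=42),
    TSNE(n_components=2, init="random", learning_rate="auto", random_state=42),
)
X_pca_tsne_reduced = pca_tsne.fit_transform(X_sample)
plot_digits(X_pca_tsne_reduced, y_sample)
plt.show()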