Week 1, Day 4: Dimensionality Reduction
By Neuromatch Academy
Content creators: Alex Cayco Gajic, John Murray
Content reviewers: Roozbeh Farhoudi, Matt Krause, Spiros Chavlis, Richard Gao, Michael Waskom, Siddharth Suresh, Natalie Schaworonkow, Ella Batty
Production editors: Spiros Chavlis
Estimated timing of tutorial: 50 minutes
In this notebook we'll learn to apply PCA for dimensionality reduction, using a classic dataset that is often used to benchmark machine learning algorithms: MNIST. We'll also learn how to use PCA for reconstruction and denoising.
Overview:
You can learn more about the MNIST dataset here.
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
link_id = "kaq2x"
print(f"If you want to download the slides: https://osf.io/download/{link_id}/")
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/{link_id}/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
# @title Install and import feedback gadget
!pip3 install vibecheck datatops --quiet
from vibecheck import DatatopsContentReviewContainer
def content_review(notebook_section: str):
return DatatopsContentReviewContainer(
"", # No text prompt
notebook_section,
{
"url": "https://pmyvdlilci.execute-api.us-east-1.amazonaws.com/klab",
"name": "neuromatch_cn",
"user_key": "y1x3mpx5",
},
).render()
feedback_prefix = "W1D4_T3"
# Imports
import numpy as np
import matplotlib.pyplot as plt
# @title Figure Settings
import logging
logging.getLogger('matplotlib.font_manager').disabled = True
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/main/nma.mplstyle")
# @title Plotting Functions
def plot_variance_explained(variance_explained):
"""
Plots eigenvalues.
Args:
variance_explained (numpy array of floats) : Vector of variance explained
for each PC
Returns:
Nothing.
"""
plt.figure()
plt.plot(np.arange(1, len(variance_explained) + 1), variance_explained,
'--k')
plt.xlabel('Number of components')
plt.ylabel('Variance explained')
plt.show()
def plot_MNIST_reconstruction(X, X_reconstructed, keep_dims):
"""
Plots 9 images in the MNIST dataset side-by-side with the reconstructed
images.
Args:
X (numpy array of floats) : Data matrix each column
corresponds to a different
random variable
X_reconstructed (numpy array of floats) : Data matrix each column
corresponds to a different
random variable
keep_dims (int) : Dimensions to keep
Returns:
Nothing.
"""
plt.figure()
ax = plt.subplot(121)
k = 0
for k1 in range(3):
for k2 in range(3):
k = k + 1
plt.imshow(np.reshape(X[k, :], (28, 28)),
extent=[(k1 + 1) * 28, k1 * 28, (k2 + 1) * 28, k2 * 28],
vmin=0, vmax=255)
plt.xlim((3 * 28, 0))
plt.ylim((3 * 28, 0))
plt.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False)
ax.set_xticks([])
ax.set_yticks([])
plt.title('Data')
plt.clim([0, 250])
ax = plt.subplot(122)
k = 0
for k1 in range(3):
for k2 in range(3):
k = k + 1
plt.imshow(np.reshape(np.real(X_reconstructed[k, :]), (28, 28)),
extent=[(k1 + 1) * 28, k1 * 28, (k2 + 1) * 28, k2 * 28],
vmin=0, vmax=255)
plt.xlim((3 * 28, 0))
plt.ylim((3 * 28, 0))
plt.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False)
ax.set_xticks([])
ax.set_yticks([])
plt.clim([0, 250])
plt.title(f'Reconstructed K: {keep_dims}')
plt.tight_layout()
plt.show()
def plot_MNIST_sample(X):
"""
Plots 9 images in the MNIST dataset.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
Returns:
Nothing.
"""
fig, ax = plt.subplots()
k = 0
for k1 in range(3):
for k2 in range(3):
k = k + 1
plt.imshow(np.reshape(X[k, :], (28, 28)),
extent=[(k1 + 1) * 28, k1 * 28, (k2+1) * 28, k2 * 28],
vmin=0, vmax=255)
plt.xlim((3 * 28, 0))
plt.ylim((3 * 28, 0))
plt.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False)
plt.clim([0, 250])
ax.set_xticks([])
ax.set_yticks([])
plt.show()
def plot_MNIST_weights(weights):
"""
Visualize PCA basis vector weights for MNIST. Red = positive weights,
blue = negative weights, white = zero weight.
Args:
weights (numpy array of floats) : PCA basis vector
Returns:
Nothing.
"""
fig, ax = plt.subplots()
plt.imshow(np.real(np.reshape(weights, (28, 28))), cmap='seismic')
plt.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False)
plt.clim(-.15, .15)
plt.colorbar(ticks=[-.15, -.1, -.05, 0, .05, .1, .15])
ax.set_xticks([])
ax.set_yticks([])
plt.show()
def plot_eigenvalues(evals, xlimit=False):
"""
Plots eigenvalues.
Args:
evals (numpy array of floats) : Vector of eigenvalues
xlimit (boolean) : If True, limit the x-axis to the first 100 components
Returns:
Nothing.
"""
plt.figure()
plt.plot(np.arange(1, len(evals) + 1), evals, 'o-k')
plt.xlabel('Component')
plt.ylabel('Eigenvalue')
plt.title('Scree plot')
if xlimit:
plt.xlim([0, 100]) # limit x-axis up to 100 for zooming
plt.show()
# @title Helper Functions
def add_noise(X, frac_noisy_pixels):
"""
Randomly corrupts a fraction of the pixels by setting them to random values.
Args:
X (numpy array of floats) : Data matrix
frac_noisy_pixels (scalar) : Fraction of noisy pixels
Returns:
(numpy array of floats) : Data matrix + noise
"""
X_noisy = np.reshape(X, (X.shape[0] * X.shape[1])).copy()  # copy so the original X is not modified in place
N_noise_ixs = int(X_noisy.shape[0] * frac_noisy_pixels)
noise_ixs = np.random.choice(X_noisy.shape[0], size=N_noise_ixs,
replace=False)
X_noisy[noise_ixs] = np.random.uniform(0, 255, noise_ixs.shape)
X_noisy = np.reshape(X_noisy, (X.shape[0], X.shape[1]))
return X_noisy
def change_of_basis(X, W):
"""
Projects data onto a new basis.
Args:
X (numpy array of floats) : Data matrix each column corresponding to a
different random variable
W (numpy array of floats) : new orthonormal basis columns correspond to
basis vectors
Returns:
(numpy array of floats) : Data matrix expressed in new basis
"""
Y = np.matmul(X, W)
return Y
def get_sample_cov_matrix(X):
"""
Returns the sample covariance matrix of data X.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
Returns:
(numpy array of floats) : Covariance matrix
"""
X = X - np.mean(X, 0)
cov_matrix = 1 / X.shape[0] * np.matmul(X.T, X)
return cov_matrix
def sort_evals_descending(evals, evectors):
"""
Sorts eigenvalues and eigenvectors in decreasing order. Also aligns first two
eigenvectors to be in first two quadrants (if 2D).
Args:
evals (numpy array of floats) : Vector of eigenvalues
evectors (numpy array of floats) : Corresponding matrix of eigenvectors
each column corresponds to a different
eigenvalue
Returns:
(numpy array of floats) : Vector of eigenvalues after sorting
(numpy array of floats) : Matrix of eigenvectors after sorting
"""
index = np.flip(np.argsort(evals))
evals = evals[index]
evectors = evectors[:, index]
if evals.shape[0] == 2:
if np.arccos(np.matmul(evectors[:, 0],
1 / np.sqrt(2) * np.array([1, 1]))) > np.pi / 2:
evectors[:, 0] = -evectors[:, 0]
if np.arccos(np.matmul(evectors[:, 1],
1 / np.sqrt(2)*np.array([-1, 1]))) > np.pi / 2:
evectors[:, 1] = -evectors[:, 1]
return evals, evectors
def pca(X):
"""
Performs PCA on multivariate data. Eigenvalues are sorted in decreasing order
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
Returns:
(numpy array of floats) : Data projected onto the new basis
(numpy array of floats) : Corresponding matrix of eigenvectors
(numpy array of floats) : Vector of eigenvalues
"""
X = X - np.mean(X, 0)  # center the data
cov_matrix = get_sample_cov_matrix(X)  # sample covariance matrix
evals, evectors = np.linalg.eigh(cov_matrix)  # eigendecomposition of the symmetric covariance
evals, evectors = sort_evals_descending(evals, evectors)  # sort by decreasing eigenvalue
score = change_of_basis(X, evectors)  # project the data onto the eigenvectors
return score, evectors, evals
# @title Video 1: PCA for dimensionality reduction
from ipywidgets import widgets
from IPython.display import YouTubeVideo
from IPython.display import IFrame
from IPython.display import display
class PlayVideo(IFrame):
def __init__(self, id, source, page=1, width=400, height=300, **kwargs):
self.id = id
if source == 'Bilibili':
src = f'https://player.bilibili.com/player.html?bvid={id}&page={page}'
elif source == 'Osf':
src = f'https://mfr.ca-1.osf.io/render?url=https://osf.io/download/{id}/?direct%26mode=render'
super(PlayVideo, self).__init__(src, width, height, **kwargs)
def display_videos(video_ids, W=400, H=300, fs=1):
tab_contents = []
for i, video_id in enumerate(video_ids):
out = widgets.Output()
with out:
if video_ids[i][0] == 'Youtube':
video = YouTubeVideo(id=video_ids[i][1], width=W,
height=H, fs=fs, rel=0)
print(f'Video available at https://youtube.com/watch?v={video.id}')
else:
video = PlayVideo(id=video_ids[i][1], source=video_ids[i][0], width=W,
height=H, fs=fs, autoplay=False)
if video_ids[i][0] == 'Bilibili':
print(f'Video available at https://www.bilibili.com/video/{video.id}')
elif video_ids[i][0] == 'Osf':
print(f'Video available at https://osf.io/{video.id}')
display(video)
tab_contents.append(out)
return tab_contents
video_ids = [('Youtube', 'oO0bbInoO_0'), ('Bilibili', 'BV1up4y1S7xs')]
tab_contents = display_videos(video_ids, W=854, H=480)
tabs = widgets.Tab()
tabs.children = tab_contents
for i in range(len(tab_contents)):
tabs.set_title(i, video_ids[i][0])
display(tabs)
# @title Submit your feedback
content_review(f"{feedback_prefix}_PCA_for_dimensionality_reduction_Video")
The MNIST dataset consists of 70,000 images of individual handwritten digits. Each image is a 28x28 pixel grayscale image. For convenience, each 28x28 pixel image is often unravelled into a single 784 (=28x28) element vector, so that the whole dataset is represented as a 70,000 x 784 matrix. Each row represents a different image, and each column represents a different pixel.
Run the following cell to load the MNIST dataset and plot the first nine images. It may take a few minutes to load.
from sklearn.datasets import fetch_openml
# GET mnist data
mnist = fetch_openml(name='mnist_784', as_frame=False, parser='auto')
X = mnist.data
# Visualize
plot_MNIST_sample(X)
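To confirm the layout described above, one image per row and one pixel per column, here is a minimal check (assuming the loading cell above has run):
# Each of the 70,000 rows is one 28x28 image unravelled into 784 pixel values
print(X.shape)  # expected: (70000, 784)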
The MNIST dataset has an extrinsic dimensionality of 784, much higher than the 2-dimensional examples used in the previous tutorials! To make sense of this data, we'll use dimensionality reduction. But first, we need to determine the intrinsic dimensionality $K$ of the data. One way to do this is to look for an "elbow" in the scree plot, to determine which eigenvalues are significant.
In this exercise you will examine the scree plot in the MNIST dataset.
Steps:
- Perform PCA on the dataset using the `pca` function from Tutorial 2 (already loaded in) and examine the scree plot.
- By eye, roughly how many eigenvalues are substantially greater than zero? (Hint: use `plt.xlim` to zoom into a section of the plot.)
- If you need a reminder of the function's interface, run `help(pca)`.
#################################################
## TODO for students
# Fill out function and remove
raise NotImplementedError("Student exercise: perform PCA and visualize scree plot")
#################################################
# Perform PCA
score, evectors, evals = ...
# Plot the eigenvalues
plot_eigenvalues(evals, xlimit=True) # limit x-axis up to 100 for zooming
# to_remove solution
# Perform PCA
score, evectors, evals = pca(X)
# Plot the eigenvalues
with plt.xkcd():
plot_eigenvalues(evals, xlimit=True) # limit x-axis up to 100 for zooming
# @title Submit your feedback
content_review(f"{feedback_prefix}_Scree_plot_of_MNIST_Exercise")
Estimated timing to here from start of tutorial: 15 min
The scree plot suggests that most of the eigenvalues are near zero, with fewer than 100 having large values. Another common way to determine the intrinsic dimensionality is by considering the variance explained. This can be examined with a cumulative plot of the fraction of the total variance explained by the top $K$ components, i.e.,
\begin{equation}
\text{var explained} = \frac{\sum_{i=1}^K \lambda_i}{\sum_{i=1}^N \lambda_i}
\end{equation}

where $\lambda_i$ is the $i^{th}$ eigenvalue and $N$ is the total number of components (the original number of dimensions in the data).
The intrinsic dimensionality is often quantified by the $K$ necessary to explain a large proportion of the total variance of the data (often a defined threshold, e.g., 90%).
In this exercise you will plot the explained variance.
Steps:
- Fill in the function below to calculate the fraction of variance explained as a function of the number of components. Hint: use `np.cumsum`.
- Plot the result with `plot_variance_explained`.

Questions:
- How many principal components are required to explain 90% of the variance?
def get_variance_explained(evals):
"""
Calculates variance explained from the eigenvalues.
Args:
evals (numpy array of floats) : Vector of eigenvalues
Returns:
(numpy array of floats) : Vector of variance explained
"""
#################################################
## TO DO for students: calculate the explained variance using the equation
## from Section 2.
# Comment once you've filled in the function
raise NotImplementedError("Student exercise: calculate explain variance!")
#################################################
# Cumulatively sum the eigenvalues
csum = ...
# Normalize by the sum of eigenvalues
variance_explained = ...
return variance_explained
# Calculate the variance explained
variance_explained = get_variance_explained(evals)
# Visualize
plot_variance_explained(variance_explained)
# to_remove solution
def get_variance_explained(evals):
"""
Calculates variance explained from the eigenvalues.
Args:
evals (numpy array of floats) : Vector of eigenvalues
Returns:
(numpy array of floats) : Vector of variance explained
"""
# Cumulatively sum the eigenvalues
csum = np.cumsum(evals)
# Normalize by the sum of eigenvalues
variance_explained = csum / np.sum(evals)
return variance_explained
# Calculate the variance explained
variance_explained = get_variance_explained(evals)
# Visualize
with plt.xkcd():
plot_variance_explained(variance_explained)
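As a follow-up to the question above, here is a minimal sketch, assuming the `variance_explained` vector computed in the solution, of how to read off the number of components needed to reach a threshold such as 90%:
# Find the smallest K whose cumulative variance explained reaches the threshold
threshold = 0.90
K_threshold = np.argmax(variance_explained >= threshold) + 1  # argmax returns the first True index
print(f"{K_threshold} components explain at least {threshold:.0%} of the variance")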
# @title Submit your feedback
content_review(f"{feedback_prefix}_Plot_the_explained_variance_Exercise")
Estimated timing to here from start of tutorial: 25 min
# @title Video 2: Data Reconstruction
from ipywidgets import widgets
from IPython.display import YouTubeVideo
from IPython.display import IFrame
from IPython.display import display
class PlayVideo(IFrame):
def __init__(self, id, source, page=1, width=400, height=300, **kwargs):
self.id = id
if source == 'Bilibili':
src = f'https://player.bilibili.com/player.html?bvid={id}&page={page}'
elif source == 'Osf':
src = f'https://mfr.ca-1.osf.io/render?url=https://osf.io/download/{id}/?direct%26mode=render'
super(PlayVideo, self).__init__(src, width, height, **kwargs)
def display_videos(video_ids, W=400, H=300, fs=1):
tab_contents = []
for i, video_id in enumerate(video_ids):
out = widgets.Output()
with out:
if video_ids[i][0] == 'Youtube':
video = YouTubeVideo(id=video_ids[i][1], width=W,
height=H, fs=fs, rel=0)
print(f'Video available at https://youtube.com/watch?v={video.id}')
else:
video = PlayVideo(id=video_ids[i][1], source=video_ids[i][0], width=W,
height=H, fs=fs, autoplay=False)
if video_ids[i][0] == 'Bilibili':
print(f'Video available at https://www.bilibili.com/video/{video.id}')
elif video_ids[i][0] == 'Osf':
print(f'Video available at https://osf.io/{video.id}')
display(video)
tab_contents.append(out)
return tab_contents
video_ids = [('Youtube', 'ZCUhW26AdBQ'), ('Bilibili', 'BV1XK4y1s7KF')]
tab_contents = display_videos(video_ids, W=854, H=480)
tabs = widgets.Tab()
tabs.children = tab_contents
for i in range(len(tab_contents)):
tabs.set_title(i, video_ids[i][0])
display(tabs)
# @title Submit your feedback
content_review(f"{feedback_prefix}_Data_reconstruction_Video")
Now we have seen that the top 100 or so principal components of the data can explain most of the variance. We can use this fact to perform dimensionality reduction, i.e., to store the data using only the first ~100 component scores rather than all 784 pixel values for each sample. Remarkably, we will be able to reconstruct much of the structure of the data using only the top 100 components. To see this, recall that to perform PCA we projected the data $\bf X$ onto the eigenvectors of the covariance matrix:
\begin{equation}
\bf S = X W
\end{equation}

Since $\bf W$ is an orthogonal matrix, ${\bf W}^{-1} = {\bf W}^\top$. So by multiplying both sides on the right by ${\bf W}^\top$, we can rewrite this equation as
\begin{equation}
{\bf X = S W}^\top.
\end{equation}

This now gives us a way to reconstruct the data matrix from the scores and loadings. To reconstruct the data from a low-dimensional approximation, we just have to truncate these matrices. Let's denote by ${\bf S}_{1:K}$ and ${\bf W}_{1:K}$ the matrices containing only the first $K$ columns of $\bf S$ and $\bf W$, respectively. Then our reconstruction is:
\begin{equation}
{\bf \hat X} = {\bf S}_{1:K} ({\bf W}_{1:K})^\top.
\end{equation}

Fill in the function below to reconstruct the data using different numbers of principal components.
Steps:
- Fill in the function `reconstruct_data` to reconstruct the data from the scores and eigenvectors, keeping only the top $K$ components. Don't forget to add back the mean!
- Run the cell to reconstruct the data using all $K = 784$ components; the result should look identical to the original data.
def reconstruct_data(score, evectors, X_mean, K):
"""
Reconstruct the data based on the top K components.
Args:
score (numpy array of floats) : Score matrix
evectors (numpy array of floats) : Matrix of eigenvectors
X_mean (numpy array of floats) : Vector corresponding to data mean
K (scalar) : Number of components to include
Returns:
(numpy array of floats) : Matrix of reconstructed data
"""
#################################################
## TO DO for students: Reconstruct the original data in X_reconstructed
# Comment once you've filled in the function
raise NotImplementedError("Student exercise: reconstructing data function!")
#################################################
# Reconstruct the data from the score and eigenvectors
# Don't forget to add the mean!!
X_reconstructed = ...
return X_reconstructed
K = 784 # data dimensions
# Reconstruct the data based on all components
X_mean = np.mean(X, 0)
X_reconstructed = reconstruct_data(score, evectors, X_mean, K)
# Plot the data and reconstruction
plot_MNIST_reconstruction(X, X_reconstructed, K)
# to_remove solution
def reconstruct_data(score, evectors, X_mean, K):
"""
Reconstruct the data based on the top K components.
Args:
score (numpy array of floats) : Score matrix
evectors (numpy array of floats) : Matrix of eigenvectors
X_mean (numpy array of floats) : Vector corresponding to data mean
K (scalar) : Number of components to include
Returns:
(numpy array of floats) : Matrix of reconstructed data
"""
# Reconstruct the data from the score and eigenvectors
# Don't forget to add the mean!!
X_reconstructed = np.matmul(score[:, :K], evectors[:, :K].T) + X_mean
return X_reconstructed
K = 784 # data dimensions
# Reconstruct the data based on all components
X_mean = np.mean(X, 0)
X_reconstructed = reconstruct_data(score, evectors, X_mean, K)
# Plot the data and reconstruction
with plt.xkcd():
plot_MNIST_reconstruction(X, X_reconstructed, K)
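Because the eigenvector matrix is orthonormal, reconstructing with all 784 components should recover the original data up to floating-point error. A small sanity check, assuming `evectors` and `X_reconstructed` from the cells above:
# W^T W should be (numerically) the identity, and the full-rank reconstruction should match X
print(np.allclose(np.matmul(evectors.T, evectors), np.eye(evectors.shape[1])))
print(np.allclose(X_reconstructed, X))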
# @title Submit your feedback
content_review(f"{feedback_prefix}_Data_reconstruction_Exercise")
Now run the code below and experiment with the slider to reconstruct the data matrix using different numbers of principal components.
# @markdown Make sure you execute this cell to enable the widget!
def refresh(K=100):
X_reconstructed = reconstruct_data(score, evectors, X_mean, K)
plot_MNIST_reconstruction(X, X_reconstructed, K)
_ = widgets.interact(refresh, K=(1, 784, 10))
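If the interactive widget does not render in your environment, a minimal non-interactive alternative (assuming `reconstruct_data`, `score`, `evectors`, and `X_mean` from above) is to loop over a few values of $K$:
# Fallback without widgets: reconstruct with a few different numbers of components
for K_demo in (10, 50, 100, 300):
  X_rec = reconstruct_data(score, evectors, X_mean, K_demo)
  plot_MNIST_reconstruction(X, X_rec, K_demo)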
# to_remove explanation
"""
1) By around 91 components, they're pretty clear and similar to the original data.
This means that the intrinsic dimensionality of the data is less than 100.
2) You can't see too much but you seem to be able to partially distinguish ones or other numbers with
a vertical straight line (like the 9) from the rest, which resembles 0 in the reconstruction. You could
probably usually distinguish 0s from 1s.
""";
# @title Submit your feedback
content_review(f"{feedback_prefix}_Reconstruct_the_data_matrix_using_different_numbers_of_PCs_Interactive_Demo_and_discussion")
Next, let's take a closer look at the first principal component by visualizing its corresponding weights.
Steps:
- Use the function `plot_MNIST_weights` to visualize the weights of the first basis vector.
- If you need a reminder of its interface, run `help(plot_MNIST_weights)`.
################################################################
# Comment once you've filled in the function
raise NotImplementedError("Student exercise: visualize PCA components")
################################################################
# Plot the weights of the first principal component
plot_MNIST_weights(...)
# to_remove solution
# Plot the weights of the first principal component
with plt.xkcd():
plot_MNIST_weights(evectors[:, 0])
# @title Submit your feedback
content_review(f"{feedback_prefix}_Visualization_of_the_weights_Exercise")
Estimated timing of tutorial: 50 minutes
In this lecture, we saw that PCA finds an optimal low-dimensional basis to minimize the reconstruction error. Because of this property, PCA can be useful for denoising corrupted samples of the data.
In this exercise you will add salt-and-pepper noise to the original data and see how that affects the eigenvalues.
Steps:
- Use the function `add_noise` to add noise to 20% of the pixels (run `help(add_noise)` to see its interface).
- Then perform PCA on the noisy data, compute the variance explained, and visualize the results with the plotting functions, as in the code below.
#################################################
## TO DO for students
# Comment once you've filled in the function
raise NotImplementedError("Student exercise: make MNIST noisy and compute PCA!")
#################################################
np.random.seed(2020) # set random seed
# Add noise to data
X_noisy = ...
# Perform PCA on noisy data
score_noisy, evectors_noisy, evals_noisy = ...
# Compute variance explained
variance_explained_noisy = ...
# Visualize
plot_MNIST_sample(X_noisy)
plot_variance_explained(variance_explained_noisy)
# to_remove solution
np.random.seed(2020) # set random seed
# Add noise to data
X_noisy = add_noise(X, .2)
# Perform PCA on noisy data
score_noisy, evectors_noisy, evals_noisy = pca(X_noisy)
# Compute variance explained
variance_explained_noisy = get_variance_explained(evals_noisy)
# Visualize
with plt.xkcd():
plot_MNIST_sample(X_noisy)
plot_variance_explained(variance_explained_noisy)
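To see the effect of the noise on the spectrum more directly, a minimal sketch, assuming `variance_explained` (clean data) and `variance_explained_noisy` from the earlier cells, overlays the two cumulative variance curves:
# Compare cumulative variance explained for the original vs. noise-corrupted data
plt.figure()
plt.plot(np.arange(1, len(variance_explained) + 1), variance_explained, label='original')
plt.plot(np.arange(1, len(variance_explained_noisy) + 1), variance_explained_noisy, label='noisy')
plt.xlabel('Number of components')
plt.ylabel('Variance explained')
plt.legend()
plt.show()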
# @title Submit your feedback
content_review(f"{feedback_prefix}_Add_noise_to_the_data_Bonus_Exercise")
Next, use PCA to perform denoising by projecting the noise-corrupted data onto the basis vectors found from the original dataset. By taking the top K components of this projection, we can reduce noise in dimensions orthogonal to the K-dimensional latent space.
Steps:
- Subtract the mean of the noise-corrupted data.
- Project the noisy data onto the basis vectors found from the original dataset (i.e., use `evectors`, not `evectors_noisy`) and take the top $K$ components.
- Reconstruct the data using `reconstruct_data` with the top $K = 50$ components.

#################################################
## TO DO for students
# Comment once you've filled in the function
raise NotImplementedError("Student exercise: reconstruct noisy data from PCA")
#################################################
np.random.seed(2020) # set random seed
# Add noise to data
X_noisy = ...
# Compute mean of noise-corrupted data
X_noisy_mean = ...
# Project onto the original basis vectors
projX_noisy = ...
# Reconstruct the data using the top 50 components
K = 50
X_reconstructed = ...
# Visualize
plot_MNIST_reconstruction(X_noisy, X_reconstructed, K)
# to_remove solution
np.random.seed(2020) # set random seed
# Add noise to data
X_noisy = add_noise(X, .2)
# Compute mean of noise-corrupted data
X_noisy_mean = np.mean(X_noisy, 0)
# Project onto the original basis vectors
projX_noisy = np.matmul(X_noisy - X_noisy_mean, evectors)
# Reconstruct the data using the top 50 components
K = 50
X_reconstructed = reconstruct_data(projX_noisy, evectors, X_noisy_mean, K)
# Visualize
with plt.xkcd():
plot_MNIST_reconstruction(X_noisy, X_reconstructed, K)
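To quantify how much the projection helped, here is a hedged sketch, assuming `X`, `X_noisy`, and `X_reconstructed` from the cells above (and that `add_noise` copies rather than overwrites `X`), comparing mean squared pixel error to the clean images before and after denoising:
# Mean squared pixel error relative to the clean images, before vs. after denoising
mse_noisy = np.mean((X_noisy - X) ** 2)
mse_denoised = np.mean((np.real(X_reconstructed) - X) ** 2)
print(f"MSE noisy: {mse_noisy:.1f}, MSE denoised: {mse_denoised:.1f}")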
# @title Submit your feedback
content_review(f"{feedback_prefix}_Denoising_Bonus_Exercise")