#!/usr/bin/env python
# coding: utf-8

# # "Is There Anything On?"
#
# ## A Movie Recommendation System

# In this exercise, we implement a collaborative filtering learning algorithm and apply it to a dataset of movie ratings$^{*}$. The objective of collaborative filtering is to predict ratings for the movies that users have not yet rated. This allows us to recommend movies with a high predicted rating to each user.
#
# $^{*}$MovieLens 100k Dataset from GroupLens Research.

# We begin by importing standard machine learning libraries, then loading our data and converting it from MATLAB format to a more Python-friendly one.

# In[1]:

# standard imports for data science and machine learning
import scipy.io
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')

# The code below loads the dataset ex8_movies.mat, which contains the matrices $Y$ and $R$. The matrix $Y$ (a $num\_movies \times num\_users$ matrix) stores the ratings $y^{(i,j)}$, which range from 1 to 5. The data comprise $n_u = 943$ users and $n_m = 1682$ movies. The matrix $R$ is a binary-valued indicator matrix in which $R(i,j) = 1$ if user $j$ gave a rating to movie $i$, and $R(i,j) = 0$ otherwise.

# In[2]:

mat = scipy.io.loadmat('data/ex8_movies.mat')
Y = mat['Y']
R = mat['R']

# Below is a slice of the matrix $Y$. As we can see, it contains ratings from 1 to 5. The zeros represent movies that users did not rate.

# In[3]:

print('Y dimensions', Y.shape)
print(Y[:10, :10])

# The code below presents a window into the matrix $R$. The contents reflect what was stated above: rated movies are coded with a 1, while unrated movies are coded with a 0.

# In[4]:

print('R dimensions', R.shape)
print(R[:10, :10])

# Throughout this part of the exercise, we will also be working with the matrices $X$ and $\Theta$. In the cell that follows, we load the MATLAB data file containing these matrices.

# In[5]:

mat2 = scipy.io.loadmat('data/ex8_movieParams.mat')
X = mat2['X']
Theta = mat2['Theta']
num_users = mat2['num_users']
num_movies = mat2['num_movies']
num_features = mat2['num_features']

# The $i$-th row of $X$ corresponds to the feature vector $x^{(i)}$ for the $i$-th movie, and the $j$-th row of $\Theta$ corresponds to the parameter vector $\theta^{(j)}$ for the $j$-th user. Both $x^{(i)}$ and $\theta^{(j)}$ are $n$-dimensional vectors, where $n$ is the num_features value loaded above. Thus, $X$ is an $n_m \times n$ matrix and Theta is an $n_u \times n$ matrix.
#
# As one can see, the MATLAB file we imported contains more information than simply the contents of two more matrices.

# In[6]:

print('X dimensions', X.shape)
print('Parameters Theta:', Theta)
print('Number of users represented:', num_users)
print('Number of movies represented:', num_movies)
print('Number of features represented:', num_features)

# Using matrix $R$ to index into matrix $Y$ allows us to obtain an average rating for our first movie, "Toy Story."

# In[7]:

print(f'Average rating for movie 1 (Toy Story): {np.mean(Y[0, R[0,:]==1])} / 5')
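# The masking trick above generalizes to every movie at once. As a quick vectorized sketch of our own (not part of the original exercise), we can compute each movie's average rating in a single pass; the errstate guard covers any movie with zero ratings, which would otherwise trigger a divide-by-zero warning.

# In[ ]:

# illustrative sketch: per-movie average ratings in one vectorized step
# (unrated entries of Y are already zero, so a plain row sum works)
with np.errstate(divide='ignore', invalid='ignore'):
    avg_ratings = np.sum(Y, axis=1) / np.sum(R, axis=1)
print('Highest per-movie average rating: {:.2f} / 5'.format(np.nanmax(avg_ratings)))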
# Below, we visualize the ratings in our matrix. It's clear that some movies are rated more often than others, while many are not rated at all. This may be attributed to movie popularity and/or user reluctance to participate in the rating system.

# In[8]:

plt.imshow(Y, extent=[0,1000,1600,0], aspect='auto')
plt.title('Ratings Matrix')
plt.xlabel('Users')
plt.ylabel('Movies');

# Now, we will implement the collaborative filtering learning algorithm, starting with the cost function. When used for movie recommendation, the collaborative filtering algorithm considers a set of $n$-dimensional parameter vectors $x^{(1)},\ldots,x^{(n_m)}$ and $\theta^{(1)},\ldots, \theta^{(n_u)}$, where the model predicts the rating for movie $i$ by user $j$ as $y^{(i,j)} = (\theta^{(j)})^T x^{(i)}$. Given a dataset that consists of a set of ratings produced by some users on some movies, we wish to learn the parameter vectors $x^{(1)},\ldots,x^{(n_m)},\;\theta^{(1)},\ldots,\theta^{(n_u)}$ that produce the best fit (minimize the squared error).
#
# We will complete the code in cofi_cost_func below to compute the cost function and gradient needed for collaborative filtering. Note that the parameters to the function (i.e., the values we are trying to learn) are $X$ and Theta. In order to use an off-the-shelf minimizer (we will use scipy.optimize.minimize), the cost function has been set up to unroll the parameters into a single vector, params.

# The collaborative filtering cost function (without regularization) is as follows:
#
# $J\left(x^{(1)},\ldots,x^{(n_m)},\;\theta^{(1)},\ldots,\theta^{(n_u)}\right)
# =\frac{1}{2}\sum_{(i,j):r(i,j)=1}\left((\theta^{(j)})^Tx^{(i)}-y^{(i,j)}\right)^2$
#
# We display the cost function below. Note that we modified it to return the cost in the variable $J$, and that we accumulate the cost for user $j$ and movie $i$ only if $R(i,j) = 1$.

# In[9]:

def cofi_cost_func(params, Y, R, num_users, num_movies, num_features, lambda_r=0):
    # unroll params into the feature matrix X and the parameter matrix Theta
    X = np.reshape(params[:num_movies*num_features], newshape=(num_movies, num_features), order='F')
    Theta = np.reshape(params[num_movies*num_features:], newshape=(num_users, num_features), order='F')

    # squared error between predicted and actual ratings,
    # accumulated only where R(i,j) = 1
    C = np.subtract(X.dot(Theta.T), Y) ** 2
    J = np.sum(np.sum(R*C)) / 2

    return J

# We'll test our cost function on a dataset of reduced size, so as to accelerate the testing process.

# In[10]:

# reduce dataset to speed up testing
num_users = 4
num_movies = 5
num_features = 3
X = X[:num_movies, :num_features]
Theta = Theta[:num_users, :num_features]
Y = Y[:num_movies, :num_users]
R = R[:num_movies, :num_users]

# Below we test our cost function. The outcome is approximately 22.22, which is consistent with our expectations.

# In[11]:

J = cofi_cost_func(np.hstack((X.ravel(order='F'), Theta.ravel(order='F'))), Y, R, num_users, num_movies, num_features, 0)
print('Cost at loaded parameters: ', J)
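# As a quick sanity check of our own (not part of the original exercise), the same unregularized cost can be written as a single vectorized expression; it should print the same value of approximately 22.22.

# In[ ]:

# one-line cross-check of the unregularized cost on the reduced dataset
print(0.5 * np.sum(R * (X.dot(Theta.T) - Y) ** 2))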
# We now turn our attention to implementing the gradient. Specifically, we extend cofi_cost_func to calculate the variables X_grad and Theta_grad. Note that X_grad is a matrix of the same size as $X$; similarly, Theta_grad is a matrix of the same size as Theta. The gradients of the cost function are given by:
#
# $\frac{\partial J}{\partial x_k^{(i)}}
# = \sum_{j:r(i,j)=1}\left((\theta^{(j)})^T x^{(i)} -y^{(i,j)}\right)\theta_k^{(j)}$
#
# $\frac{\partial J}{\partial \theta_k^{(j)}} = \sum_{i:r(i,j)=1}\left((\theta^{(j)})^T x^{(i)} -y^{(i,j)}\right) x_k^{(i)}$
#
# Our function returns the gradient for both sets of variables by unrolling them into a single vector. After we complete the code to compute the gradients, we will check our gradient implementation using finite differences. If our code is correct, the analytical and numerical gradients will match up closely.

# In[12]:

def cofi_cost_func(params, Y, R, num_users, num_movies, num_features, lambda_r):
    # unroll params into X and Theta
    X = np.reshape(params[:num_movies*num_features], newshape=(num_movies, num_features), order='F')
    Theta = np.reshape(params[num_movies*num_features:], newshape=(num_users, num_features), order='F')

    C = np.subtract(X.dot(Theta.T), Y) ** 2
    J = np.sum(np.sum(R*C)) / 2

    X_grad = np.zeros(X.shape)
    Theta_grad = np.zeros(Theta.shape)

    # gradient w.r.t. each movie's features, using only the users who rated it
    for i in range(num_movies):
        idx = np.argwhere(R[i,:]==1).T[0]
        X_grad[i,:] = np.subtract(X[i,:].dot(Theta[idx,:].T), Y[i,idx]).dot(Theta[idx,:])

    # gradient w.r.t. each user's parameters, using only the movies they rated
    for j in range(num_users):
        idx = np.argwhere(R[:,j]==1).T[0]
        Theta_grad[j,:] = np.subtract(X[idx,:].dot(Theta[j,:].T), Y[idx,j]).T.dot(X[idx,:])

    grad = np.hstack((X_grad.ravel(order='F'), Theta_grad.ravel(order='F')))

    return J, grad

# The routine below approximates the gradient numerically via central finite differences: each parameter is perturbed by a small amount $e$ in both directions, and the resulting change in cost estimates the corresponding partial derivative. The differences between these estimates and our analytical gradients should be small.

# In[13]:

def compute_numerical_gradient(theta, Y, R, num_users, num_movies, num_features, lambda_r):
    e = 0.0001
    num_grad = np.zeros(theta.shape)
    perturb = np.zeros(theta.shape)

    # perturb one parameter at a time and measure the change in cost
    for p in range(len(theta)):
        perturb[p] = e
        loss1,_ = cofi_cost_func(theta-perturb, Y, R, num_users, num_movies, num_features, lambda_r)
        loss2,_ = cofi_cost_func(theta+perturb, Y, R, num_users, num_movies, num_features, lambda_r)
        num_grad[p] = (loss2-loss1)/(2*e)
        perturb[p] = 0

    return num_grad

# check_cost_function builds a small random problem, calls our gradient check, and reports the relative difference between the numerical and analytical gradients, which tells us whether or not our gradient calculations are reasonable.

# In[14]:

def check_cost_function(lambda_r=0):
    # construct a small random dataset with roughly half the ratings masked out
    X_t = np.random.uniform(0,1,(4,3))
    Theta_t = np.random.uniform(0,1,(5,3))
    Y = X_t.dot(Theta_t.T)
    Y[np.random.uniform(0,1,Y.shape)>0.5] = 0
    R = np.zeros(Y.shape)
    R[Y!=0] = 1

    X = np.random.normal(size=X_t.shape)
    Theta = np.random.normal(size=Theta_t.shape)
    num_users = Y.shape[1]
    num_movies = Y.shape[0]
    num_features = Theta_t.shape[1]

    params = np.hstack((X.ravel(order='F'), Theta.ravel(order='F')))
    cost, grad = cofi_cost_func(params, Y, R, num_users, num_movies, num_features, lambda_r)
    num_grad = compute_numerical_gradient(params, Y, R, num_users, num_movies, num_features, lambda_r)

    print('The columns should be very similar...')
    for i, j in zip(num_grad, grad):
        print(i, j)

    diff = np.linalg.norm(num_grad-grad)/np.linalg.norm(num_grad+grad)
    print('''If your cost function implementation is correct, then
the relative difference will be small (less than 1e-9).
Relative Difference:''', diff)

# As one can easily see, our gradient calculations pass muster.

# In[15]:

check_cost_function()
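# As an aside of our own (not part of the original exercise), the two per-row loops in cofi_cost_func can be replaced by a fully vectorized computation: masking the error matrix with $R$ zeroes out the unrated entries, after which a pair of matrix products yields both gradients at once. A minimal sketch on the reduced dataset:

# In[ ]:

# illustrative sketch: fully vectorized unregularized gradients
error = (X.dot(Theta.T) - Y) * R          # errors only where ratings exist
X_grad_vec = error.dot(Theta)             # dJ/dX, same shape as X
Theta_grad_vec = error.T.dot(X)           # dJ/dTheta, same shape as Theta

# verify against the loop-based implementation above
_, grad = cofi_cost_func(np.hstack((X.ravel(order='F'), Theta.ravel(order='F'))),
                         Y, R, num_users, num_movies, num_features, 0)
print(np.allclose(grad, np.hstack((X_grad_vec.ravel(order='F'), Theta_grad_vec.ravel(order='F')))))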
# Below, we introduce regularization to our cost function. Though we left room for a regularization parameter, lambda_r, in the function signature above, that code lacked the machinery to implement regularization. The cost function for collaborative filtering with regularization is given by:
#
# $J\left(x^{(1)},\ldots,x^{(n_m)},\;\theta^{(1)},\ldots,\theta^{(n_u)}\right)
# =\\ \qquad\qquad \frac{1}{2}\sum_{(i,j):r(i,j)=1}\left((\theta^{(j)})^Tx^{(i)}-y^{(i,j)}\right)^2
# +\left(\frac{\lambda}{2}\sum_{j=1}^{n_u}{\sum_{k=1}^{n}{(\theta_k^{(j)})^2}}\right)
# +\left(\frac{\lambda}{2}\sum_{i=1}^{n_m}{\sum_{k=1}^{n}{(x_k^{(i)})^2}}\right)$
#
# Our cost function with regularization should produce a cost of about 31.34.

# In[16]:

def cofi_cost_func(params, Y, R, num_users, num_movies, num_features, lambda_r):
    X = np.reshape(params[:num_movies*num_features], newshape=(num_movies, num_features), order='F')
    Theta = np.reshape(params[num_movies*num_features:], newshape=(num_users, num_features), order='F')

    # squared-error term plus the two regularization penalties
    C = np.subtract(X.dot(Theta.T), Y) ** 2
    J = np.sum(np.sum(R*C)) / 2 + ((lambda_r/2)*np.sum(np.sum(Theta**2))) + ((lambda_r/2)*np.sum(np.sum(X**2)))

    X_grad = np.zeros(X.shape)
    Theta_grad = np.zeros(Theta.shape)

    for i in range(num_movies):
        idx = np.argwhere(R[i,:]==1).T[0]
        X_grad[i,:] = np.subtract(X[i,:].dot(Theta[idx,:].T), Y[i,idx]).dot(Theta[idx,:])

    for j in range(num_users):
        idx = np.argwhere(R[:,j]==1).T[0]
        Theta_grad[j,:] = np.subtract(X[idx,:].dot(Theta[j,:].T), Y[idx,j]).T.dot(X[idx,:])

    grad = np.hstack((X_grad.ravel(order='F'), Theta_grad.ravel(order='F')))

    return J, grad

# As expected, our cost function with regularization ($\lambda = 1.5$) yields a value of approximately 31.344.

# In[17]:

J, _ = cofi_cost_func(np.hstack((X.ravel(order='F'), Theta.ravel(order='F'))), Y, R, num_users, num_movies, num_features, 1.5)
print('Cost at loaded parameters (lambda = 1.5):', round(J,3))

# The gradients must be regularized as well: we add $\lambda x^{(i)}$ to the gradient with respect to $x^{(i)}$ and $\lambda\theta^{(j)}$ to the gradient with respect to $\theta^{(j)}$. As with our unregularized cost function, we then check our analytical gradient calculations against their numerical counterparts.

# In[20]:

def cofi_cost_func(params, Y, R, num_users, num_movies, num_features, lambda_r):
    X = np.reshape(params[:num_movies*num_features], newshape=(num_movies, num_features), order='F')
    Theta = np.reshape(params[num_movies*num_features:], newshape=(num_users, num_features), order='F')

    C = np.subtract(X.dot(Theta.T), Y) ** 2
    J = np.sum(np.sum(R*C)) / 2 + ((lambda_r/2)*np.sum(np.sum(Theta**2))) + ((lambda_r/2)*np.sum(np.sum(X**2)))

    X_grad = np.zeros(X.shape)
    Theta_grad = np.zeros(Theta.shape)

    # regularized gradients: the unregularized terms plus lambda_r times the parameters
    for i in range(num_movies):
        idx = np.argwhere(R[i,:]==1).T[0]
        X_grad[i,:] = np.subtract(X[i,:].dot(Theta[idx,:].T), Y[i,idx]).dot(Theta[idx,:]) + (lambda_r * X[i,:])

    for j in range(num_users):
        idx = np.argwhere(R[:,j]==1).T[0]
        Theta_grad[j,:] = np.subtract(X[idx,:].dot(Theta[j,:].T), Y[idx,j]).T.dot(X[idx,:]) + (lambda_r * Theta[j,:])

    grad = np.hstack((X_grad.ravel(order='F'), Theta_grad.ravel(order='F')))

    return J, grad

# Presented below are gradient calculations produced by our function with regularization. The difference between the numerical and analytical gradients is minuscule.

# In[21]:

check_cost_function(1.5)

# Having finished implementing our cost function and gradient, we can finally start training the algorithm to make movie recommendations for ourselves. In the code below, we enter our own movie preferences, so that later we can get personalized recommendations!

# In the following cells, we load the list of movies and assign ratings to a handful of them.

# In[22]:

with open('data/movie_ids.txt', encoding='latin-1') as f:
    content = f.readlines()
    movie_list = [' '.join(line.split()[1:]) for line in content]

# In[23]:

my_ratings = np.zeros((1682, 1))
my_ratings[0] = 2
my_ratings[10] = 5
my_ratings[21] = 1
my_ratings[70] = 1
my_ratings[97] = 5
my_ratings[98] = 3
my_ratings[150] = 3
my_ratings[154] = 2
my_ratings[175] = 5
my_ratings[312] = 1

# Below, we review the movies we rated and the ratings we assigned to them.

# In[24]:

for i,r in enumerate(my_ratings):
    if r > 0:
        print('Rated {0} for {1}'.format(int(r[0]), movie_list[i]))
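# As a convenience, we sketch a small, hypothetical helper of our own (not part of the original exercise) for locating a movie's index in movie_list by title, which is easier than memorizing positions when entering ratings.

# In[ ]:

def find_movie(title):
    """Hypothetical helper: return (index, name) pairs whose name contains the given title."""
    return [(i, m) for i, m in enumerate(movie_list) if title.lower() in m.lower()]

# e.g., confirm that index 0 corresponds to Toy Story before rating it
print(find_movie('Toy Story'))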
# Next, the code below trains the collaborative filtering model; that is, it learns the parameters X and Theta. We begin by prepending our own ratings to the existing ratings matrix.

# In[25]:

mat = scipy.io.loadmat('data/ex8_movies.mat')
Y = np.hstack((my_ratings, mat['Y']))
R = np.hstack((my_ratings != 0, mat['R']))

# Having added our preferences to the pre-existing user ratings, we apply mean normalization: each movie's average rating is subtracted from its observed ratings. Because the mean is added back at prediction time, a user who has rated nothing is predicted each movie's average rating rather than a rating of zero.

# In[26]:

def normalize_ratings(Y, R):
    Y_mean = np.zeros((Y.shape[0], 1))
    Y_norm = np.zeros(Y.shape)

    # normalize only the entries that were actually rated
    for i in range(Y.shape[0]):
        idx = np.argwhere(R[i,:]==1).T[0]
        Y_mean[i] = np.mean(Y[i,idx], axis=0)
        Y_norm[i,idx] = np.subtract(Y[i,idx], Y_mean[i])

    return Y_norm, Y_mean

# Here, we compute the normalized ratings and per-movie means used in training our model.

# In[27]:

Y_norm, Y_mean = normalize_ratings(Y, R)

# In[28]:

num_users = Y.shape[1]
num_movies = Y.shape[0]
num_features = 10

# The dimensions above are used to randomly initialize the matrices $X$ and $\Theta$.

# In[29]:

X = np.random.normal(size=(num_movies, num_features))
Theta = np.random.normal(size=(num_users, num_features))
initial_params = np.hstack((X.ravel(order='F'), Theta.ravel(order='F')))

# We're now ready to minimize our regularized cost function. Note that we train on the mean-normalized ratings Y_norm, not on Y itself.

# In[30]:

import scipy.optimize as opt

lambda_r = 10
opt_results = opt.minimize(cofi_cost_func, initial_params,
                           args=(Y_norm, R, num_users, num_movies, num_features, lambda_r),
                           method='L-BFGS-B', jac=True, options={'maxiter': 100})
theta = opt_results['x']

# After reshaping the learned parameters, we are ready to make predictions about which films, if any, we might enjoy. The per-movie means are added back to undo the normalization.

# In[31]:

X = np.reshape(theta[:num_movies*num_features], newshape=(num_movies, num_features), order='F')
Theta = np.reshape(theta[num_movies*num_features:], newshape=(num_users, num_features), order='F')

# In[32]:

p = X.dot(Theta.T)
my_predictions = p[:,0] + Y_mean.T[0]

# The recommendations look reasonable and fit squarely within the set of films we'd enjoy watching!

# In[33]:

sort_idxs = np.argsort(my_predictions)[::-1]

print('Top recommendations for you:')
for i in range(10):
    j = sort_idxs[i]
    print('Predicting rating {0:.1f} for movie {1}'.format(my_predictions[j], movie_list[j]))
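# As a final sanity check of our own (not part of the original exercise), we can compare the model's predictions on the movies we rated ourselves. The predictions should track our actual ratings reasonably well, though regularization and mean normalization will pull them toward each movie's average.

# In[ ]:

# compare our actual ratings to the model's predictions for the same movies
print("Our ratings vs. the model's predictions:")
for i, r in enumerate(my_ratings):
    if r > 0:
        print('Rated {0}, predicted {1:.1f} for {2}'.format(int(r[0]), my_predictions[i], movie_list[i]))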