from time import time
import logging
import pylab as pl
import numpy as np
import matplotlib.pyplot as plt
# !pip install -U scikit-learn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC

# Display progress
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')

# Download the data (fetched over the network on first use, then cached by sklearn)
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)

# Find out shape information about the images to help with plotting them
n_samples, h, w = lfw_people.images.shape

np.random.seed(42)

# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]

# the label to predict is the ID of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]

print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
print("Classes: %s" % target_names)

# Let's look at the data to see what the faces look like.
# NOTE: the original had bare `pl.figure` (attribute access, a no-op) — fixed to a call.
pl.figure()
for i in range(3):
    pl.subplot(1, 3, i + 1)
    pl.imshow(X[i].reshape((h, w)), cmap=pl.cm.bone)
    pl.title(target_names[lfw_people.target[i]])
    pl.xticks(())
    pl.yticks(())

# Split into a training set (75%) and a test set (25%)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)
y_test.dtype

# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 250
print("Extracting the top %d eigenfaces from %d faces"
      % (n_components, X_train.shape[0]))

# Initiate a time counter (kinda like tic/toc)
t0 = time()
# Here we take the training data and compute the PCs
pca = PCA(n_components=n_components, whiten=True).fit(X_train)
# Print the time it took to compute
print("Done in %0.3fs" % (time() - t0))

# Reshape the PCs to the image format (one h-by-w image per component)
eigenfaces = pca.components_.reshape((n_components, h, w))

# Show the first three eigenfaces.
# NOTE: the original had bare `pl.figure` (attribute access, a no-op) — fixed to a call.
pl.figure()
for i in range(3):
    pl.subplot(1, 3, i + 1)
    pl.imshow(eigenfaces[i], cmap=pl.cm.bone)
    pl.title("Eigenface PC- %d" % (i + 1))
    pl.xticks(())
    pl.yticks(())
# cbar = pl.colorbar()
# cbar.solids.set_edgecolor("face")
# pl.draw()
# print(pca.explained_variance_)
# print(pca.explained_variance_ratio_)

# Bar plots of the variance (absolute and as a ratio of the total)
# captured by the leading principal components
top_pcs = 21
plt.figure(figsize=(9, 3))
plt.subplot(121)
plt.bar(np.arange(top_pcs), pca.explained_variance_[0:top_pcs])
plt.xlabel('PCs')
plt.ylabel('Var')
plt.subplot(122)
plt.bar(np.arange(top_pcs), pca.explained_variance_ratio_[0:top_pcs])
plt.xlabel('PCs')
plt.ylabel('Ratio')
np.shape(pca.explained_variance_)

print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()  # tic
X_train_pca = pca.transform(X_train)  # take the training data and project it to eigenfaces
X_test_pca = pca.transform(X_test)    # take the test data and project it to eigenfaces
print("Done in %0.3fs" % (time() - t0))  # toc

# Training
print("Fitting the classifier to the training set")
t0 = time()
# These set parameters that we want to optimize.
# These are passed to GridSearchCV, which uses the optimal parameters
# when fitting the classifier.
# (The original contained a stray `classifier =clf` fragment here, which
# referenced `clf` before it was defined — removed.)
param_grid = {
    'C': [1e3, 5e3, 1e4, 5e4, 1e5],
    'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
# for sklearn version 0.16 or prior, the class_weight parameter value is 'auto'
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)

# Testing the classifier
print("Predicting the people names on the testing set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))


def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Helper function to plot a gallery of portraits.

    images  : array-like of flattened images, each reshapeable to (h, w)
    titles  : per-image title strings (at least n_row * n_col of them)
    h, w    : image height and width in pixels
    n_row, n_col : grid dimensions of the gallery
    """
    pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for i in range(n_row * n_col):
        pl.subplot(n_row, n_col, i + 1)
        pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
        pl.title(titles[i], size=12)
        pl.xticks(())
        pl.yticks(())


def title(y_pred, y_test, target_names, i):
    """Return a 'predicted vs true' label (surname only) for test sample i."""
    pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
    true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
    return 'predicted: %s\ntrue: %s' % (pred_name, true_name)


# plot the result of the prediction on a portion of the test set
prediction_titles = [title(y_pred, y_test, target_names, i)
                     for i in range(y_pred.shape[0])]

# Now plot the predictions
# prediction_titles=(X_test, prediction_titles,h,w)
plot_gallery(X_test, prediction_titles, h, w)

# Plot the eigenfaces
eigenface_titles = ["Eigneface %d " % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()