# Load libraries
import numpy as np # Math
import scipy.io # Import data
import time
import sklearn.neighbors, sklearn.linear_model, sklearn.svm, sklearn.ensemble, sklearn.naive_bayes, sklearn.metrics # Baseline classification techniques and accuracy metric
import matplotlib.pyplot as plt
# Load 400 text documents representing 5 classes
# X_train matrix contains the training data
# y_train vector contains the training labels
# X_test matrix contains the test data
# y_test vector contains the test labels
X_train, y_train, X_test, y_test = np.load('datasets/20news_5classes_400docs.npy', allow_pickle=True) # allow_pickle is required for object arrays in NumPy >= 1.16.3
print('X_train size=',X_train.shape)
print('X_test size=',X_test.shape)
print('y_train size=',y_train.shape)
print('y_test size=',y_test.shape)
# Your code here
clf, train_accuracy, test_accuracy, exec_time = [], [], [], []
clf.append(sklearn.neighbors.KNeighborsClassifier()) # k-NN classifier
clf.append(sklearn.svm.LinearSVC()) # linear SVM classifier
clf.append(sklearn.linear_model.LogisticRegression()) # logistic classifier
clf.append(sklearn.ensemble.RandomForestClassifier())
clf.append(sklearn.linear_model.RidgeClassifier())
clf.append(sklearn.naive_bayes.BernoulliNB())
clf.append(sklearn.naive_bayes.MultinomialNB())
for c in clf:
    t_start = time.process_time()
    c.fit(X_train, y_train)
    train_pred = c.predict(X_train)
    test_pred = c.predict(X_test)
    train_accuracy.append('{:5.2f}'.format(100*sklearn.metrics.accuracy_score(y_train, train_pred)))
    test_accuracy.append('{:5.2f}'.format(100*sklearn.metrics.accuracy_score(y_test, test_pred)))
    exec_time.append('{:5.2f}'.format(time.process_time() - t_start))
print('Train accuracy: {}'.format(' '.join(train_accuracy)))
print('Test accuracy: {}'.format(' '.join(test_accuracy)))
print('Execution time: {}'.format(' '.join(exec_time)))
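To identify the winner programmatically, a minimal sketch (it reuses the clf and test_accuracy lists filled above):
# Pick the baseline with the highest test accuracy
clf_names = [type(c).__name__ for c in clf]
best_idx = int(np.argmax([float(a) for a in test_accuracy]))
print('Best baseline: {} ({}% test accuracy)'.format(clf_names[best_idx], test_accuracy[best_idx].strip()))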
Observe the results. Which technique performs best?
Do you think the other classification techniques are really less effective?
Should you trust every black-box data analysis technique out of the box?
Let us focus on one classification technique, logistic regression:
model = sklearn.linear_model.LogisticRegression(C=C_value)
and its hyperparameter C, which controls the trade-off between the data-fitting term and the regularization term.
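For reference, in the binary case scikit-learn's L2-penalized logistic regression minimizes

$$\min_{w,\,b}\ \frac{1}{2}\, w^\top w \;+\; C \sum_{i=1}^{n} \log\!\left(1 + e^{-y_i (x_i^\top w + b)}\right),$$

so a large C gives more weight to the data term (weak regularization) while a small C regularizes strongly.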
Hint: You may use the function np.array_split()
num_folds = 5
X_train = X_train.toarray() # for np.array_split
X_train_folds = np.array_split(X_train, num_folds)
y_train_folds = np.array_split(y_train, num_folds)
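A quick sanity check of np.array_split(): unlike np.split(), it tolerates splits that do not divide the array evenly.
print(np.array_split(np.arange(10), 3))
# [array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]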
Values of the hyperparameter C:
C_choices = [1e-2, 5*1e-2, 1e-1, 5*1e-1, 1e0, 5*1e0, 1e1, 5*1e1, 1e2, 5*1e2, 1e3, 5*1e3]
num_Cs = len(C_choices)
accuracy_tab = np.zeros([num_folds,num_Cs])
for C_idx, C_value in enumerate(C_choices):
    for fold_idx in range(num_folds):
        # Extract the training folds for the current split
        fold_x_train = np.concatenate([X_train_folds[i] for i in range(num_folds) if i != fold_idx])
        fold_y_train = np.concatenate([y_train_folds[i] for i in range(num_folds) if i != fold_idx])
        # Validation fold for the current split
        fold_x_val = X_train_folds[fold_idx]
        fold_y_val = y_train_folds[fold_idx]
        # Fit a logistic regression model on the training folds
        model = sklearn.linear_model.LogisticRegression(C=C_value)
        model.fit(fold_x_train, fold_y_train)
        val_pred = model.predict(fold_x_val)
        accuracy = sklearn.metrics.accuracy_score(fold_y_val, val_pred)
        # Store the accuracy value
        accuracy_tab[fold_idx, C_idx] = accuracy
print(accuracy_tab)
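As an optional cross-check (not part of the exercise), scikit-learn's GridSearchCV automates the same search; it uses stratified folds by default, so its numbers may differ slightly from the manual contiguous splits above.
import sklearn.model_selection
grid = sklearn.model_selection.GridSearchCV(sklearn.linear_model.LogisticRegression(), {'C': C_choices}, cv=num_folds)
grid.fit(X_train, y_train)
print('best C =', grid.best_params_['C'], ' mean CV accuracy =', grid.best_score_)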
Hint: You may use the functions plt.scatter(), np.mean(), np.std(), plt.errorbar(), and plt.show()
# plot the raw observations
for C_idx, C_value in enumerate(C_choices):
    accuracies_C_idx = accuracy_tab[:, C_idx]
    plt.scatter([np.log(C_value)] * len(accuracies_C_idx), accuracies_C_idx)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.mean(accuracy_tab,axis=0)
accuracies_std = np.std(accuracy_tab,axis=0)
plt.errorbar(np.log(C_choices), accuracies_mean, yerr=accuracies_std)
# Add text
plt.title('Cross-validation on C')
plt.xlabel('log C')
plt.ylabel('Cross-validation accuracy')
plt.show()
Did we do better than the best technique from Question 1, or not?
Hint: You may use the function np.argmax()
idx_best_C = np.argmax(accuracies_mean)
best_C = C_choices[idx_best_C]
model = sklearn.linear_model.LogisticRegression(C=best_C)
model.fit(X_train, y_train)
test_pred = model.predict(X_test)
accuracy_testset = sklearn.metrics.accuracy_score(y_test, test_pred)
print('best C =', best_C, '-> test accuracy =', accuracy_testset)
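To compare with Question 1, a minimal sketch assuming the test_accuracy list from the first cell is still in scope:
best_baseline = max(float(a) for a in test_accuracy)  # best untuned baseline, in percent
print('best baseline test accuracy = {:.2f}%'.format(best_baseline))
print('tuned logistic regression   = {:.2f}%'.format(100 * accuracy_testset))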