import numpy as np
import os
import sys
import cntk
from cntk.layers import Convolution2D, MaxPooling, Dense, Dropout
from common.params import *
from common.utils import *
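The hyperparameters are imported from common.params and are not shown in this notebook. The sketch below lists values consistent with the logs further down (10 epochs, batch size 64, 10 classes); LR and MOMENTUM are assumptions, not values from the source.

# Hypothetical contents of common/params.py (not shown in this notebook)
# EPOCHS = 10        # matches the ten epoch lines logged during training
# BATCHSIZE = 64     # per-batch accuracies below are multiples of 1/64
# N_CLASSES = 10     # CIFAR-10; label arrays are (n, 10) one-hot
# LR = 0.01          # assumption
# MOMENTUM = 0.9     # assumption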
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("Numpy: ", np.__version__)
print("CNTK: ", cntk.__version__)
print("GPU: ", get_gpu_name())
OS:  linux
Python:  3.5.2 |Anaconda custom (64-bit)| (default, Jul 2 2016, 17:53:06) [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]
Numpy:  1.13.3
CNTK:  2.2
GPU:  ['Tesla K80']
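get_gpu_name comes from common.utils and is not reproduced here. A minimal sketch of such a helper, assuming nvidia-smi is available on the PATH (an assumption, not the repo's actual implementation):

# Hypothetical sketch of common.utils.get_gpu_name
import subprocess
def get_gpu_name():
    # nvidia-smi prints one GPU name per line with this query
    out = subprocess.check_output(
        ["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"])
    return out.decode("utf-8").strip().split("\n")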
def create_symbol():
    # Weight initialiser from uniform distribution
    # Activation (unless stated) is None; here relu is set as the default
    # Note: uses the global `features` input variable defined further below
    with cntk.layers.default_options(init=cntk.glorot_uniform(), activation=cntk.relu):
        x = Convolution2D(filter_shape=(3, 3), num_filters=50, pad=True)(features)
        x = Convolution2D(filter_shape=(3, 3), num_filters=50, pad=True)(x)
        x = MaxPooling((2, 2), strides=(2, 2), pad=False)(x)
        x = Dropout(0.25)(x)
        x = Convolution2D(filter_shape=(3, 3), num_filters=100, pad=True)(x)
        x = Convolution2D(filter_shape=(3, 3), num_filters=100, pad=True)(x)
        x = MaxPooling((2, 2), strides=(2, 2), pad=False)(x)
        x = Dropout(0.25)(x)
        x = Dense(512)(x)
        x = Dropout(0.5)(x)
        x = Dense(N_CLASSES, activation=None)(x)
    return x
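Because create_symbol closes over the global features variable, the symbol can only be built after the placeholder cell further down has run. Once sym exists, a quick sanity check might look like this (a sketch, not part of the original notebook):

# Optional sanity check after `sym = create_symbol()` has run:
# the single output should have shape (N_CLASSES,) = (10,)
print(sym.shape)
# log the total number of learnable parameters in the graph
cntk.logging.log_number_of_parameters(sym)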
def init_model(m):
    # Loss (dense labels); check whether sparse labels are also supported
    loss = cntk.cross_entropy_with_softmax(m, labels)
    # Momentum SGD
    # https://github.com/Microsoft/CNTK/blob/master/Manual/Manual_How_to_use_learners.ipynb
    # unit_gain=False: momentum_direction = momentum*old_momentum_direction + gradient
    # unit_gain=True:  momentum_direction = momentum*old_momentum_direction + (1-momentum)*gradient
    learner = cntk.momentum_sgd(m.parameters,
                                lr=cntk.learning_rate_schedule(LR, cntk.UnitType.minibatch),
                                momentum=cntk.momentum_schedule(MOMENTUM),
                                unit_gain=False)
    trainer = cntk.Trainer(m, (loss, cntk.classification_error(m, labels)), [learner])
    return trainer
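The unit_gain flag only changes how the gradient enters the momentum buffer. A plain-NumPy illustration of the two update rules referenced in the comments above (for intuition only, not CNTK code):

# Plain-NumPy sketch of the two momentum variants
import numpy as np

def momentum_step(w, v, grad, lr, momentum, unit_gain):
    if unit_gain:
        # unit-gain: the gradient is scaled by (1 - momentum)
        v = momentum * v + (1.0 - momentum) * grad
    else:
        # classic (used here): the gradient enters the buffer unscaled
        v = momentum * v + grad
    return w - lr * v, v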
%%time
# Data into format for library
x_train, x_test, y_train, y_test = cifar_for_library(channel_first=True, one_hot=True)
# CNTK format
y_train = y_train.astype(np.float32)
y_test = y_test.astype(np.float32)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)
Preparing train set...
Preparing test set...
(50000, 3, 32, 32) (10000, 3, 32, 32) (50000, 10) (10000, 10)
float32 float32 float32 float32
CPU times: user 833 ms, sys: 553 ms, total: 1.39 s
Wall time: 1.38 s
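cifar_for_library lives in common.utils and is shared across the framework notebooks: channel_first=True returns NCHW arrays (the layout CNTK expects) and one_hot=True expands the labels to (n, 10). A rough sketch of the label handling, with the download/unpickle steps omitted (an assumption about the helper, not its actual code):

# Hypothetical sketch of the one-hot step inside cifar_for_library
def to_one_hot(y, n_classes=10):
    out = np.zeros((len(y), n_classes), dtype=np.float32)
    out[np.arange(len(y)), y] = 1.0
    return out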
%%time
# Placeholders
features = cntk.input_variable((3, 32, 32), np.float32)
labels = cntk.input_variable(N_CLASSES, np.float32)
# Load symbol
sym = create_symbol()
CPU times: user 22.6 ms, sys: 28.6 ms, total: 51.2 ms
Wall time: 76.1 ms
%%time
trainer = init_model(sym)
CPU times: user 72.1 ms, sys: 224 ms, total: 297 ms
Wall time: 303 ms
%%time
# 163s
# Train model
for j in range(EPOCHS):
    for data, label in yield_mb(x_train, y_train, BATCHSIZE, shuffle=True):
        trainer.train_minibatch({features: data, labels: label})
    # Log (this is just the last batch in the epoch, not the average over batches)
    eval_error = trainer.previous_minibatch_evaluation_average
    print("Epoch %d | Accuracy: %.6f" % (j+1, (1-eval_error)))
Epoch 1 | Accuracy: 0.562500
Epoch 2 | Accuracy: 0.640625
Epoch 3 | Accuracy: 0.625000
Epoch 4 | Accuracy: 0.703125
Epoch 5 | Accuracy: 0.703125
Epoch 6 | Accuracy: 0.765625
Epoch 7 | Accuracy: 0.859375
Epoch 8 | Accuracy: 0.796875
Epoch 9 | Accuracy: 0.781250
Epoch 10 | Accuracy: 0.796875
CPU times: user 2min 19s, sys: 21.4 s, total: 2min 40s
Wall time: 2min 43s
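yield_mb is another common.utils helper: it shuffles the data once per epoch and yields full minibatches, dropping any final partial batch, which is why the scoring cell below truncates the test set to a multiple of BATCHSIZE. A minimal sketch under those assumptions:

# Hypothetical sketch of common.utils.yield_mb
def yield_mb(X, y, batchsize, shuffle=False):
    if shuffle:
        idx = np.random.permutation(len(X))
        X, y = X[idx], y[idx]
    # drop the last incomplete batch
    for i in range(len(X) // batchsize):
        s = slice(i * batchsize, (i + 1) * batchsize)
        yield X[s], y[s]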
%%time
# Predict and then score accuracy
# (No softmax needed: softmax is monotonic, so it doesn't change the argmax)
n_samples = (y_test.shape[0]//BATCHSIZE)*BATCHSIZE
y_guess = np.zeros(n_samples, dtype=np.int)
y_truth = np.argmax(y_test[:n_samples], axis=-1)
c = 0
for data, label in yield_mb(x_test, y_test, BATCHSIZE):
    predicted_label_probs = sym.eval({features: data})
    y_guess[c*BATCHSIZE:(c+1)*BATCHSIZE] = np.argmax(predicted_label_probs, axis=-1)
    c += 1
CPU times: user 850 ms, sys: 337 ms, total: 1.19 s
Wall time: 1.4 s
print("Accuracy: ", sum(y_guess == y_truth)/len(y_guess))
Accuracy: 0.780649038462
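As noted above, skipping the softmax at scoring time is safe because softmax is strictly monotonic per element, so it never changes which class has the largest score. A quick check:

# Quick check that argmax is unchanged by softmax
logits = np.array([2.0, -1.0, 0.5], dtype=np.float32)
softmax = np.exp(logits) / np.exp(logits).sum()
assert np.argmax(logits) == np.argmax(softmax)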