import os
import sys
import numpy as np
os.environ['KERAS_BACKEND'] = "cntk"
import keras as K
import cntk
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from common.params import *
from common.utils import *
Using CNTK backend
# channels_first is faster (CNTK, like most GPU backends here, prefers NCHW layout)
K.backend.set_image_data_format('channels_first')
# Print environment versions so the benchmark run is reproducible/comparable.
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("Keras: ", K.__version__)
print("Numpy: ", np.__version__)
print("CNTK: ", cntk.__version__)
# Confirm the KERAS_BACKEND env var took effect (should print "cntk").
print(K.backend.backend())
# Should be channels-first, otherwise slow
print(K.backend.image_data_format())
# get_gpu_name() comes from common.utils — presumably queries nvidia-smi; verify there.
print("GPU: ", get_gpu_name())
OS: linux Python: 3.5.2 |Anaconda custom (64-bit)| (default, Jul 2 2016, 17:53:06) [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] Keras: 2.1.1 Numpy: 1.13.3 CNTK: 2.2 cntk channels_first GPU: ['Tesla K80']
def create_symbol(n_classes=N_CLASSES, input_shape=(3, 32, 32)):
    """Build the (uncompiled) CNN classifier.

    Two conv blocks (2x Conv2D + MaxPool + Dropout) followed by a dense head.
    Expects channels-first input (see the channels_first setup above).

    Args:
        n_classes: number of output classes; defaults to N_CLASSES from
            common.params so existing callers are unchanged.
        input_shape: channels-first input shape; defaults to CIFAR's (3, 32, 32).

    Returns:
        An uncompiled keras Sequential model.
    """
    model = Sequential()
    # Block 1: 50 filters, 'same' padding keeps 32x32 spatial size until pooling.
    model.add(Conv2D(50, kernel_size=(3, 3), padding='same', activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(50, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))
    # Block 2: double the filters at half the spatial resolution.
    model.add(Conv2D(100, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(100, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))
    # Classifier head.
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    return model
def init_model(m, lr=LR, momentum=MOMENTUM):
    """Compile the model for training and return it.

    Args:
        m: an uncompiled keras model (see create_symbol).
        lr: SGD learning rate; defaults to LR from common.params, so
            existing call sites are unchanged.
        momentum: SGD momentum; defaults to MOMENTUM from common.params.

    Returns:
        The same model, compiled in place (categorical cross-entropy + SGD,
        tracking accuracy).
    """
    m.compile(
        loss="categorical_crossentropy",
        optimizer=K.optimizers.SGD(lr, momentum),
        metrics=['accuracy'])
    return m
%%time
# Data into format for library
# cifar_for_library comes from common.utils; channel_first=True yields NCHW
# arrays, one_hot=True yields one-hot label matrices (needed by
# categorical_crossentropy).
x_train, x_test, y_train, y_test = cifar_for_library(channel_first=True, one_hot=True)
# Sanity-check shapes: (50000, 3, 32, 32) / (10000, 3, 32, 32) and (N, 10) labels.
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# And dtypes: float32 images, int32 one-hot labels.
print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)
Preparing train set... Preparing test set... (50000, 3, 32, 32) (10000, 3, 32, 32) (50000, 10) (10000, 10) float32 float32 int32 int32 CPU times: user 876 ms, sys: 535 ms, total: 1.41 s Wall time: 1.41 s
%%time
# Load symbol
# Build the (still uncompiled) Keras model graph.
sym = create_symbol()
CPU times: user 189 ms, sys: 232 ms, total: 420 ms Wall time: 429 ms
%%time
# Initialise model
# Compile with categorical cross-entropy + SGD (see init_model above).
model = init_model(sym)
CPU times: user 4.89 ms, sys: 1.74 ms, total: 6.63 ms Wall time: 6.61 ms
# Print per-layer output shapes and parameter counts (~3.44M trainable params).
model.summary()
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_1 (Conv2D) (None, 50, 32, 32) 1400 _________________________________________________________________ conv2d_2 (Conv2D) (None, 50, 32, 32) 22550 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 50, 16, 16) 0 _________________________________________________________________ dropout_1 (Dropout) (None, 50, 16, 16) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 100, 16, 16) 45100 _________________________________________________________________ conv2d_4 (Conv2D) (None, 100, 16, 16) 90100 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 100, 8, 8) 0 _________________________________________________________________ dropout_2 (Dropout) (None, 100, 8, 8) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 6400) 0 _________________________________________________________________ dense_1 (Dense) (None, 512) 3277312 _________________________________________________________________ dropout_3 (Dropout) (None, 512) 0 _________________________________________________________________ dense_2 (Dense) (None, 10) 5130 ================================================================= Total params: 3,441,592 Trainable params: 3,441,592 Non-trainable params: 0 _________________________________________________________________
%%time
# 194s
# Train model
# BATCHSIZE and EPOCHS come from common.params; verbose=1 prints per-epoch
# progress. Per the recorded output, reaches ~0.81 train accuracy in 10 epochs.
model.fit(x_train,
y_train,
batch_size=BATCHSIZE,
epochs=EPOCHS,
verbose=1)
Epoch 1/10 50000/50000 [==============================] - 20s 409us/step - loss: 1.8268 - acc: 0.3311 Epoch 2/10 50000/50000 [==============================] - 19s 385us/step - loss: 1.3521 - acc: 0.5112 Epoch 3/10 50000/50000 [==============================] - 19s 385us/step - loss: 1.1328 - acc: 0.5959 Epoch 4/10 50000/50000 [==============================] - 19s 387us/step - loss: 0.9807 - acc: 0.6517 Epoch 5/10 50000/50000 [==============================] - 19s 385us/step - loss: 0.8652 - acc: 0.6922 Epoch 6/10 50000/50000 [==============================] - 19s 386us/step - loss: 0.7804 - acc: 0.7242 Epoch 7/10 50000/50000 [==============================] - 19s 387us/step - loss: 0.7033 - acc: 0.7514 Epoch 8/10 50000/50000 [==============================] - 19s 386us/step - loss: 0.6422 - acc: 0.7734 Epoch 9/10 50000/50000 [==============================] - 19s 385us/step - loss: 0.5917 - acc: 0.7911 Epoch 10/10 50000/50000 [==============================] - 19s 386us/step - loss: 0.5363 - acc: 0.8099 CPU times: user 2min 44s, sys: 29.2 s, total: 3min 14s Wall time: 3min 14s
<keras.callbacks.History at 0x7fc8a856ac50>
%%time
# Predict class probabilities on the test set, then argmax to hard labels.
y_guess = model.predict(x_test, batch_size=BATCHSIZE)
y_guess = np.argmax(y_guess, axis=-1)
# y_test is one-hot (see data loading); recover integer class ids the same way.
y_truth = np.argmax(y_test, axis=-1)
CPU times: user 1.06 s, sys: 225 ms, total: 1.29 s Wall time: 1.46 s
# Accuracy = fraction of matching predictions. np.mean over the boolean match
# array is the idiomatic, fully vectorized form of sum(matches)/len(matches)
# and prints the identical value.
print("Accuracy: ", np.mean(y_guess == y_truth))
Accuracy: 0.7687