!pip install torch torchvision --upgrade
Collecting torch Downloading https://files.pythonhosted.org/packages/49/0e/e382bcf1a6ae8225f50b99cc26effa2d4cc6d66975ccf3fa9590efcbedce/torch-0.4.1-cp36-cp36m-manylinux1_x86_64.whl (519.5MB) 100% |████████████████████████████████| 519.5MB 26kB/s tcmalloc: large alloc 1073750016 bytes == 0x58dea000 @ 0x7f838e7a81c4 0x46d6a4 0x5fcbcc 0x4c494d 0x54f3c4 0x553aaf 0x54e4c8 0x54f4f6 0x553aaf 0x54efc1 0x54f24d 0x553aaf 0x54efc1 0x54f24d 0x553aaf 0x54efc1 0x54f24d 0x551ee0 0x54e4c8 0x54f4f6 0x553aaf 0x54efc1 0x54f24d 0x551ee0 0x54efc1 0x54f24d 0x551ee0 0x54e4c8 0x54f4f6 0x553aaf 0x54e4c8 Collecting torchvision Downloading https://files.pythonhosted.org/packages/ca/0d/f00b2885711e08bd71242ebe7b96561e6f6d01fdb4b9dcf4d37e2e13c5e1/torchvision-0.2.1-py2.py3-none-any.whl (54kB) 100% |████████████████████████████████| 61kB 21.3MB/s Collecting pillow>=4.1.1 (from torchvision) Downloading https://files.pythonhosted.org/packages/d1/24/f53ff6b61b3d728b90934bddb4f03f8ab584a7f49299bf3bde56e2952612/Pillow-5.2.0-cp36-cp36m-manylinux1_x86_64.whl (2.0MB) 100% |████████████████████████████████| 2.0MB 3.9MB/s Requirement already satisfied, skipping upgrade: numpy in /usr/local/lib/python3.6/dist-packages (from torchvision) (1.14.6) Requirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.6/dist-packages (from torchvision) (1.11.0) Installing collected packages: torch, pillow, torchvision Found existing installation: Pillow 4.0.0 Uninstalling Pillow-4.0.0: Successfully uninstalled Pillow-4.0.0 Successfully installed pillow-5.2.0 torch-0.4.1 torchvision-0.2.1
# Notebook setup: imports plus matplotlib styling (ggplot theme, large serif
# fonts sized for presentation figures).
import numpy as np
import torch
from matplotlib import pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'FreeSerif'
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 12
plt.rcParams['xtick.labelsize'] = 24
plt.rcParams['ytick.labelsize'] = 24
plt.rcParams['legend.fontsize'] = 24
plt.rcParams['axes.titlesize'] = 32
plt.rcParams['axes.labelsize'] = 24
%load_ext autoreload
%autoreload 2
# NOTE(review): numpy and pyplot are imported a second time here (harmless but
# redundant); torch and clear_output are imported but never used in this file.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import clear_output
The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload
# Load Fashion-MNIST through tf.keras, consistent with the tf.keras model code
# below (the original mixed in the standalone `keras` package for this one call).
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()

# Scale pixel intensities from [0, 255] to [0, 1].
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255

# Hold out the first 5000 training images as a validation set.
(X_train, X_valid) = X_train[5000:], X_train[:5000]
(y_train, y_valid) = y_train[5000:], y_train[:5000]

# Add the trailing channel axis expected by Conv2D: (N, 28, 28, 1).
w, h = 28, 28
X_train = X_train.reshape(X_train.shape[0], w, h, 1)
X_valid = X_valid.reshape(X_valid.shape[0], w, h, 1)
X_test = X_test.reshape(X_test.shape[0], w, h, 1)

# One-hot encode the 10 class labels.
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_valid = tf.keras.utils.to_categorical(y_valid, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

print("x_train shape:", X_train.shape, "y_train shape:", y_train.shape)
print(X_train.shape[0], 'train set')
print(X_valid.shape[0], 'validation set')
print(X_test.shape[0], 'test set')
x_train shape: (50000, 28, 28, 1) y_train shape: (50000, 10) 50000 train set 5000 validation set 10000 test set
# Baseline CNN for Fashion-MNIST: two conv / max-pool / dropout stages, then a
# dense classifier head with softmax over the 10 classes.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same',
                           activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same',
                           activation='relu'),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.summary()
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_2 (Conv2D) (None, 28, 28, 64) 320 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 14, 14, 64) 0 _________________________________________________________________ dropout_3 (Dropout) (None, 14, 14, 64) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 14, 14, 32) 8224 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 7, 7, 32) 0 _________________________________________________________________ dropout_4 (Dropout) (None, 7, 7, 32) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 1568) 0 _________________________________________________________________ dense_2 (Dense) (None, 256) 401664 _________________________________________________________________ dropout_5 (Dropout) (None, 256) 0 _________________________________________________________________ dense_3 (Dense) (None, 10) 2570 ================================================================= Total params: 412,778 Trainable params: 412,778 Non-trainable params: 0 _________________________________________________________________
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Use tf.keras's own ModelCheckpoint: callbacks from the standalone `keras`
# package are not compatible with tf.keras models. Also save to the path the
# evaluation cell loads from — the original wrote to a '.new' file that was
# never read back, so evaluation used stale (or missing) weights.
checkpointer = tf.keras.callbacks.ModelCheckpoint(
    filepath='model.weights.best.hdf5', verbose=1, save_best_only=True)

history = model.fit(X_train,
                    y_train,
                    batch_size=64,
                    epochs=15,
                    validation_data=(X_valid, y_valid),
                    callbacks=[checkpointer])
Train on 50000 samples, validate on 5000 samples Epoch 1/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.6267 - acc: 0.7695 Epoch 00001: val_loss improved from inf to 0.40868, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 74s 1ms/step - loss: 0.6265 - acc: 0.7695 - val_loss: 0.4087 - val_acc: 0.8490 Epoch 2/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.4268 - acc: 0.8458 Epoch 00002: val_loss improved from 0.40868 to 0.34215, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 73s 1ms/step - loss: 0.4268 - acc: 0.8457 - val_loss: 0.3422 - val_acc: 0.8752 Epoch 3/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.3842 - acc: 0.8600 Epoch 00003: val_loss improved from 0.34215 to 0.30506, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 74s 1ms/step - loss: 0.3842 - acc: 0.8600 - val_loss: 0.3051 - val_acc: 0.8882 Epoch 4/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.3543 - acc: 0.8693 Epoch 00004: val_loss improved from 0.30506 to 0.28825, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 73s 1ms/step - loss: 0.3542 - acc: 0.8693 - val_loss: 0.2882 - val_acc: 0.8922 Epoch 5/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.3340 - acc: 0.8782 Epoch 00005: val_loss improved from 0.28825 to 0.27590, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 73s 1ms/step - loss: 0.3340 - acc: 0.8782 - val_loss: 0.2759 - val_acc: 0.8968 Epoch 6/15 49984/50000 [============================>.] 
- ETA: 0s - loss: 0.3200 - acc: 0.8821 Epoch 00006: val_loss improved from 0.27590 to 0.26287, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 73s 1ms/step - loss: 0.3200 - acc: 0.8821 - val_loss: 0.2629 - val_acc: 0.9008 Epoch 7/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.3057 - acc: 0.8887 Epoch 00007: val_loss improved from 0.26287 to 0.24731, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 72s 1ms/step - loss: 0.3057 - acc: 0.8886 - val_loss: 0.2473 - val_acc: 0.9086 Epoch 8/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.2955 - acc: 0.8909 Epoch 00008: val_loss improved from 0.24731 to 0.24382, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 72s 1ms/step - loss: 0.2955 - acc: 0.8909 - val_loss: 0.2438 - val_acc: 0.9114 Epoch 9/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.2835 - acc: 0.8962 Epoch 00009: val_loss did not improve from 0.24382 50000/50000 [==============================] - 71s 1ms/step - loss: 0.2835 - acc: 0.8961 - val_loss: 0.2464 - val_acc: 0.9106 Epoch 10/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.2767 - acc: 0.8972 Epoch 00010: val_loss improved from 0.24382 to 0.24327, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 73s 1ms/step - loss: 0.2766 - acc: 0.8973 - val_loss: 0.2433 - val_acc: 0.9060 Epoch 11/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.2712 - acc: 0.8994 Epoch 00011: val_loss improved from 0.24327 to 0.22561, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 73s 1ms/step - loss: 0.2713 - acc: 0.8994 - val_loss: 0.2256 - val_acc: 0.9148 Epoch 12/15 49984/50000 [============================>.] 
- ETA: 0s - loss: 0.2668 - acc: 0.9013 Epoch 00012: val_loss did not improve from 0.22561 50000/50000 [==============================] - 73s 1ms/step - loss: 0.2667 - acc: 0.9013 - val_loss: 0.2333 - val_acc: 0.9096 Epoch 13/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.2588 - acc: 0.9037 Epoch 00013: val_loss improved from 0.22561 to 0.21897, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 73s 1ms/step - loss: 0.2589 - acc: 0.9037 - val_loss: 0.2190 - val_acc: 0.9194 Epoch 14/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.2524 - acc: 0.9066 Epoch 00014: val_loss improved from 0.21897 to 0.21356, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 73s 1ms/step - loss: 0.2523 - acc: 0.9066 - val_loss: 0.2136 - val_acc: 0.9184 Epoch 15/15 49984/50000 [============================>.] - ETA: 0s - loss: 0.2499 - acc: 0.9065 Epoch 00015: val_loss improved from 0.21356 to 0.21033, saving model to model.weights.best.hdf5.new 50000/50000 [==============================] - 73s 1ms/step - loss: 0.2499 - acc: 0.9065 - val_loss: 0.2103 - val_acc: 0.9218
# Accuracy per epoch. The second curve is the validation split, so label it
# 'Validation' — the original legend said 'Test', but the test set is never
# touched during training.
plt.figure(figsize=(10, 7))
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
plt.show()

# Loss per epoch for the same run.
plt.figure(figsize=(10, 7))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.ylabel('Categorical Crossentropy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
plt.show()
/usr/local/lib/python3.6/dist-packages/matplotlib/font_manager.py:1320: UserWarning: findfont: Font family ['serif'] not found. Falling back to DejaVu Sans (prop.get_family(), self.defaultFamily[fontext]))
# Restore the best checkpoint and score it on the held-out test set.
model.load_weights('model.weights.best.hdf5')
loss_and_acc = model.evaluate(X_test, y_test, verbose=0)
# evaluate() returns [loss, accuracy]; report the accuracy.
print('\n', 'Test accuracy:', loss_and_acc[1])
Test accuracy: 0.9124
def _build_cnn():
    """Return a fresh, compiled copy of the baseline Fashion-MNIST CNN."""
    net = tf.keras.Sequential([
        tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same',
                               activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(pool_size=2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same',
                               activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    return net


# Retrain from scratch on increasingly large random subsets of the training
# data and record [test_loss, test_accuracy] for each size.
set_sizes = [1000, 2000, 5000, 10000, 20000, 50000]
accuracy_metric = []
for size in set_sizes:
    print(size)
    # Sampling without replacement requires size <= population; clamp so an
    # oversized entry cannot raise ValueError (as happened in a prior run).
    size = min(size, X_train.shape[0])
    used_indices = np.random.choice(np.arange(X_train.shape[0]), size,
                                    replace=False)
    model = _build_cnn()
    history = model.fit(X_train[used_indices, :], y_train[used_indices, :],
                        batch_size=64,
                        epochs=8,
                        validation_data=(X_valid, y_valid))
    accuracy_metric.append(model.evaluate(X_test, y_test, verbose=1))
    print(accuracy_metric[-1])
1000 Train on 1000 samples, validate on 5000 samples Epoch 1/8 1000/1000 [==============================] - 3s 3ms/step - loss: 1.8767 - acc: 0.3680 - val_loss: 1.3096 - val_acc: 0.5394 Epoch 2/8 1000/1000 [==============================] - 3s 3ms/step - loss: 1.2025 - acc: 0.5340 - val_loss: 0.9917 - val_acc: 0.6254 Epoch 3/8 1000/1000 [==============================] - 3s 3ms/step - loss: 0.9814 - acc: 0.6270 - val_loss: 0.8611 - val_acc: 0.6716 Epoch 4/8 1000/1000 [==============================] - 3s 3ms/step - loss: 0.8631 - acc: 0.6920 - val_loss: 0.7552 - val_acc: 0.7274 Epoch 5/8 1000/1000 [==============================] - 3s 3ms/step - loss: 0.7628 - acc: 0.7130 - val_loss: 0.7411 - val_acc: 0.7194 Epoch 6/8 1000/1000 [==============================] - 3s 3ms/step - loss: 0.6886 - acc: 0.7560 - val_loss: 0.6736 - val_acc: 0.7392 Epoch 7/8 1000/1000 [==============================] - 3s 3ms/step - loss: 0.6353 - acc: 0.7650 - val_loss: 0.6374 - val_acc: 0.7568 Epoch 8/8 1000/1000 [==============================] - 3s 3ms/step - loss: 0.6027 - acc: 0.7790 - val_loss: 0.6258 - val_acc: 0.7734 10000/10000 [==============================] - 3s 338us/step [0.6141497712612152, 0.7792] 2000 Train on 2000 samples, validate on 5000 samples Epoch 1/8 2000/2000 [==============================] - 5s 2ms/step - loss: 1.5987 - acc: 0.4250 - val_loss: 1.0255 - val_acc: 0.5884 Epoch 2/8 2000/2000 [==============================] - 4s 2ms/step - loss: 1.0319 - acc: 0.6020 - val_loss: 0.8175 - val_acc: 0.7148 Epoch 3/8 2000/2000 [==============================] - 4s 2ms/step - loss: 0.8689 - acc: 0.6840 - val_loss: 0.7105 - val_acc: 0.7368 Epoch 4/8 2000/2000 [==============================] - 4s 2ms/step - loss: 0.7273 - acc: 0.7395 - val_loss: 0.6643 - val_acc: 0.7418 Epoch 5/8 2000/2000 [==============================] - 4s 2ms/step - loss: 0.6822 - acc: 0.7455 - val_loss: 0.6085 - val_acc: 0.7802 Epoch 6/8 2000/2000 [==============================] - 4s 2ms/step - loss: 
0.5910 - acc: 0.7750 - val_loss: 0.5710 - val_acc: 0.7804 Epoch 7/8 2000/2000 [==============================] - 4s 2ms/step - loss: 0.5726 - acc: 0.7900 - val_loss: 0.5612 - val_acc: 0.7952 Epoch 8/8 2000/2000 [==============================] - 4s 2ms/step - loss: 0.5556 - acc: 0.7885 - val_loss: 0.5647 - val_acc: 0.7766 10000/10000 [==============================] - 3s 291us/step [0.558463538980484, 0.7828] 5000 Train on 5000 samples, validate on 5000 samples Epoch 1/8 5000/5000 [==============================] - 9s 2ms/step - loss: 1.2318 - acc: 0.5398 - val_loss: 0.7429 - val_acc: 0.7394 Epoch 2/8 5000/5000 [==============================] - 8s 2ms/step - loss: 0.7345 - acc: 0.7312 - val_loss: 0.5958 - val_acc: 0.7710 Epoch 3/8 5000/5000 [==============================] - 8s 2ms/step - loss: 0.6222 - acc: 0.7662 - val_loss: 0.5504 - val_acc: 0.7936 Epoch 4/8 5000/5000 [==============================] - 9s 2ms/step - loss: 0.5593 - acc: 0.7920 - val_loss: 0.5152 - val_acc: 0.8066 Epoch 5/8 5000/5000 [==============================] - 8s 2ms/step - loss: 0.5294 - acc: 0.8024 - val_loss: 0.4772 - val_acc: 0.8250 Epoch 6/8 5000/5000 [==============================] - 9s 2ms/step - loss: 0.4973 - acc: 0.8164 - val_loss: 0.4632 - val_acc: 0.8320 Epoch 7/8 5000/5000 [==============================] - 8s 2ms/step - loss: 0.4790 - acc: 0.8222 - val_loss: 0.4571 - val_acc: 0.8318 Epoch 8/8 5000/5000 [==============================] - 8s 2ms/step - loss: 0.4558 - acc: 0.8290 - val_loss: 0.4495 - val_acc: 0.8310 10000/10000 [==============================] - 3s 303us/step [0.45170925884246826, 0.8286] 10000 Train on 10000 samples, validate on 5000 samples Epoch 1/8 10000/10000 [==============================] - 16s 2ms/step - loss: 1.0071 - acc: 0.6246 - val_loss: 0.5801 - val_acc: 0.7846 Epoch 2/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.6097 - acc: 0.7729 - val_loss: 0.5075 - val_acc: 0.8128 Epoch 3/8 10000/10000 
[==============================] - 15s 2ms/step - loss: 0.5392 - acc: 0.7993 - val_loss: 0.4515 - val_acc: 0.8340 Epoch 4/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.4864 - acc: 0.8213 - val_loss: 0.4330 - val_acc: 0.8388 Epoch 5/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.4619 - acc: 0.8277 - val_loss: 0.3975 - val_acc: 0.8584 Epoch 6/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4427 - acc: 0.8375 - val_loss: 0.3996 - val_acc: 0.8488 Epoch 7/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.4221 - acc: 0.8462 - val_loss: 0.3993 - val_acc: 0.8512 Epoch 8/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.4062 - acc: 0.8483 - val_loss: 0.3709 - val_acc: 0.8666 10000/10000 [==============================] - 3s 293us/step [0.3737544047355652, 0.8658] 20000 Train on 20000 samples, validate on 5000 samples Epoch 1/8 20000/20000 [==============================] - 30s 1ms/step - loss: 0.7701 - acc: 0.7122 - val_loss: 0.5003 - val_acc: 0.8260 Epoch 2/8 20000/20000 [==============================] - 29s 1ms/step - loss: 0.4991 - acc: 0.8183 - val_loss: 0.4286 - val_acc: 0.8340 Epoch 3/8 20000/20000 [==============================] - 29s 1ms/step - loss: 0.4454 - acc: 0.8353 - val_loss: 0.3756 - val_acc: 0.8628 Epoch 4/8 20000/20000 [==============================] - 29s 1ms/step - loss: 0.4064 - acc: 0.8515 - val_loss: 0.3396 - val_acc: 0.8774 Epoch 5/8 20000/20000 [==============================] - 29s 1ms/step - loss: 0.3859 - acc: 0.8578 - val_loss: 0.3346 - val_acc: 0.8752 Epoch 6/8 20000/20000 [==============================] - 29s 1ms/step - loss: 0.3628 - acc: 0.8675 - val_loss: 0.3117 - val_acc: 0.8858 Epoch 7/8 20000/20000 [==============================] - 29s 1ms/step - loss: 0.3452 - acc: 0.8728 - val_loss: 0.3115 - val_acc: 0.8876 Epoch 8/8 20000/20000 [==============================] - 29s 1ms/step - loss: 0.3362 - acc: 0.8746 - 
val_loss: 0.3036 - val_acc: 0.8876 10000/10000 [==============================] - 3s 319us/step [0.3247652856349945, 0.8808] 60000
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-77-e004e619b3cb> in <module>() 4 for size in set_sizes: 5 print(size) ----> 6 used_indices = np.random.choice(np.arange(X_train.shape[0]), size, replace=False) 7 model = tf.keras.Sequential() 8 mtrand.pyx in mtrand.RandomState.choice() ValueError: Cannot take a larger sample than population when 'replace=False'
accuracy_loss = np.asarray(accuracy_metric)
# Plot against however many sizes actually completed. The original hard-coded
# set_sizes[:-1], which silently assumed exactly the last run failed and
# crashes with a length mismatch when all runs finish.
sizes_done = set_sizes[:len(accuracy_loss)]

# Test accuracy vs. training-set size (column 1 of evaluate() = accuracy).
plt.figure(figsize=(10, 7))
plt.plot(sizes_done, accuracy_loss[:, 1])
plt.ylabel('Accuracy')
plt.xlabel('Train size')
plt.legend(['Test'], loc='best')
plt.show()

# Test loss vs. training-set size (column 0 of evaluate() = loss).
plt.figure(figsize=(10, 7))
plt.plot(sizes_done, accuracy_loss[:, 0])
plt.ylabel('Categorical Crossentropy')
plt.xlabel('Train size')
plt.legend(['Test'], loc='best')
plt.show()
/usr/local/lib/python3.6/dist-packages/matplotlib/font_manager.py:1320: UserWarning: findfont: Font family ['serif'] not found. Falling back to DejaVu Sans (prop.get_family(), self.defaultFamily[fontext]))
def X_noize(X_train, mean, var):
    """Return a copy of `X_train` with additive Gaussian noise, clipped to [0, 1].

    Parameters
    ----------
    X_train : np.ndarray
        Image batch with pixel values in [0, 1]; any shape.
    mean : float
        Mean of the Gaussian noise.
    var : float
        Noise scale in 0-255 pixel units; `var / 255` is passed to
        `np.random.normal` as the standard deviation. `var == 0` adds no noise.
        NOTE(review): despite the name, this acts as a std-dev, not a variance.

    Returns
    -------
    np.ndarray
        Noisy copy of the input (float64), clipped to [0, 1]; the input array
        itself is not modified.
    """
    # The original unpacked the four axes under NCHW-style names
    # (n_chan = shape[1]) even though the data here is NHWC; it only worked
    # because all four dims were copied verbatim. Using the shape tuple
    # directly is equivalent and works for any rank.
    if var == 0:
        noise = np.zeros(X_train.shape)
    else:
        noise = np.random.normal(mean, var / 255., X_train.shape)
    return np.clip(X_train + noise, 0., 1.)
# Train the same CNN on the first 10k images corrupted with increasing
# Gaussian noise (std 0..35 in 0-255 pixel units) and keep each run's History
# object for the accuracy plot below.
noises = [x for x in range(0, 40, 5)]
histories = []
for noise in noises:
    print(noise)
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same',
                               activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(pool_size=2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same',
                               activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    noisy_inputs = X_noize(X_train[:10000], mean=0, var=noise)
    run = model.fit(noisy_inputs, y_train[:10000],
                    batch_size=64,
                    epochs=8,
                    validation_data=(X_valid, y_valid))
    histories.append(run)
0 Train on 10000 samples, validate on 5000 samples Epoch 1/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.9754 - acc: 0.6389 - val_loss: 0.5852 - val_acc: 0.7768 Epoch 2/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.5895 - acc: 0.7804 - val_loss: 0.4855 - val_acc: 0.8222 Epoch 3/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.5172 - acc: 0.8077 - val_loss: 0.4439 - val_acc: 0.8414 Epoch 4/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.4738 - acc: 0.8206 - val_loss: 0.4111 - val_acc: 0.8494 Epoch 5/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4420 - acc: 0.8366 - val_loss: 0.3984 - val_acc: 0.8524 Epoch 6/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4192 - acc: 0.8458 - val_loss: 0.3813 - val_acc: 0.8580 Epoch 7/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.4007 - acc: 0.8501 - val_loss: 0.3601 - val_acc: 0.8626 Epoch 8/8 10000/10000 [==============================] - 15s 2ms/step - loss: 0.3784 - acc: 0.8597 - val_loss: 0.3592 - val_acc: 0.8666 10 Train on 10000 samples, validate on 5000 samples Epoch 1/8 10000/10000 [==============================] - 17s 2ms/step - loss: 0.9864 - acc: 0.6338 - val_loss: 0.5696 - val_acc: 0.7850 Epoch 2/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.5975 - acc: 0.7789 - val_loss: 0.5027 - val_acc: 0.8124 Epoch 3/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.5294 - acc: 0.8047 - val_loss: 0.4417 - val_acc: 0.8368 Epoch 4/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4807 - acc: 0.8203 - val_loss: 0.4161 - val_acc: 0.8476 Epoch 5/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4579 - acc: 0.8271 - val_loss: 0.4096 - val_acc: 0.8492 Epoch 6/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4277 - acc: 0.8374 - val_loss: 0.3763 - val_acc: 
0.8610 Epoch 7/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4099 - acc: 0.8444 - val_loss: 0.3668 - val_acc: 0.8646 Epoch 8/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.3971 - acc: 0.8487 - val_loss: 0.3557 - val_acc: 0.8702 20 Train on 10000 samples, validate on 5000 samples Epoch 1/8 10000/10000 [==============================] - 17s 2ms/step - loss: 1.0531 - acc: 0.6066 - val_loss: 0.6350 - val_acc: 0.7668 Epoch 2/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.6382 - acc: 0.7607 - val_loss: 0.5313 - val_acc: 0.8006 Epoch 3/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.5665 - acc: 0.7898 - val_loss: 0.4811 - val_acc: 0.8200 Epoch 4/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.5053 - acc: 0.8133 - val_loss: 0.4374 - val_acc: 0.8432 Epoch 5/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4804 - acc: 0.8201 - val_loss: 0.4306 - val_acc: 0.8424 Epoch 6/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4513 - acc: 0.8324 - val_loss: 0.4172 - val_acc: 0.8450 Epoch 7/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4319 - acc: 0.8400 - val_loss: 0.3877 - val_acc: 0.8564 Epoch 8/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4140 - acc: 0.8422 - val_loss: 0.3798 - val_acc: 0.8554 30 Train on 10000 samples, validate on 5000 samples Epoch 1/8 10000/10000 [==============================] - 17s 2ms/step - loss: 1.0608 - acc: 0.6043 - val_loss: 0.6511 - val_acc: 0.7708 Epoch 2/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.6564 - acc: 0.7569 - val_loss: 0.5463 - val_acc: 0.8112 Epoch 3/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.5769 - acc: 0.7850 - val_loss: 0.4936 - val_acc: 0.8134 Epoch 4/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.5296 - acc: 0.8029 - val_loss: 0.4627 - 
val_acc: 0.8336 Epoch 5/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.5011 - acc: 0.8120 - val_loss: 0.4296 - val_acc: 0.8478 Epoch 6/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4687 - acc: 0.8233 - val_loss: 0.4231 - val_acc: 0.8434 Epoch 7/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4537 - acc: 0.8309 - val_loss: 0.4083 - val_acc: 0.8534 Epoch 8/8 10000/10000 [==============================] - 16s 2ms/step - loss: 0.4353 - acc: 0.8376 - val_loss: 0.3983 - val_acc: 0.8540
# One accuracy-vs-epoch curve per noise level. zip pairs each history with its
# noise level directly — unlike the original manual `i` counter, this cannot
# drift and stays correct even if fewer histories than noise levels exist.
plt.figure(figsize=(17, 10))
for level, history in zip(noises, histories):
    plt.plot(history.history['acc'], label="Noise =" + str(level))
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()
/usr/local/lib/python3.6/dist-packages/matplotlib/font_manager.py:1320: UserWarning: findfont: Font family ['serif'] not found. Falling back to DejaVu Sans (prop.get_family(), self.defaultFamily[fontext]))