%reload_ext autoreload
%autoreload 2
%matplotlib inline
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import ktrain
from ktrain import vision as vis
Using TensorFlow backend.
# Build a regular expression that extracts the class label from a file name.
# Pet images are named like 'beagle_007.jpg'; the class is everything in the
# basename before the final '_<digits>.jpg'.
import re
# Fix: escape the dot so it matches a literal '.', not any character
# (the unescaped form would also accept e.g. 'beagle_007xjpg').
p = re.compile(r'([^/]+)_\d+\.jpg$')
r = p.search('/hello/world/beagle_007.jpg')
r.group(1)
'beagle'
# Use vision.images_from_fname to load the dataset and extract class names
# from the file names via the regex in `pattern` (group 1 = class label).
# Don't forget to do the following
# 1. Download data from here: https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet.tgz
# 2. set DATADIR to the extracted folder
DATADIR = 'data/oxford-iiit-pet/images'
# additions to default data augmentation
data_aug = vis.get_data_aug(horizontal_flip=True, shear_range=0.2)
# Returns (training generator, validation generator, Preprocessor);
# val_pct=0.2 holds out 20% of the images for validation.
# Fix: escape the '.' in the pattern so it matches a literal dot in '.jpg'.
(train_data, val_data, preproc) = vis.images_from_fname(
    DATADIR,
    pattern=r'([^/]+)_\d+\.jpg$',
    data_aug=data_aug,
    target_size=(299, 299), color_mode='rgb', val_pct=0.2)
Found 7390 images belonging to 1 classes. Found 37 classes: ['Abyssinian', 'Bengal', 'Birman', 'Bombay', 'British_Shorthair', 'Egyptian_Mau', 'Maine_Coon', 'Persian', 'Ragdoll', 'Russian_Blue', 'Siamese', 'Sphynx', 'american_bulldog', 'american_pit_bull_terrier', 'basset_hound', 'beagle', 'boxer', 'chihuahua', 'english_cocker_spaniel', 'english_setter', 'german_shorthaired', 'great_pyrenees', 'havanese', 'japanese_chin', 'keeshond', 'leonberger', 'miniature_pinscher', 'newfoundland', 'pomeranian', 'pug', 'saint_bernard', 'samoyed', 'scottish_terrier', 'shiba_inu', 'staffordshire_bull_terrier', 'wheaten_terrier', 'yorkshire_terrier'] y shape: (7390,37) Found 5852 images. Found 1538 images.
# build a pre-trained ResNet50 model (conv base frozen; presumably ImageNet
# weights — confirm in ktrain docs)
# Note that we have used the pt_fc and pt_ps arguments to include
# an extra Dense layer (and dropout) before final dropout and final Dense layer:
# pt_fc=[512] adds one 512-unit Dense layer, pt_ps=[0.25] its dropout rate
model = vis.image_classifier('pretrained_resnet50', train_data, val_data, pt_fc=[512], pt_ps=[0.25])
The normalization scheme has been changed for use with a pretrained_resnet50 model. If you decide to use a different model, please reload your dataset with a ktrain.vision.data.images_from* function. Is Multi-Label? False pretrained_resnet50 model created.
# wrap model and data in Learner object
# workers/use_multiprocessing tune the Keras generator queue; batch_size=64
learner = ktrain.get_learner(model=model, train_data=train_data, val_data=val_data,
workers=8, use_multiprocessing=False, batch_size=64)
# inspect layers: the pretrained ResNet50 layers are frozen (trainable=False);
# only the newly added Flatten/Dropout/Dense head is trainable (see output below)
learner.print_layers()
0 (trainable=False) : <keras.engine.input_layer.InputLayer object at 0x7effa9315d30> 1 (trainable=False) : <keras.layers.convolutional.ZeroPadding2D object at 0x7effeb828828> 2 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7effeb809d30> 3 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7f000bed1eb8> 4 (trainable=False) : <keras.layers.core.Activation object at 0x7f000bed1cf8> 5 (trainable=False) : <keras.layers.convolutional.ZeroPadding2D object at 0x7effa92d7668> 6 (trainable=False) : <keras.layers.pooling.MaxPooling2D object at 0x7effa92d7470> 7 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7f000beedf60> 8 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7f000a3d5748> 9 (trainable=False) : <keras.layers.core.Activation object at 0x7f000a3d5eb8> 10 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7f000a381b70> 11 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7f000a39cf28> 12 (trainable=False) : <keras.layers.core.Activation object at 0x7f000a2cfef0> 13 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7f000a27e390> 14 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7effa8504080> 15 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7f000a295160> 16 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7effa8418be0> 17 (trainable=False) : <keras.layers.merge.Add object at 0x7effa81c7518> 18 (trainable=False) : <keras.layers.core.Activation object at 0x7effa8146208> 19 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7effa8146940> 20 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7effa81306d8> 21 (trainable=False) : <keras.layers.core.Activation object at 0x7effa80a9518> 22 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7effa8052cc0> 23 (trainable=False) : 
<keras.layers.normalization.BatchNormalization object at 0x7effa80725c0> 24 (trainable=False) : <keras.layers.core.Activation object at 0x7eff987b2d68> 25 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff98755208> 26 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff9876e2e8> 27 (trainable=False) : <keras.layers.merge.Add object at 0x7eff987362b0> 28 (trainable=False) : <keras.layers.core.Activation object at 0x7eff986b86a0> 29 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff986b8400> 30 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff985c3278> 31 (trainable=False) : <keras.layers.core.Activation object at 0x7eff985eefd0> 32 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff9854a5c0> 33 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff985662b0> 34 (trainable=False) : <keras.layers.core.Activation object at 0x7eff985303c8> 35 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff984aeeb8> 36 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff9844feb8> 37 (trainable=False) : <keras.layers.merge.Add object at 0x7eff98417a20> 38 (trainable=False) : <keras.layers.core.Activation object at 0x7eff983b5390> 39 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff983b50f0> 40 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff9831d7b8> 41 (trainable=False) : <keras.layers.core.Activation object at 0x7eff982a1e10> 42 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff982422b0> 43 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff98259160> 44 (trainable=False) : <keras.layers.core.Activation object at 0x7eff98225048> 45 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff981a6a20> 46 (trainable=False) : <keras.layers.convolutional.Conv2D 
object at 0x7eff9810dc50> 47 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff9814afd0> 48 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff98053978> 49 (trainable=False) : <keras.layers.merge.Add object at 0x7eff747de048> 50 (trainable=False) : <keras.layers.core.Activation object at 0x7eff7475e7b8> 51 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff7475e048> 52 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff746e6da0> 53 (trainable=False) : <keras.layers.core.Activation object at 0x7eff746934a8> 54 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff74670668> 55 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff7460b2b0> 56 (trainable=False) : <keras.layers.core.Activation object at 0x7eff745d20b8> 57 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff74553e80> 58 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff74510080> 59 (trainable=False) : <keras.layers.merge.Add object at 0x7eff74535828> 60 (trainable=False) : <keras.layers.core.Activation object at 0x7eff74457438> 61 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff74457198> 62 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff743dfe48> 63 (trainable=False) : <keras.layers.core.Activation object at 0x7eff74345eb8> 64 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff74366358> 65 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff742ff048> 66 (trainable=False) : <keras.layers.core.Activation object at 0x7eff742c9358> 67 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff7424bbe0> 68 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff7426e748> 69 (trainable=False) : <keras.layers.merge.Add object at 0x7eff742324e0> 70 
(trainable=False) : <keras.layers.core.Activation object at 0x7eff7415d2b0> 71 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff7415d400> 72 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff740c2390> 73 (trainable=False) : <keras.layers.core.Activation object at 0x7eff74089c50> 74 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff74041ba8> 75 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff547bf438> 76 (trainable=False) : <keras.layers.core.Activation object at 0x7eff547e3ef0> 77 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff54709d30> 78 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff5472e630> 79 (trainable=False) : <keras.layers.merge.Add object at 0x7eff5466bdd8> 80 (trainable=False) : <keras.layers.core.Activation object at 0x7eff546191d0> 81 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff546190f0> 82 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff5459a208> 83 (trainable=False) : <keras.layers.core.Activation object at 0x7eff545435f8> 84 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff544fccf8> 85 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff5451d7f0> 86 (trainable=False) : <keras.layers.core.Activation object at 0x7eff544e2e48> 87 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff5440c630> 88 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff543ef080> 89 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff54428320> 90 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff54310b38> 91 (trainable=False) : <keras.layers.merge.Add object at 0x7eff542d2dd8> 92 (trainable=False) : <keras.layers.core.Activation object at 0x7eff541fb710> 93 (trainable=False) : 
<keras.layers.convolutional.Conv2D object at 0x7eff541fb860> 94 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff541b27b8> 95 (trainable=False) : <keras.layers.core.Activation object at 0x7eff5415f4a8> 96 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff5410d828> 97 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff54128518> 98 (trainable=False) : <keras.layers.core.Activation object at 0x7eff540ee278> 99 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff54071cc0> 100 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff347ed080> 101 (trainable=False) : <keras.layers.merge.Add object at 0x7eff3473d128> 102 (trainable=False) : <keras.layers.core.Activation object at 0x7eff347385f8> 103 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff34738358> 104 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff346a2048> 105 (trainable=False) : <keras.layers.core.Activation object at 0x7eff3461d518> 106 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff345c31d0> 107 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff345e7588> 108 (trainable=False) : <keras.layers.core.Activation object at 0x7eff345ac898> 109 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff3452cc88> 110 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff344e4048> 111 (trainable=False) : <keras.layers.merge.Add object at 0x7eff344b3518> 112 (trainable=False) : <keras.layers.core.Activation object at 0x7eff3442d5c0> 113 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff3442d320> 114 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff34362780> 115 (trainable=False) : <keras.layers.core.Activation object at 0x7eff3430f4e0> 116 (trainable=False) : 
<keras.layers.convolutional.Conv2D object at 0x7eff34337860> 117 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff342d9550> 118 (trainable=False) : <keras.layers.core.Activation object at 0x7eff3429e908> 119 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff34221cf8> 120 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff341dc358> 121 (trainable=False) : <keras.layers.merge.Add object at 0x7eff341ad160> 122 (trainable=False) : <keras.layers.core.Activation object at 0x7eff34123630> 123 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff34123390> 124 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff3408fa58> 125 (trainable=False) : <keras.layers.core.Activation object at 0x7eff147c9518> 126 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff147f1cc0> 127 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff147935c0> 128 (trainable=False) : <keras.layers.core.Activation object at 0x7eff146dcd68> 129 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff1467d208> 130 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff146952e8> 131 (trainable=False) : <keras.layers.merge.Add object at 0x7eff14667518> 132 (trainable=False) : <keras.layers.core.Activation object at 0x7eff145df6a0> 133 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff145df400> 134 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff1454aac8> 135 (trainable=False) : <keras.layers.core.Activation object at 0x7eff14535588> 136 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff144edd30> 137 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff14490630> 138 (trainable=False) : <keras.layers.core.Activation object at 0x7eff143cddd8> 139 (trainable=False) : 
<keras.layers.convolutional.Conv2D object at 0x7eff1437a278> 140 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff14392128> 141 (trainable=False) : <keras.layers.merge.Add object at 0x7eff143654e0> 142 (trainable=False) : <keras.layers.core.Activation object at 0x7eff142cf710> 143 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff142cf470> 144 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff142b3b38> 145 (trainable=False) : <keras.layers.core.Activation object at 0x7eff142235f8> 146 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff141d8da0> 147 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff141f86a0> 148 (trainable=False) : <keras.layers.core.Activation object at 0x7eff140bce48> 149 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff140ea2e8> 150 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff1404e0b8> 151 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff140820b8> 152 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff007b2b00> 153 (trainable=False) : <keras.layers.merge.Add object at 0x7eff00775c50> 154 (trainable=False) : <keras.layers.core.Activation object at 0x7eff00699390> 155 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff006994e0> 156 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff00621dd8> 157 (trainable=False) : <keras.layers.core.Activation object at 0x7eff005d4e48> 158 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff005a84a8> 159 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff00545198> 160 (trainable=False) : <keras.layers.core.Activation object at 0x7eff00510278> 161 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff00491da0> 162 (trainable=False) : 
<keras.layers.normalization.BatchNormalization object at 0x7eff004b0898> 163 (trainable=False) : <keras.layers.merge.Add object at 0x7eff00478ef0> 164 (trainable=False) : <keras.layers.core.Activation object at 0x7eff0039c400> 165 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff0039c550> 166 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff0031ee48> 167 (trainable=False) : <keras.layers.core.Activation object at 0x7eff002d0eb8> 168 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff002a4518> 169 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff00245208> 170 (trainable=False) : <keras.layers.core.Activation object at 0x7eff002112e8> 171 (trainable=False) : <keras.layers.convolutional.Conv2D object at 0x7eff00190f28> 172 (trainable=False) : <keras.layers.normalization.BatchNormalization object at 0x7eff001b0d30> 173 (trainable=False) : <keras.layers.merge.Add object at 0x7eff00176f60> 174 (trainable=False) : <keras.layers.core.Activation object at 0x7eff0009a5c0> 175 (trainable=True) : <keras.layers.core.Flatten object at 0x7effa9315da0> 176 (trainable=True) : <keras.layers.core.Dropout object at 0x7efeec7c5048> 177 (trainable=True) : <keras.layers.core.Dense object at 0x7efebc0e1dd8> 178 (trainable=True) : <keras.layers.core.Dropout object at 0x7efeec7914a8> 179 (trainable=True) : <keras.layers.core.Dense object at 0x7efebc0dcc88>
# find a good learning rate: simulates training while sweeping the LR
# and records the loss at each rate (weights are restored afterwards)
learner.lr_find(max_epochs=5)
simulating training for different learning rates... this may take a few moments... Epoch 1/5 92/92 [==============================] - 32s 349ms/step - loss: 7.2042 - acc: 0.0324 Epoch 2/5 92/92 [==============================] - 26s 278ms/step - loss: 3.6258 - acc: 0.1726 Epoch 3/5 92/92 [==============================] - 26s 286ms/step - loss: 2.7193 - acc: 0.3370 Epoch 4/5 92/92 [==============================] - 25s 275ms/step - loss: 4.1443 - acc: 0.0447 Epoch 5/5 73/92 [======================>.......] - ETA: 5s - loss: 12.4748 - acc: 0.0276 done. Please invoke the Learner.lr_plot() method to visually inspect the loss plot to help identify the maximal learning rate associated with falling loss.
# plot loss vs. learning rate; choose a max LR where the loss is still falling
learner.lr_plot()
For demonstration purposes, we use `autofit` to train, which employs a triangular learning rate policy, with `epochs=20` and `reduce_on_plateau=2`. You may choose to try something different.
# train for 20 epochs with a triangular LR policy (max LR 5e-4);
# reduce_on_plateau=2 halves the max LR after 2 epochs without improvement
# (visible in the log: 0.0005 -> 0.00025 -> 0.000125 ...)
learner.autofit(5e-4, 20, reduce_on_plateau=2)
begin training using triangular learning rate policy with max lr of 0.0005... Epoch 1/20 92/92 [==============================] - 32s 350ms/step - loss: 3.0374 - acc: 0.2756 - val_loss: 0.9356 - val_acc: 0.7035 Epoch 2/20 92/92 [==============================] - 30s 329ms/step - loss: 1.9647 - acc: 0.4609 - val_loss: 0.6550 - val_acc: 0.8032 Epoch 3/20 92/92 [==============================] - 31s 336ms/step - loss: 1.6151 - acc: 0.5372 - val_loss: 0.5620 - val_acc: 0.8373 Epoch 4/20 92/92 [==============================] - 31s 334ms/step - loss: 1.3980 - acc: 0.5919 - val_loss: 0.4786 - val_acc: 0.8574 Epoch 5/20 92/92 [==============================] - 31s 334ms/step - loss: 1.2780 - acc: 0.6299 - val_loss: 0.4058 - val_acc: 0.8701 Epoch 6/20 92/92 [==============================] - 30s 326ms/step - loss: 1.2129 - acc: 0.6414 - val_loss: 0.4124 - val_acc: 0.8809 Epoch 7/20 92/92 [==============================] - 31s 339ms/step - loss: 1.0986 - acc: 0.6736 - val_loss: 0.4517 - val_acc: 0.8655 Epoch 00007: Reducing Max LR on Plateau: new max lr will be 0.00025 (if not early_stopping). Epoch 8/20 92/92 [==============================] - 31s 332ms/step - loss: 0.9641 - acc: 0.6951 - val_loss: 0.4073 - val_acc: 0.8835 Epoch 9/20 92/92 [==============================] - 30s 330ms/step - loss: 0.8744 - acc: 0.7328 - val_loss: 0.4083 - val_acc: 0.8842 Epoch 00009: Reducing Max LR on Plateau: new max lr will be 0.000125 (if not early_stopping). 
Epoch 10/20 92/92 [==============================] - 31s 338ms/step - loss: 0.8001 - acc: 0.7473 - val_loss: 0.3986 - val_acc: 0.8902 Epoch 11/20 92/92 [==============================] - 30s 331ms/step - loss: 0.7494 - acc: 0.7600 - val_loss: 0.3515 - val_acc: 0.9009 Epoch 12/20 92/92 [==============================] - 31s 332ms/step - loss: 0.7711 - acc: 0.7605 - val_loss: 0.3588 - val_acc: 0.9016 Epoch 13/20 92/92 [==============================] - 31s 338ms/step - loss: 0.7120 - acc: 0.7775 - val_loss: 0.3648 - val_acc: 0.8989 Epoch 00013: Reducing Max LR on Plateau: new max lr will be 6.25e-05 (if not early_stopping). Epoch 14/20 92/92 [==============================] - 31s 339ms/step - loss: 0.6650 - acc: 0.7836 - val_loss: 0.3347 - val_acc: 0.9023 Epoch 15/20 92/92 [==============================] - 31s 334ms/step - loss: 0.6482 - acc: 0.7930 - val_loss: 0.3450 - val_acc: 0.9070 Epoch 16/20 92/92 [==============================] - 31s 334ms/step - loss: 0.6566 - acc: 0.7876 - val_loss: 0.3415 - val_acc: 0.9003 Epoch 00016: Reducing Max LR on Plateau: new max lr will be 3.125e-05 (if not early_stopping). Epoch 17/20 92/92 [==============================] - 30s 329ms/step - loss: 0.6001 - acc: 0.8079 - val_loss: 0.3497 - val_acc: 0.9029 Epoch 18/20 92/92 [==============================] - 30s 324ms/step - loss: 0.5975 - acc: 0.8089 - val_loss: 0.3469 - val_acc: 0.9050 Epoch 00018: Reducing Max LR on Plateau: new max lr will be 1.5625e-05 (if not early_stopping). Epoch 19/20 92/92 [==============================] - 31s 336ms/step - loss: 0.5817 - acc: 0.8120 - val_loss: 0.3503 - val_acc: 0.9036 Epoch 20/20 92/92 [==============================] - 31s 341ms/step - loss: 0.5783 - acc: 0.8105 - val_loss: 0.3301 - val_acc: 0.9103
<keras.callbacks.History at 0x7fd8f48bbb38>
# get a Predictor instance that wraps model and Preprocessor object,
# so raw image files can be classified without manual preprocessing
predictor = ktrain.get_predictor(learner.model, preproc)
# get some random file names of images
# (IPython shell magic: shuffle the directory listing and take 10)
!!ls {DATADIR} | sort -R |head -10
['Ragdoll_186.jpg', 'leonberger_73.jpg', 'great_pyrenees_11.jpg', 'Persian_101.jpg', 'samoyed_76.jpg', 'beagle_28.jpg', 'keeshond_25.jpg', 'staffordshire_bull_terrier_8.jpg', 'newfoundland_146.jpg', 'japanese_chin_87.jpg']
# visualize ragdoll (renders the image inline via matplotlib)
vis.show_image(DATADIR+'/Ragdoll_186.jpg')
<matplotlib.image.AxesImage at 0x7f7ded1ce080>
# correctly predict ragdoll; returns a list of predicted class labels
predictor.predict_filename(DATADIR+'/Ragdoll_186.jpg')
['Ragdoll']
# visualize a staffordshire bull terrier (renders inline via matplotlib)
vis.show_image(DATADIR+'/staffordshire_bull_terrier_8.jpg')
<matplotlib.image.AxesImage at 0x7f7df4fe2e48>
# correctly predict the staffordshire bull terrier from its file path
predictor.predict_filename(DATADIR+'/staffordshire_bull_terrier_8.jpg')
['staffordshire_bull_terrier']
# save predictor object (model weights + preprocessing metadata) for later use
predictor.save('/tmp/pet_detector')
# load predictor object back from disk — demonstrates the saved predictor
# is self-contained and can be restored in a fresh session
predictor = ktrain.load_predictor('/tmp/pet_detector')
# predict a Newfoundland using the reloaded predictor
predictor.predict_filename(DATADIR+'/newfoundland_146.jpg')
['newfoundland']
# let's look at the dog we correctly predicted - it's cute
vis.show_image(DATADIR+'/newfoundland_146.jpg')
<matplotlib.image.AxesImage at 0x7f7df4f7f748>