from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
# Binary classifier: two conv blocks followed by a small fully-connected head.
# Input: single-channel (grayscale) 70x116 images; output: one sigmoid probability.
simple_cnn = Sequential()
simple_cnn.add(Conv2D(16, (3, 3), input_shape=(70, 116, 1), padding='same', activation='relu', name='conv1_1'))
# Keras only honours input_shape on the first layer, so it is omitted here.
simple_cnn.add(Conv2D(16, (3, 3), padding='same', activation='relu', name='conv1_2'))
simple_cnn.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))  # -> (35, 58, 16)
simple_cnn.add(Conv2D(32, (3, 3), padding='same', activation='relu', name='conv2_1'))
simple_cnn.add(Conv2D(32, (3, 3), padding='same', activation='relu', name='conv2_2'))
simple_cnn.add(MaxPooling2D(pool_size=(5, 2), name='pool2'))  # -> (7, 29, 32)
simple_cnn.add(Flatten(name='flatten'))                       # -> 7*29*32 = 6496
simple_cnn.add(Dense(32, activation='relu', name='fc1'))
simple_cnn.add(Dropout(0.5))
simple_cnn.add(Dense(1, activation='sigmoid', name='prediction'))
from keras import optimizers

# Adam with a very small learning rate (resumed / fine-tuning run).
adam = optimizers.Adam(lr=1e-7, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
simple_cnn.compile(optimizer=adam,
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
# [notebook output] Using TensorFlow backend.
simple_cnn.summary()
# [notebook output] simple_cnn.summary() table: conv1_1 (None,70,116,16) 160 params;
# conv1_2 2,320; pool1 (None,35,58,16); conv2_1 4,640; conv2_2 9,248;
# pool2 (None,7,29,32); flatten (None,6496); fc1 207,904; dropout_1; prediction 33.
# Total params: 224,305 -- all trainable, 0 non-trainable.
# Render the model graph to an image file (requires pydot + graphviz).
from keras.utils import plot_model
plot_model(simple_cnn, to_file='simple_cnn.png')
from keras.preprocessing.image import ImageDataGenerator

batch_size = 256

# Training-time augmentation: random rotations up to 180 degrees plus
# horizontal flips; 'nearest' fills pixels exposed by rotation.
train_datagen = ImageDataGenerator(
    rotation_range=180,
    horizontal_flip=True,
    fill_mode='nearest')

# Validation/test data should reflect the real input distribution, so no
# random augmentation is applied here.  (The original code rotated/flipped
# the test stream too, which makes the reported metrics noisy.)
test_datagen = ImageDataGenerator()

# class_mode='binary': labels come from the two class sub-directories,
# indexed alphabetically.
train_generator = train_datagen.flow_from_directory(
    '/home/xenialxerus/cnn-medical-image-segmentation/train/',
    target_size=(70, 116),
    batch_size=batch_size,
    class_mode='binary',
    color_mode='grayscale')
validation_generator = test_datagen.flow_from_directory(
    '/home/xenialxerus/cnn-medical-image-segmentation/test/',
    target_size=(70, 116),
    batch_size=batch_size,
    class_mode='binary',
    color_mode='grayscale')
# [notebook output] Found 0 images belonging to 0 classes. Found 0 images belonging to 0 classes.
# NOTE(review): "0 images / 0 classes" means the directory paths above were
# missing or empty when this cell ran -- training below cannot have worked.
simple_cnn.load_weights('simple_cnn_weights/simple_cnn_0_7903.h5')
# Time-consuming training process, expect to run for a long time.
# 1393544 / 235343 are the train / validation image counts (TODO confirm they
# still match the directories above); dividing by batch_size makes one epoch
# a single pass over the data.  Using batch_size instead of a literal 256
# keeps the step counts in sync with the generators.
simple_cnn.fit_generator(
    train_generator,
    steps_per_epoch=1393544 // batch_size,
    epochs=5,
    validation_data=validation_generator,
    validation_steps=235343 // batch_size)
simple_cnn.save_weights('simple_cnn.h5')
# [notebook output] Epoch 1/5
import skimage.io as io
import numpy as np
import glob

# glob.glob already returns a list; no need to copy it with a comprehension.
masked_imgs = glob.glob("data_simple_cnn/test/mask/*")
no_masked_imgs = glob.glob("data_simple_cnn/test/no_mask/*")
n_masked = len(masked_imgs)
n_no_masked = len(no_masked_imgs)

# Per-class accuracy on the "mask" class.  Assumes flow_from_directory's
# alphabetical class indexing mapped 'mask' -> label 0, so a sigmoid output
# below the 0.8 threshold is a correct "mask" prediction -- TODO confirm.
count_masked = 0
for path in masked_imgs:
    img = io.imread(path)
    img = np.expand_dims(img, 0)   # add batch axis
    img = np.expand_dims(img, -1)  # add channel axis
    pred = simple_cnn.predict(img)
    if pred[0][0] < 0.8:
        count_masked += 1
print("masked accuracy: ", float(count_masked) / n_masked)
# Per-class accuracy on the "no_mask" class.  BUG FIX: the original reused
# the `< 0.8` test here, which counts *mis*classified no-mask images as
# "accuracy".  A no-mask image (label 1 under alphabetical class indexing --
# confirm) is classified correctly when the sigmoid output is at or above
# the threshold.
count_no_masked = 0
for path in no_masked_imgs:
    img = io.imread(path)
    img = np.expand_dims(img, 0)   # add batch axis
    img = np.expand_dims(img, -1)  # add channel axis
    pred = simple_cnn.predict(img)
    if pred[0][0] >= 0.8:
        count_no_masked += 1
print("no_masked accuracy: ", float(count_no_masked) / n_no_masked)
import keras.backend as K
from keras.objectives import *
import tensorflow as tf
def binary_crossentropy_with_logits(ground_truth, predictions):
    """Mean binary cross-entropy where `predictions` are raw (pre-sigmoid) logits."""
    per_element = K.binary_crossentropy(ground_truth, predictions, from_logits=True)
    return K.mean(per_element, axis=-1)
def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
    """Mean softmax cross-entropy over flattened pixels, ignoring the last label.

    `y_true` holds integer class ids; pixels labelled with the extra
    "ignore" class (id == nb_classes) contribute zero loss.
    """
    nb_classes = K.int_shape(y_pred)[-1]
    logits = K.reshape(y_pred, (-1, nb_classes))
    log_probs = tf.nn.log_softmax(logits)
    # One-hot with one extra column for the "ignore" id, then drop that
    # column so ignored pixels become all-zero rows (zero loss contribution).
    one_hot = K.one_hot(tf.to_int32(K.flatten(y_true)), nb_classes + 1)
    columns = tf.unstack(one_hot, axis=-1)
    targets = tf.stack(columns[:-1], axis=-1)
    per_pixel = -K.sum(targets * log_probs, axis=1)
    return K.mean(per_pixel)
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    """Pixel accuracy over flattened predictions, ignoring the last label."""
    nb_classes = K.int_shape(y_pred)[-1]
    flat_pred = K.reshape(y_pred, (-1, nb_classes))
    one_hot = K.one_hot(tf.to_int32(K.flatten(y_true)), nb_classes + 1)
    columns = tf.unstack(one_hot, axis=-1)
    # The extra final column marks "ignore" pixels; valid pixels are the rest.
    legal_labels = ~tf.cast(columns[-1], tf.bool)
    flat_true = tf.stack(columns[:-1], axis=-1)
    correct = K.equal(K.argmax(flat_true, axis=-1), K.argmax(flat_pred, axis=-1))
    return K.sum(tf.to_float(legal_labels & correct)) / K.sum(tf.to_float(legal_labels))
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, Add, Dropout, UpSampling2D
'''
simple_cnn = Sequential()
simple_cnn.add(Conv2D(16, (3, 3), input_shape=(70, 116, 1), padding='same', activation='relu', name='conv1_1'))
simple_cnn.add(Conv2D(16, (3, 3), input_shape=(70, 116, 1), padding='same', activation='relu', name='conv1_2'))
simple_cnn.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
simple_cnn.add(Conv2D(32, (3, 3), padding='same', activation='relu', name='conv2_1'))
simple_cnn.add(Conv2D(32, (3, 3), padding='same', activation='relu', name='conv2_2'))
simple_cnn.add(MaxPooling2D(pool_size=(5, 2), name='pool2'))
simple_cnn.add(Flatten(name='flatten'))
simple_cnn.add(Dense(32, activation='relu', name='fc1'))
simple_cnn.add(Dropout(0.5))
simple_cnn.add(Dense(1, activation='sigmoid', name='prediction'))
'''
def fcn_cnn():
    """Build a fully-convolutional variant of `simple_cnn` for segmentation.

    The conv/pool backbone matches simple_cnn layer-for-layer (so the
    trained classifier weights can be copied in), but the Flatten + Dense
    head is replaced by convolutional "fully connected" layers and an
    upsampling layer, producing a per-pixel 2-class logit map at the same
    70x116 spatial size as the input.

    Returns:
        A Keras Sequential model with output shape (None, 70, 116, 2).
    """
    fc_cnn = Sequential()
    fc_cnn.add(Conv2D(16, (3, 3), input_shape=(70, 116, 1), padding='same', activation='relu', name='conv1_1'))
    # input_shape is ignored on non-first layers, so it is not repeated here.
    fc_cnn.add(Conv2D(16, (3, 3), padding='same', activation='relu', name='conv1_2'))
    fc_cnn.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))  # -> (35, 58, 16)
    fc_cnn.add(Conv2D(32, (3, 3), padding='same', activation='relu', name='conv2_1'))
    fc_cnn.add(Conv2D(32, (3, 3), padding='same', activation='relu', name='conv2_2'))
    fc_cnn.add(MaxPooling2D(pool_size=(5, 2), name='pool2'))  # -> (7, 29, 32)
    # Convolutional replacements for the Dense layers; padding='same' keeps
    # the (7, 29) spatial size even for the full-extent (7, 29) kernel.
    fc_cnn.add(Conv2D(128, (7, 29), padding='same', activation='relu', name='fc3'))
    fc_cnn.add(Dropout(0.5))
    fc_cnn.add(Conv2D(128, (1, 1), padding='same', activation='relu', name='fc4'))
    fc_cnn.add(Dropout(0.5))
    fc_cnn.add(Conv2D(2, (1, 1), padding='same', name='logit_fc4'))  # -> (7, 29, 2)
    # Upsample straight back to the input resolution: (7, 29) * (10, 4) = (70, 116).
    # A learnable Conv2DTranspose path (optionally fused with a skip
    # connection from pool1, FCN-16s style) was prototyped here but is
    # disabled in favour of plain nearest-neighbour upsampling.
    fc_cnn.add(UpSampling2D(size=(10, 4), name='score_fr'))
    return fc_cnn
# Build the FCN and compile it with the ignore-last-label loss and metric.
fcn = fcn_cnn()
adam = optimizers.Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
fcn.compile(optimizer=adam,
            loss=softmax_sparse_crossentropy_ignoring_last_label,
            metrics=[sparse_accuracy_ignoring_last_label])
simple_cnn.summary()

# Look up simple_cnn weights; diagnostic only, no need to run.
# enumerate() replaces the range(len(...)) anti-pattern, and get_weights()
# is called once per layer instead of up to four times.
for i, layer in enumerate(simple_cnn.layers):
    weights = layer.get_weights()
    n = len(weights)
    print(str(i) + "-layer weight len: ", n)
    if n == 2:  # layers holding a kernel + bias pair (Conv2D / Dense)
        print("    weight matrix size: ", weights[0].shape)
        print("    bias vector size: ", weights[1].shape)
        print(weights[0])
# Print the FCN architecture and render its graph to an image file
# (requires pydot + graphviz).
fcn.summary()
from keras.utils import plot_model
plot_model(fcn, to_file='fcn.png')
# Look up fcn weights; diagnostic only, no need to run.
# enumerate() replaces the range(len(...)) anti-pattern, and get_weights()
# is called once per layer instead of up to four times.
for i, layer in enumerate(fcn.layers):
    weights = layer.get_weights()
    n = len(weights)
    print(str(i) + "-layer weight len: ", n)
    if n == 2:  # layers holding a kernel + bias pair
        print("    weight matrix size: ", weights[0].shape)
        print("    bias vector size: ", weights[1].shape)
        print(weights[0])
simple_cnn.load_weights('simple_cnn.h5')
def set_weights(fcn, simple_cnn, n_layers=5):
    """Copy the first `n_layers` layers' weights from `simple_cnn` into `fcn`.

    The first five layers (conv1_1, conv1_2, pool1, conv2_1, conv2_2) are
    architecturally identical in both models, so the trained classifier
    weights initialise the FCN backbone.  `n_layers` generalises the
    previously hard-coded constant 5 (default unchanged).
    """
    for i in range(n_layers):
        fcn.layers[i].set_weights(simple_cnn.layers[i].get_weights())
set_weights(fcn, simple_cnn)
# look up simple_cnn initialized weights, do not need to run
layers = fcn.layers
for i in range(len(layers)):
n = len(layers[i].get_weights())
print(str(i) + "-layer weight len: ", n)
if n == 2:
print(" weight matrix size: ", layers[i].get_weights()[0].shape)
print(" bias vector size: ", layers[i].get_weights()[1].shape)
print(layers[i].get_weights()[0])
# Read training data: paired image/mask generators share the same seed so
# identical random augmentation is applied to each image and its mask.
import skimage.io as io
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

batch_size = 256

data_gen_args = dict(featurewise_center=True,
                     featurewise_std_normalization=True,
                     rotation_range=180.,
                     horizontal_flip=True,
                     fill_mode='nearest')
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
seed = 1

# Load all 14400 image/mask pairs to fit the featurewise statistics.
# NOTE(review): featurewise normalisation is also fitted on (and applied to)
# the masks, which rescales the label values -- confirm this is intended.
imgs_filename = ["data_fcn_full/train/images/images/%d.jpg" % i for i in range(1, 14401)]
masks_filename = ["data_fcn_full/train/masks/masks/%d_mask.jpg" % i for i in range(1, 14401)]
sample_imgs = [io.imread(img_name)[..., np.newaxis] for img_name in imgs_filename]
sample_masks = [io.imread(mask_name)[..., np.newaxis] for mask_name in masks_filename]
image_datagen.fit(sample_imgs, augment=True, seed=seed)
mask_datagen.fit(sample_masks, augment=True, seed=seed)

# class_mode=None: these streams yield raw arrays with no labels.
image_generator = image_datagen.flow_from_directory(
    'data_fcn_full/train/images',
    target_size=(70, 116),
    batch_size=batch_size,
    class_mode=None,
    seed=seed,
    color_mode='grayscale')
mask_generator = mask_datagen.flow_from_directory(
    'data_fcn_full/train/masks',
    target_size=(70, 116),
    batch_size=batch_size,
    class_mode=None,
    seed=seed,
    color_mode='grayscale')

# Pair each image batch with its mask batch (lazy iterator in Python 3).
train_generator = zip(image_generator, mask_generator)
# Read validation data.  Unlike training, no random augmentation is applied
# here (the original code rotated/flipped validation batches, which distorts
# the reported metrics); featurewise normalisation is kept so validation
# inputs are scaled the same way as training inputs.
# NOTE(review): as with training, the normalisation is also applied to the
# masks, rescaling label values -- confirm this is intended.
data_gen_args = dict(featurewise_center=True,
                     featurewise_std_normalization=True)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
seed = 1
imgs_filename = ["data_fcn_full/validation/images/images/"+str(i)+".jpg" for i in range(1, 3571)]
masks_filename = ["data_fcn_full/validation/masks/masks/"+str(i)+"_mask.jpg" for i in range(1, 3571)]
sample_imgs = [np.expand_dims(io.imread(img_name), -1) for img_name in imgs_filename]
sample_masks = [np.expand_dims(io.imread(mask_name), -1) for mask_name in masks_filename]
# augment=False: statistics are computed on the raw samples since this
# generator performs no augmentation.
image_datagen.fit(sample_imgs, augment=False, seed=seed)
mask_datagen.fit(sample_masks, augment=False, seed=seed)
image_generator = image_datagen.flow_from_directory(
    'data_fcn_full/validation/images',
    target_size=(70, 116),
    batch_size=batch_size,
    class_mode=None,
    seed=seed,
    color_mode='grayscale')
mask_generator = mask_datagen.flow_from_directory(
    'data_fcn_full/validation/masks',
    target_size=(70, 116),
    batch_size=batch_size,
    class_mode=None,
    seed=seed,
    color_mode='grayscale')
validation_generator = zip(image_generator, mask_generator)
# Time-consuming training process of fcn, expect to run for a long time.
# 1500000 / 150000 approximate the augmented train / validation sample
# counts (TODO confirm); using batch_size instead of a literal 256 keeps the
# step counts in sync with the generators above.
fcn.fit_generator(
    train_generator,
    steps_per_epoch=1500000 // batch_size,
    epochs=2,
    validation_data=validation_generator,
    validation_steps=150000 // batch_size)
fcn.save_weights('fcn.h5')
# %matplotlib inline   (IPython magic -- only valid inside a Jupyter notebook)
import skimage.io as io
import numpy as np
import matplotlib.pyplot as plt

# Visual sanity check on one validation example:
# input image | predicted class map | ground-truth mask.
img = io.imread('data_fcn_full/validation/images/images/68.jpg')   # (70, 116)
mask = io.imread('data_fcn_full/validation/masks/masks/68_mask.jpg')

batch = img[np.newaxis, :, :, np.newaxis]   # add batch + channel axes -> (1, 70, 116, 1)
pred = fcn.predict(batch)                   # (1, 70, 116, 2) per-pixel class scores
pred = np.squeeze(np.argmax(pred, 3), 0)    # (70, 116) winning class per pixel

plt.figure(figsize=(15, 7))
for panel_idx, panel in enumerate((img, pred, mask), start=1):
    plt.subplot(1, 3, panel_idx)
    plt.imshow(panel)