Diplomado en Inteligencia Artificial y Aprendizaje Profundo
This lesson is devoted to presenting object-oriented programming applied to Keras, generically known as subclassing.
The topic is recommended for users who already have some experience with programming and, in our case, with Keras.
We will rebuild the multi-class classification model used in the introduction to the functional API. The example is once again Iris, so that the two programming styles can be compared.
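For reference, here is a minimal sketch of the same architecture written with the functional API; the layer sizes match the subclassed model defined below, and the variable names are illustrative:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input

inputs = Input(shape=(4,))                   # the four Iris features
x = Dense(5, activation='relu')(inputs)
x = Dense(10, activation='relu')(x)
outputs = Dense(3, activation='softmax')(x)
functional_iris = Model(inputs=inputs, outputs=outputs, name='functional_iris')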
try:
    # %tensorflow_version only exists in Colab.
    %tensorflow_version 2.x
except Exception:
    pass
from __future__ import absolute_import, division, print_function, unicode_literals
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
#
from tensorflow.keras.models import Model
#
from tensorflow.keras.layers import Dense, Input, Activation
#
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.utils import plot_model
#
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
#
print(tf.__version__)
2.1.0
This dataset was introduced by Sir Ronald Fisher in 1936.
# column names for the data
col_names = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
target_dimensions = ['Setosa', 'Versicolor', 'Virginica']
# read the data
training_data_path = tf.keras.utils.get_file("iris_training.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv")
test_data_path = tf.keras.utils.get_file("iris_test.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv")
training = pd.read_csv(training_data_path, names=col_names, header=0)
test = pd.read_csv(test_data_path, names=col_names, header=0)
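As a quick sanity check that the files loaded correctly, we can inspect the shapes and the first rows; the row counts in the comments are those of the standard TensorFlow Iris split:
print(training.shape, test.shape)  # (120, 5) and (30, 5)
print(training.head())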
The target variable has three categories. We will use one-hot encoding.
y_train = pd.DataFrame(to_categorical(training.Species))
y_train.columns = target_dimensions
y_test = pd.DataFrame(to_categorical(test.Species))
y_test.columns = target_dimensions
training.drop(['Species'], axis=1, inplace=True)
#test.drop(['Species'], axis=1, inplace=True)
y_test_species = test.pop('Species')  # pop removes the column and returns it as y_test_species
#
# If you need to add the recoded labels back into the dataframe, use these lines
#training = training.join(labels_training)
#test = test.join(labels_test)
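To see exactly what to_categorical produces here, note that the class indices 0, 1 and 2 become the rows of a 3x3 identity matrix:
print(to_categorical([0, 1, 2]))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]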
# create the StandardScaler object
scaler = StandardScaler()
# fit the scaler parameters (per-column mean and standard deviation)
scaler.fit(training)
print(scaler.mean_)
# scale training and test
x_train = scaler.transform(training)
x_test = scaler.transform(test)
# labels (the one-hot targets do not require scaling)
[5.845 3.065 3.73916667 1.19666667]
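StandardScaler standardizes each column as z = (x - mean) / std, so after the transformation the training data should have mean close to 0 and standard deviation close to 1 in every column. A quick check:
print(x_train.mean(axis=0).round(6))  # approximately [0. 0. 0. 0.]
print(x_train.std(axis=0).round(6))   # approximately [1. 1. 1. 1.]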
We will now derive a class from the Model class: the layers are created in __init__ and the forward pass is defined in call.
class CustomModel(Model):

    def __init__(self, **kwargs):
        super(CustomModel, self).__init__(**kwargs)
        self.dense1 = Dense(5, activation='relu')
        self.dense2 = Dense(10, activation='relu')
        self.dense3 = Dense(3, activation='softmax')

    def call(self, inputs):
        x = self.dense1(inputs)
        x = self.dense2(x)
        return self.dense3(x)
model_iris = CustomModel(name='my_custom_model')
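Note that a subclassed model builds its weights lazily, on its first call; calling summary() before then raises an error. A minimal sketch to force weight creation (the dummy batch below is an illustrative assumption); after this call, model_iris.summary() already works:
_ = model_iris(tf.zeros((1, 4)))  # a dummy batch with the 4 Iris features; this builds the weights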
## Compile
model_iris.compile(optimizer='rmsprop',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
history = model_iris.fit(x_train, y_train.values,
                         batch_size=64,
                         epochs=30,
                         validation_split=0.2)
Train on 96 samples, validate on 24 samples
Epoch 1/30
96/96 [==============================] - 1s 11ms/sample - loss: 0.9798 - accuracy: 0.3229 - val_loss: 0.8623 - val_accuracy: 0.5833
Epoch 2/30
96/96 [==============================] - 0s 271us/sample - loss: 0.9531 - accuracy: 0.3958 - val_loss: 0.8450 - val_accuracy: 0.6250
...
Epoch 29/30
96/96 [==============================] - 0s 352us/sample - loss: 0.6675 - accuracy: 0.8125 - val_loss: 0.6132 - val_accuracy: 0.9167
Epoch 30/30
96/96 [==============================] - 0s 264us/sample - loss: 0.6592 - accuracy: 0.8125 - val_loss: 0.6065 - val_accuracy: 0.9167
model_iris.summary()
# plot_model does not draw a meaningful graph for a subclassed model,
# because its layer graph is defined dynamically inside call():
#plot_model(model_iris, to_file='../Imagenes/iris_model.png',
#           show_shapes=True)
Model: "my_custom_model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) multiple 25 _________________________________________________________________ dense_1 (Dense) multiple 60 _________________________________________________________________ dense_2 (Dense) multiple 33 ================================================================= Total params: 118 Trainable params: 118 Non-trainable params: 0 _________________________________________________________________
def plot_metric(history, metric):
    train_metrics = history.history[metric]
    val_metrics = history.history['val_' + metric]
    epochs = range(1, len(train_metrics) + 1)
    plt.plot(epochs, train_metrics, 'bo--')
    plt.plot(epochs, val_metrics, 'ro-')
    plt.title('Training and validation ' + metric)
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend(["train_" + metric, 'val_' + metric])
    plt.show()
plot_metric(history, 'loss')
plot_metric(history, 'accuracy')
model_iris.evaluate(x=x_test, y=y_test.values)
30/30 [==============================] - 0s 165us/sample - loss: 0.7790 - accuracy: 0.7000
[0.7789719104766846, 0.7]
# Predict the test set results
y_pred = model_iris.predict(x_test)
y_pred_c = np.argmax(y_pred, axis=1)  # predicted class = index of the largest probability
cm = confusion_matrix(y_test_species, y_pred_c)
# accuracy = correct predictions (the diagonal of cm) over the number of test samples
print("Our accuracy is {}%".format((np.trace(cm) / y_test_species.shape[0]) * 100))
sns.heatmap(cm, annot=True)
plt.savefig('h.png')
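To make the heatmap easier to read, here is a sketch that labels the axes with the class names, reusing target_dimensions defined above (the output file name is illustrative):
plt.figure()
sns.heatmap(cm, annot=True, fmt='d',
            xticklabels=target_dimensions, yticklabels=target_dimensions)
plt.xlabel('Predicted class')
plt.ylabel('True class')
plt.savefig('confusion_matrix_labeled.png')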