#!/usr/bin/env python
# coding: utf-8

# # Predicting Diabetes with Keras
# ### A Very Short Introduction to Hyperparameter Optimization with Talos

# The goal of a Talos experiment is to find a set of suitable hyperparameters
# for a Keras model. In order to do this, you need to have three things:
#
# - Keras model
# - Talos hyperparameter dictionary
# - Talos experiment configuration
#
# Below we will briefly overview each.

# ### The Keras Model
#
# As a model, any Keras model will do. Let's consider as an example a very
# simple model that makes a prediction on the classic *Pima Indians Diabetes*
# dataset. A brief overview of the dataset can be found
# [here](https://www.kaggle.com/uciml/pima-indians-diabetes-database) and the
# dataset we will use can be found
# [here](https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv).
# The below model does not require you to separately download the file.

# In[ ]:

from numpy import loadtxt

# Download the dataset and split into the 8 feature columns (x) and the
# binary outcome label in the last column (y).
dataset = loadtxt(
    "https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv",
    delimiter=",")
x = dataset[:, 0:8]
y = dataset[:, 8]

# In[ ]:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense


def diabetes():
    """Build, compile and train the baseline model on the Pima dataset.

    Reads the module-level feature matrix ``x`` and label vector ``y``.
    Returns the trained Keras ``Sequential`` model.
    """
    model = Sequential()
    model.add(Dense(12, input_dim=8, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # FIX: the original called model.fit(X, Y, ...) but the module-level
    # arrays are lowercase ``x`` and ``y`` — uppercase X/Y would raise
    # NameError when this function is called.
    model.fit(x, y, epochs=100, batch_size=10, verbose=0)
    return model


# ### Talos Hyperparameter Dictionary

# Let's prepare for an experiment where we will optimize against three common
# attributes:
#
# - neurons on the first layer
# - activations
# - batch_sizes

# In[ ]:

from tensorflow.keras.activations import relu, elu

# Search space: every permutation of these values is one Talos round.
p = {
    'first_neuron': [12, 24, 48],
    'activation': ['relu', 'elu'],
    'batch_size': [10, 20, 30]
}

# ### Configuring the Keras Model for Talos

# In order to prepare a Keras model for a Talos
# experiment, we need to do four things:
#
# - add input parameters to the function
# - replace the hyperparameter inputs with references to params dictionary
# - make sure model.fit() stores the history object
# - modify the output of the model
#
# These steps are always the same.

# In[ ]:

# add input parameters to the function
def diabetes(x_train, y_train, x_val, y_val, params):
    """Talos-ready model: build, compile, train, return ``(history, model)``.

    Parameters follow the Talos convention: the training split, the
    validation split, and ``params`` — the dict holding the hyperparameter
    permutation for the current round (keys: ``first_neuron``,
    ``activation``, ``batch_size``).
    """
    # replace the hyperparameter inputs with references to params dictionary
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=8,
                    activation=params['activation']))
    # model.add(Dense(8, activation=params['activation']))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # make sure history object is returned by model.fit()
    # FIX: the original trained on the module-level globals ``x``/``y``
    # instead of the x_train/y_train split Talos passes in — that leaks the
    # validation rows into training and defeats the whole train/val split
    # (and contradicts the "add input parameters" step above).
    out = model.fit(x=x_train,
                    y=y_train,
                    validation_data=(x_val, y_val),
                    epochs=100,
                    batch_size=params['batch_size'],
                    verbose=0)
    # modify the output of the model
    return out, model


# That's it, there is nothing more to it. A more complicated experiment would
# just entail more of the same in terms of the way the params dictionary
# references are made. Otherwise the changes would always be exactly the same.

# ### Talos Experiment

# The Talos experiment is performed through the Scan() command. In case you
# don't have Talos installed already, you can do that now.

# In[ ]:

import talos

# While many configurations are possible, the only things that you absolutely
# must input to a Talos experiment are:
#
# - x
# - y
# - params (the dictionary 'p' we created above)
# - model (the 'diabetes' we created above)

# In[ ]:

t = talos.Scan(x=x, y=y, params=p, model=diabetes, experiment_name='diabetes')