#!/usr/bin/env python
# coding: utf-8

# # Eager Mode
#
# Demo of TensorFlow 2.x execution modes: eager by default, graph via
# AutoGraph (`@tf.function`), and how each interacts with a Keras model.

import tensorflow as tf
import numpy as np

# Everything is eager by default in TensorFlow 2.0: ops run immediately
# and return concrete values instead of building a graph.
a = np.array([1., 2.])
b = np.array([2., 5.])
tf.add(a, b)

# ## Graph mode: AutoGraph
#
# As TensorFlow suggests (https://www.youtube.com/watch?v=jh4ITuOytE4),
# AutoGraph can automatically convert an eager function into a graph
# function via the `tf.function` decorator.
#
# The decorated function below is first compiled into graph code, then
# executed. Removing the `@tf.function` decorator still gives a working
# function — it just runs eagerly instead of in graph mode.


@tf.function
def add_fc(a, b):
    """Add two tensors; traced into a graph on first call."""
    return tf.add(a, b)


print (add_fc(a, b))


# `tf.autograph.to_code` shows the Python source AutoGraph generates to
# build the graph (note: it takes the plain, undecorated function).
def add_fc(a, b):
    """Eager addition of two tensors."""
    return tf.add(a, b)


print(tf.autograph.to_code(add_fc))

# # Graph mode and eager mode with a Keras model

# Build a small fully-connected classifier (3 Dense layers).
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(256, activation="relu"))
model.add(tf.keras.layers.Dense(128, activation="relu"))
model.add(tf.keras.layers.Dense(10, activation="softmax"))

# `model.predict` returns a plain NumPy array — convenient, but the
# output is not differentiable (no gradient tape connection).
model_output = model.predict(np.zeros((1, 30)))
model_output

# Calling `model()` directly runs eagerly and returns a differentiable
# tensor.
model_output = model(np.zeros((1, 30)))
model_output


# The model can also be executed in graph mode by wrapping the call in a
# `@tf.function`-decorated function.
@tf.function
def predict(x):
    """Run the model on `x` as a compiled graph function."""
    return model(x)


model_output = predict(np.zeros((1, 30)))
print(model_output)