import tensorflow as tf
import numpy as np
Everything is eager by default in TensorFlow 2.0.
# Two small float64 vectors; tf.add accepts NumPy arrays directly and
# runs eagerly, returning a tf.Tensor immediately.
a = np.array([1.0, 2.0])
b = np.array([2.0, 5.0])
tf.add(a, b)
<tf.Tensor: id=2, shape=(2,), dtype=float64, numpy=array([3., 7.])>
As TensorFlow suggests (https://www.youtube.com/watch?v=jh4ITuOytE4), we can use AutoGraph to automatically convert an eager function into a graph function by applying the tf.function decorator.
The following code first compiles the body of the function into graph code before executing it. Note that if you remove the @tf.function decorator the code will still work perfectly, but it will not be executed in graph mode.
@tf.function
def add_fc(a, b):
    """Element-wise add of two tensors, traced into a graph by AutoGraph."""
    total = tf.add(a, b)
    return total

print(add_fc(a, b))
tf.Tensor([3. 7.], shape=(2,), dtype=float64)
The following code shows the source generated by AutoGraph to build the graph:
def add_fc(a, b):
    """Element-wise add of two tensors (eager; no @tf.function here)."""
    summed = tf.add(a, b)
    return summed

# tf.autograph.to_code returns the Python source AutoGraph would generate.
graph_source = tf.autograph.to_code(add_fc)
print(graph_source)
def tf__add_fc(a, b):
  do_return = False
  retval_ = ag__.UndefinedReturnValue()
  do_return = True
  retval_ = ag__.converted_call('add', tf, ag__.ConversionOptions(recursive=True, force_conversion=False, optional_features=(), internal_convert_user_code=True), (a, b), None)
  cond = ag__.is_undefined_return(retval_)

  def get_state():
    return ()

  def set_state(_):
    pass

  def if_true():
    retval_ = None
    return retval_

  def if_false():
    return retval_
  retval_ = ag__.if_stmt(cond, if_true, if_false, get_state, set_state)
  return retval_
# Build a small fully connected classifier: two hidden ReLU layers and a
# 10-way softmax output. (NOTE(review): the original "# Flatten" comment was
# misleading — no Flatten layer is used; inputs are already flat vectors.)
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])
By calling model.predict, you get a NumPy array, but the output is not differentiable.
# predict() runs a forward pass and returns a plain NumPy array —
# fine for inference, but not usable for gradient computation.
dummy_batch = np.zeros((1, 30))
model_output = model.predict(dummy_batch)
model_output
array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]], dtype=float32)
By calling model(), eager execution is used and the output is differentiable
# Calling the model object directly executes eagerly and returns a
# tf.Tensor, which supports gradient computation.
zero_batch = np.zeros((1, 30))
model_output = model(zero_batch)
model_output
<tf.Tensor: id=161, shape=(1, 10), dtype=float32, numpy=array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]], dtype=float32)>
Of course, you can execute the model in graph mode by using the @tf.function decorator
@tf.function
def predict(x):
    """Forward pass through `model`, traced into a graph by @tf.function."""
    outputs = model(x)
    return outputs

batch = np.zeros((1, 30))
model_output = predict(batch)
print(model_output)
tf.Tensor([[0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1]], shape=(1, 10), dtype=float32)