#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import timeit

import numpy as np
import tensorflow as tf


# A tf.function turns a Python function into a callable TensorFlow graph.
@tf.function
def simple_nn_layer(x, y):
  return tf.nn.relu(tf.matmul(x, y))


x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))

simple_nn_layer(x, y)

# The decorated symbol is a special callable, not a plain Python function.
simple_nn_layer


# Only the outermost function needs the decorator; functions it calls are
# traced into the same graph.
def linear_layer(x):
  return 2 * x + 1


@tf.function
def deep_net(x):
  return tf.nn.relu(linear_layer(x))


deep_net(tf.constant((1, 2, 3)))

# Benchmark: graphs add little for ops already dominated by kernel time,
# such as a large convolution.
conv_layer = tf.keras.layers.Conv2D(100, 3)


@tf.function
def conv_fn(image):
  return conv_layer(image)


image = tf.zeros([1, 200, 200, 100])
# Warm up both paths so one-time tracing cost is excluded from the timings.
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")

# Benchmark: graphs help more for code with many small ops, such as an LSTM cell.
lstm_cell = tf.keras.layers.LSTMCell(10)


@tf.function
def lstm_fn(input, state):
  return lstm_cell(input, state)


input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# Warm up.
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))


# AutoGraph converts data-dependent Python control flow (if/for/while on
# tensors) into TensorFlow ops such as tf.cond and tf.while_loop.
@tf.function
def square_if_positive(x):
  if x > 0:
    x = x * x
  else:
    x = 0
  return x


print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))


@tf.function
def sum_even(items):
  s = 0
  for c in items:
    if c % 2 > 0:
      continue
    s += c
  return s


sum_even(tf.constant([10, 12, 15, 20]))

# Inspect the code AutoGraph generates for the Python function.
print(tf.autograph.to_code(sum_even.python_function))


# Use tf.print (not print) for values that are only known at graph run time.
@tf.function
def fizzbuzz(n):
  for i in tf.range(n):
    if i % 3 == 0:
      tf.print('Fizz')
    elif i % 5 == 0:
      tf.print('Buzz')
    else:
      tf.print(i)


fizzbuzz(tf.constant(15))


# tf.function also works on methods, e.g. a Keras model's call().
class CustomModel(tf.keras.models.Model):

  @tf.function
  def call(self, input_data):
    if tf.reduce_mean(input_data) > 0:
      return input_data
    else:
      return input_data // 2


model = CustomModel()

model(tf.constant([-2, -4]))

# tf.function supports stateful side effects such as variable assignment.
v = tf.Variable(5)


@tf.function
def find_next_odd():
  v.assign(v + 1)
  if v % 2 == 0:
    v.assign(v + 1)


find_next_odd()
v  # In a notebook, this displays the variable's current value (7).


# Debugging: run the function eagerly so ordinary Python tools work.
@tf.function
def f(x):
  if x > 0:
    # Try setting a breakpoint here!
    # Example:
    #   import pdb
    #   pdb.set_trace()
    x = x + 1
  return x


tf.config.experimental_run_functions_eagerly(True)

# You can now set breakpoints and run the code in a debugger.
f(tf.constant(1))

tf.config.experimental_run_functions_eagerly(False)
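
# An aside, not part of the original notebook: TensorFlow 2.3+ exposes the
# same debugging switch without the "experimental" prefix, as
# tf.config.run_functions_eagerly. A minimal sketch, guarded so it is a
# no-op on older 2.x releases that lack the newer API:
if hasattr(tf.config, 'run_functions_eagerly'):
  tf.config.run_functions_eagerly(True)   # all tf.functions execute eagerly
  f(tf.constant(1))                       # breakpoints work inside f here too
  tf.config.run_functions_eagerly(False)  # restore graph execution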

# A full training loop: the Python training code, including iteration over
# the tf.data pipeline, compiles into a single graph.
def prepare_mnist_features_and_labels(x, y):
  x = tf.cast(x, tf.float32) / 255.0
  y = tf.cast(y, tf.int64)
  return x, y


def mnist_dataset():
  (x, y), _ = tf.keras.datasets.mnist.load_data()
  ds = tf.data.Dataset.from_tensor_slices((x, y))
  ds = ds.map(prepare_mnist_features_and_labels)
  ds = ds.take(20000).shuffle(20000).batch(100)
  return ds


train_dataset = mnist_dataset()

model = tf.keras.Sequential((
    tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()

compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()


def train_one_step(model, optimizer, x, y):
  with tf.GradientTape() as tape:
    logits = model(x)
    loss = compute_loss(y, logits)

  grads = tape.gradient(loss, model.trainable_variables)
  optimizer.apply_gradients(zip(grads, model.trainable_variables))

  compute_accuracy(y, logits)
  return loss


@tf.function
def train(model, optimizer):
  train_ds = mnist_dataset()
  step = 0
  loss = 0.0
  accuracy = 0.0
  for x, y in train_ds:
    step += 1
    loss = train_one_step(model, optimizer, x, y)
    if step % 10 == 0:
      tf.print('Step', step, ': loss', loss,
               '; accuracy', compute_accuracy.result())
  return step, loss, accuracy


step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss,
      '; accuracy', compute_accuracy.result())


# Batching: prefer vectorized TensorFlow ops over per-element Python loops.
# Plain Python version:
def square_if_positive(x):
  return [i ** 2 if i > 0 else i for i in x]


square_if_positive(range(-5, 5))


# Naive tensor translation: an element-wise loop via tf.TensorArray.
@tf.function
def square_if_positive_naive(x):
  result = tf.TensorArray(tf.int32, size=x.shape[0])
  for i in tf.range(x.shape[0]):
    if x[i] > 0:
      result = result.write(i, x[i] ** 2)
    else:
      result = result.write(i, x[i])
  return result.stack()


square_if_positive_naive(tf.range(-5, 5))


# Vectorized version: a single tf.where over the whole batch.
def square_if_positive_vectorized(x):
  return tf.where(x > 0, x ** 2, x)


square_if_positive_vectorized(tf.range(-5, 5))


# Tracing: the first call pays a one-time graph-construction cost.
@tf.function
def f(x, y):
  return tf.matmul(x, y)


print(
    "First invocation:",
    timeit.timeit(lambda: f(tf.ones((10, 10)), tf.ones((10, 10))), number=1))

print(
    "Second invocation:",
    timeit.timeit(lambda: f(tf.ones((10, 10)), tf.ones((10, 10))), number=1))


# print() runs only while tracing; tf.print() runs on every call.
@tf.function
def f():
  print('Tracing!')
  tf.print('Executing')


print('First invocation:')
f()

print('Second invocation:')
f()


# Each distinct Python argument value triggers a fresh trace.
@tf.function
def f(n):
  print(n, 'Tracing!')
  tf.print(n, 'Executing')


f(1)
f(1)
f(2)
f(2)


# Tensor arguments retrace only when the shape/dtype signature changes.
@tf.function
def f(x):
  print(x.shape, 'Tracing!')
  tf.print(x, 'Executing')


f(tf.constant([1]))
f(tf.constant([2]))

f(tf.constant([1, 2]))
f(tf.constant([3, 4]))


# Each new tf.function object keeps its own trace cache...
def f():
  print('Tracing!')
  tf.print('Executing')


tf.function(f)()
tf.function(f)()


# ...so redefining the function (e.g. inside another function) retraces too.
def outer():
  @tf.function
  def f():
    print('Tracing!')
    tf.print('Executing')
  f()


outer()
outer()
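
# A follow-on sketch, not part of the original notebook: one way to bound
# retracing is to pin the trace with an explicit input_signature. With a
# relaxed tf.TensorSpec (shape=[None]), every 1-D int32 tensor reuses a
# single trace regardless of its length. The function name is illustrative.
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.int32)])
def f_with_signature(x):
  print('Tracing!')  # runs only once, at trace time
  tf.print(x, 'Executing')


f_with_signature(tf.constant([1]))        # traces
f_with_signature(tf.constant([1, 2]))     # reuses the same trace
f_with_signature(tf.constant([3, 4, 5]))  # reuses the same trace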