#!/usr/bin/env python
# coding: utf-8

# **Chapter 13 – Loading and Preprocessing Data with TensorFlow**

# _This notebook contains all the sample code and solutions to the exercises in chapter 13._
# # Setup # This project requires Python 3.7 or above: # In[1]: import sys assert sys.version_info >= (3, 7) # It also requires Scikit-Learn ≥ 1.0.1: # In[2]: from packaging import version import sklearn assert version.parse(sklearn.__version__) >= version.parse("1.0.1") # And TensorFlow ≥ 2.8: # In[3]: import tensorflow as tf assert version.parse(tf.__version__) >= version.parse("2.8.0") # # The tf.data API # In[4]: import tensorflow as tf X = tf.range(10) # any data tensor dataset = tf.data.Dataset.from_tensor_slices(X) dataset # In[5]: for item in dataset: print(item) # In[6]: X_nested = {"a": ([1, 2, 3], [4, 5, 6]), "b": [7, 8, 9]} dataset = tf.data.Dataset.from_tensor_slices(X_nested) for item in dataset: print(item) # ## Chaining Transformations # In[7]: dataset = tf.data.Dataset.from_tensor_slices(tf.range(10)) dataset = dataset.repeat(3).batch(7) for item in dataset: print(item) # In[8]: dataset = dataset.map(lambda x: x * 2) # x is a batch for item in dataset: print(item) # In[9]: dataset = dataset.filter(lambda x: tf.reduce_sum(x) > 50) for item in dataset: print(item) # In[10]: for item in dataset.take(2): print(item) # ## Shuffling the Data # In[11]: dataset = tf.data.Dataset.range(10).repeat(2) dataset = dataset.shuffle(buffer_size=4, seed=42).batch(7) for item in dataset: print(item) # ### Interleaving lines from multiple files # Let's start by loading and preparing the California housing dataset. We first load it, then split it into a training set, a validation set and a test set: # In[12]: # extra code – fetches, splits and normalizes the California housing dataset from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split housing = fetch_california_housing() X_train_full, X_test, y_train_full, y_test = train_test_split( housing.data, housing.target.reshape(-1, 1), random_state=42) X_train, X_valid, y_train, y_valid = train_test_split( X_train_full, y_train_full, random_state=42) # For a very large dataset that does not fit in memory, you will typically want to split it into many files first, then have TensorFlow read these files in parallel. 
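# The general pattern is to list the file paths, then `interleave()` line readers across several of them at once, optionally letting `num_parallel_calls=tf.data.AUTOTUNE` pick the level of parallelism. Here is a minimal extra-code sketch of that pattern (it writes a few tiny throwaway files purely for illustration; the rest of this section builds the same pipeline on the real housing data):

# In[ ]:


# extra code – a self-contained sketch of the "many files + parallel reads" pattern
from pathlib import Path

sketch_dir = Path() / "datasets" / "sketch"
sketch_dir.mkdir(parents=True, exist_ok=True)
sketch_paths = []
for i in range(3):
    path = sketch_dir / f"part_{i:02d}.csv"
    path.write_text("x\n" + "\n".join(str(10 * i + j) for j in range(3)))
    sketch_paths.append(str(path))

files = tf.data.Dataset.list_files(sketch_paths, seed=42)  # shuffles the file paths
lines = files.interleave(
    lambda filepath: tf.data.TextLineDataset(filepath).skip(1),  # skip the header row
    cycle_length=3,
    num_parallel_calls=tf.data.AUTOTUNE)  # let TF choose the number of read threads
for line in lines:
    print(line)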
To demonstrate this, let's start by splitting the housing dataset and saving it to 20 CSV files: # In[13]: # extra code – split the dataset into 20 parts and save it to CSV files import numpy as np from pathlib import Path def save_to_csv_files(data, name_prefix, header=None, n_parts=10): housing_dir = Path() / "datasets" / "housing" housing_dir.mkdir(parents=True, exist_ok=True) filename_format = "my_{}_{:02d}.csv" filepaths = [] m = len(data) chunks = np.array_split(np.arange(m), n_parts) for file_idx, row_indices in enumerate(chunks): part_csv = housing_dir / filename_format.format(name_prefix, file_idx) filepaths.append(str(part_csv)) with open(part_csv, "w") as f: if header is not None: f.write(header) f.write("\n") for row_idx in row_indices: f.write(",".join([repr(col) for col in data[row_idx]])) f.write("\n") return filepaths train_data = np.c_[X_train, y_train] valid_data = np.c_[X_valid, y_valid] test_data = np.c_[X_test, y_test] header_cols = housing.feature_names + ["MedianHouseValue"] header = ",".join(header_cols) train_filepaths = save_to_csv_files(train_data, "train", header, n_parts=20) valid_filepaths = save_to_csv_files(valid_data, "valid", header, n_parts=10) test_filepaths = save_to_csv_files(test_data, "test", header, n_parts=10) # Okay, now let's take a peek at the first few lines of one of these CSV files: # In[14]: print("".join(open(train_filepaths[0]).readlines()[:4])) # In[15]: train_filepaths # **Building an Input Pipeline** # In[16]: filepath_dataset = tf.data.Dataset.list_files(train_filepaths, seed=42) # In[17]: # extra code – shows that the file paths are shuffled for filepath in filepath_dataset: print(filepath) # In[18]: n_readers = 5 dataset = filepath_dataset.interleave( lambda filepath: tf.data.TextLineDataset(filepath).skip(1), cycle_length=n_readers) # In[19]: for line in dataset.take(5): print(line) # ## Preprocessing the Data # In[20]: # extra code – compute the mean and standard deviation of each feature from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) # In[21]: X_mean, X_std = scaler.mean_, scaler.scale_ # extra code n_inputs = 8 def parse_csv_line(line): defs = [0.] 
* n_inputs + [tf.constant([], dtype=tf.float32)] fields = tf.io.decode_csv(line, record_defaults=defs) return tf.stack(fields[:-1]), tf.stack(fields[-1:]) def preprocess(line): x, y = parse_csv_line(line) return (x - X_mean) / X_std, y # In[22]: preprocess(b'4.2083,44.0,5.3232,0.9171,846.0,2.3370,37.47,-122.2,2.782') # ## Putting Everything Together + Prefetching # In[23]: def csv_reader_dataset(filepaths, n_readers=5, n_read_threads=None, n_parse_threads=5, shuffle_buffer_size=10_000, seed=42, batch_size=32): dataset = tf.data.Dataset.list_files(filepaths, seed=seed) dataset = dataset.interleave( lambda filepath: tf.data.TextLineDataset(filepath).skip(1), cycle_length=n_readers, num_parallel_calls=n_read_threads) dataset = dataset.map(preprocess, num_parallel_calls=n_parse_threads) dataset = dataset.shuffle(shuffle_buffer_size, seed=seed) return dataset.batch(batch_size).prefetch(1) # In[24]: # extra code – show the first couple of batches produced by the dataset example_set = csv_reader_dataset(train_filepaths, batch_size=3) for X_batch, y_batch in example_set.take(2): print("X =", X_batch) print("y =", y_batch) print() # Here is a short description of each method in the `Dataset` class: # In[25]: # extra code – list all methods of the tf.data.Dataset class for m in dir(tf.data.Dataset): if not (m.startswith("_") or m.endswith("_")): func = getattr(tf.data.Dataset, m) if hasattr(func, "__doc__"): print("● {:21s}{}".format(m + "()", func.__doc__.split("\n")[0])) # ## Using the Dataset with Keras # In[26]: train_set = csv_reader_dataset(train_filepaths) valid_set = csv_reader_dataset(valid_filepaths) test_set = csv_reader_dataset(test_filepaths) # In[27]: # extra code – for reproducibility tf.keras.backend.clear_session() tf.random.set_seed(42) # In[28]: model = tf.keras.Sequential([ tf.keras.layers.Dense(30, activation="relu", kernel_initializer="he_normal", input_shape=X_train.shape[1:]), tf.keras.layers.Dense(1), ]) model.compile(loss="mse", optimizer="sgd") model.fit(train_set, validation_data=valid_set, epochs=5) # In[29]: test_mse = model.evaluate(test_set) new_set = test_set.take(3) # pretend we have 3 new samples y_pred = model.predict(new_set) # or you could just pass a NumPy array # In[30]: # extra code – defines the optimizer and loss function for training optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) loss_fn = tf.keras.losses.mean_squared_error n_epochs = 5 for epoch in range(n_epochs): for X_batch, y_batch in train_set: # extra code – perform one Gradient Descent step # as explained in Chapter 12 print("\rEpoch {}/{}".format(epoch + 1, n_epochs), end="") with tf.GradientTape() as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) # In[31]: @tf.function def train_one_epoch(model, optimizer, loss_fn, train_set): for X_batch, y_batch in train_set: with tf.GradientTape() as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) loss_fn = tf.keras.losses.mean_squared_error for epoch in range(n_epochs): print("\rEpoch {}/{}".format(epoch + 1, n_epochs), end="") train_one_epoch(model, optimizer, loss_fn, train_set) # # The 
TFRecord Format # A TFRecord file is just a list of binary records. You can create one using a `tf.io.TFRecordWriter`: # In[32]: with tf.io.TFRecordWriter("my_data.tfrecord") as f: f.write(b"This is the first record") f.write(b"And this is the second record") # And you can read it using a `tf.data.TFRecordDataset`: # In[33]: filepaths = ["my_data.tfrecord"] dataset = tf.data.TFRecordDataset(filepaths) for item in dataset: print(item) # You can read multiple TFRecord files with just one `TFRecordDataset`. By default it will read them one at a time, but if you set `num_parallel_reads=3`, it will read 3 at a time in parallel and interleave their records: # In[34]: # extra code – shows how to read multiple files in parallel and interleave them filepaths = ["my_test_{}.tfrecord".format(i) for i in range(5)] for i, filepath in enumerate(filepaths): with tf.io.TFRecordWriter(filepath) as f: for j in range(3): f.write("File {} record {}".format(i, j).encode("utf-8")) dataset = tf.data.TFRecordDataset(filepaths, num_parallel_reads=3) for item in dataset: print(item) # ## Compressed TFRecord Files # In[35]: options = tf.io.TFRecordOptions(compression_type="GZIP") with tf.io.TFRecordWriter("my_compressed.tfrecord", options) as f: f.write(b"Compress, compress, compress!") # In[36]: dataset = tf.data.TFRecordDataset(["my_compressed.tfrecord"], compression_type="GZIP") # In[37]: # extra code – shows that the data is decompressed correctly for item in dataset: print(item) # ## A Brief Introduction to Protocol Buffers # For this section you need to [install protobuf](https://developers.google.com/protocol-buffers/docs/downloads). In general you will not have to do so when using TensorFlow, as it comes with functions to create and parse protocol buffers of type `tf.train.Example`, which are generally sufficient. However, in this section we will learn about protocol buffers by creating our own simple protobuf definition, so we need the protobuf compiler (`protoc`): we will use it to compile the protobuf definition to a Python module that we can then use in our code. # First let's write a simple protobuf definition: # In[38]: get_ipython().run_cell_magic('writefile', 'person.proto', 'syntax = "proto3";\nmessage Person {\n string name = 1;\n int32 id = 2;\n repeated string email = 3;\n}\n') # And let's compile it (the `--descriptor_set_out` and `--include_imports` options are only required for the `tf.io.decode_proto()` example below): # In[39]: get_ipython().system('protoc person.proto --python_out=. --descriptor_set_out=person.desc --include_imports') # In[40]: get_ipython().run_line_magic('ls', 'person*') # In[41]: from person_pb2 import Person # import the generated access class person = Person(name="Al", id=123, email=["a@b.com"]) # create a Person print(person) # display the Person # In[42]: person.name # read a field # In[43]: person.name = "Alice" # modify a field # In[44]: person.email[0] # repeated fields can be accessed like arrays # In[45]: person.email.append("c@d.com") # add an email address # In[46]: serialized = person.SerializeToString() # serialize person to a byte string serialized # In[47]: person2 = Person() # create a new Person person2.ParseFromString(serialized) # parse the byte string (27 bytes long) # In[48]: person == person2 # now they are equal # ### Custom protobuf # In rare cases, you may want to parse a custom protobuf (like the one we just created) in TensorFlow. 
For this you can use the `tf.io.decode_proto()` function: # In[49]: # extra code – shows how to use the tf.io.decode_proto() function person_tf = tf.io.decode_proto( bytes=serialized, message_type="Person", field_names=["name", "id", "email"], output_types=[tf.string, tf.int32, tf.string], descriptor_source="person.desc") person_tf.values # For more details, see the [`tf.io.decode_proto()`](https://www.tensorflow.org/api_docs/python/tf/io/decode_proto) documentation. # ## TensorFlow Protobufs # Here is the definition of the tf.train.Example protobuf: # ```proto # syntax = "proto3"; # # message BytesList { repeated bytes value = 1; } # message FloatList { repeated float value = 1 [packed = true]; } # message Int64List { repeated int64 value = 1 [packed = true]; } # message Feature { # oneof kind { # BytesList bytes_list = 1; # FloatList float_list = 2; # Int64List int64_list = 3; # } # }; # message Features { map feature = 1; }; # message Example { Features features = 1; }; # ``` # In[50]: from tensorflow.train import BytesList, FloatList, Int64List from tensorflow.train import Feature, Features, Example person_example = Example( features=Features( feature={ "name": Feature(bytes_list=BytesList(value=[b"Alice"])), "id": Feature(int64_list=Int64List(value=[123])), "emails": Feature(bytes_list=BytesList(value=[b"a@b.com", b"c@d.com"])) })) # In[51]: with tf.io.TFRecordWriter("my_contacts.tfrecord") as f: for _ in range(5): f.write(person_example.SerializeToString()) # ## Loading and Parsing Examples # In[52]: feature_description = { "name": tf.io.FixedLenFeature([], tf.string, default_value=""), "id": tf.io.FixedLenFeature([], tf.int64, default_value=0), "emails": tf.io.VarLenFeature(tf.string), } def parse(serialized_example): return tf.io.parse_single_example(serialized_example, feature_description) dataset = tf.data.TFRecordDataset(["my_contacts.tfrecord"]).map(parse) for parsed_example in dataset: print(parsed_example) # In[53]: tf.sparse.to_dense(parsed_example["emails"], default_value=b"") # In[54]: parsed_example["emails"].values # In[55]: def parse(serialized_examples): return tf.io.parse_example(serialized_examples, feature_description) dataset = tf.data.TFRecordDataset(["my_contacts.tfrecord"]).batch(2).map(parse) for parsed_examples in dataset: print(parsed_examples) # two examples at a time # In[56]: parsed_examples # ## Extra Material – Storing Images and Tensors in TFRecords # Let's load and display an example image: # In[57]: import matplotlib.pyplot as plt from sklearn.datasets import load_sample_images img = load_sample_images()["images"][0] plt.imshow(img) plt.axis("off") plt.title("Original Image") plt.show() # Now let's create an `Example` protobuf containing the image encoded as JPEG: # In[58]: data = tf.io.encode_jpeg(img) example_with_image = Example(features=Features(feature={ "image": Feature(bytes_list=BytesList(value=[data.numpy()]))})) serialized_example = example_with_image.SerializeToString() with tf.io.TFRecordWriter("my_image.tfrecord") as f: f.write(serialized_example) # Finally, let's create a tf.data pipeline that will read this TFRecord file, parse each `Example` protobuf (in this case just one), and parse and display the image that the example contains: # In[59]: feature_description = { "image": tf.io.VarLenFeature(tf.string) } def parse(serialized_example): example_with_image = tf.io.parse_single_example(serialized_example, feature_description) return tf.io.decode_jpeg(example_with_image["image"].values[0]) # or you can use tf.io.decode_image() instead 
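# extra code – a sketch (not in the original notebook) of an alternative parser
# based on tf.io.decode_image(), which also handles BMP, GIF and PNG; it returns
# a tensor with an undefined static shape (GIFs may decode to 4-D), so
# expand_animations=False is passed to guarantee a 3-D image
def parse_any_image(serialized_example):
    example = tf.io.parse_single_example(serialized_example,
                                         feature_description)
    return tf.io.decode_image(example["image"].values[0],
                              expand_animations=False)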
dataset = tf.data.TFRecordDataset("my_image.tfrecord").map(parse) for image in dataset: plt.imshow(image) plt.axis("off") plt.show() # Or use `decode_image()` which supports BMP, GIF, JPEG and PNG formats: # Tensors can be serialized and parsed easily using `tf.io.serialize_tensor()` and `tf.io.parse_tensor()`: # In[60]: tensor = tf.constant([[0., 1.], [2., 3.], [4., 5.]]) serialized = tf.io.serialize_tensor(tensor) serialized # In[61]: tf.io.parse_tensor(serialized, out_type=tf.float32) # In[62]: sparse_tensor = parsed_example["emails"] serialized_sparse = tf.io.serialize_sparse(sparse_tensor) serialized_sparse # In[63]: BytesList(value=serialized_sparse.numpy()) # ## Handling Lists of Lists Using the `SequenceExample` Protobuf # ```proto # syntax = "proto3"; # # message FeatureList { repeated Feature feature = 1; }; # message FeatureLists { map feature_list = 1; }; # message SequenceExample { # Features context = 1; # FeatureLists feature_lists = 2; # }; # ``` # In[64]: from tensorflow.train import FeatureList, FeatureLists, SequenceExample context = Features(feature={ "author_id": Feature(int64_list=Int64List(value=[123])), "title": Feature(bytes_list=BytesList(value=[b"A", b"desert", b"place", b"."])), "pub_date": Feature(int64_list=Int64List(value=[1623, 12, 25])) }) content = [["When", "shall", "we", "three", "meet", "again", "?"], ["In", "thunder", ",", "lightning", ",", "or", "in", "rain", "?"]] comments = [["When", "the", "hurlyburly", "'s", "done", "."], ["When", "the", "battle", "'s", "lost", "and", "won", "."]] def words_to_feature(words): return Feature(bytes_list=BytesList(value=[word.encode("utf-8") for word in words])) content_features = [words_to_feature(sentence) for sentence in content] comments_features = [words_to_feature(comment) for comment in comments] sequence_example = SequenceExample( context=context, feature_lists=FeatureLists(feature_list={ "content": FeatureList(feature=content_features), "comments": FeatureList(feature=comments_features) })) # In[65]: sequence_example # In[66]: serialized_sequence_example = sequence_example.SerializeToString() # In[67]: context_feature_descriptions = { "author_id": tf.io.FixedLenFeature([], tf.int64, default_value=0), "title": tf.io.VarLenFeature(tf.string), "pub_date": tf.io.FixedLenFeature([3], tf.int64, default_value=[0, 0, 0]), } sequence_feature_descriptions = { "content": tf.io.VarLenFeature(tf.string), "comments": tf.io.VarLenFeature(tf.string), } # In[68]: parsed_context, parsed_feature_lists = tf.io.parse_single_sequence_example( serialized_sequence_example, context_feature_descriptions, sequence_feature_descriptions) parsed_content = tf.RaggedTensor.from_sparse(parsed_feature_lists["content"]) # In[69]: parsed_context # In[70]: parsed_context["title"].values # In[71]: parsed_feature_lists # In[72]: print(tf.RaggedTensor.from_sparse(parsed_feature_lists["content"])) # # Keras Preprocessing Layers # ## The `Normalization` Layer # In[73]: tf.random.set_seed(42) # extra code – ensures reproducibility norm_layer = tf.keras.layers.Normalization() model = tf.keras.models.Sequential([ norm_layer, tf.keras.layers.Dense(1) ]) model.compile(loss="mse", optimizer=tf.keras.optimizers.SGD(learning_rate=2e-3)) norm_layer.adapt(X_train) # computes the mean and variance of every feature model.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=5) # In[74]: norm_layer = tf.keras.layers.Normalization() norm_layer.adapt(X_train) X_train_scaled = norm_layer(X_train) X_valid_scaled = norm_layer(X_valid) # In[75]: 
tf.random.set_seed(42) # extra code – ensures reproducibility model = tf.keras.models.Sequential([tf.keras.layers.Dense(1)]) model.compile(loss="mse", optimizer=tf.keras.optimizers.SGD(learning_rate=2e-3)) model.fit(X_train_scaled, y_train, epochs=5, validation_data=(X_valid_scaled, y_valid)) # In[76]: final_model = tf.keras.Sequential([norm_layer, model]) X_new = X_test[:3] # pretend we have a few new instances (unscaled) y_pred = final_model(X_new) # preprocesses the data and makes predictions # In[77]: y_pred # In[78]: # extra code – creates a dataset to demo applying the norm_layer using map() dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).batch(5) # In[79]: dataset = dataset.map(lambda X, y: (norm_layer(X), y)) # In[80]: list(dataset.take(1)) # extra code – shows the first batch # In[81]: class MyNormalization(tf.keras.layers.Layer): def adapt(self, X): self.mean_ = np.mean(X, axis=0, keepdims=True) self.std_ = np.std(X, axis=0, keepdims=True) def call(self, inputs): eps = tf.keras.backend.epsilon() # a small smoothing term return (inputs - self.mean_) / (self.std_ + eps) # In[82]: my_norm_layer = MyNormalization() my_norm_layer.adapt(X_train) X_train_scaled = my_norm_layer(X_train) # ## The `Discretization` Layer # In[83]: age = tf.constant([[10.], [93.], [57.], [18.], [37.], [5.]]) discretize_layer = tf.keras.layers.Discretization(bin_boundaries=[18., 50.]) age_categories = discretize_layer(age) age_categories # In[84]: discretize_layer = tf.keras.layers.Discretization(num_bins=3) discretize_layer.adapt(age) age_categories = discretize_layer(age) age_categories # ## The `CategoryEncoding` Layer # In[85]: onehot_layer = tf.keras.layers.CategoryEncoding(num_tokens=3) onehot_layer(age_categories) # In[86]: two_age_categories = np.array([[1, 0], [2, 2], [2, 0]]) onehot_layer(two_age_categories) # In[87]: onehot_layer = tf.keras.layers.CategoryEncoding(num_tokens=3, output_mode="count") onehot_layer(two_age_categories) # In[88]: onehot_layer = tf.keras.layers.CategoryEncoding(num_tokens=3 + 3) onehot_layer(two_age_categories + [0, 3]) # adds 3 to the second feature # In[89]: # extra code – shows another way to one-hot encode each feature separately onehot_layer = tf.keras.layers.CategoryEncoding(num_tokens=3, output_mode="one_hot") tf.keras.layers.concatenate([onehot_layer(cat) for cat in tf.transpose(two_age_categories)]) # In[90]: # extra code – shows another way to do this, using tf.one_hot() and Flatten tf.keras.layers.Flatten()(tf.one_hot(two_age_categories, depth=3)) # ## The `StringLookup` Layer # In[91]: cities = ["Auckland", "Paris", "Paris", "San Francisco"] str_lookup_layer = tf.keras.layers.StringLookup() str_lookup_layer.adapt(cities) str_lookup_layer([["Paris"], ["Auckland"], ["Auckland"], ["Montreal"]]) # In[92]: str_lookup_layer = tf.keras.layers.StringLookup(num_oov_indices=5) str_lookup_layer.adapt(cities) str_lookup_layer([["Paris"], ["Auckland"], ["Foo"], ["Bar"], ["Baz"]]) # In[93]: str_lookup_layer = tf.keras.layers.StringLookup(output_mode="one_hot") str_lookup_layer.adapt(cities) str_lookup_layer([["Paris"], ["Auckland"], ["Auckland"], ["Montreal"]]) # In[94]: # extra code – an example using the IntegerLookup layer ids = [123, 456, 789] int_lookup_layer = tf.keras.layers.IntegerLookup() int_lookup_layer.adapt(ids) int_lookup_layer([[123], [456], [123], [111]]) # ## The `Hashing` Layer # In[95]: hashing_layer = tf.keras.layers.Hashing(num_bins=10) hashing_layer([["Paris"], ["Tokyo"], ["Auckland"], ["Montreal"]]) # ## Encoding Categorical Features 
Using Embeddings # In[96]: tf.random.set_seed(42) embedding_layer = tf.keras.layers.Embedding(input_dim=5, output_dim=2) embedding_layer(np.array([2, 4, 2])) # **Warning**: there's a bug in Keras 2.8.0 ([issue #16101](https://github.com/keras-team/keras/issues/16101)) which prevents using a `StringLookup` layer as the first layer of a `Sequential` model. Luckily, there's a simple workaround: just add an `InputLayer` as the first layer. # In[97]: tf.random.set_seed(42) ocean_prox = ["<1H OCEAN", "INLAND", "NEAR OCEAN", "NEAR BAY", "ISLAND"] str_lookup_layer = tf.keras.layers.StringLookup() str_lookup_layer.adapt(ocean_prox) lookup_and_embed = tf.keras.Sequential([ tf.keras.layers.InputLayer(input_shape=[], dtype=tf.string), # WORKAROUND str_lookup_layer, tf.keras.layers.Embedding(input_dim=str_lookup_layer.vocabulary_size(), output_dim=2) ]) lookup_and_embed(np.array(["<1H OCEAN", "ISLAND", "<1H OCEAN"])) # In[98]: # extra code – set seeds and generates fake random data # (feel free to load the real dataset if you prefer) tf.random.set_seed(42) np.random.seed(42) X_train_num = np.random.rand(10_000, 8) X_train_cat = np.random.choice(ocean_prox, size=10_000) y_train = np.random.rand(10_000, 1) X_valid_num = np.random.rand(2_000, 8) X_valid_cat = np.random.choice(ocean_prox, size=2_000) y_valid = np.random.rand(2_000, 1) num_input = tf.keras.layers.Input(shape=[8], name="num") cat_input = tf.keras.layers.Input(shape=[], dtype=tf.string, name="cat") cat_embeddings = lookup_and_embed(cat_input) encoded_inputs = tf.keras.layers.concatenate([num_input, cat_embeddings]) outputs = tf.keras.layers.Dense(1)(encoded_inputs) model = tf.keras.models.Model(inputs=[num_input, cat_input], outputs=[outputs]) model.compile(loss="mse", optimizer="sgd") history = model.fit((X_train_num, X_train_cat), y_train, epochs=5, validation_data=((X_valid_num, X_valid_cat), y_valid)) # In[99]: # extra code – shows that the model can also be trained using a tf.data.Dataset train_set = tf.data.Dataset.from_tensor_slices( ((X_train_num, X_train_cat), y_train)).batch(32) valid_set = tf.data.Dataset.from_tensor_slices( ((X_valid_num, X_valid_cat), y_valid)).batch(32) history = model.fit(train_set, epochs=5, validation_data=valid_set) # In[100]: # extra code – shows that the dataset can contain dictionaries train_set = tf.data.Dataset.from_tensor_slices( ({"num": X_train_num, "cat": X_train_cat}, y_train)).batch(32) valid_set = tf.data.Dataset.from_tensor_slices( ({"num": X_valid_num, "cat": X_valid_cat}, y_valid)).batch(32) history = model.fit(train_set, epochs=5, validation_data=valid_set) # ## Text Preprocessing # In[101]: train_data = ["To be", "!(to be)", "That's the question", "Be, be, be."] text_vec_layer = tf.keras.layers.TextVectorization() text_vec_layer.adapt(train_data) text_vec_layer(["Be good!", "Question: be or be?"]) # In[102]: text_vec_layer = tf.keras.layers.TextVectorization(ragged=True) text_vec_layer.adapt(train_data) text_vec_layer(["Be good!", "Question: be or be?"]) # In[103]: text_vec_layer = tf.keras.layers.TextVectorization(output_mode="tf_idf") text_vec_layer.adapt(train_data) text_vec_layer(["Be good!", "Question: be or be?"]) # In[104]: 2 * np.log(1 + 4 / (1 + 3)) # In[105]: 1 * np.log(1 + 4 / (1 + 1)) # # Using Pretrained Language Model Components # In[106]: import tensorflow_hub as hub hub_layer = hub.KerasLayer("https://tfhub.dev/google/nnlm-en-dim50/2") sentence_embeddings = hub_layer(tf.constant(["To be", "Not to be"])) sentence_embeddings.numpy().round(2) # ## Image Preprocessing Layers # 
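# Besides `CenterCrop` (demonstrated below), Keras offers other image preprocessing layers such as `Resizing` and `Rescaling`. The following extra-code sketch (not in the original notebook; the target size and scale are arbitrary illustrative values) chains the two on a fake image batch:

# In[ ]:


# extra code – resize to 224×224, then scale pixel values from [0, 255] to [0, 1]
fake_images = tf.random.uniform([2, 300, 400, 3], maxval=256)  # fake image batch
resize_and_rescale = tf.keras.Sequential([
    tf.keras.layers.Resizing(height=224, width=224),
    tf.keras.layers.Rescaling(scale=1 / 255)
])
resize_and_rescale(fake_images).shape  # (2, 224, 224, 3)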
In[107]: from sklearn.datasets import load_sample_images images = load_sample_images()["images"] crop_image_layer = tf.keras.layers.CenterCrop(height=100, width=100) cropped_images = crop_image_layer(images) # In[108]: plt.imshow(images[0]) plt.axis("off") plt.show() # In[109]: plt.imshow(cropped_images[0] / 255) plt.axis("off") plt.show() # # TensorFlow Datasets # In[110]: import tensorflow_datasets as tfds datasets = tfds.load(name="mnist") mnist_train, mnist_test = datasets["train"], datasets["test"] # In[111]: for batch in mnist_train.shuffle(10_000, seed=42).batch(32).prefetch(1): images = batch["image"] labels = batch["label"] # [...] do something with the images and labels # In[112]: mnist_train = mnist_train.shuffle(10_000, seed=42).batch(32) mnist_train = mnist_train.map(lambda items: (items["image"], items["label"])) mnist_train = mnist_train.prefetch(1) # In[113]: train_set, valid_set, test_set = tfds.load( name="mnist", split=["train[:90%]", "train[90%:]", "test"], as_supervised=True ) train_set = train_set.shuffle(10_000, seed=42).batch(32).prefetch(1) valid_set = valid_set.batch(32).cache() test_set = test_set.batch(32).cache() tf.random.set_seed(42) model = tf.keras.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(10, activation="softmax") ]) model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"]) history = model.fit(train_set, validation_data=valid_set, epochs=5) test_loss, test_accuracy = model.evaluate(test_set) # # Exercises # # ## 1. to 8. # 1. Ingesting a large dataset and preprocessing it efficiently can be a complex engineering challenge. The Data API makes it fairly simple. It offers many features, including loading data from various sources (such as text or binary files), reading data in parallel from multiple sources, transforming it, interleaving the records, shuffling the data, batching it, and prefetching it. # 2. Splitting a large dataset into multiple files makes it possible to shuffle it at a coarse level before shuffling it at a finer level using a shuffling buffer. It also makes it possible to handle huge datasets that do not fit on a single machine. It's also simpler to manipulate thousands of small files rather than one huge file; for example, it's easier to split the data into multiple subsets. Lastly, if the data is split across multiple files spread across multiple servers, it is possible to download several files from different servers simultaneously, which improves the bandwidth usage. # 3. You can use TensorBoard to visualize profiling data: if the GPU is not fully utilized then your input pipeline is likely to be the bottleneck. You can fix it by making sure it reads and preprocesses the data in multiple threads in parallel, and ensuring it prefetches a few batches. If this is insufficient to get your GPU to 100% usage during training, make sure your preprocessing code is optimized. You can also try saving the dataset into multiple TFRecord files, and if necessary perform some of the preprocessing ahead of time so that it does not need to be done on the fly during training (TF Transform can help with this). If necessary, use a machine with more CPU and RAM, and ensure that the GPU bandwidth is large enough. # 4. A TFRecord file is composed of a sequence of arbitrary binary records: you can store absolutely any binary data you want in each record. However, in practice most TFRecord files contain sequences of serialized protocol buffers. 
This makes it possible to benefit from the advantages of protocol buffers, such as the fact that they can be read easily across multiple platforms and languages and their definition can be updated later in a backward-compatible way. # 5. The `Example` protobuf format has the advantage that TensorFlow provides some operations to parse it (the `tf.io.parse`*`example()` functions) without you having to define your own format. It is sufficiently flexible to represent instances in most datasets. However, if it does not cover your use case, you can define your own protocol buffer, compile it using `protoc` (setting the `--descriptor_set_out` and `--include_imports` arguments to export the protobuf descriptor), and use the `tf.io.decode_proto()` function to parse the serialized protobufs (see the "Custom protobuf" section of the notebook for an example). It's more complicated, and it requires deploying the descriptor along with the model, but it can be done. # 6. When using TFRecords, you will generally want to activate compression if the TFRecord files will need to be downloaded by the training script, as compression will make files smaller and thus reduce download time. But if the files are located on the same machine as the training script, it's usually preferable to leave compression off, to avoid wasting CPU for decompression. # 7. Let's look at the pros and cons of each preprocessing option: # * If you preprocess the data when creating the data files, the training script will run faster, since it will not have to perform preprocessing on the fly. In some cases, the preprocessed data will also be much smaller than the original data, so you can save some space and speed up downloads. It may also be helpful to materialize the preprocessed data, for example to inspect it or archive it. However, this approach has a few cons. First, it's not easy to experiment with various preprocessing logics if you need to generate a preprocessed dataset for each variant. Second, if you want to perform data augmentation, you have to materialize many variants of your dataset, which will use a large amount of disk space and take a lot of time to generate. Lastly, the trained model will expect preprocessed data, so you will have to add preprocessing code in your application before it calls the model. There's a risk of code duplication and preprocessing mismatch in this case. # * If the data is preprocessed with the tf.data pipeline, it's much easier to tweak the preprocessing logic and apply data augmentation. Also, tf.data makes it easy to build highly efficient preprocessing pipelines (e.g., with multithreading and prefetching). However, preprocessing the data this way will slow down training. Moreover, each training instance will be preprocessed once per epoch rather than just once if the data was preprocessed when creating the data files. Well, unless the dataset fits in RAM and you can cache it using the dataset's `cache()` method. Lastly, the trained model will still expect preprocessed data. But if you use preprocessing layers in your tf.data pipeline to handle the preprocessing step, then you can just reuse these layers in your final model (adding them after training), to avoid code duplication and preprocessing mismatch. # * If you add preprocessing layers to your model, you will only have to write the preprocessing code once for both training and inference. If your model needs to be deployed to many different platforms, you will not need to write the preprocessing code multiple times. 
Plus, you will not run the risk of using the wrong preprocessing logic for your model, since it will be part of the model. On the downside, preprocessing the data on the fly during training will slow things down, and each instance will be preprocessed once per epoch. # 8. Let's look at how to encode categorical text features and text: # * To encode a categorical feature that has a natural order, such as a movie rating (e.g., "bad," "average," "good"), the simplest option is to use ordinal encoding: sort the categories in their natural order and map each category to its rank (e.g., "bad" maps to 0, "average" maps to 1, and "good" maps to 2). However, most categorical features don't have such a natural order. For example, there's no natural order for professions or countries. In this case, you can use one-hot encoding, or embeddings if there are many categories. With Keras, the `StringLookup` layer can be used for ordinal encoding (using the default `output_mode="int"`), or one-hot encoding (using `output_mode="one_hot"`). It can also perform multi-hot encoding (using `output_mode="multi_hot"`) if you want to encode multiple categorical text features together, assuming they share the same categories and it doesn't matter which feature contributed which category. For trainable embeddings, you must first use the `StringLookup` layer to produce an ordinal encoding, then use the `Embedding` layer. # * For text, the `TextVectorization` layer is easy to use and it can work well for simple tasks, or you can use TF Text for more advanced features. However, you'll often want to use pretrained language models, which you can obtain using tools like TF Hub or Hugging Face's Transformers library. These last two options are discussed in Chapter 16. # ## 9. # ### a. # _Exercise: Load the Fashion MNIST dataset (introduced in Chapter 10); split it into a training set, a validation set, and a test set; shuffle the training set; and save each dataset to multiple TFRecord files. Each record should be a serialized `Example` protobuf with two features: the serialized image (use `tf.io.serialize_tensor()` to serialize each image), and the label. Note: for large images, you could use `tf.io.encode_jpeg()` instead. This would save a lot of space, but it would lose a bit of image quality._ # In[114]: (X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() X_valid, X_train = X_train_full[:5000], X_train_full[5000:] y_valid, y_train = y_train_full[:5000], y_train_full[5000:] # In[115]: tf.random.set_seed(42) train_set = tf.data.Dataset.from_tensor_slices((X_train, y_train)) train_set = train_set.shuffle(len(X_train), seed=42) valid_set = tf.data.Dataset.from_tensor_slices((X_valid, y_valid)) test_set = tf.data.Dataset.from_tensor_slices((X_test, y_test)) # In[116]: def create_example(image, label): image_data = tf.io.serialize_tensor(image) #image_data = tf.io.encode_jpeg(image[..., np.newaxis]) return Example( features=Features( feature={ "image": Feature(bytes_list=BytesList(value=[image_data.numpy()])), "label": Feature(int64_list=Int64List(value=[label])), })) # In[117]: for image, label in valid_set.take(1): print(create_example(image, label)) # The following function saves a given dataset to a set of TFRecord files. The examples are written to the files in a round-robin fashion. To do this, we enumerate all the examples using the `dataset.enumerate()` method, and we compute `index % n_shards` to decide which file to write to. 
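# For example (extra code, purely illustrative), `enumerate()` turns each element into an `(index, element)` pair, and `index % n_shards` cycles through the shard numbers in round-robin order:

# In[ ]:


# extra code – shows dataset.enumerate() and the round-robin shard assignment
n_shards_demo = 3  # illustrative value; write_tfrecords() below defaults to 10 shards
for index, (image, label) in valid_set.take(5).enumerate():
    print("example", index.numpy(), "(label", label.numpy(), ") -> shard",
          (index % n_shards_demo).numpy())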
We use the standard `contextlib.ExitStack` class to make sure that all writers are properly closed whether or not an I/O error occurs while writing. # In[118]: from contextlib import ExitStack def write_tfrecords(name, dataset, n_shards=10): paths = ["{}.tfrecord-{:05d}-of-{:05d}".format(name, index, n_shards) for index in range(n_shards)] with ExitStack() as stack: writers = [stack.enter_context(tf.io.TFRecordWriter(path)) for path in paths] for index, (image, label) in dataset.enumerate(): shard = index % n_shards example = create_example(image, label) writers[shard].write(example.SerializeToString()) return paths # In[119]: train_filepaths = write_tfrecords("my_fashion_mnist.train", train_set) valid_filepaths = write_tfrecords("my_fashion_mnist.valid", valid_set) test_filepaths = write_tfrecords("my_fashion_mnist.test", test_set) # ### b. # _Exercise: Then use tf.data to create an efficient dataset for each set. Finally, use a Keras model to train these datasets, including a preprocessing layer to standardize each input feature. Try to make the input pipeline as efficient as possible, using TensorBoard to visualize profiling data._ # In[120]: def preprocess(tfrecord): feature_descriptions = { "image": tf.io.FixedLenFeature([], tf.string, default_value=""), "label": tf.io.FixedLenFeature([], tf.int64, default_value=-1) } example = tf.io.parse_single_example(tfrecord, feature_descriptions) image = tf.io.parse_tensor(example["image"], out_type=tf.uint8) #image = tf.io.decode_jpeg(example["image"]) image = tf.reshape(image, shape=[28, 28]) return image, example["label"] def mnist_dataset(filepaths, n_read_threads=5, shuffle_buffer_size=None, n_parse_threads=5, batch_size=32, cache=True): dataset = tf.data.TFRecordDataset(filepaths, num_parallel_reads=n_read_threads) if cache: dataset = dataset.cache() if shuffle_buffer_size: dataset = dataset.shuffle(shuffle_buffer_size) dataset = dataset.map(preprocess, num_parallel_calls=n_parse_threads) dataset = dataset.batch(batch_size) return dataset.prefetch(1) # In[121]: train_set = mnist_dataset(train_filepaths, shuffle_buffer_size=60000) valid_set = mnist_dataset(valid_filepaths) test_set = mnist_dataset(test_filepaths) # In[122]: for X, y in train_set.take(1): for i in range(5): plt.subplot(1, 5, i + 1) plt.imshow(X[i].numpy(), cmap="binary") plt.axis("off") plt.title(str(y[i].numpy())) # In[123]: tf.random.set_seed(42) standardization = tf.keras.layers.Normalization(input_shape=[28, 28]) sample_image_batches = train_set.take(100).map(lambda image, label: image) sample_images = np.concatenate(list(sample_image_batches.as_numpy_iterator()), axis=0).astype(np.float32) standardization.adapt(sample_images) model = tf.keras.Sequential([ standardization, tf.keras.layers.Flatten(), tf.keras.layers.Dense(100, activation="relu"), tf.keras.layers.Dense(10, activation="softmax") ]) model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"]) # In[124]: from datetime import datetime logs = Path() / "my_logs" / "run_" / datetime.now().strftime("%Y%m%d_%H%M%S") tensorboard_cb = tf.keras.callbacks.TensorBoard( log_dir=logs, histogram_freq=1, profile_batch=10) model.fit(train_set, epochs=5, validation_data=valid_set, callbacks=[tensorboard_cb]) # In[125]: get_ipython().run_line_magic('load_ext', 'tensorboard') get_ipython().run_line_magic('tensorboard', '--logdir=./my_logs') # ## 10. 
# _Exercise: In this exercise you will download a dataset, split it, create a `tf.data.Dataset` to load it and preprocess it efficiently, then build and train a binary classification model containing an `Embedding` layer._ # # ### a. # _Exercise: Download the [Large Movie Review Dataset](https://homl.info/imdb), which contains 50,000 movies reviews from the [Internet Movie Database](https://imdb.com/). The data is organized in two directories, `train` and `test`, each containing a `pos` subdirectory with 12,500 positive reviews and a `neg` subdirectory with 12,500 negative reviews. Each review is stored in a separate text file. There are other files and folders (including preprocessed bag-of-words), but we will ignore them in this exercise._ # In[126]: from pathlib import Path root = "https://ai.stanford.edu/~amaas/data/sentiment/" filename = "aclImdb_v1.tar.gz" filepath = tf.keras.utils.get_file(filename, root + filename, extract=True, cache_dir=".") path = Path(filepath).with_name("aclImdb") path # Let's define a `tree()` function to view the structure of the `aclImdb` directory: # In[127]: def tree(path, level=0, indent=4, max_files=3): if level == 0: print(f"{path}/") level += 1 sub_paths = sorted(path.iterdir()) sub_dirs = [sub_path for sub_path in sub_paths if sub_path.is_dir()] filepaths = [sub_path for sub_path in sub_paths if not sub_path in sub_dirs] indent_str = " " * indent * level for sub_dir in sub_dirs: print(f"{indent_str}{sub_dir.name}/") tree(sub_dir, level + 1, indent) for filepath in filepaths[:max_files]: print(f"{indent_str}{filepath.name}") if len(filepaths) > max_files: print(f"{indent_str}...") # In[128]: tree(path) # In[129]: def review_paths(dirpath): return [str(path) for path in dirpath.glob("*.txt")] train_pos = review_paths(path / "train" / "pos") train_neg = review_paths(path / "train" / "neg") test_valid_pos = review_paths(path / "test" / "pos") test_valid_neg = review_paths(path / "test" / "neg") len(train_pos), len(train_neg), len(test_valid_pos), len(test_valid_neg) # ### b. # _Exercise: Split the test set into a validation set (15,000) and a test set (10,000)._ # In[130]: np.random.shuffle(test_valid_pos) test_pos = test_valid_pos[:5000] test_neg = test_valid_neg[:5000] valid_pos = test_valid_pos[5000:] valid_neg = test_valid_neg[5000:] # ### c. # _Exercise: Use tf.data to create an efficient dataset for each set._ # Since the dataset fits in memory, we can just load all the data using pure Python code and use `tf.data.Dataset.from_tensor_slices()`: # In[131]: def imdb_dataset(filepaths_positive, filepaths_negative): reviews = [] labels = [] for filepaths, label in ((filepaths_negative, 0), (filepaths_positive, 1)): for filepath in filepaths: with open(filepath) as review_file: reviews.append(review_file.read()) labels.append(label) return tf.data.Dataset.from_tensor_slices( (tf.constant(reviews), tf.constant(labels))) # In[132]: for X, y in imdb_dataset(train_pos, train_neg).take(3): print(X) print(y) print() # In[133]: get_ipython().run_line_magic('timeit', '-r1 for X, y in imdb_dataset(train_pos, train_neg).repeat(10): pass') # It takes about 17 seconds to load the dataset and go through it 10 times. # But let's pretend the dataset does not fit in memory, just to make things more interesting. Luckily, each review fits on just one line (they use `
` to indicate line breaks), so we can read the reviews using a `TextLineDataset`. If they didn't we would have to preprocess the input files (e.g., converting them to TFRecords). For very large datasets, it would make sense to use a tool like Apache Beam for that. # In[134]: def imdb_dataset(filepaths_positive, filepaths_negative, n_read_threads=5): dataset_neg = tf.data.TextLineDataset(filepaths_negative, num_parallel_reads=n_read_threads) dataset_neg = dataset_neg.map(lambda review: (review, 0)) dataset_pos = tf.data.TextLineDataset(filepaths_positive, num_parallel_reads=n_read_threads) dataset_pos = dataset_pos.map(lambda review: (review, 1)) return tf.data.Dataset.concatenate(dataset_pos, dataset_neg) # In[135]: get_ipython().run_line_magic('timeit', '-r1 for X, y in imdb_dataset(train_pos, train_neg).repeat(10): pass') # Now it takes about 33 seconds to go through the dataset 10 times. That's much slower, essentially because the dataset is not cached in RAM, so it must be reloaded at each epoch. If you add `.cache()` just before `.repeat(10)`, you will see that this implementation will be about as fast as the previous one. # In[136]: get_ipython().run_line_magic('timeit', '-r1 for X, y in imdb_dataset(train_pos, train_neg).cache().repeat(10): pass') # In[137]: batch_size = 32 train_set = imdb_dataset(train_pos, train_neg).shuffle(25000, seed=42) train_set = train_set.batch(batch_size).prefetch(1) valid_set = imdb_dataset(valid_pos, valid_neg).batch(batch_size).prefetch(1) test_set = imdb_dataset(test_pos, test_neg).batch(batch_size).prefetch(1) # ### d. # _Exercise: Create a binary classification model, using a `TextVectorization` layer to preprocess each review._ # Let's create a `TextVectorization` layer and adapt it to the full IMDB training set (if the training set did not fit in RAM, we could just use a smaller sample of the training set by calling `train_set.take(500)`). Let's use TF-IDF for now. # In[138]: max_tokens = 1000 sample_reviews = train_set.map(lambda review, label: review) text_vectorization = tf.keras.layers.TextVectorization( max_tokens=max_tokens, output_mode="tf_idf") text_vectorization.adapt(sample_reviews) # Good! Now let's take a look at the first 10 words in the vocabulary: # In[139]: text_vectorization.get_vocabulary()[:10] # These are the most common words in the reviews. # We're ready to train the model! # In[140]: tf.random.set_seed(42) model = tf.keras.Sequential([ text_vectorization, tf.keras.layers.Dense(100, activation="relu"), tf.keras.layers.Dense(1, activation="sigmoid"), ]) model.compile(loss="binary_crossentropy", optimizer="nadam", metrics=["accuracy"]) model.fit(train_set, epochs=5, validation_data=valid_set) # We get about 84.2% accuracy on the validation set after just the first epoch, but after that the model makes no significant progress. We will do better in Chapter 16. For now the point is just to perform efficient preprocessing using `tf.data` and Keras preprocessing layers. # ### e. # _Exercise: Add an `Embedding` layer and compute the mean embedding for each review, multiplied by the square root of the number of words (see Chapter 16). This rescaled mean embedding can then be passed to the rest of your model._ # To compute the mean embedding for each review, and multiply it by the square root of the number of words in that review, we will need a little function. 
For each sentence, this function needs to compute $M \times \sqrt N$, where $M$ is the mean of all the word embeddings in the sentence (excluding padding tokens), and $N$ is the number of words in the sentence (also excluding padding tokens). We can rewrite $M$ as $\dfrac{S}{N}$, where $S$ is the sum of all word embeddings (it does not matter whether or not we include the padding tokens in this sum, since their representation is a zero vector). So the function must return $M \times \sqrt N = \dfrac{S}{N} \times \sqrt N = \dfrac{S}{\sqrt N \times \sqrt N} \times \sqrt N= \dfrac{S}{\sqrt N}$. # In[141]: def compute_mean_embedding(inputs): not_pad = tf.math.count_nonzero(inputs, axis=-1) n_words = tf.math.count_nonzero(not_pad, axis=-1, keepdims=True) sqrt_n_words = tf.math.sqrt(tf.cast(n_words, tf.float32)) return tf.reduce_sum(inputs, axis=1) / sqrt_n_words another_example = tf.constant([[[1., 2., 3.], [4., 5., 0.], [0., 0., 0.]], [[6., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]) compute_mean_embedding(another_example) # Let's check that this is correct. The first review contains 2 words (the last token is a zero vector, which represents the `` token). Let's compute the mean embedding for these 2 words, and multiply the result by the square root of 2: # In[142]: tf.reduce_mean(another_example[0:1, :2], axis=1) * tf.sqrt(2.) # Looks good! Now let's check the second review, which contains just one word (we ignore the two padding tokens): # In[143]: tf.reduce_mean(another_example[1:2, :1], axis=1) * tf.sqrt(1.) # Perfect. Now we're ready to train our final model. It's the same as before, except we replaced TF-IDF with ordinal encoding (`output_mode="int"`) followed by an `Embedding` layer, followed by a `Lambda` layer that calls the `compute_mean_embedding` layer: # In[144]: embedding_size = 20 tf.random.set_seed(42) text_vectorization = tf.keras.layers.TextVectorization( max_tokens=max_tokens, output_mode="int") text_vectorization.adapt(sample_reviews) model = tf.keras.Sequential([ text_vectorization, tf.keras.layers.Embedding(input_dim=max_tokens, output_dim=embedding_size, mask_zero=True), # tokens => zero vectors tf.keras.layers.Lambda(compute_mean_embedding), tf.keras.layers.Dense(100, activation="relu"), tf.keras.layers.Dense(1, activation="sigmoid"), ]) # ### f. # _Exercise: Train the model and see what accuracy you get. Try to optimize your pipelines to make training as fast as possible._ # In[145]: model.compile(loss="binary_crossentropy", optimizer="nadam", metrics=["accuracy"]) model.fit(train_set, epochs=5, validation_data=valid_set) # The model is just marginally better using embeddings (but we will do better in Chapter 16). The pipeline looks fast enough (we optimized it earlier). # ### g. # _Exercise: Use TFDS to load the same dataset more easily: `tfds.load("imdb_reviews")`._ # In[146]: import tensorflow_datasets as tfds datasets = tfds.load(name="imdb_reviews") train_set, test_set = datasets["train"], datasets["test"] # In[147]: for example in train_set.take(1): print(example["text"]) print(example["label"]) # In[ ]:
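# Since `tfds.load()` can also return `(text, label)` pairs directly when you pass `as_supervised=True`, the TFDS version of the dataset can be fed straight to the model trained earlier. Here is a short extra-code sketch (it assumes the `model` and `batch_size` defined above are still in memory, and runs a single epoch just to keep it quick):

# In[ ]:


# extra code – a sketch: plug the TFDS imdb_reviews dataset into the earlier model
datasets = tfds.load(name="imdb_reviews", as_supervised=True)
train_set_tfds = datasets["train"].shuffle(25_000, seed=42)
train_set_tfds = train_set_tfds.batch(batch_size).prefetch(1)
test_set_tfds = datasets["test"].batch(batch_size).prefetch(1)
model.fit(train_set_tfds, epochs=1)
model.evaluate(test_set_tfds)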