TensorFlow: the name says it all! Let's dig in.
We'll train one of TensorFlow's premade Estimators, tf.estimator.DNNClassifier, to classify the Iris flower dataset. (The model itself is trained from scratch below; only the Estimator class is premade.)
import pandas as pd
import tensorflow as tf
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',
                    'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']
def maybe_download():
    train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
    test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)

    return train_path, test_path
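# tf.keras.utils.get_file caches downloads (by default under ~/.keras/datasets),
# so repeated runs reuse the local copy rather than re-downloading.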
def load_data(y_name='Species'):
    """Returns the iris dataset as (train_x, train_y), (test_x, test_y)."""
    train_path, test_path = maybe_download()

    train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
    train_x, train_y = train, train.pop(y_name)

    test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)
    test_x, test_y = test, test.pop(y_name)

    return (train_x, train_y), (test_x, test_y)
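# A quick sanity check (a sketch; assumes the downloads succeed). The standard
# splits for this dataset are 120 training rows and 30 test rows:
#   (train_x, train_y), (test_x, test_y) = load_data()
#   print(train_x.shape)  # (120, 4): four numeric feature columns
#   print(test_x.shape)   # (30, 4)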
def train_input_fn(features, labels, batch_size):
    """An input function for training."""
    # Convert the inputs to a Dataset.
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))

    # Shuffle, repeat, and batch the examples.
    dataset = dataset.shuffle(1000).repeat().batch(batch_size)

    # Return the read end of the pipeline.
    return dataset.make_one_shot_iterator().get_next()
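# Note: make_one_shot_iterator() is TF 1.x style. Recent TF 1.x releases also
# accept an input_fn that returns the Dataset itself and let the Estimator
# build the iterator; a minimal sketch, assuming such a version:
#   def train_input_fn(features, labels, batch_size):
#       dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
#       return dataset.shuffle(1000).repeat().batch(batch_size)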
def eval_input_fn(features, labels, batch_size):
    """An input function for evaluation or prediction."""
    features = dict(features)
    if labels is None:
        # No labels, use only features.
        inputs = features
    else:
        inputs = (features, labels)

    # Convert the inputs to a Dataset.
    dataset = tf.data.Dataset.from_tensor_slices(inputs)

    # Batch the examples.
    assert batch_size is not None, "batch_size must not be None"
    dataset = dataset.batch(batch_size)

    # Return the read end of the pipeline.
    return dataset.make_one_shot_iterator().get_next()
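# eval_input_fn serves two modes, both used in main() below:
#   evaluation: eval_input_fn(test_x, test_y, batch_size)    # features + labels
#   prediction: eval_input_fn(predict_x, None, batch_size)   # features only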
# The remainder of this file contains a simple example of a csv parser,
# implemented using the `Dataset` class.

# `tf.decode_csv` sets the types of the outputs to match the examples given in
# the `record_defaults` argument.
CSV_TYPES = [[0.0], [0.0], [0.0], [0.0], [0]]
# def _parse_line(line):
#     # Decode the line into its fields.
#     fields = tf.decode_csv(line, record_defaults=CSV_TYPES)
#
#     # Pack the result into a dictionary.
#     features = dict(zip(CSV_COLUMN_NAMES, fields))
#
#     # Separate the label from the features.
#     label = features.pop('Species')
#     return features, label
#
# def csv_input_fn(csv_path, batch_size):
#     # Create a dataset containing the text lines.
#     dataset = tf.data.TextLineDataset(csv_path).skip(1)
#
#     # Parse each line.
#     dataset = dataset.map(_parse_line)
#
#     # Shuffle, repeat, and batch the examples.
#     dataset = dataset.shuffle(1000).repeat().batch(batch_size)
#
#     # Return the read end of the pipeline.
#     return dataset.make_one_shot_iterator().get_next()
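# If uncommented, csv_input_fn can stand in for train_input_fn, reading the CSV
# file directly instead of going through pandas; a hypothetical usage sketch:
#   train_path, test_path = maybe_download()
#   features_batch, labels_batch = csv_input_fn(train_path, batch_size=100)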
# The remainder is the training script; in the upstream TensorFlow sample it
# lives in a separate file, which is why the `from __future__` imports appear
# here (they must come first in their own file).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import tensorflow as tf

# parser = argparse.ArgumentParser()
# parser.add_argument('--batch_size', default=100, type=int, help='batch size')
# parser.add_argument('--train_steps', default=1000, type=int,
#                     help='number of training steps')
def main(argv):
    # args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build a DNN with two hidden layers of 10 units each.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # The model must choose between 3 classes.
        n_classes=3)
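    # Note: no model_dir is passed, so checkpoints go to a fresh temporary
    # directory (visible in the log below as /tmp/tmpevAffN). To keep the
    # trained model across runs, pass a path such as model_dir='/tmp/iris_model'
    # (a hypothetical location) to DNNClassifier.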
    # Train the model. The lambda wraps train_input_fn so that train() receives
    # the zero-argument callable the Estimator API expects.
    classifier.train(
        input_fn=lambda: train_input_fn(train_x, train_y, 100),
        steps=1000)
    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: eval_input_fn(test_x, test_y, 100))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
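    # eval_result is a dict keyed by metric name; besides 'accuracy' it also
    # carries 'average_loss', 'loss', and 'global_step' (all visible in the
    # "Saving dict for global step" line of the log below).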
    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: eval_input_fn(predict_x,
                                       labels=None,
                                       batch_size=100))

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(SPECIES[class_id], 100 * probability, expec))
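    # Each pred_dict also exposes the full softmax distribution under
    # 'probabilities' and the raw scores under 'logits', e.g.
    # pred_dict['probabilities'] is an array of three class probabilities.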
if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main)
INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpevAffN
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_task_type': 'worker', '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7fd6545063d0>, '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_service': None, '_num_ps_replicas': 0, '_tf_random_seed': None, '_master': '', '_num_worker_replicas': 1, '_task_id': 0, '_log_step_count_steps': 100, '_model_dir': '/tmp/tmpevAffN', '_save_summary_steps': 100}
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into /tmp/tmpevAffN/model.ckpt.
INFO:tensorflow:loss = 139.59, step = 1
INFO:tensorflow:global_step/sec: 236.62
INFO:tensorflow:loss = 22.4486, step = 101 (0.426 sec)
INFO:tensorflow:global_step/sec: 220.134
INFO:tensorflow:loss = 16.4447, step = 201 (0.453 sec)
INFO:tensorflow:global_step/sec: 405.709
INFO:tensorflow:loss = 10.2933, step = 301 (0.245 sec)
INFO:tensorflow:global_step/sec: 408.988
INFO:tensorflow:loss = 8.26349, step = 401 (0.245 sec)
INFO:tensorflow:global_step/sec: 451.818
INFO:tensorflow:loss = 5.78063, step = 501 (0.221 sec)
INFO:tensorflow:global_step/sec: 504.152
INFO:tensorflow:loss = 6.63015, step = 601 (0.198 sec)
INFO:tensorflow:global_step/sec: 529.939
INFO:tensorflow:loss = 4.01163, step = 701 (0.189 sec)
INFO:tensorflow:global_step/sec: 432.051
INFO:tensorflow:loss = 3.45307, step = 801 (0.232 sec)
INFO:tensorflow:global_step/sec: 419.46
INFO:tensorflow:loss = 3.85858, step = 901 (0.238 sec)
INFO:tensorflow:Saving checkpoints for 1000 into /tmp/tmpevAffN/model.ckpt.
INFO:tensorflow:Loss for final step: 8.55838.
INFO:tensorflow:Starting evaluation at 2018-01-30-02:46:59
INFO:tensorflow:Restoring parameters from /tmp/tmpevAffN/model.ckpt-1000
INFO:tensorflow:Finished evaluation at 2018-01-30-02:46:59
INFO:tensorflow:Saving dict for global step 1000: accuracy = 0.966667, average_loss = 0.0587675, global_step = 1000, loss = 1.76303

Test set accuracy: 0.967

WARNING:tensorflow:Input graph does not contain a QueueRunner. That means predict yields forever. This is probably a mistake.
INFO:tensorflow:Restoring parameters from /tmp/tmpevAffN/model.ckpt-1000

Prediction is "Setosa" (99.8%), expected "Setosa"
Prediction is "Versicolor" (99.6%), expected "Versicolor"
Prediction is "Virginica" (96.0%), expected "Virginica"
The trailing SystemExit you may see when running this in a notebook is expected: tf.app.run() calls sys.exit() after main() returns, and IPython reports that as an exception. Run the script from the command line and it exits cleanly.