import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST with one-hot labels (downloads to MNIST_data/ on first run).
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Each 28x28 image is fed to the RNN as a sequence of 28 rows of 28 pixels.
n_inputs = 28    # features per time step (one image row)
max_time = 28    # number of time steps (number of image rows)
lstm_size = 100  # LSTM hidden units
n_classes = 10   # output classes (digits 0-9)
batch_size = 50  # samples per training batch
# Integer division: number of full batches per epoch. (Was float `/`,
# which forced a redundant int() cast at the training loop.)
n_batch = mnist.train.num_examples // batch_size

# Placeholders: flattened 28*28=784-pixel images and one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Output projection mapping the final LSTM hidden state to class scores.
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))
def LSTM(x, weights, biases):
    """Run a single-layer LSTM over the image rows and return class logits.

    Args:
        x: float32 tensor of shape [batch, 784] (flattened 28x28 images).
        weights: [lstm_size, n_classes] output-projection matrix.
        biases: [n_classes] output bias vector.

    Returns:
        [batch, n_classes] unnormalized logits.

    Note:
        The softmax is intentionally NOT applied here. The loss uses
        tf.nn.softmax_cross_entropy_with_logits, which applies softmax
        internally; softmaxing the output first (as the original code did)
        applies softmax twice, flattening the loss surface and damping
        gradients. argmax-based accuracy is unaffected either way, since
        softmax is monotonic.
    """
    # Reshape to [batch, time, features]: one row of pixels per time step.
    inputs = tf.reshape(x, [-1, max_time, n_inputs])
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    # final_state is an LSTMStateTuple:
    #   final_state[0]: cell state (c) at the last time step
    #   final_state[1]: hidden state (h) at the last time step
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
    # Project the final hidden state to class logits.
    return tf.matmul(final_state[1], weights) + biases
# Assemble the graph: predictions, loss, optimizer, and evaluation ops.
prediction = LSTM(x, weights, biases)
# NOTE(review): softmax_cross_entropy_with_logits applies softmax internally,
# so LSTM() must return RAW logits; if LSTM() returns softmax probabilities,
# softmax is effectively applied twice, which damps gradients and slows
# training (accuracy below is unaffected because argmax is softmax-invariant).
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Per-sample correctness: argmax of prediction vs. argmax of one-hot label.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Mean of the 0/1 correctness vector = classification accuracy.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
# Train for 101 epochs, reporting test-set accuracy every 10th epoch.
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(101):
        # One full pass over the training set, mini-batch by mini-batch.
        for _ in range(int(n_batch)):
            images, labels = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: images, y: labels})
        if epoch % 10 == 0:
            acc = sess.run(accuracy,
                           feed_dict={x: mnist.test.images,
                                      y: mnist.test.labels})
            print("Iterator:", str(epoch), ", Accuracy:", str(acc))