import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, SimpleRNN
import matplotlib.pyplot as plt
%matplotlib inline
Using TensorFlow backend.
# Build a noisy sine-wave series and split it into train / test segments.
t = np.arange(0, 1500)
# Sine signal plus uniform noise drawn from [0, 2).
x = np.sin(0.02 * t) + np.random.rand(1500) * 2
plt.plot(x)
plt.show()
# First 1000 points train the model; the remaining 500 are held out.
train, test = x[:1000], x[1000:]
x = [1,2,3,4,5,6,7,8,9,10]
For step = 1, each input x and its prediction target y become:
x y
1 2
2 3
3 4
4 5
..
9 10
For step = 3, x and y contain:
x y
1,2,3 4
2,3,4 5
3,4,5 6
4,5,6 7
...
7,8,9 10
# Window length: each training sample is `step` consecutive past observations.
step = 10
# convert into dataset data and label
def convertToDataset(data, step):
    """Slice a 1-D series into overlapping windows and next-step targets.

    Parameters
    ----------
    data : array-like of shape (n,)
        The time series to window.
    step : int
        Window length — how many past samples predict the next one.

    Returns
    -------
    X : np.ndarray of shape (n - step, step)
        Sliding windows ``data[i:i + step]``.
    Y : np.ndarray of shape (n - step,)
        The sample immediately following each window, ``data[i + step]``.

    Raises
    ------
    ValueError
        If ``step`` is not positive, or the series is too short to yield
        at least one window (previously this silently returned empty
        arrays that crashed later reshapes with confusing errors).
    """
    data = np.asarray(data)
    if step <= 0:
        raise ValueError("step must be a positive integer")
    if len(data) <= step:
        raise ValueError("need more than `step` samples to build a window")
    X, Y = [], []
    for i in range(len(data) - step):
        end = i + step
        X.append(data[i:end])
        Y.append(data[end])
    return np.array(X), np.array(Y)
# Window both segments into supervised (inputs, next-value) pairs.
trainX, trainY = convertToDataset(train, step)
testX, testY = convertToDataset(test, step)
# Sanity-check: (n - step, step) for each segment.
print(trainX.shape)
print(testX.shape)
(990, 10) (490, 10)
Keras recurrent layers expect 3-D input of shape (NumberOfSequences, TimeSteps, ElementsPerStep).
# Append a trailing feature axis: (samples, timesteps) -> (samples, timesteps, 1),
# the 3-D layout Keras RNN layers require.
trainX = np.expand_dims(trainX, axis=-1)
testX = np.expand_dims(testX, axis=-1)
print(trainX.shape)
print(testX.shape)
(990, 10, 1) (490, 10, 1)
# Single-layer recurrent regressor: 64 tanh units feeding one linear output.
model = Sequential()
model.add(SimpleRNN(units=64, activation="tanh"))
model.add(Dense(1))
model.compile(optimizer='rmsprop', loss='mean_squared_error')
# NOTE(review): no input_shape is given, so the input length is frozen the
# first time fit() sees data — afterwards the model only accepts sequences
# of that length (this matters for the error demonstrated further down).
history = model.fit(trainX, trainY, batch_size=16, epochs=100, verbose=2)
Epoch 1/100 - 7s - loss: 0.7639 Epoch 2/100 - 1s - loss: 0.4325 Epoch 3/100 - 1s - loss: 0.4140 Epoch 4/100 - 1s - loss: 0.4124 Epoch 5/100 - 1s - loss: 0.4048 Epoch 6/100 - 1s - loss: 0.4024 Epoch 7/100 - 1s - loss: 0.3989 Epoch 8/100 - 1s - loss: 0.3947 Epoch 9/100 - 1s - loss: 0.3917 Epoch 10/100 - 1s - loss: 0.3912 Epoch 11/100 - 1s - loss: 0.3889 Epoch 12/100 - 1s - loss: 0.3908 Epoch 13/100 - 1s - loss: 0.3847 Epoch 14/100 - 1s - loss: 0.3862 Epoch 15/100 - 1s - loss: 0.3829 Epoch 16/100 - 1s - loss: 0.3755 Epoch 17/100 - 1s - loss: 0.3829 Epoch 18/100 - 1s - loss: 0.3742 Epoch 19/100 - 1s - loss: 0.3674 Epoch 20/100 - 1s - loss: 0.3691 Epoch 21/100 - 1s - loss: 0.3724 Epoch 22/100 - 1s - loss: 0.3703 Epoch 23/100 - 1s - loss: 0.3643 Epoch 24/100 - 1s - loss: 0.3625 Epoch 25/100 - 1s - loss: 0.3631 Epoch 26/100 - 1s - loss: 0.3631 Epoch 27/100 - 1s - loss: 0.3628 Epoch 28/100 - 1s - loss: 0.3552 Epoch 29/100 - 1s - loss: 0.3560 Epoch 30/100 - 1s - loss: 0.3585 Epoch 31/100 - 1s - loss: 0.3478 Epoch 32/100 - 1s - loss: 0.3495 Epoch 33/100 - 1s - loss: 0.3499 Epoch 34/100 - 1s - loss: 0.3411 Epoch 35/100 - 1s - loss: 0.3415 Epoch 36/100 - 1s - loss: 0.3381 Epoch 37/100 - 1s - loss: 0.3396 Epoch 38/100 - 1s - loss: 0.3311 Epoch 39/100 - 1s - loss: 0.3328 Epoch 40/100 - 1s - loss: 0.3291 Epoch 41/100 - 1s - loss: 0.3262 Epoch 42/100 - 1s - loss: 0.3210 Epoch 43/100 - 1s - loss: 0.3249 Epoch 44/100 - 1s - loss: 0.3210 Epoch 45/100 - 1s - loss: 0.3164 Epoch 46/100 - 1s - loss: 0.3159 Epoch 47/100 - 1s - loss: 0.3071 Epoch 48/100 - 1s - loss: 0.3031 Epoch 49/100 - 1s - loss: 0.3018 Epoch 50/100 - 1s - loss: 0.2984 Epoch 51/100 - 1s - loss: 0.3010 Epoch 52/100 - 1s - loss: 0.2901 Epoch 53/100 - 1s - loss: 0.2881 Epoch 54/100 - 1s - loss: 0.2847 Epoch 55/100 - 1s - loss: 0.2789 Epoch 56/100 - 1s - loss: 0.2759 Epoch 57/100 - 1s - loss: 0.2741 Epoch 58/100 - 1s - loss: 0.2668 Epoch 59/100 - 1s - loss: 0.2603 Epoch 60/100 - 1s - loss: 0.2589 Epoch 61/100 - 1s - loss: 
0.2579 Epoch 62/100 - 1s - loss: 0.2502 Epoch 63/100 - 1s - loss: 0.2473 Epoch 64/100 - 1s - loss: 0.2426 Epoch 65/100 - 1s - loss: 0.2319 Epoch 66/100 - 1s - loss: 0.2306 Epoch 67/100 - 1s - loss: 0.2281 Epoch 68/100 - 1s - loss: 0.2242 Epoch 69/100 - 1s - loss: 0.2205 Epoch 70/100 - 1s - loss: 0.2181 Epoch 71/100 - 1s - loss: 0.2095 Epoch 72/100 - 1s - loss: 0.2083 Epoch 73/100 - 1s - loss: 0.2023 Epoch 74/100 - 1s - loss: 0.1980 Epoch 75/100 - 1s - loss: 0.1979 Epoch 76/100 - 1s - loss: 0.1899 Epoch 77/100 - 1s - loss: 0.1857 Epoch 78/100 - 1s - loss: 0.1832 Epoch 79/100 - 1s - loss: 0.1764 Epoch 80/100 - 1s - loss: 0.1780 Epoch 81/100 - 1s - loss: 0.1701 Epoch 82/100 - 1s - loss: 0.1676 Epoch 83/100 - 1s - loss: 0.1635 Epoch 84/100 - 1s - loss: 0.1614 Epoch 85/100 - 1s - loss: 0.1601 Epoch 86/100 - 1s - loss: 0.1538 Epoch 87/100 - 1s - loss: 0.1492 Epoch 88/100 - 1s - loss: 0.1477 Epoch 89/100 - 1s - loss: 0.1400 Epoch 90/100 - 1s - loss: 0.1365 Epoch 91/100 - 1s - loss: 0.1354 Epoch 92/100 - 1s - loss: 0.1334 Epoch 93/100 - 1s - loss: 0.1303 Epoch 94/100 - 1s - loss: 0.1269 Epoch 95/100 - 1s - loss: 0.1231 Epoch 96/100 - 1s - loss: 0.1245 Epoch 97/100 - 1s - loss: 0.1151 Epoch 98/100 - 1s - loss: 0.1133 Epoch 99/100 - 1s - loss: 0.1130 Epoch 100/100 - 1s - loss: 0.1077
import matplotlib.pyplot as plt
%matplotlib inline
loss = history.history['loss']
plt.plot(loss, label='Training loss')
plt.legend()
plt.show()
# Final mean-squared error on the training windows (verbose=0 suppresses the bar).
trainScore = model.evaluate(trainX, trainY, verbose=0)
print(trainScore)
0.08597392486502425
# Predict over both segments and overlay the predictions on the raw series.
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
predicted = np.vstack((trainPredict, testPredict))
plt.plot(x)
plt.plot(predicted)
# NOTE(review): each prediction targets index i + step, so `predicted` is
# shifted `step` samples relative to `x` — confirm before reading the overlay.
# The red line marks the train/test boundary.
plt.axvline(len(trainX), c="r")
plt.show()
# Re-window the test segment with a LONGER window (50 steps instead of 10)
# to probe whether the trained model accepts a different sequence length.
testX,testY =convertToDataset(test,50)
testX = np.reshape(testX, (testX.shape[0],testX.shape[1], 1))
testX.shape
(450, 50, 1)
# Raises ValueError: the model's input was fixed at shape (10, 1) when it was
# first fitted, so it cannot consume length-50 sequences (traceback follows).
testPredict= model.predict(testX)
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-22-f283b2fe1718> in <module> 1 #trainPredict = model.predict(trainX) ----> 2 testPredict= model.predict(testX) 3 #predicted=np.concatenate((trainPredict,testPredict),axis=0) ~\Miniconda3\envs\tensorflow\lib\site-packages\keras\engine\training.py in predict(self, x, batch_size, verbose, steps) 1147 'argument.') 1148 # Validate user data. -> 1149 x, _, _ = self._standardize_user_data(x) 1150 if self.stateful: 1151 if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0: ~\Miniconda3\envs\tensorflow\lib\site-packages\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size) 749 feed_input_shapes, 750 check_batch_axis=False, # Don't enforce the batch size. --> 751 exception_prefix='input') 752 753 if y is not None: ~\Miniconda3\envs\tensorflow\lib\site-packages\keras\engine\training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix) 136 ': expected ' + names[i] + ' to have shape ' + 137 str(shape) + ' but got array with shape ' + --> 138 str(data_shape)) 139 return data 140 ValueError: Error when checking input: expected sequential_1_input to have shape (10, 1) but got array with shape (50, 1)
# Inspect the model's input tensor: its static shape (?, 10, 1) confirms the
# 10-step window baked in at build time.
model.input
<tf.Tensor 'sequential_1_input:0' shape=(?, 10, 1) dtype=float32>