# Mount Google Drive so the CSV datasets under /content/drive are readable (Colab only).
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
# Core imports: numpy/pandas for data handling, matplotlib for plots,
# sklearn for the RMSE metric, tensorflow/keras for the LSTM models.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_squared_error
import tensorflow as tf
# Dataset 1: monthly Australian gas production (columns: Month, GasProd).
aus_path = '/content/drive/MyDrive/Datasets/AusGas.csv'
datasets_ts = pd.read_csv(aus_path)
datasets_ts.head()
Month | GasProd | |
---|---|---|
0 | Jan-1956 | 1709 |
1 | Feb-1956 | 1646 |
2 | Mar-1956 | 1794 |
3 | Apr-1956 | 1878 |
4 | May-1956 | 2173 |
datasets_ts.shape
(476, 2)
# Quick visual check of the raw series before modelling.
datasets_ts.plot()
plt.show()
# Hold out the last 26 rows for testing; the first 450 rows are the training split.
dataset_train = datasets_ts.iloc[:450, :]
dataset_train.shape
(450, 2)
# Convert the pandas column to a numpy array, as tensorflow works on numpy inputs.
training_set = dataset_train.iloc[:,1].values
training_set[:5]
array([1709, 1646, 1794, 1878, 2173])
training_set.shape
(450,)
# Sanity check: last value of the training slice.
training_set[-1]
56977
# LSTM preprocessing: reshape to an (n_samples, 1) column and rescale to [0, 1].
# The scaler is fit on the TRAINING split only (test data is transformed later).
training_set = training_set.reshape(-1,1)
from sklearn.preprocessing import MinMaxScaler
mm = MinMaxScaler()
trn_set_scl = mm.fit_transform(training_set)
print(trn_set_scl[:5])
print(type(trn_set_scl))
print(trn_set_scl.ndim)
print(trn_set_scl.shape)
[[0.00103697] [0. ] [0.00243605] [0.00381868] [0.00867433]] <class 'numpy.ndarray'> 2 (450, 1)
# Build supervised samples: each input is a sliding window of `timesteps`
# scaled values; the target is the value immediately after the window.
timesteps = 10
trn_size = dataset_train.shape[0]
trn_size
450
X_train = []
y_train = []
for i in range(timesteps, trn_size):
# (indentation lost in this transcript: the next two lines are the loop body)
X_train.append(trn_set_scl[i-timesteps:i, 0])
y_train.append(trn_set_scl[i,0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_train[:10]
array([[0.00103697, 0. , 0.00243605, 0.00381868, 0.00867433, 0.01111038, 0.01352997, 0.01267406, 0.00885538, 0.00781842], [0. , 0.00243605, 0.00381868, 0.00867433, 0.01111038, 0.01352997, 0.01267406, 0.00885538, 0.00781842, 0.0052013 ], [0.00243605, 0.00381868, 0.00867433, 0.01111038, 0.01352997, 0.01267406, 0.00885538, 0.00781842, 0.0052013 , 0.00294631], [0.00381868, 0.00867433, 0.01111038, 0.01352997, 0.01267406, 0.00885538, 0.00781842, 0.0052013 , 0.00294631, 0.00172828], [0.00867433, 0.01111038, 0.01352997, 0.01267406, 0.00885538, 0.00781842, 0.0052013 , 0.00294631, 0.00172828, 0.00069131], [0.01111038, 0.01352997, 0.01267406, 0.00885538, 0.00781842, 0.0052013 , 0.00294631, 0.00172828, 0.00069131, 0.00450999], [0.01352997, 0.01267406, 0.00885538, 0.00781842, 0.0052013 , 0.00294631, 0.00172828, 0.00069131, 0.00450999, 0.00485565], [0.01267406, 0.00885538, 0.00781842, 0.0052013 , 0.00294631, 0.00172828, 0.00069131, 0.00450999, 0.00485565, 0.01094578], [0.00885538, 0.00781842, 0.0052013 , 0.00294631, 0.00172828, 0.00069131, 0.00450999, 0.00485565, 0.01094578, 0.01041907], [0.00781842, 0.0052013 , 0.00294631, 0.00172828, 0.00069131, 0.00450999, 0.00485565, 0.01094578, 0.01041907, 0.01632814]])
print(X_train.shape)
(440, 10)
X_train.shape[0]
440
# Keras LSTMs expect 3-D input: (samples, timesteps, features); here features = 1.
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
(440, 10, 1)
import tensorflow as tf
from tensorflow import keras
### INITIALIZE RNN
# Fix TF's global RNG seed so weight initialisation is reproducible.
tf.random.set_seed(seed = 2022)
# Stacked LSTM: two sequence-returning layers feed a final LSTM, then a
# single-unit Dense head for one-step-ahead regression on the scaled series.
model = keras.Sequential([
keras.layers.LSTM(units = 100, return_sequences=True,
input_shape=(X_train.shape[1], X_train.shape[2])),
keras.layers.LSTM(units = 70, return_sequences=True),
keras.layers.LSTM(units = 50),
keras.layers.Dense(1)
])
model.compile(optimizer= 'adam', loss = 'mean_squared_error')
model.fit(X_train, y_train, epochs = 50, batch_size= 25)
Epoch 1/50 18/18 [==============================] - 6s 22ms/step - loss: 0.0495 Epoch 2/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0110 Epoch 3/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0089 Epoch 4/50 18/18 [==============================] - 0s 22ms/step - loss: 0.0074 Epoch 5/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0073 Epoch 6/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0072 Epoch 7/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0073 Epoch 8/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0075 Epoch 9/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0072 Epoch 10/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0077 Epoch 11/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0087 Epoch 12/50 18/18 [==============================] - 0s 22ms/step - loss: 0.0080 Epoch 13/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0074 Epoch 14/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0072 Epoch 15/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0071 Epoch 16/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0076 Epoch 17/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0074 Epoch 18/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0085 Epoch 19/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0073 Epoch 20/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0072 Epoch 21/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0071 Epoch 22/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0076 Epoch 23/50 18/18 [==============================] - 0s 22ms/step - loss: 0.0073 Epoch 24/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0068 Epoch 25/50 18/18 [==============================] - 0s 
21ms/step - loss: 0.0070 Epoch 26/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0066 Epoch 27/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0067 Epoch 28/50 18/18 [==============================] - 0s 22ms/step - loss: 0.0069 Epoch 29/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0069 Epoch 30/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0065 Epoch 31/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0067 Epoch 32/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0064 Epoch 33/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0058 Epoch 34/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0064 Epoch 35/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0056 Epoch 36/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0051 Epoch 37/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0040 Epoch 38/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0033 Epoch 39/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0031 Epoch 40/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0023 Epoch 41/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0018 Epoch 42/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0019 Epoch 43/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0022 Epoch 44/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0017 Epoch 45/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0019 Epoch 46/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0015 Epoch 47/50 18/18 [==============================] - 0s 21ms/step - loss: 0.0013 Epoch 48/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0013 Epoch 49/50 18/18 [==============================] - 0s 20ms/step - loss: 0.0014 Epoch 50/50 18/18 
[==============================] - 0s 21ms/step - loss: 0.0013
<keras.callbacks.History at 0x7fd728dcb1f0>
# The remaining 26 rows form the test split.
dataset_test = datasets_ts.iloc[450:,:]
dataset_test.shape
(26, 2)
test_set = dataset_test.iloc[:, 1].values
test_set
array([56807, 54634, 51367, 48073, 46251, 43736, 39975, 40478, 46895, 46147, 55011, 57799, 62450, 63896, 57784, 53231, 50354, 38410, 41600, 41471, 46287, 49013, 56624, 61739, 66600, 60054])
# Re-join train+test so the first test window can reach back into train history.
dataset_total = pd.concat((dataset_train['GasProd'], dataset_test['GasProd']), axis = 0)
dataset_total
0 1709 1 1646 2 1794 3 1878 4 2173 ... 471 49013 472 56624 473 61739 474 66600 475 60054 Name: GasProd, Length: 476, dtype: int64
dataset_total.shape
(476,)
# take `timesteps` (= 10 here) extra rows from the train tail so every test
# point has a full input window of history
inputs = dataset_total[len(dataset_total) - len(dataset_test) - timesteps:].values
inputs = inputs.reshape(-1, 1)
len(inputs)
print(inputs.shape)
(36, 1)
# Scale test inputs with the scaler fit on the TRAIN data (transform, not fit_transform).
inputs_scl = mm.transform(inputs)
X_test = []
for i in range(timesteps, len(inputs_scl)):
# (loop body; indentation lost in this transcript)
X_test.append(inputs_scl[i-timesteps:i,0])
X_test = np.array(X_test)
# Same (samples, timesteps, features) layout used for training.
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1],1))
X_test.shape
(26, 10, 1)
len(X_test)
26
predicted_values = model.predict(X_test)
predicted_values
1/1 [==============================] - 1s 1s/step
array([[0.9448642 ], [0.9393854 ], [0.84788895], [0.7300566 ], [0.6365161 ], [0.59788865], [0.5923539 ], [0.62368196], [0.65269816], [0.7293409 ], [0.78864384], [0.87034726], [0.91909623], [0.9322104 ], [0.9092065 ], [0.82623047], [0.734792 ], [0.6868149 ], [0.61729074], [0.59133595], [0.6262625 ], [0.7192022 ], [0.8436632 ], [0.96191865], [1.0090406 ], [1.0018649 ]], dtype=float32)
predicted_values.shape
(26, 1)
# Undo the min-max scaling to express predictions in original production units.
predicted_values = mm.inverse_transform( predicted_values)
plt.plot(test_set, color = 'red', label = 'Real Gas Prod')
plt.plot(predicted_values, color = 'blue', label = 'Predicted Gas Prod')
plt.title('Gas Prod Prediction')
plt.xlabel('Time')
plt.ylabel('Gas Production')
plt.legend()
plt.show()
# Test RMSE in original units. (MSE is symmetric in its arguments, though the
# sklearn convention is mean_squared_error(y_true, y_pred).)
np.sqrt(mean_squared_error(predicted_values, test_set))
3873.1714158644168
# Dataset 2: FRED NROUST series (columns: Date, Value) — dates step quarterly.
# NOTE(review): the variable is still named aus_path from the previous section;
# it now points at the FRED file.
aus_path = '/content/drive/MyDrive/Datasets/FRED-NROUST.csv'
datasets_ts = pd.read_csv(aus_path)
datasets_ts.head()
Date | Value | |
---|---|---|
0 | 01-01-1949 | 5.255053 |
1 | 01-04-1949 | 5.261516 |
2 | 01-07-1949 | 5.268013 |
3 | 01-10-1949 | 5.274564 |
4 | 01-01-1950 | 5.281182 |
datasets_ts.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 296 entries, 0 to 295 Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Date 296 non-null object 1 Value 296 non-null float64 dtypes: float64(1), object(1) memory usage: 4.8+ KB
### PREPROCESSING
datasets_ts.shape
(296, 2)
# Train on the first 288 rows; the last 8 rows are held out for testing.
dataset_train = datasets_ts.iloc[:288, :]
dataset_train.shape
(288, 2)
# Convert the pandas column to a numpy array, as tensorflow works on numpy inputs.
training_set = dataset_train.iloc[:,1].values
training_set[:5]
array([5.25505257, 5.26151562, 5.268013 , 5.274564 , 5.281182 ])
training_set.shape
(288,)
training_set[-1]
4.480756
# Column-vector reshape plus a fresh [0, 1] scaling, re-fit on THIS dataset's
# training split (overwrites the previous `mm`).
training_set = training_set.reshape(-1,1)
from sklearn.preprocessing import MinMaxScaler
mm = MinMaxScaler()
trn_set_scl = mm.fit_transform(training_set)
print(trn_set_scl[:5])
print(type(trn_set_scl))
print(trn_set_scl.ndim)
print(trn_set_scl.shape)
[[0.44014855] [0.44382247] [0.4475159 ] [0.45123982] [0.45500182]] <class 'numpy.ndarray'> 2 (288, 1)
# Sliding-window samples as before, but with a shorter history of 5 steps.
X_train = []
y_train = []
timesteps = 5
trn_size = dataset_train.shape[0]
for i in range(timesteps, trn_size):
# (loop body; indentation lost in this transcript)
X_train.append(trn_set_scl[i-timesteps:i, 0])
y_train.append(trn_set_scl[i,0])
X_train, y_train = np.array(X_train), np.array(y_train)
# 3-D (samples, timesteps, features) layout required by Keras LSTMs.
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
(283, 5, 1)
import tensorflow as tf
from tensorflow import keras
### INITIALIZE RNN
# Same architecture and seed as the first model: stacked 100/70/50 LSTMs
# with a single-unit Dense head, trained for 50 epochs.
tf.random.set_seed(seed = 2022)
model = keras.Sequential([
keras.layers.LSTM(units = 100, return_sequences=True,
input_shape=(X_train.shape[1], X_train.shape[2])),
keras.layers.LSTM(units = 70, return_sequences=True),
keras.layers.LSTM(units = 50),
keras.layers.Dense(1)
])
model.compile(optimizer= 'adam', loss = 'mean_squared_error')
model.fit(X_train, y_train, epochs = 50, batch_size= 25)
Epoch 1/50 12/12 [==============================] - 6s 14ms/step - loss: 0.2336 Epoch 2/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0328 Epoch 3/50 12/12 [==============================] - 0s 15ms/step - loss: 0.0233 Epoch 4/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0128 Epoch 5/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0089 Epoch 6/50 12/12 [==============================] - 0s 15ms/step - loss: 0.0057 Epoch 7/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0042 Epoch 8/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0036 Epoch 9/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0039 Epoch 10/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0036 Epoch 11/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0035 Epoch 12/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0035 Epoch 13/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0035 Epoch 14/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0034 Epoch 15/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0034 Epoch 16/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0034 Epoch 17/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0034 Epoch 18/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0034 Epoch 19/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0033 Epoch 20/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0034 Epoch 21/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0034 Epoch 22/50 12/12 [==============================] - 0s 15ms/step - loss: 0.0034 Epoch 23/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0033 Epoch 24/50 12/12 [==============================] - 0s 15ms/step - loss: 0.0033 Epoch 25/50 12/12 [==============================] - 0s 
14ms/step - loss: 0.0035 Epoch 26/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0034 Epoch 27/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0034 Epoch 28/50 12/12 [==============================] - 0s 16ms/step - loss: 0.0035 Epoch 29/50 12/12 [==============================] - 0s 15ms/step - loss: 0.0032 Epoch 30/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0033 Epoch 31/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0034 Epoch 32/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0035 Epoch 33/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0032 Epoch 34/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0032 Epoch 35/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0032 Epoch 36/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0031 Epoch 37/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0032 Epoch 38/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0035 Epoch 39/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0032 Epoch 40/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0032 Epoch 41/50 12/12 [==============================] - 0s 14ms/step - loss: 0.0032 Epoch 42/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0031 Epoch 43/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0034 Epoch 44/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0031 Epoch 45/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0032 Epoch 46/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0031 Epoch 47/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0031 Epoch 48/50 12/12 [==============================] - 0s 15ms/step - loss: 0.0030 Epoch 49/50 12/12 [==============================] - 0s 13ms/step - loss: 0.0031 Epoch 50/50 12/12 
[==============================] - 0s 13ms/step - loss: 0.0030
<keras.callbacks.History at 0x7fd72893ddf0>
# The remaining 8 rows form the test split.
dataset_test = datasets_ts.iloc[288:,:]
dataset_test.shape
(8, 2)
test_set = dataset_test.iloc[:, 1].values
test_set.shape
(8,)
# Re-join train+test so the first test window can reach back into train history.
dataset_total = pd.concat((dataset_train['Value'], dataset_test['Value']), axis = 0)
dataset_total
0 5.255053 1 5.261516 2 5.268013 3 5.274564 4 5.281182 ... 291 4.450651 292 4.445137 293 4.440214 294 4.435784 295 4.431760 Name: Value, Length: 296, dtype: float64
dataset_total.shape
(296,)
# take `timesteps` extra rows from the train tail (= 5 here, NOT 10 as the
# original comment claimed) so every test point has a full input window
inputs = dataset_total[len(dataset_total) - len(dataset_test) - timesteps:].values
inputs = inputs.reshape(-1, 1)
len(inputs)
print(inputs.shape)
(13, 1)
# Scale test inputs with the scaler fit on the TRAIN data (transform only).
inputs_scl = mm.transform(inputs)
X_test = []
for i in range(timesteps, len(inputs_scl)):
# (loop body; indentation lost in this transcript)
X_test.append(inputs_scl[i-timesteps:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1],1))
X_test.shape
(8, 5, 1)
len(X_test)
8
predicted_values = model.predict(X_test)
predicted_values
1/1 [==============================] - 1s 1s/step
array([[ 0.00885722], [ 0.00287885], [-0.00272651], [-0.00786749], [-0.01250179], [-0.01663146], [-0.02029072], [-0.0235314 ]], dtype=float32)
predicted_values.shape
(8, 1)
# Map predictions back to original units with the train-fit scaler.
predicted_values = mm.inverse_transform( predicted_values)
predicted_values
array([[12.390582], [12.372082], [12.354734], [12.338824], [12.324484], [12.311703], [12.300379], [12.29035 ]], dtype=float32)
# NOTE(review): the displayed inverse-transformed values (~12.3-12.4) do not
# match the test range (~4.43-4.45 per the dataset_total tail above), yet the
# RMSE printed below is tiny — outputs look like they came from a stale kernel
# state (e.g. `mm` fit on a different series). Re-run top-to-bottom to confirm.
plt.plot(test_set, color = 'red', label = 'Real Value')
plt.plot(predicted_values, color = 'blue', label = 'Predicted Value')
plt.title('FRED')
plt.xlabel('Time')
plt.ylabel('Value')
plt.legend()
plt.show()
np.sqrt(mean_squared_error(predicted_values, test_set))
0.01645952443885413
# Dataset 3: Bundesbank series BBK01_WT5511 (columns: Date, Value).
BP = '/content/drive/MyDrive/Datasets/BUNDESBANK-BBK01_WT5511.csv'
datasets_ts = pd.read_csv(BP)
datasets_ts.shape
(577, 2)
datasets_ts.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 577 entries, 0 to 576 Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Date 577 non-null object 1 Value 577 non-null float64 dtypes: float64(1), object(1) memory usage: 9.1+ KB
# Plot only the tail of the series (rows 550+), which includes the test region.
datasets_ts[550:].plot()
plt.show()
### PREPROCESSING
datasets_ts.shape
(577, 2)
# First 567 rows train; the last 10 are held out for testing.
dataset_train = datasets_ts.iloc[:567, :]
dataset_train.shape
(567, 2)
# Convert the pandas column to a numpy array, as tensorflow works on numpy inputs.
training_set = dataset_train.iloc[:,1].values
training_set[:5]
array([39.1 , 42. , 40.95, 38.9 , 39.85])
training_set.shape
(567,)
training_set[-1]
1175.0
# Column-vector reshape plus a fresh [0, 1] scaling fit on this training split
# (again overwrites the shared `mm` scaler).
training_set = training_set.reshape(-1,1)
from sklearn.preprocessing import MinMaxScaler
mm = MinMaxScaler()
trn_set_scl = mm.fit_transform(training_set)
print(trn_set_scl[:5])
print(type(trn_set_scl))
print(trn_set_scl.ndim)
print(trn_set_scl.shape)
[[0.00230037] [0.00391955] [0.0033333 ] [0.0021887 ] [0.00271912]] <class 'numpy.ndarray'> 2 (567, 1)
# Sliding-window samples with a 3-step history for this dataset.
X_train = []
y_train = []
timesteps = 3
trn_size = dataset_train.shape[0]
for i in range(timesteps, trn_size):
# (loop body; indentation lost in this transcript)
X_train.append(trn_set_scl[i-timesteps:i, 0])
y_train.append(trn_set_scl[i,0])
X_train, y_train = np.array(X_train), np.array(y_train)
# 3-D (samples, timesteps, features) layout required by Keras LSTMs.
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
(564, 3, 1)
import tensorflow as tf
from tensorflow import keras
### INITIALIZE RNN
# Identical stacked-LSTM architecture (100/70/50 + Dense(1)) and seed,
# retrained from scratch on this dataset.
tf.random.set_seed(seed = 2022)
model = keras.Sequential([
keras.layers.LSTM(units = 100, return_sequences=True,
input_shape=(X_train.shape[1], X_train.shape[2])),
keras.layers.LSTM(units = 70, return_sequences=True),
keras.layers.LSTM(units = 50),
keras.layers.Dense(1)
])
model.compile(optimizer= 'adam', loss = 'mean_squared_error')
model.fit(X_train, y_train, epochs = 50, batch_size= 25)
Epoch 1/50 23/23 [==============================] - 6s 10ms/step - loss: 0.0541 Epoch 2/50 23/23 [==============================] - 0s 11ms/step - loss: 0.0103 Epoch 3/50 23/23 [==============================] - 0s 10ms/step - loss: 0.0020 Epoch 4/50 23/23 [==============================] - 0s 10ms/step - loss: 0.0012 Epoch 5/50 23/23 [==============================] - 0s 11ms/step - loss: 0.0010 Epoch 6/50 23/23 [==============================] - 0s 10ms/step - loss: 0.0010 Epoch 7/50 23/23 [==============================] - 0s 10ms/step - loss: 8.7942e-04 Epoch 8/50 23/23 [==============================] - 0s 10ms/step - loss: 8.8551e-04 Epoch 9/50 23/23 [==============================] - 0s 10ms/step - loss: 8.1862e-04 Epoch 10/50 23/23 [==============================] - 0s 10ms/step - loss: 8.7045e-04 Epoch 11/50 23/23 [==============================] - 0s 11ms/step - loss: 7.5975e-04 Epoch 12/50 23/23 [==============================] - 0s 10ms/step - loss: 7.5902e-04 Epoch 13/50 23/23 [==============================] - 0s 10ms/step - loss: 8.9314e-04 Epoch 14/50 23/23 [==============================] - 0s 11ms/step - loss: 8.3249e-04 Epoch 15/50 23/23 [==============================] - 0s 12ms/step - loss: 8.4278e-04 Epoch 16/50 23/23 [==============================] - 0s 10ms/step - loss: 7.4229e-04 Epoch 17/50 23/23 [==============================] - 0s 10ms/step - loss: 7.8622e-04 Epoch 18/50 23/23 [==============================] - 0s 11ms/step - loss: 8.1194e-04 Epoch 19/50 23/23 [==============================] - 0s 11ms/step - loss: 7.3879e-04 Epoch 20/50 23/23 [==============================] - 0s 9ms/step - loss: 8.0373e-04 Epoch 21/50 23/23 [==============================] - 0s 10ms/step - loss: 7.5231e-04 Epoch 22/50 23/23 [==============================] - 0s 11ms/step - loss: 9.1205e-04 Epoch 23/50 23/23 [==============================] - 0s 10ms/step - loss: 8.8026e-04 Epoch 24/50 23/23 [==============================] - 0s 9ms/step - loss: 
9.3064e-04 Epoch 25/50 23/23 [==============================] - 0s 13ms/step - loss: 7.3644e-04 Epoch 26/50 23/23 [==============================] - 0s 10ms/step - loss: 9.2470e-04 Epoch 27/50 23/23 [==============================] - 0s 11ms/step - loss: 7.2811e-04 Epoch 28/50 23/23 [==============================] - 0s 10ms/step - loss: 7.8623e-04 Epoch 29/50 23/23 [==============================] - 0s 9ms/step - loss: 7.5455e-04 Epoch 30/50 23/23 [==============================] - 0s 10ms/step - loss: 7.1341e-04 Epoch 31/50 23/23 [==============================] - 0s 10ms/step - loss: 7.3496e-04 Epoch 32/50 23/23 [==============================] - 0s 10ms/step - loss: 9.0110e-04 Epoch 33/50 23/23 [==============================] - 0s 11ms/step - loss: 0.0010 Epoch 34/50 23/23 [==============================] - 0s 10ms/step - loss: 7.8195e-04 Epoch 35/50 23/23 [==============================] - 0s 10ms/step - loss: 7.2222e-04 Epoch 36/50 23/23 [==============================] - 0s 11ms/step - loss: 7.3901e-04 Epoch 37/50 23/23 [==============================] - 0s 10ms/step - loss: 8.0781e-04 Epoch 38/50 23/23 [==============================] - 0s 11ms/step - loss: 7.3369e-04 Epoch 39/50 23/23 [==============================] - 0s 10ms/step - loss: 7.9736e-04 Epoch 40/50 23/23 [==============================] - 0s 11ms/step - loss: 7.2592e-04 Epoch 41/50 23/23 [==============================] - 0s 10ms/step - loss: 6.9940e-04 Epoch 42/50 23/23 [==============================] - 0s 10ms/step - loss: 7.8201e-04 Epoch 43/50 23/23 [==============================] - 0s 11ms/step - loss: 6.8979e-04 Epoch 44/50 23/23 [==============================] - 0s 11ms/step - loss: 7.8807e-04 Epoch 45/50 23/23 [==============================] - 0s 10ms/step - loss: 6.9239e-04 Epoch 46/50 23/23 [==============================] - 0s 11ms/step - loss: 7.7157e-04 Epoch 47/50 23/23 [==============================] - 0s 10ms/step - loss: 7.6565e-04 Epoch 48/50 23/23 
[==============================] - 0s 11ms/step - loss: 7.8424e-04 Epoch 49/50 23/23 [==============================] - 0s 15ms/step - loss: 7.5482e-04 Epoch 50/50 23/23 [==============================] - 0s 16ms/step - loss: 6.9723e-04
<keras.callbacks.History at 0x7fd724f540a0>
# The remaining 10 rows form the test split.
dataset_test = datasets_ts.iloc[567:,:]
dataset_test.shape
(10, 2)
test_set = dataset_test.iloc[:, 1].values
test_set.shape
(10,)
# Re-join train+test so the first test window can reach back into train history.
dataset_total = pd.concat((dataset_train['Value'], dataset_test['Value']), axis = 0)
dataset_total
0 39.10 1 42.00 2 40.95 3 38.90 4 39.85 ... 572 1062.25 573 1112.90 574 1234.15 575 1233.60 576 1237.70 Name: Value, Length: 577, dtype: float64
dataset_total.shape
(577,)
# take `timesteps` extra rows from the train tail (= 3 here, NOT 10 as the
# original comment claimed) so every test point has a full input window
inputs = dataset_total[len(dataset_total) - len(dataset_test) - timesteps:].values
inputs = inputs.reshape(-1, 1)
len(inputs)
print(inputs.shape)
(13, 1)
# Scale test inputs with the scaler fit on the TRAIN data (transform only).
inputs_scl = mm.transform(inputs)
X_test = []
for i in range(timesteps, len(inputs_scl)):
# (loop body; indentation lost in this transcript)
X_test.append(inputs_scl[i-timesteps:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1],1))
X_test.shape
(10, 3, 1)
len(X_test)
10
predicted_values = model.predict(X_test)
predicted_values
1/1 [==============================] - 1s 1s/step
array([[0.63930076], [0.6260639 ], [0.6085445 ], [0.5887968 ], [0.60318446], [0.6001811 ], [0.59202194], [0.5700504 ], [0.5893592 ], [0.62479705]], dtype=float32)
predicted_values.shape
(10, 1)
# Map predictions back to original units with the train-fit scaler.
predicted_values = mm.inverse_transform( predicted_values)
predicted_values
array([[1179.9805], [1156.273 ], [1124.8954], [1089.5269], [1115.2954], [1109.9164], [1095.3031], [1055.9517], [1090.5342], [1154.004 ]], dtype=float32)
plt.plot(test_set, color = 'red', label = 'Real Value')
plt.plot(predicted_values, color = 'blue', label = 'Predicted Value')
plt.title('BANK')
plt.xlabel('Time')
plt.ylabel('Value')
plt.legend()
plt.show()
# Test RMSE in original units (sklearn convention is (y_true, y_pred); MSE is symmetric).
np.sqrt(mean_squared_error(predicted_values, test_set))
89.2723631026785