#!/usr/bin/env python
# coding: utf-8

# # Exercise 9.1
# ## Sinus forecasting
# In this task, we will learn to implement RNNs in Keras.
# Therefore:
# - Run the provided script and comment on the output.
# - Vary the number and size of the LSTM layers and compare training time and
#   stability of the performance.

import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras

layers = keras.layers

# ### Generation of data
# We start by creating a signal trace: t = 0-100, f = sin(pi * t)
N = 10000
t = np.linspace(0, 100, N)  # time steps
f = np.sin(np.pi * t)       # signal

# Split into semi-redundant sub-sequences of length = window_size + 1.
# Each row of `data` holds window_size inputs followed by the next value.
window_size = 20
n = N - window_size - 1  # number of possible splits
data = np.stack([f[i: i + window_size + 1] for i in range(n)])

# Split each row into features X (first window_size values) and target y
# (the following value); add a trailing channel axis so X has the
# (samples, time steps, features) shape the LSTM expects.
X, y = np.split(data, [-1], axis=1)
X = X[..., np.newaxis]
print('Example:')
print('X =', X[0, :, 0])
print('y =', y[0, :])

# ### Define and train RNN
# One LSTM layer (16 units) followed by a linear readout to a single value.
z0 = layers.Input(shape=[None, 1])
z = layers.LSTM(16)(z0)
z = layers.Dense(1)(z)
model = keras.models.Model(inputs=z0, outputs=z)
# summary() prints the table itself and returns None, so wrapping it in
# print() would emit a spurious "None" line — call it directly.
model.summary()
model.compile(loss='mse', optimizer='adam')

# Train with LR reduction on plateau and early stopping for stability.
results = model.fit(
    X, y,
    epochs=60,
    batch_size=32,
    verbose=2,
    validation_split=0.1,
    callbacks=[
        keras.callbacks.ReduceLROnPlateau(factor=0.67, patience=3,
                                          verbose=1, min_lr=1E-5),
        keras.callbacks.EarlyStopping(patience=4, verbose=1)])

# Plot training and validation loss on a log scale.
plt.figure(1, (12, 4))
plt.subplot(1, 2, 1)
plt.plot(results.history['loss'])
plt.plot(results.history['val_loss'])
plt.ylabel('loss')
plt.yscale("log")
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper right')
plt.tight_layout()

# ### Evaluate the model
# Investigate the forecasting capabilities of the model.
def predict_next_k(model, window, k=10):
    """Autoregressively forecast the next k steps from a starting window.

    Parameters
    ----------
    model : keras.Model
        Trained model mapping a (1, window_size, 1) input sequence to a
        single next-step value.
    window : np.ndarray
        1-D starting sequence of length window_size.
    k : int
        Number of future steps to predict.

    Returns
    -------
    np.ndarray
        Array of shape (k,) holding the predicted values.
    """
    x = window[np.newaxis, :, np.newaxis]  # initial input, shape (1, window_size, 1)
    y = np.zeros(k)
    for i in range(k):
        # predict() returns an array of shape (1, 1); extract the scalar
        # explicitly — assigning a size-1 array into a scalar slot is
        # deprecated in NumPy and raises an error in NumPy >= 1.25.
        y[i] = model.predict(x, verbose=0)[0, 0]
        # Feed the prediction back: shift the window one step to the left
        # and append the latest prediction at the end.
        x = np.roll(x, -1, axis=1)
        x[:, -1] = y[i]
    return y


def plot_prediction(i0=0, k=500):
    """Predict and plot the next k steps for an input window starting at i0.

    Uses the module-level `model`, `f`, `t`, and `window_size` globals.
    """
    y0 = f[i0: i0 + window_size]       # starting window (model input)
    y1 = predict_next_k(model, y0, k)  # forecast the next k steps
    t0 = t[i0: i0 + window_size]
    t1 = t[i0 + window_size: i0 + window_size + k]
    plt.figure(figsize=(12, 4))
    plt.plot(t, f, label='data')
    plt.plot(t0, y0, color='C1', lw=3, label='prediction')
    plt.plot(t1, y1, color='C1', ls='--')
    plt.xlim(0, 10)
    plt.legend()
    plt.xlabel('$t$')
    plt.ylabel('$f(t)$')


# Forecast from several starting points to judge generalization.
plot_prediction(12)
plot_prediction(85)
plot_prediction(115)