#!/usr/bin/env python
# coding: utf-8

# In[6]:

from datetime import datetime  # pandas.datetime was removed in pandas 1.0

from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras_tqdm import TQDMCallback, TQDMNotebookCallback
import gdax
from time import sleep
from math import sqrt
from matplotlib import pyplot
import numpy


def get_loads(symbol, start=None, end=None, granularity=86400):
    """This was fiddly so I'm not live-coding this one."""
    public_client = gdax.PublicClient()
    if end is None:
        end = pd.to_datetime('now')
    if start is None:
        start = end - pd.Timedelta(seconds=granularity)
    while True:
        response = public_client.get_product_historic_rates(
            product_id=symbol,
            granularity=granularity,
            start=start.isoformat(),
            end=end.isoformat()
        )
        if not response:
            # No more history: returning ends the generator. (Raising
            # StopIteration inside a generator is a RuntimeError under PEP 479.)
            return
        if not isinstance(response, list):
            raise ValueError(response)
        for r in response:
            yield r
        sleep(3)  # stay under the public API's rate limit
        # Candles come back newest-first, so the last row yielded is the
        # oldest; step the request window back from there.
        end = pd.to_datetime(r[0], unit='s')
        start = end - pd.Timedelta(seconds=granularity * len(response))
        print(f"{start}-{end}")


def get_data():
    """Fetch daily BTC-USD candles and return the close prices as a Series."""
    df = pd.DataFrame(
        get_loads('BTC-USD'),
        columns=['time', 'low', 'high', 'open', 'close', 'volume']
    )
    df['time'] = pd.to_datetime(df['time'], unit='s')
    return df.set_index('time')['close']


# date-time parsing function for loading the dataset
# (only used by the commented-out CSV loader below)
def parser(x):
    return datetime.strptime('190' + x, '%Y-%m')


# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
    df = DataFrame(data)
    columns = [df.shift(i) for i in range(1, lag + 1)]
    columns.append(df)
    df = concat(columns, axis=1)
    df.fillna(0, inplace=True)
    return df


# create a differenced series
def difference(dataset, interval=1):
    diff = list()
    for i in range(interval, len(dataset)):
        value = dataset[i] - dataset[i - interval]
        diff.append(value)
    return Series(diff)


# invert differenced value
def inverse_difference(history, yhat, interval=1):
    return yhat + history[-interval]


# scale train and test data to [-1, 1]
def scale(train, test):
    # fit scaler on the training data only
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    # transform train
    train = train.reshape(train.shape[0], train.shape[1])
    train_scaled = scaler.transform(train)
    # transform test
    test = test.reshape(test.shape[0], test.shape[1])
    test_scaled = scaler.transform(test)
    return scaler, train_scaled, test_scaled


# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
    new_row = [x for x in X] + [value]
    array = numpy.array(new_row)
    array = array.reshape(1, len(array))
    inverted = scaler.inverse_transform(array)
    return inverted[0, -1]


# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
    X, y = train[:, 0:-1], train[:, -1]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]),
                   stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    for i in range(nb_epoch):
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False,
                  callbacks=[TQDMNotebookCallback(leave_inner=False, leave_outer=False)])
        # clear the stateful LSTM's internal state between epochs
        model.reset_states()
    return model


# make a one-step forecast
def forecast_lstm(model, batch_size, X):
    X = X.reshape(1, 1, len(X))
    yhat = model.predict(X, batch_size=batch_size)
    return yhat[0, 0]
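# A quick round-trip check (not in the original notebook): difference() and
# inverse_difference() should be exact inverses of each other. The toy series
# below is illustrative only.
_demo = [10.0, 12.0, 11.0, 15.0]
_demo_diff = difference(_demo, 1)  # -> 2.0, -1.0, 4.0
_demo_restored = [inverse_difference(_demo[:i + 1], _demo_diff[i], 1)
                  for i in range(len(_demo_diff))]
assert _demo_restored == _demo[1:]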
# load dataset
# series = read_csv(source_file, header=0,
#                   parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
series = get_data()
split = series.shape[0] // 3

# transform data to be stationary
raw_values = series.values
diff_values = difference(raw_values, 1)

# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values

# split data into train and test-sets
train, test = supervised_values[0:-split], supervised_values[-split:]

# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)

# fit the model
lstm_model = fit_lstm(train_scaled, 1, 3000, 4)
# forecast the entire training dataset to build up state for forecasting
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped, batch_size=1)

# walk-forward validation on the test data
predictions = list()
for i in range(len(test_scaled)):
    # make one-step forecast
    X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
    yhat = forecast_lstm(lstm_model, 1, X)
    # invert scaling
    yhat = invert_scale(scaler, X, yhat)
    # invert differencing
    yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
    # store forecast
    predictions.append(yhat)
    expected = raw_values[len(train) + i + 1]
    # the candles are daily, so each step is a day (not a month)
    print('Day=%d, Predicted=%f, Expected=%f' % (i + 1, yhat, expected))


# In[10]:

# report performance
rmse = sqrt(mean_squared_error(raw_values[-split:], predictions))
print('Test RMSE: %.3f' % rmse)
# line plot of observed vs predicted
get_ipython().run_line_magic('matplotlib', 'nbagg')
pyplot.plot(raw_values[-split:])
pyplot.plot(predictions)
pyplot.show()


# In[11]:

lstm_model.save('lstm_1_3000_4.h5')


# In[ ]:
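# A minimal sketch (not part of the original run) of reloading the saved
# weights and producing one next-day forecast. It reuses `scaler` and
# `raw_values` from the cells above; the file name matches the save() call.
# Note that load_model() returns the stateful LSTM with fresh (zero) state,
# so a faithful forecast would first replay recent history through predict(),
# as the walk-forward cell does before validating.
from keras.models import load_model

reloaded = load_model('lstm_1_3000_4.h5')

# The model's input is the most recent scaled price difference. The second
# column of the row is a placeholder: MinMaxScaler scales per column, so it
# does not affect the input column we keep.
last_diff = raw_values[-1] - raw_values[-2]
X_next = scaler.transform(numpy.array([[last_diff, 0.0]]))[0, 0:-1]
yhat_next = forecast_lstm(reloaded, 1, X_next)
yhat_next = invert_scale(scaler, X_next, yhat_next)
yhat_next = inverse_difference(raw_values, yhat_next, 1)
print('Next-day forecast: %.2f' % yhat_next)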