In [1]:
from statsmodels.tsa.statespace.varmax import VARMAX
import pandas as pd
import numpy as np

%matplotlib inline

import matplotlib
import matplotlib.pyplot as plt

from random import random
In [2]:
# Build two correlated series: v2 is v1 plus independent uniform noise.
cols = ['v1', 'v2']
lst = []
for i in range(100):
    v1 = random()
    v2 = v1 + random()
    row = [v1, v2]
    lst.append(row)

df = pd.DataFrame(lst, columns=cols)
df.info()
df.tail(3)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 100 entries, 0 to 99
Data columns (total 2 columns):
v1    100 non-null float64
v2    100 non-null float64
dtypes: float64(2)
memory usage: 1.6 KB
Out[2]:
          v1        v2
97  0.023646  0.941460
98  0.353059  0.483105
99  0.418206  1.151123
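Before fitting a VARMA model it is worth confirming that both series are stationary. A minimal sketch using statsmodels' Augmented Dickey-Fuller test (this check is not part of the original notebook; the 0.05 threshold mentioned in the comment is only a common convention):

from statsmodels.tsa.stattools import adfuller

# ADF test on each column; a small p-value (e.g. below 0.05) is evidence
# against a unit root, i.e. the series looks stationary.
for col in cols:
    stat, pvalue = adfuller(df[col])[:2]
    print(f'{col}: ADF statistic = {stat:.3f}, p-value = {pvalue:.3f}')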
In [3]:
model = VARMAX(df.values.tolist(), order=(1, 1))
model_fit = model.fit(disp=False)

# The one-step-ahead forecast comes back at index 0; move it to index 100,
# copy the last observation to index 99 so the plot below connects the
# forecast to the observed data, and blank the leftover row at index 0.
y = pd.DataFrame(model_fit.forecast(), columns=cols)
y.loc[100] = y.loc[0]
y.loc[99] = df.loc[99]
y.loc[0] = [np.nan, np.nan]
y.tail(3)
/anaconda3/lib/python3.6/site-packages/statsmodels/tsa/statespace/varmax.py:152: EstimationWarning: Estimation of VARMA(p,q) models is not generically robust, due especially to identification issues.
  EstimationWarning)
/anaconda3/lib/python3.6/site-packages/statsmodels/base/model.py:508: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  "Check mle_retvals", ConvergenceWarning)
/anaconda3/lib/python3.6/site-packages/statsmodels/tsa/statespace/varmax.py:152: EstimationWarning: Estimation of VARMA(p,q) models is not generically robust, due especially to identification issues.
  EstimationWarning)
Out[3]:
           v1        v2
0         NaN       NaN
100  0.518711  0.961162
99   0.418206  1.151123
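The ConvergenceWarning above can often be addressed by giving the optimizer a larger iteration budget, and get_forecast returns confidence intervals alongside the point forecast. A rough sketch under those assumptions (the names refit and fc, and the 5-step horizon, are arbitrary choices, not part of the original notebook):

# Refit with more iterations to address the ConvergenceWarning, then ask
# for a 5-step forecast with confidence intervals.
refit = model.fit(maxiter=1000, disp=False)
fc = refit.get_forecast(steps=5)
print(fc.predicted_mean)
print(fc.conf_int())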
In [5]:
plt.plot(df['v1'], 'b')
plt.plot(df['v2'], 'g')

plt.plot(y.tail(2), 'r')

plt.show()
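The figure is easier to read with labels and a legend; a small, purely cosmetic variant of the plotting cell (the label text is illustrative):

plt.plot(df['v1'], 'b', label='v1 observed')
plt.plot(df['v2'], 'g', label='v2 observed')

# Plot the forecast columns separately so each gets its own legend entry.
plt.plot(y.tail(2)['v1'], 'r', label='v1 forecast')
plt.plot(y.tail(2)['v2'], 'r--', label='v2 forecast')

plt.legend()
plt.show()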
In [10]:
model_fit.summary()
Out[10]:
                           Statespace Model Results
==============================================================================
Dep. Variable:          ['y1', 'y2']   No. Observations:                  100
Model:                    VARMA(1,1)   Log Likelihood                 -32.332
                         + intercept   AIC                             90.664
Date:               Sat, 01 Sep 2018   BIC                            124.531
Time:                       21:25:31   HQIC                           104.370
Sample:                            0
                               - 100
Covariance Type:                 opg
==============================================================================
Ljung-Box (Q):          33.31, 37.15   Jarque-Bera (JB):           3.53, 6.67
Prob(Q):                  0.76, 0.60   Prob(JB):                   0.17, 0.04
Heteroskedasticity (H):   1.15, 1.03   Skew:                      0.00, -0.05
Prob(H) (two-sided):      0.70, 0.94   Kurtosis:                  2.08, 1.74
Results for equation y1
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const          0.2345      0.662      0.354      0.723      -1.064       1.533
L1.y1         -0.6849      1.119     -0.612      0.540      -2.878       1.508
L1.y2          0.5941      0.800      0.743      0.458      -0.974       2.162
L1.e(y1)       0.6219      1.127      0.552      0.581      -1.586       2.830
L1.e(y2)      -0.6160      0.808     -0.762      0.446      -2.200       0.968

Results for equation y2
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const          1.1360      0.826      1.376      0.169      -0.483       2.755
L1.y1         -0.3137      1.411     -0.222      0.824      -3.079       2.452
L1.y2         -0.0052      1.023     -0.005      0.996      -2.010       2.000
L1.e(y1)       0.3306      1.417      0.233      0.816      -2.446       3.107
L1.e(y2)      -0.1307      1.005     -0.130      0.897      -2.099       1.838

Error covariance matrix
==============================================================================
                    coef    std err          z      P>|z|      [0.025      0.975]
---------------------------------------------------------------------------------
sqrt.var.y1       0.2661      0.028      9.669      0.000       0.212       0.320
sqrt.cov.y1.y2    0.2827      0.049      5.755      0.000       0.186       0.379
sqrt.var.y2       0.3037      0.039      7.855      0.000       0.228       0.380


Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
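None of the VARMA(1,1) coefficients above are individually significant, so the information criteria in the summary are a more useful guide to model order here. A rough sketch that compares a few candidate (p, q) orders by AIC (the candidate list is arbitrary, and warnings are silenced only to keep the output short):

import warnings

# Lower AIC is better; order=(p, 0) is a pure VAR(p) model.
for order in [(1, 0), (2, 0), (1, 1)]:
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        res = VARMAX(df.values.tolist(), order=order).fit(disp=False)
    print(order, round(res.aic, 3))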
In [ ]: