# State space model in Stan, PyMC and Edward

In [4]:
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
%matplotlib inline
import statsmodels.api as sm
import numpy as np

/Users/apple/Library/Python/3.6/lib/python/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.
from pandas.core import datetools


## Nile dataset¶

In [5]:
# Load the annual Nile river flow dataset (volume per year, indexed by 'year')
# straight from the statsmodels GitHub repository.
data=pd.read_csv("https://raw.githubusercontent.com/statsmodels/statsmodels/master/statsmodels/datasets/nile/nile.csv",index_col='year',)

In [15]:
# Preview the first rows to confirm the load (one 'volume' column, year index).
data.head()

Out[15]:
volume
year
1871 1120
1872 1160
1873 963
1874 1210
1875 1160
In [16]:
# Quick look at the raw series before modelling.
data.plot()

Out[16]:
<matplotlib.axes._subplots.AxesSubplot at 0x115bbc320>
In [8]:
# Observation series and sampler settings shared by all three frameworks.
# (The original cell assigned T = 1000 twice and computed len(y) twice.)
y = data["volume"]   # observed annual flow volumes (pandas Series)
N = len(y)           # number of observations
n = N                # lowercase alias, used as the Stan data-dict key below
T = 1000             # number of MCMC iterations / posterior draws

In [25]:
# Reference fit: statsmodels' Kalman-filter based local-level model, used as a
# baseline to compare the MCMC results against.
model_sm= sm.tsa.UnobservedComponents(y.values, 'local level')
result_sm = model_sm.fit()
fig=result_sm.plot_components()


# Stan¶

In [26]:
import pystan

In [27]:
# Record the PyStan version used for this run (output shows 2.17.1.0).
print(pystan.__version__)

2.17.1.0

In [28]:
# Local-level (random walk + observation noise) model in Stan:
#   mu[1] ~ N(muZero, sqrt(sigmaW))        initial state
#   mu[i] ~ N(mu[i-1], sqrt(sigmaW))       state evolution
#   y[i]  ~ N(mu[i],   sqrt(sigmaV))       observation equation
# NOTE(review): sigmaV / sigmaW are variances (hence sqrt() in the sd slot of
# normal()) and carry no explicit prior, so Stan's implicit flat prior on
# [0, inf) applies to both.
model = """
data {
int n;
vector[n] y;
}
parameters {
real muZero;
vector[n] mu;
real<lower=0> sigmaV;
real<lower=0> sigmaW;
}
model {
mu[1] ~ normal(muZero, sqrt(sigmaW));
for(i in 2:n)
mu[i] ~ normal(mu[i-1], sqrt(sigmaW));

for(i in 1:n) {
y[i] ~ normal(mu[i], sqrt(sigmaV));
}
}
"""
# Compile and sample: 3 chains, T iterations each (first half is warmup).
fit = pystan.stan(model_code=model, data={'n': N, 'y': y}, iter=T, chains=3)
# Bare expression so the notebook renders the posterior summary table.
fit

INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_19c7f6adbb1d4d8f65e4ad86cc022f5a NOW.

Out[28]:
Inference for Stan model: anon_model_19c7f6adbb1d4d8f65e4ad86cc022f5a.
3 chains, each with iter=1000; warmup=500; thin=1;
post-warmup draws per chain=500, total post-warmup draws=1500.

mean se_mean     sd   2.5%    25%    50%    75%  97.5%  n_eff   Rhat
muZero 1099.7   16.24  93.31 914.72 1041.9 1102.1 1161.6 1279.8     33   1.06
mu[0]  1097.9   16.05  78.62 916.47 1051.9 1101.1 1150.4 1243.5     24   1.09
mu[1]  1097.1   16.05  71.78 918.33 1057.3 1101.8 1146.6 1226.1     20   1.11
mu[2]  1087.0   14.39  67.51 917.35 1050.1 1090.6 1132.8 1205.3     22    1.1
mu[3]  1100.7   17.42  67.47 918.24 1065.6 1107.5 1143.7 1215.3     15   1.15
mu[4]  1099.7    17.2  66.61 918.07 1066.1 1105.6 1140.6 1218.4     15   1.15
mu[5]  1091.6   16.31  65.25 917.61 1056.3 1097.4 1132.5 1205.2     16   1.14
mu[6]  1074.8   13.24  64.85 916.34 1040.1 1080.1 1115.3 1197.9     24   1.09
mu[7]  1107.0   19.09  68.85 916.74 1073.6 1111.2 1151.6 1228.9     13   1.18
mu[8]  1118.1   21.03  75.81 916.66 1080.7 1121.9 1163.5 1264.3     13   1.19
mu[9]  1091.5   16.63  66.54 916.95 1056.1 1095.4 1135.3 1212.4     16   1.15
mu[10] 1060.5   11.42  60.43 916.06 1025.1 1065.0 1101.1 1167.6     28   1.09
mu[11] 1042.0    8.63   59.8 914.07 1006.1 1045.4 1083.9 1152.2     48   1.06
mu[12] 1041.2    8.71  60.35 913.29 1004.2 1043.6 1083.3 1151.7     48   1.06
mu[13] 1030.9     7.5  58.12 911.56 993.31 1033.8 1071.0 1135.8     60   1.04
mu[14] 1026.3     7.1  57.67 910.71 992.01 1027.3 1064.9 1138.0     66   1.05
mu[15] 1023.5    7.31   58.0 910.48 986.04 1024.8 1061.8 1136.6     63   1.05
mu[16] 1032.0    8.27  60.24 910.23 992.98 1033.1 1071.0 1156.4     53   1.06
mu[17] 1013.9    6.06  61.17 891.85 972.06 1018.0 1054.9 1127.8    102   1.03
mu[18] 1030.3    8.36  60.27 908.98  990.9 1032.3 1071.7 1148.5     52   1.05
mu[19] 1062.6   14.74  64.23 914.46 1024.7 1066.0 1106.5 1181.9     19   1.12
mu[20] 1080.4   19.95  69.12 914.68 1041.9 1079.8 1126.0 1208.8     12   1.16
mu[21] 1100.0   23.94   75.7 914.87 1059.2 1103.3 1150.0 1233.3     10    1.2
mu[22] 1106.9   24.82  78.49 913.87 1064.5 1110.7 1159.3 1244.5     10   1.19
mu[23] 1114.4   27.82  83.45 913.27 1068.4 1121.5 1171.3 1264.2      9   1.21
mu[24] 1107.0   26.33  83.25 913.32 1060.8 1110.9 1162.2 1255.8     10    1.2
mu[25] 1079.5   22.74  75.43 912.26 1031.8 1084.8 1128.1 1224.4     11   1.17
mu[26] 1033.5   13.95  60.81 910.27  994.6 1033.0 1073.0 1155.5     19   1.13
mu[27] 994.23    8.57  55.52 898.42 953.26 992.92 1031.5 1108.8     42   1.05
mu[28]  937.4    3.19  55.52 821.25 903.94 937.48 973.86 1045.9    303    1.0
mu[29] 907.66    3.89  55.74 788.78 872.77 910.73 944.89 1007.4    205   1.01
mu[30] 885.99    4.78  55.38  767.1 850.26 891.08  924.2 983.32    134   1.03
mu[31] 864.32    6.95  58.98 742.45 825.17 869.32 907.76 963.79     72   1.05
mu[32] 867.88    5.58  54.13 752.44 832.76  871.4 907.11  963.9     94   1.04
mu[33] 856.93    8.73  55.89 738.65 818.13 860.59 899.63 955.05     41   1.05
mu[34] 847.77     9.0  56.94 734.34 809.35 851.52 889.52 948.65     40   1.05
mu[35] 860.98    5.67  53.18 746.26 828.18 862.94 898.54 956.82     88   1.03
mu[36] 862.49    5.61  53.54 752.64 827.51 864.23  901.6 957.58     91   1.03
mu[37] 890.15    2.39  52.63  788.7 855.24 889.97  922.8 996.07    486    1.0
mu[38] 896.74    3.75  55.45 792.53 860.71 895.94 929.29 1014.8    219   1.01
mu[39] 877.43    3.38   51.7 774.53 843.09 877.98 909.61 987.61    234   1.02
mu[40] 843.95    7.23  53.14 735.11 808.56 844.82 883.43 938.15     54   1.05
mu[41]  811.5    13.0   61.0 685.76  772.7 813.23 854.52 917.01     22    1.1
mu[42]  787.7   16.95  71.89 638.05 740.58 790.52 837.27 914.13     18   1.12
mu[43] 816.95   11.44  58.35 696.64 780.93 818.71 856.19 918.22     26   1.08
mu[44] 840.43    7.42  52.98 735.91 803.66  842.0 878.46 939.24     51   1.04
mu[45] 884.66    3.56  55.99 777.34 846.03 885.28 916.89 1006.4    247    1.0
mu[46]  892.2    4.04  56.25 792.44 853.78 889.48 925.34 1017.1    194   1.01
mu[47] 864.89    2.27  51.08 764.92  830.0 863.89 899.22 970.08    506    1.0
mu[48] 845.51    4.82  51.49  746.1  809.6 846.04 882.65 941.57    114   1.02
mu[49] 838.79    5.86  52.42 735.98  802.9 840.78 874.06 935.02     80   1.03
mu[50] 832.37    7.24  53.66 722.51 796.32 833.19 870.61  930.1     55   1.04
mu[51] 836.14    5.43  53.25 726.88 800.49 837.36 875.39 929.13     96   1.03
mu[52] 836.43     5.2  54.03 718.63 801.81 837.72 873.25  934.5    108   1.02
mu[53] 830.13    6.46  55.96  714.9 792.58 832.37 867.69 932.28     75   1.03
mu[54]  816.9    8.97  58.16 694.78  779.7 818.49 856.27 924.91     42   1.05
mu[55] 822.59    8.03  56.18 702.65 784.99 827.31 859.93 924.92     49   1.05
mu[56] 822.91    6.68  55.49 709.32 785.05  825.7 861.34 922.45     69   1.05
mu[57] 837.26    3.55   53.3 727.15 803.35 835.99 873.84 938.97    226   1.03
mu[58]  857.7     2.8  50.16 758.78 823.41 857.52 892.81 953.11    320   1.01
mu[59] 844.35    4.29  51.48 740.49 811.83 844.81 879.51 938.48    144   1.02
mu[60] 846.29    3.62  54.64  736.9 811.04 847.77 885.03 944.18    228   1.02
mu[61] 857.44    2.82  52.89 756.11 820.58 857.72 892.29 960.18    353   1.01
mu[62] 867.61    2.35  51.57  768.0 833.43 865.33 901.81 969.87    482    1.0
mu[63] 880.88     2.9  53.64 774.95 845.43 879.91 913.72 991.17    343    1.0
mu[64] 886.41    3.44  54.22 784.41  849.8 884.97 921.01 1002.8    249   1.01
mu[65] 875.97    2.78  52.38  774.1 841.98 874.87  907.7 989.22    356   1.01
mu[66] 864.29    2.37  51.93 765.58 828.52 863.34 898.48 971.35    479   1.01
mu[67] 858.22    2.65  52.78 751.47  822.8 857.33 894.51 960.07    396   1.01
mu[68] 825.46    5.83  54.34 719.53 789.59 827.09 862.64 926.68     87   1.03
mu[69] 801.77   10.14  60.86 674.61 760.62 803.62 843.87 911.75     36   1.06
mu[70] 796.17   11.45  61.65 674.52 755.87  797.2 838.58 911.96     29   1.07
mu[71] 813.79    6.97  54.41  701.7 776.72  814.8 850.04 914.33     61   1.04
mu[72] 820.35    6.51  53.71 714.04 783.83 819.36 857.39 918.67     68   1.03
mu[73] 824.95     5.3  52.72 709.97 791.04 825.65 860.73 928.05     99   1.02
mu[74] 843.56    3.13  50.97 739.13 809.48 845.44  879.3 936.54    265   1.01
mu[75] 868.53    3.25  53.14 765.23 834.03 865.75  902.8 975.21    268    1.0
mu[76] 864.56    2.34  51.62 764.34 830.31 864.94 898.28 971.02    486    1.0
mu[77] 863.64     2.5  52.05 761.96 829.18 863.22 899.01 967.58    434    1.0
mu[78] 858.84    2.55  51.39 754.64 825.44 858.86  895.0 960.17    406   1.01
mu[79] 859.48     2.1  53.74 747.36 826.51  861.6 895.15 964.93    652   1.01
mu[80] 848.88     4.3  56.35  724.4 812.86 849.81 886.79 953.35    172   1.03
mu[81] 853.59     4.1   55.4 737.57 818.57 854.91 892.04 956.94    183   1.02
mu[82] 876.05    1.72  53.52 767.79 843.68 877.49 910.71 977.73    971    1.0
mu[83] 903.05    3.63  54.11  797.9 866.72 902.11 937.19 1015.7    222   1.01
mu[84]  903.9    2.85  53.84 796.74 871.04 903.68 936.41 1011.2    358    1.0
mu[85] 906.53    2.57  52.65 804.07 873.04 905.71 938.77 1012.4    418   1.01
mu[86]  896.4    1.77   51.2 793.53 863.85 897.05 929.16  996.4    841    1.0
mu[87] 905.03    2.08  52.27 802.42 870.12 904.96 938.93 1015.1    632    1.0
mu[88] 910.83    2.49  51.74 808.45 877.57 909.68 943.86 1015.9    433    1.0
mu[89] 908.29    2.25  50.91 810.23  873.6 907.66 941.98 1013.8    511    1.0
mu[90] 920.28    4.08  54.44 818.43 884.61 919.06 953.51 1038.0    178   1.01
mu[91] 916.93    3.77  53.14 820.47 879.63 915.09 951.62 1025.6    199   1.01
mu[92] 917.29    4.23  55.05 814.44 879.94 915.17 952.72 1030.8    169   1.02
mu[93] 925.42    6.05  62.61 820.96 882.21 917.59 965.31 1064.0    107   1.03
mu[94] 893.25    2.26  54.44 794.79 856.89 892.02 926.42 1011.3    580    1.0
mu[95] 859.39    1.83  52.33 753.11 825.84 860.13 893.49 963.08    822   1.01
mu[96] 845.74    3.28  54.36 737.19  810.2  847.2 883.35 946.24    274   1.01
mu[97] 815.02    9.66   61.1 685.82 776.19 816.19 857.71 926.03     40   1.05
mu[98] 800.94   11.42   64.6 669.21 757.95 801.93 846.84 921.13     32   1.06
mu[99] 794.36   12.11  72.68 648.11 742.73 795.72 846.41 920.96     36   1.05
sigmaV  1.6e4  1257.7 4534.7 9364.2  1.3e4  1.5e4  1.8e4  2.9e4     13   1.19
sigmaW 2438.2  329.47 2057.5   1.22 1078.6 1973.2 3246.4 7999.7     39    1.1
lp__   -921.3   26.04  78.11 -994.0 -959.9 -941.2 -917.5 -606.9      9   1.34

Samples were drawn using NUTS at Fri Apr 27 09:27:35 2018.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).
In [32]:
# Trace plots for a visual convergence check of the Stan chains.
fit.traceplot()

Out[32]:
In [29]:
# Extract posterior draws as a dict of arrays (keys: 'muZero', 'mu', ...).
la = fit.extract()

In [30]:
def plotseries(y, mu_samples, title, ax=0):
    """Plot the observed series (black) together with the posterior mean of
    the latent states and a 95% credible band.

    ax is the axis of mu_samples that indexes posterior draws:
    0 for the Stan output, 1 for the PyMC / Edward outputs.
    """
    lower, upper = np.percentile(mu_samples, q=[2.5, 97.5], axis=ax)
    posterior_mean = mu_samples.mean(axis=ax)
    n_obs = len(y)
    state_x = list(range(1, n_obs + 1))
    # Pad the observations with a trailing None so both lines share an x-range.
    plt.plot(list(range(n_obs + 1)), list(y) + [None], color='black')
    plt.plot(state_x, posterior_mean)
    plt.fill_between(state_x, lower, upper, alpha=0.3)
    plt.title(title)
    plt.show()

In [31]:
# Stan draws are (draws, N), so the sample axis is 0.
plotseries(y,la['mu'],'stan',0)

In [103]:
import pickle

In [104]:
# Persist the Stan posterior draws for later comparison.
with open('nile_stan_nuts.pickle', mode='wb') as f:
    pickle.dump(la, f)


# PyMC¶

In [2]:
import pymc3 as pm

/Users/apple/Library/Python/3.6/lib/python/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from float to np.floating is deprecated. In future, it will be treated as np.float64 == np.dtype(float).type.
from ._conv import register_converters as _register_converters

In [101]:
print(pm.__version__)

3.4.1

In [26]:
# Same local-level model in PyMC3. tau = 1/variance, so sigmaW / sigmaV are
# variances, matching the Stan parameterization above.
with pm.Model() as model:
    muZero = pm.Normal(name='muZero', mu=0.0, tau=1.0)
    sigmaW = pm.InverseGamma(name='sigmaW', alpha=1.0, beta=1.0)

    # One named Normal node per time step, chained as a random walk.
    mu = [0] * N
    mu[0] = pm.Normal(name='mu0', mu=muZero, tau=1 / sigmaW)
    for n in range(1, N):
        mu[n] = pm.Normal(name='mu' + str(n), mu=mu[n - 1], tau=1 / sigmaW)

    sigmaV = pm.InverseGamma(name='sigmaV', alpha=1.0, beta=1.0)
    y_pre = pm.Normal('y_pre', mu=mu, tau=1 / sigmaV, observed=y)

    # Initialize at the MAP estimate, then draw T NUTS samples.
    start = pm.find_MAP()
    step = pm.NUTS()
    trace = pm.sample(T, step, start=start)

logp = -876.02, ||grad|| = 60.474: 100%|â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ| 31/31 [00:00<00:00, 331.74it/s]
Multiprocess sampling (2 chains in 2 jobs)
INFO:pymc3:Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sigmaV_log__, mu99, mu98, mu97, mu96, mu95, mu94, mu93, mu92, mu91, mu90, mu89, mu88, mu87, mu86, mu85, mu84, mu83, mu82, mu81, mu80, mu79, mu78, mu77, mu76, mu75, mu74, mu73, mu72, mu71, mu70, mu69, mu68, mu67, mu66, mu65, mu64, mu63, mu62, mu61, mu60, mu59, mu58, mu57, mu56, mu55, mu54, mu53, mu52, mu51, mu50, mu49, mu48, mu47, mu46, mu45, mu44, mu43, mu42, mu41, mu40, mu39, mu38, mu37, mu36, mu35, mu34, mu33, mu32, mu31, mu30, mu29, mu28, mu27, mu26, mu25, mu24, mu23, mu22, mu21, mu20, mu19, mu18, mu17, mu16, mu15, mu14, mu13, mu12, mu11, mu10, mu9, mu8, mu7, mu6, mu5, mu4, mu3, mu2, mu1, mu0, sigmaW_log__, muZero]
INFO:pymc3:NUTS: [sigmaV_log__, mu99, mu98, mu97, mu96, mu95, mu94, mu93, mu92, mu91, mu90, mu89, mu88, mu87, mu86, mu85, mu84, mu83, mu82, mu81, mu80, mu79, mu78, mu77, mu76, mu75, mu74, mu73, mu72, mu71, mu70, mu69, mu68, mu67, mu66, mu65, mu64, mu63, mu62, mu61, mu60, mu59, mu58, mu57, mu56, mu55, mu54, mu53, mu52, mu51, mu50, mu49, mu48, mu47, mu46, mu45, mu44, mu43, mu42, mu41, mu40, mu39, mu38, mu37, mu36, mu35, mu34, mu33, mu32, mu31, mu30, mu29, mu28, mu27, mu26, mu25, mu24, mu23, mu22, mu21, mu20, mu19, mu18, mu17, mu16, mu15, mu14, mu13, mu12, mu11, mu10, mu9, mu8, mu7, mu6, mu5, mu4, mu3, mu2, mu1, mu0, sigmaW_log__, muZero]
100%|â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ| 1500/1500 [02:16<00:00, 11.02it/s]
There were 2 divergences after tuning. Increase target_accept or reparameterize.
ERROR:pymc3:There were 2 divergences after tuning. Increase target_accept or reparameterize.
The acceptance probability does not match the target. It is 0.4238921757976004, but should be close to 0.8. Try to increase the number of tuning steps.
WARNING:pymc3:The acceptance probability does not match the target. It is 0.4238921757976004, but should be close to 0.8. Try to increase the number of tuning steps.
The acceptance probability does not match the target. It is 0.510485232585227, but should be close to 0.8. Try to increase the number of tuning steps.
WARNING:pymc3:The acceptance probability does not match the target. It is 0.510485232585227, but should be close to 0.8. Try to increase the number of tuning steps.
The gelman-rubin statistic is larger than 1.05 for some parameters. This indicates slight problems during sampling.
INFO:pymc3:The gelman-rubin statistic is larger than 1.05 for some parameters. This indicates slight problems during sampling.
The estimated number of effective samples is smaller than 200 for some parameters.
ERROR:pymc3:The estimated number of effective samples is smaller than 200 for some parameters.

In [99]:
# Stack the per-timestep traces ('mu0'..'mu{N-1}') into an (N, draws) array;
# the sample axis is therefore 1.
mu_samples = np.array([trace['mu'+str(i)] for i in range(N)])
plotseries(y,mu_samples,"pymc",1)

In [105]:
# Persist the PyMC trace for later comparison.
with open('nile_pymc_nuts.pickle', mode='wb') as f:
    pickle.dump(trace, f)


# Edward¶

In [1]:
import tensorflow as tf
import edward as ed
from edward.models import Normal, InverseGamma, Empirical

/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6
return f(*args, **kwds)
/Users/apple/Library/Python/3.6/lib/python/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from float to np.floating is deprecated. In future, it will be treated as np.float64 == np.dtype(float).type.
from ._conv import register_converters as _register_converters

In [2]:
# Record the Edward version used for this run (output shows 1.3.5).
print(ed.__version__)

1.3.5

In [9]:
# Same local-level model in Edward. sigmaW / sigmaV are variances, as in the
# Stan (sd = sqrt(sigmaW)) and PyMC (tau = 1/sigmaW) versions. Edward's Normal
# `scale` is a standard deviation, so it must be sqrt(variance) -- the original
# passed the variance directly as scale, which made this model inconsistent
# with the other two implementations.
muZero = Normal(loc=0.0, scale=1.0)
sigmaW = InverseGamma(concentration=1.0, rate=1.0)

mu = [0] * N
mu[0] = Normal(loc=muZero, scale=tf.sqrt(sigmaW))
for n in range(1, N):
    mu[n] = Normal(loc=mu[n - 1], scale=tf.sqrt(sigmaW))

sigmaV = InverseGamma(concentration=1.0, rate=1.0)
y_pre = Normal(loc=tf.stack(mu), scale=tf.sqrt(sigmaV))

# Empirical approximations: one buffer of T HMC draws per latent variable.
qmuZero = Empirical(tf.Variable(tf.zeros(T)))
qsigmaW = Empirical(tf.Variable(tf.zeros(T)))
qmu = [Empirical(tf.Variable(tf.zeros(T))) for n in range(N)]
qsigmaV = Empirical(tf.Variable(tf.zeros(T)))

latent_vars = {m: qm for m, qm in zip(mu, qmu)}
latent_vars[muZero] = qmuZero
latent_vars[sigmaW] = qsigmaW
latent_vars[sigmaV] = qsigmaV

inference = ed.HMC(latent_vars, data={y_pre: y.values})
inference.run(n_iter=T)

/usr/local/lib/python3.6/site-packages/edward/util/random_variables.py:52: FutureWarning: Conversion of the second argument of issubdtype from float to np.floating is deprecated. In future, it will be treated as np.float64 == np.dtype(float).type.
not np.issubdtype(value.dtype, np.float) and \
/usr/local/lib/python3.6/site-packages/edward/util/random_variables.py:53: FutureWarning: Conversion of the second argument of issubdtype from int to np.signedinteger is deprecated. In future, it will be treated as np.int64 == np.dtype(int).type.
not np.issubdtype(value.dtype, np.int) and \

1000/1000 [100%] â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ Elapsed: 51s | Acceptance Rate: 0.000

In [36]:
# Draw 1000 values from each Empirical posterior; result is (N, 1000).
qmu_sample=np.array([ _qmu.sample(1000).eval()  for _qmu in qmu])

In [37]:
# Sanity-check the shape: (N, draws) = (100, 1000).
qmu_sample.shape

Out[37]:
(100, 1000)
In [39]:
# qmu_sample is (N, draws), so the sample axis is 1.
plotseries(y,qmu_sample,"Edward"+ed.__version__,1)

In [106]:
# Persist the Edward HMC state samples for later comparison.
with open('nile_edward_hmc.pickle', mode='wb') as f:
    pickle.dump(qmu_sample, f)

In [108]:
# NOTE(review): this line is identical to the earlier qmu_sample draw --
# presumably inference.run was re-executed for more iterations in between
# (the plot title below says "run1000+3000"); confirm before treating this as
# a distinct posterior.
qmu_sample3=np.array([ _qmu.sample(1000).eval()  for _qmu in qmu])

In [ ]:
# Plot the re-sampled Edward states (sample axis 1).
plotseries(y,qmu_sample3,"Edward HMC run1000+3000",1)


# SGHMC¶

In [44]:
def nile_st_SGHMC(y, N, T):
    """Build the Edward local-level model and an SGHMC inference object.

    Parameters: y -- observed pandas Series; N -- series length;
    T -- number of posterior draws to store per latent variable.
    Returns (inference, qmu) where qmu is the list of Empirical state nodes.

    sigmaW / sigmaV are variances, so the Normal `scale` (a standard
    deviation) uses sqrt(variance) to match the Stan and PyMC
    parameterizations; the original passed the variance directly as scale.
    """
    muZero = Normal(loc=0.0, scale=1.0)
    sigmaW = InverseGamma(concentration=1.0, rate=1.0)

    # Random-walk chain of latent states.
    mu = [0] * N
    mu[0] = Normal(loc=muZero, scale=tf.sqrt(sigmaW))
    for n in range(1, N):
        mu[n] = Normal(loc=mu[n - 1], scale=tf.sqrt(sigmaW))

    sigmaV = InverseGamma(concentration=1.0, rate=1.0)
    y_pre = Normal(loc=tf.stack(mu), scale=tf.sqrt(sigmaV))

    # Empirical buffers holding T SGHMC draws each.
    qmuZero = Empirical(tf.Variable(tf.zeros(T)))
    qsigmaW = Empirical(tf.Variable(tf.zeros(T)))
    qmu = [Empirical(tf.Variable(tf.zeros(T))) for n in range(N)]
    qsigmaV = Empirical(tf.Variable(tf.zeros(T)))

    latent_vars = {m: qm for m, qm in zip(mu, qmu)}
    latent_vars[muZero] = qmuZero
    latent_vars[sigmaW] = qsigmaW
    latent_vars[sigmaV] = qsigmaV

    return ed.SGHMC(latent_vars, data={y_pre: y.values}), qmu

In [45]:
# Build the SGHMC inference object and keep the state nodes for sampling.
inferenceSGHMC,qmuSGHMC=nile_st_SGHMC(y,N,T)

/usr/local/lib/python3.6/site-packages/edward/util/random_variables.py:52: FutureWarning: Conversion of the second argument of issubdtype from float to np.floating is deprecated. In future, it will be treated as np.float64 == np.dtype(float).type.
not np.issubdtype(value.dtype, np.float) and \
/usr/local/lib/python3.6/site-packages/edward/util/random_variables.py:53: FutureWarning: Conversion of the second argument of issubdtype from int to np.signedinteger is deprecated. In future, it will be treated as np.int64 == np.dtype(int).type.
not np.issubdtype(value.dtype, np.int) and \

In [46]:
# Run T iterations of SGHMC.
inferenceSGHMC.run(n_iter=T)

1000/1000 [100%] â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ Elapsed: 24s | Acceptance Rate: 1.000

In [47]:
# Draw 1000 values from each SGHMC Empirical posterior; shape (N, 1000).
qmuSGHMCsample=np.array([ _qmu.sample(1000).eval()  for _qmu in qmuSGHMC])

In [49]:
# qmuSGHMCsample is (N, draws), so the sample axis is 1.
plotseries(y,qmuSGHMCsample,"Edward SGHMC run1000",1)

In [55]:
# Same credible-band plot as plotseries, but without overlaying the observed
# series, so the SGHMC band can be inspected on its own scale.
mu_lower, mu_upper = np.percentile(qmuSGHMCsample, q=[2.5, 97.5], axis=1)
pred = qmuSGHMCsample.mean(axis=1)
plt.plot(list(range(1, len(y)+1)), pred)
plt.fill_between(list(range(1, len(y)+1)), mu_lower, mu_upper, alpha=0.3)
plt.title("Edward SGHMC run1000")
plt.show()

In [ ]: