In [1]:
:set -XFlexibleContexts -XMonadComprehensions -XNoImplicitPrelude -XRebindableSyntax
import Language.Stochaskell
stochaskell
Stochaskell, version 0.1.0
Copyright (C) 2015-2020 David A Roberts
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions; see the LICENSE for details.

Using installation directory at 
  /home/jovyan/stochaskell
In [2]:
:opt svg
import Language.Stochaskell.Plot
In [3]:
-- | Inhomogeneous Poisson process on an interval: draw the number of
-- events from a Poisson distribution with the given mean, then place
-- them as an ordered sample from the supplied location distribution.
poissonProc :: R -> P R -> P RVec
poissonProc m f = do
  n <- poisson m
  -- bind-then-return is redundant; return the action's result directly
  orderedSample n f
In [4]:
-- | Homogeneous Poisson process with constant rate on [0,t]: the
-- expected event count is rate*t and event times are uniform on the
-- interval.
poissonProcHomo :: R -> R -> P RVec
poissonProcHomo rate t =
  -- a 'do' block around a single action is redundant
  poissonProc (rate * t) (uniform 0 t)
In [5]:
-- Draw one realisation of a unit-rate Poisson process on [0,5].
simulate (poissonProcHomo 1 5)
[0.9716267457678796,1.5309201571040059,2.0869329893499335,3.5964876124419183,4.223162045679379]
In [6]:
-- Simulate five independent realisations of the process and plot each
-- as a step function (cumulative event count against time).
samples <- sequence [simulate (poissonProcHomo 1 5) | i <- [1..5]]
toRenderable $ do
  sequence_ $ plotStep "" (0,5) . list <$> samples
  xlabel "Time"
  ylabel "Cumulative number of events"
In [7]:
-- | Gaussian process prior evaluated at n input locations x: zero
-- mean, with covariance entries given by the kernel (any diagonal
-- jitter is the kernel's responsibility).
gp :: (R -> R -> R) -> Z -> RVec -> P RVec
gp kernel n x = do
  let mu  = vector [ 0 | i <- 1...n ]
      cov = matrix [ kernel (x!i) (x!j) | i <- 1...n, j <- 1...n ]
  -- bind-then-return is redundant; return the draw directly
  normal mu cov
In [8]:
-- Squared-exponential kernel with unit signal variance and unit
-- length-scale (both parameters supplied on the log scale).
kernelSE1 = kernelSE (log 1) (log 1)

-- Squared-exponential (RBF) covariance: lsv is the log signal
-- variance, lls2 the log squared length-scale.  The small diagonal
-- jitter keeps the resulting covariance matrix numerically positive
-- definite.
kernelSE lsv lls2 a b = se + jitter
  where
    se     = exp (lsv - (a - b)^2 / (2 * exp lls2))
    jitter = if a == b then 1e-6 else 0
In [9]:
-- Evaluate the GP prior on a grid of 101 points over [0,10] and plot
-- five independent draws.  'test' is reused by later cells.
let test = list [i/10 | i <- [0..100]]
samples <- sequence [simulate (gp kernelSE1 101 test) | i <- [1..5]]
toRenderable $ sequence [plot $ line "" [test `zip` list ys] | ys <- samples]
In [10]:
-- | Multivariate normal via the Cholesky (non-centred)
-- parameterisation: draw n i.i.d. standard normals, colour them with
-- the Cholesky factor of the covariance, and shift by the mean.
normalChol :: Z -> RVec -> RMat -> P RVec
normalChol n mu cov = do
  z <- joint vector [ normal 0 1 | i <- 1...n ]
  let l = chol cov
  return (mu + l #> z)
In [11]:
-- | Gaussian process prior with the given mean function and kernel,
-- sampled through 'normalChol' (the non-centred parameterisation,
-- which is friendlier to HMC than sampling the correlated vector
-- directly).
gpChol :: (R -> R) -> (R -> R -> R) -> Z -> RVec -> P RVec
gpChol expect kernel n x = do
  let mu  = vector [ expect (x!i) | i <- 1...n ]
      cov = matrix [ kernel (x!i) (x!j) | i <- 1...n, j <- 1...n ]
  -- bind-then-return is redundant; return the action's result directly
  normalChol n mu cov
In [12]:
-- Five draws from the Cholesky-parameterised GP on the same grid;
-- should look statistically identical to the direct 'gp' draws above.
samples <- sequence [simulate (gpChol (const 0) kernelSE1 101 test) | i <- [1..5]]
toRenderable $ do
  sequence_ [plot $ line "" [test `zip` list ys] | ys <- samples]
  xlabel "Function input value"
  ylabel "Function output value"
In [13]:
-- | GP binary classifier: a latent function drawn from a zero-mean GP
-- at the n input locations, with Bernoulli-logit observations at each
-- location.  Returns both the latent values and the labels.
gpClassifier :: (R -> R -> R) -> Z -> RVec -> P (RVec,BVec)
gpClassifier kernel n x = do
  latent <- gpChol (const 0) kernel n x
  labels <- joint vector [ bernoulliLogit (latent!i) | i <- 1...n ]
  return (latent, labels)
In [14]:
-- | Full generative model: event locations from a homogeneous Poisson
-- process on [0,10], latent GP values at those locations, and binary
-- labels from the Bernoulli-logit likelihood.  Returns the (random)
-- event count, the locations, the latent values, and the labels.
model :: R -> P (Z,RVec,RVec,BVec)
model rate = do
  locs <- poissonProcHomo rate 10
  let count = vectorSize locs
  (latent, labels) <- gpClassifier kernelSE1 count locs
  return (count, locs, latent, labels)
In [15]:
-- Simulate synthetic data (rate 10) and plot the latent function.
(n,x,y,z) <- simulate (model 10)
toRenderable . plot $ line "" [list x `zip` list y]
In [16]:
-- Condition on the observed count, inputs, and labels, and draw 1000
-- HMC (Stan/NUTS) posterior samples of the latent GP values.
-- NOTE(review): the prior here uses rate 5 while the data were
-- simulated with rate 10 above — confirm the mismatch is intentional
-- (the rate does not affect the conditional once n and x are fixed).
samples <- hmcStan 1000 [y' | (n',x',y',z') <- model 5, n' == n, x' == x, z' == z]
--- Generating Stan code ---
functions {
  // https://github.com/stan-dev/stan/issues/452
  real to_real(real x) { return x; }
}
data {
  int x_stan_0_0;
  vector[x_stan_0_0] x_stan_0_1;
  
  int<lower=0,upper=1> x_stan_0_3[x_stan_0_0];
}
parameters {
  
  
  vector[x_stan_0_0] x_stan_0_2;
  
}
model {
  
  vector[x_stan_0_0] v_0_1;
  
  matrix[x_stan_0_0,x_stan_0_0] v_0_3;
  matrix[x_stan_0_0,x_stan_0_0] v_0_4;
  
  vector[x_stan_0_0] v_0_6;
  vector[x_stan_0_0] v_0_7;
  
  for (i_1_1 in 1:x_stan_0_0) {
  
    v_0_1[i_1_1] = 0;
  }
  
  for (i_1_1 in 1:x_stan_0_0) for (i_1_2 in 1:x_stan_0_0) {
    real v_1_0;
    real v_1_1;
    real v_1_2;
    real v_1_3;
    real v_1_4;
    int v_1_5;
    real v_1_6;
    real v_1_7;
    v_1_0 = x_stan_0_1[i_1_1] - x_stan_0_1[i_1_2];
    v_1_1 = v_1_0 .* v_1_0;
    v_1_2 = to_real(v_1_1) ./ to_real(2);
    v_1_3 = -(v_1_2);
    v_1_4 = exp(v_1_3);
    v_1_5 = x_stan_0_1[i_1_1] == x_stan_0_1[i_1_2];
    v_1_6 = v_1_5 ? 1.0e-6 : 0;
    v_1_7 = v_1_4 + v_1_6;
    v_0_3[i_1_1, i_1_2] = v_1_7;
  }
  v_0_4 = cholesky_decompose(to_matrix(v_0_3));
  
  v_0_6 = to_matrix(v_0_4) * to_vector(x_stan_0_2);
  v_0_7 = v_0_1 + v_0_6;

  
  
  for (i_1_1 in 1:x_stan_0_0) {
    x_stan_0_2[i_1_1] ~ normal(0, 1);
  }
  for (i_1_1 in 1:x_stan_0_0) {
    x_stan_0_3[i_1_1] ~ bernoulli_logit(v_0_7[i_1_1]);
  }
}

make -C /home/jovyan/stochaskell/cmdstan /home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f
make[1]: Entering directory '/home/jovyan/stochaskell/cmdstan'

--- Translating Stan model to C++ code ---
bin/stanc  --o=/home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f.hpp /home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f.stan
Model name=model_7569d5f54491ccd122205a025152ebf820b7d76f_model
Input file=/home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f.stan
Output file=/home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f.hpp

--- Compiling, linking C++ code ---
g++ -std=c++1y -pthread -Wno-sign-compare     -O3 -I src -I stan/src -I stan/lib/stan_math/ -I stan/lib/stan_math/lib/eigen_3.3.3 -I stan/lib/stan_math/lib/boost_1.69.0 -I stan/lib/stan_math/lib/sundials_4.1.0/include    -DBOOST_RESULT_OF_USE_TR1 -DBOOST_NO_DECLTYPE -DBOOST_DISABLE_ASSERTS -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION     -c  -x c++ -o /home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f.o /home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f.hpp
g++ -std=c++1y -pthread -Wno-sign-compare     -O3 -I src -I stan/src -I stan/lib/stan_math/ -I stan/lib/stan_math/lib/eigen_3.3.3 -I stan/lib/stan_math/lib/boost_1.69.0 -I stan/lib/stan_math/lib/sundials_4.1.0/include    -DBOOST_RESULT_OF_USE_TR1 -DBOOST_NO_DECLTYPE -DBOOST_DISABLE_ASSERTS -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION                   src/cmdstan/main.o stan/lib/stan_math/lib/sundials_4.1.0/lib/libsundials_nvecserial.a stan/lib/stan_math/lib/sundials_4.1.0/lib/libsundials_cvodes.a stan/lib/stan_math/lib/sundials_4.1.0/lib/libsundials_idas.a  /home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f.o -o /home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f
make[1]: Leaving directory '/home/jovyan/stochaskell/cmdstan'
--- Sampling Stan model ---
/home/jovyan/stochaskell/cache/stan/model_7569d5f54491ccd122205a025152ebf820b7d76f method=sample num_samples=1000 num_warmup=1000 save_warmup=0 thin=1 adapt engaged=1 algorithm=hmc engine=nuts max_depth=10 metric=diag_e stepsize=1.0 stepsize_jitter=0.0 data file=/tmp/stan-3387388d1f5fd324/stan.data init=0 output file=/tmp/stan-3387388d1f5fd324/stan.csv
method = sample (Default)
  sample
    num_samples = 1000 (Default)
    num_warmup = 1000 (Default)
    save_warmup = 0 (Default)
    thin = 1 (Default)
    adapt
      engaged = 1 (Default)
      gamma = 0.050000000000000003 (Default)
      delta = 0.80000000000000004 (Default)
      kappa = 0.75 (Default)
      t0 = 10 (Default)
      init_buffer = 75 (Default)
      term_buffer = 50 (Default)
      window = 25 (Default)
    algorithm = hmc (Default)
      hmc
        engine = nuts (Default)
          nuts
            max_depth = 10 (Default)
        metric = diag_e (Default)
        metric_file =  (Default)
        stepsize = 1 (Default)
        stepsize_jitter = 0 (Default)
id = 0 (Default)
data
  file = /tmp/stan-3387388d1f5fd324/stan.data
init = 0
random
  seed = -1 (Default)
output
  file = /tmp/stan-3387388d1f5fd324/stan.csv
  diagnostic_file =  (Default)
  refresh = 100 (Default)


Gradient evaluation took 0.003398 seconds
1000 transitions using 10 leapfrog steps per transition would take 33.98 seconds.
Adjust your expectations accordingly!


Iteration:    1 / 2000 [  0%]  (Warmup)
Iteration:  100 / 2000 [  5%]  (Warmup)
Iteration:  200 / 2000 [ 10%]  (Warmup)
Iteration:  300 / 2000 [ 15%]  (Warmup)
Iteration:  400 / 2000 [ 20%]  (Warmup)
Iteration:  500 / 2000 [ 25%]  (Warmup)
Iteration:  600 / 2000 [ 30%]  (Warmup)
Iteration:  700 / 2000 [ 35%]  (Warmup)
Iteration:  800 / 2000 [ 40%]  (Warmup)
Iteration:  900 / 2000 [ 45%]  (Warmup)
Iteration: 1000 / 2000 [ 50%]  (Warmup)
Iteration: 1001 / 2000 [ 50%]  (Sampling)
Iteration: 1100 / 2000 [ 55%]  (Sampling)
Iteration: 1200 / 2000 [ 60%]  (Sampling)
Iteration: 1300 / 2000 [ 65%]  (Sampling)
Iteration: 1400 / 2000 [ 70%]  (Sampling)
Iteration: 1500 / 2000 [ 75%]  (Sampling)
Iteration: 1600 / 2000 [ 80%]  (Sampling)
Iteration: 1700 / 2000 [ 85%]  (Sampling)
Iteration: 1800 / 2000 [ 90%]  (Sampling)
Iteration: 1900 / 2000 [ 95%]  (Sampling)
Iteration: 2000 / 2000 [100%]  (Sampling)

 Elapsed Time: 35.3845 seconds (Warm-up)
               22.6767 seconds (Sampling)
               58.0612 seconds (Total)

Stan took 58.08748379s
# stan_version_major = 2
# stan_version_minor = 20
# stan_version_patch = 0
# model = model_7569d5f54491ccd122205a025152ebf820b7d76f_model
# method = sample (Default)
#   sample
#     num_samples = 1000 (Default)
#     num_warmup = 1000 (Default)
#     save_warmup = 0 (Default)
#     thin = 1 (Default)
#     adapt
#       engaged = 1 (Default)
#       gamma = 0.050000000000000003 (Default)
#       delta = 0.80000000000000004 (Default)
#       kappa = 0.75 (Default)
#       t0 = 10 (Default)
#       init_buffer = 75 (Default)
#       term_buffer = 50 (Default)
#       window = 25 (Default)
#     algorithm = hmc (Default)
#       hmc
#         engine = nuts (Default)
#           nuts
#             max_depth = 10 (Default)
#         metric = diag_e (Default)
#         metric_file =  (Default)
#         stepsize = 1 (Default)
#         stepsize_jitter = 0 (Default)
# id = 0 (Default)
# data
#   file = /tmp/stan-3387388d1f5fd324/stan.data
# init = 0
# random
#   seed = -1 (Default)
# output
#   file = /tmp/stan-3387388d1f5fd324/stan.csv
#   diagnostic_file =  (Default)
#   refresh = 100 (Default)
# Adaptation terminated
# Step size = 0.307326
# Diagonal elements of inverse mass matrix:
# 0.454784, 0.609914, 0.892531, 0.966999, 0.975292, 0.813568, 0.984162, 1.12342, 0.791994, 0.867618, 1.05041, 0.97575, 0.893809, 0.841702, 1.12326, 0.842975, 0.919122, 0.968328, 0.965005, 0.985369, 1.04096, 1.03149, 1.04123, 0.96826, 0.933623, 0.949133, 1.01007, 0.946262, 0.889816, 1.06354, 0.783898, 1.04882, 1.02299, 0.792839, 0.916242, 0.744308, 1.03046, 0.849252, 0.926423, 0.955199, 0.888793, 0.855933, 1.03896, 1.06476, 0.829021, 0.981326, 1.05203, 0.818894, 0.715086, 0.763326, 0.99942, 1.00893, 0.918666, 0.93359, 1.00808, 1.14396, 0.888619, 1.01575, 0.837915, 0.97647, 0.853298, 0.777611, 0.806646, 0.861312, 0.893244, 1.03236, 0.990303, 1.00321, 0.954213, 0.904326, 0.957451, 1.01408, 0.920718, 0.87331, 0.820142, 0.902128, 1.14986, 0.867055, 1.05263, 0.887892, 0.893434, 1.02279, 0.98857, 1.10319, 1.07899, 1.0448, 1.02726, 1.00964, 1.01895, 1.0081, 1.00302, 0.899133, 1.00881, 1.0358, 1.01014, 0.985071, 0.990859, 0.900477, 1.02481, 0.970945, 0.956314, 1.04053
# 
#  Elapsed Time: 35.3845 seconds (Warm-up)
#                22.6767 seconds (Sampling)
#                58.0612 seconds (Total)
# 

Extracting: v_0_7
--- Removing temporary files ---
In [17]:
-- Overlay the true latent function, the observed labels (drawn at
-- +/-1.8 for visibility), and every 100th posterior sample as faint
-- black lines.
toRenderable $ do
  plot $ line "truth" [list x `zip` list y]
  plot $ points "data" $ list x `zip` [if b then 1.8 else -1.8 | b <- list z]
  setColors [black `withOpacity` 0.1]
  plot $ line "posterior" [list x `zip` list y' | (i,y') <- [0..] `zip` samples, i `mod` 100 == 0]
  ylim (-2,2)