{-# LANGUAGE FlexibleContexts, MonadComprehensions, NoImplicitPrelude, RebindableSyntax, TypeFamilies #-}
import Language.Stochaskell
stochaskell
Stochaskell, version 0.1.0 Copyright (C) 2015-2019 David A Roberts This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you are welcome to redistribute it under certain conditions; see the LICENSE for details. Using installation directory at /home/jovyan/stochaskell
-- | Gaussian-process prior over a random number of points.
--
-- Returns, inside the Stochaskell probability monad 'P':
--   * @n@ : number of locations, drawn from Poisson(5);
--   * @x@ : @n@ locations, an ordered sample from Uniform(0,5);
--   * @y@ : function values at @x@, drawn from a multivariate normal
--     with zero mean and a squared-exponential covariance.
prior :: P (Z,RVec,RVec)
prior = do
n <- poisson 5
-- Base distribution for each location; orderedSample draws n i.i.d.
-- values from it and returns them sorted (an ordered sample).
let base = uniform 0 5
x <- orderedSample n base
-- Zero mean vector of length n (the index i is intentionally unused;
-- 1...n is Stochaskell's symbolic range, not standard enumFromTo).
let mu = vector [ 0 | i <- 1...n ]
-- Squared-exponential kernel with unit length-scale; the 1e-6 jitter
-- on the diagonal (a == b) keeps the covariance numerically positive
-- definite for the Cholesky factorisation done by the backends.
kernel a b = exp (-(a - b)^2 / 2) + if a == b then 1e-6 else 0
cov = matrix [ kernel (x!i) (x!j) | i <- 1...n, j <- 1...n ]
y <- normal mu cov
return (n,x,y)
-- Condition the prior on n == 5 and on the five fixed input locations,
-- leaving the GP values y as the sole latent quantity. The monad
-- comprehension (MonadComprehensions) expresses the conditioning:
-- the guards after the generator are observations, not filters.
let posterior = [ y | (n,x,y) <- prior, n == 5, x == list [0.5,1.5,2.5,3.5,4.5] ]
-- Compile the posterior to Stan and draw 1000 samples via HMC/NUTS
-- (the generated Stan program and sampler log follow below).
samples <- hmcStan 1000 posterior
-- Show the last sample: a 5-vector of GP values at the fixed locations.
print $ last samples
--- Generating Stan code --- data { int x_stan_0_0; vector[x_stan_0_0] x_stan_0_1; } parameters { vector[x_stan_0_0] x_stan_0_2; } model { vector[x_stan_0_0] v_0_1; matrix[x_stan_0_0,x_stan_0_0] v_0_3; for (i_1_1 in 1:x_stan_0_0) { v_0_1[i_1_1] = 0; } for (i_1_1 in 1:x_stan_0_0) for (i_1_2 in 1:x_stan_0_0) { real v_1_0; real v_1_1; real v_1_2; real v_1_3; real v_1_4; int v_1_5; real v_1_6; real v_1_7; v_1_0 = x_stan_0_1[i_1_1] - x_stan_0_1[i_1_2]; v_1_1 = v_1_0 .* v_1_0; v_1_2 = v_1_1 ./ 2; v_1_3 = -(v_1_2); v_1_4 = exp(v_1_3); v_1_5 = x_stan_0_1[i_1_1] == x_stan_0_1[i_1_2]; v_1_6 = v_1_5 ? 1.0e-6 : 0; v_1_7 = v_1_4 + v_1_6; v_0_3[i_1_1, i_1_2] = v_1_7; } x_stan_0_2 ~ multi_normal(v_0_1, v_0_3); } make -C /home/jovyan/stochaskell/cmdstan /home/jovyan/stochaskell/cache/stan/model_674d3e4301d1e412a3dc977884d91470db35a6bc make[1]: Entering directory '/home/jovyan/stochaskell/cmdstan' --- Translating Stan model to C++ code --- bin/stanc /home/jovyan/stochaskell/cache/stan/model_674d3e4301d1e412a3dc977884d91470db35a6bc.stan --o=/home/jovyan/stochaskell/cache/stan/model_674d3e4301d1e412a3dc977884d91470db35a6bc.hpp Model name=model_674d3e4301d1e412a3dc977884d91470db35a6bc_model Input file=/home/jovyan/stochaskell/cache/stan/model_674d3e4301d1e412a3dc977884d91470db35a6bc.stan Output file=/home/jovyan/stochaskell/cache/stan/model_674d3e4301d1e412a3dc977884d91470db35a6bc.hpp --- Linking C++ model --- g++ -I src -I stan/src -isystem stan/lib/stan_math/ -isystem stan/lib/stan_math/lib/eigen_3.3.3 -isystem stan/lib/stan_math/lib/boost_1.62.0 -isystem stan/lib/stan_math/lib/cvodes_2.9.0/include -Wall -DEIGEN_NO_DEBUG -DBOOST_RESULT_OF_USE_TR1 -DBOOST_NO_DECLTYPE -DBOOST_DISABLE_ASSERTS -DFUSION_MAX_VECTOR_SIZE=12 -DNO_FPRINTF_OUTPUT -pipe -lpthread -O3 -o /home/jovyan/stochaskell/cache/stan/model_674d3e4301d1e412a3dc977884d91470db35a6bc src/cmdstan/main.cpp -include /home/jovyan/stochaskell/cache/stan/model_674d3e4301d1e412a3dc977884d91470db35a6bc.hpp 
stan/lib/stan_math/lib/cvodes_2.9.0/lib/libsundials_nvecserial.a stan/lib/stan_math/lib/cvodes_2.9.0/lib/libsundials_cvodes.a make[1]: Leaving directory '/home/jovyan/stochaskell/cmdstan' --- Sampling Stan model --- /home/jovyan/stochaskell/cache/stan/model_674d3e4301d1e412a3dc977884d91470db35a6bc method=sample num_samples=1000 num_warmup=1000 save_warmup=0 thin=1 adapt engaged=1 algorithm=hmc engine=nuts max_depth=10 metric=diag_e stepsize=1.0 stepsize_jitter=0.0 data file=/tmp/stan-5f9713f853bba6d1/stan.data init=0 output file=/tmp/stan-5f9713f853bba6d1/stan.csv method = sample (Default) sample num_samples = 1000 (Default) num_warmup = 1000 (Default) save_warmup = 0 (Default) thin = 1 (Default) adapt engaged = 1 (Default) gamma = 0.050000000000000003 (Default) delta = 0.80000000000000004 (Default) kappa = 0.75 (Default) t0 = 10 (Default) init_buffer = 75 (Default) term_buffer = 50 (Default) window = 25 (Default) algorithm = hmc (Default) hmc engine = nuts (Default) nuts max_depth = 10 (Default) metric = diag_e (Default) stepsize = 1 (Default) stepsize_jitter = 0 (Default) id = 0 (Default) data file = /tmp/stan-5f9713f853bba6d1/stan.data init = 0 random seed = 122441814 output file = /tmp/stan-5f9713f853bba6d1/stan.csv diagnostic_file = (Default) refresh = 100 (Default) Gradient evaluation took 2.4e-05 seconds 1000 transitions using 10 leapfrog steps per transition would take 0.24 seconds. Adjust your expectations accordingly! 
Iteration: 1 / 2000 [ 0%] (Warmup) Iteration: 100 / 2000 [ 5%] (Warmup) Iteration: 200 / 2000 [ 10%] (Warmup) Iteration: 300 / 2000 [ 15%] (Warmup) Iteration: 400 / 2000 [ 20%] (Warmup) Iteration: 500 / 2000 [ 25%] (Warmup) Iteration: 600 / 2000 [ 30%] (Warmup) Iteration: 700 / 2000 [ 35%] (Warmup) Iteration: 800 / 2000 [ 40%] (Warmup) Iteration: 900 / 2000 [ 45%] (Warmup) Iteration: 1000 / 2000 [ 50%] (Warmup) Iteration: 1001 / 2000 [ 50%] (Sampling) Iteration: 1100 / 2000 [ 55%] (Sampling) Iteration: 1200 / 2000 [ 60%] (Sampling) Iteration: 1300 / 2000 [ 65%] (Sampling) Iteration: 1400 / 2000 [ 70%] (Sampling) Iteration: 1500 / 2000 [ 75%] (Sampling) Iteration: 1600 / 2000 [ 80%] (Sampling) Iteration: 1700 / 2000 [ 85%] (Sampling) Iteration: 1800 / 2000 [ 90%] (Sampling) Iteration: 1900 / 2000 [ 95%] (Sampling) Iteration: 2000 / 2000 [100%] (Sampling) Elapsed Time: 0.099845 seconds (Warm-up) 0.106995 seconds (Sampling) 0.20684 seconds (Total) # stan_version_major = 2 # stan_version_minor = 16 # stan_version_patch = 0 # model = model_674d3e4301d1e412a3dc977884d91470db35a6bc_model # method = sample (Default) # sample # num_samples = 1000 (Default) # num_warmup = 1000 (Default) # save_warmup = 0 (Default) # thin = 1 (Default) # adapt # engaged = 1 (Default) # gamma = 0.050000000000000003 (Default) # delta = 0.80000000000000004 (Default) # kappa = 0.75 (Default) # t0 = 10 (Default) # init_buffer = 75 (Default) # term_buffer = 50 (Default) # window = 25 (Default) # algorithm = hmc (Default) # hmc # engine = nuts (Default) # nuts # max_depth = 10 (Default) # metric = diag_e (Default) # stepsize = 1 (Default) # stepsize_jitter = 0 (Default) # id = 0 (Default) # data # file = /tmp/stan-5f9713f853bba6d1/stan.data # init = 0 # random # seed = 122441814 # output # file = /tmp/stan-5f9713f853bba6d1/stan.csv # diagnostic_file = (Default) # refresh = 100 (Default) # Adaptation terminated # Step size = 0.329682 # Diagonal elements of inverse mass matrix: # 0.987448, 0.984921, 
0.843981, 0.967326, 1.19354 # # Elapsed Time: 0.099845 seconds (Warm-up) # 0.106995 seconds (Sampling) # 0.20684 seconds (Total) # Extracting: x_stan_0_2 --- Removing temporary files ---
[-1.5277,-1.83303,-0.756986,-0.317618,0.158873]
import Language.PyMC3
-- Print the PyMC3 (Python) program generated from the same posterior;
-- Nothing means no initial values are supplied to the sampler.
putStrLn $ pmProgram' defaultPyMC3Inference posterior Nothing
-- Run that program with PyMC3's default NUTS inference settings.
samples <- runPyMC3 defaultPyMC3Inference posterior Nothing
-- Show the last of the returned posterior samples.
print $ last samples
import numpy as np import pymc3 as pm import sys import time import theano import theano.tensor as tt import theano.tensor.basic import theano.tensor.slinalg from theano.ifelse import ifelse def ascolumn(a): return a.dimshuffle(0,'x') def asrow(a): return a.dimshuffle('x',0) def quad_form_diag(m,v): d = theano.tensor.basic.diag(v) return d.dot(m).dot(d) def lkj_corr(name, eta, n): C_triu = pm.LKJCorr(name + '_triu', eta=eta, n=n) shape = n * (n - 1) // 2 tri_index = np.zeros([n, n], dtype='int32') tri_index[np.triu_indices(n, k=1)] = np.arange(shape) tri_index[np.triu_indices(n, k=1)[::-1]] = np.arange(shape) return pm.Deterministic(name, tt.fill_diagonal(C_triu[tri_index], 1)) theano.config.floatX = 'float32' with pm.Model() as model: x_pm_0_0 = tt.as_tensor_variable(np.load('x_pm_0_0.npy').astype(theano.config.floatX)) x_pm_0_0 = tt.as_tensor_variable(np.load('x_pm_0_0.npy').astype(theano.config.floatX)) x_pm_0_1 = tt.as_tensor_variable(np.load('x_pm_0_1.npy').astype(theano.config.floatX)) v_0_3 = 0.0 * np.ones((int(x_pm_0_0.eval())), dtype=theano.config.floatX) def v_0_4_fn(i_1_1, i_1_2): v_1_0 = x_pm_0_1[i_1_1-1] - x_pm_0_1[i_1_2-1] v_1_1 = v_1_0 * v_1_0 v_1_2 = v_1_1 / 2.0 v_1_3 = -(v_1_2) v_1_4 = pm.math.exp(v_1_3) v_1_5 = tt.eq(x_pm_0_1[i_1_1-1], x_pm_0_1[i_1_2-1]) v_1_6 = ifelse(v_1_5, 1.0e-6, 0.0) v_1_7 = v_1_4 + v_1_6 return v_1_7 v_0_4 = pm.Deterministic('v_0_4', tt.stack([[v_0_4_fn(i_1_1, i_1_2) for i_1_2 in range(1, int(x_pm_0_0.eval())+1)] for i_1_1 in range(1, int(x_pm_0_0.eval())+1)])) x_pm_0_2 = pm.MvNormal('x_pm_0_2', mu=v_0_3, cov=v_0_4, shape=(int(x_pm_0_0.eval())-1+1)) t0 = time.time() trace = pm.sample(draws=500,step=None,init="auto",start=None,tune=500,chains=None) sys.stderr.write('PyMC3 took %fs\n' % (time.time() - t0)) print(map(list, zip(trace['x_pm_0_2'].tolist())))
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [x_pm_0_2] 0%| | 0/1000 [00:00<?, ?it/s] 0%| | 1/1000 [00:00<03:44, 4.45it/s] 3%|3 | 32/1000 [00:00<02:33, 6.32it/s] 6%|6 | 63/1000 [00:00<01:44, 8.95it/s] 10%|9 | 95/1000 [00:00<01:11, 12.63it/s] 13%|#3 | 130/1000 [00:00<00:48, 17.76it/s] 17%|#7 | 170/1000 [00:00<00:33, 24.89it/s] 21%|##1 | 212/1000 [00:00<00:22, 34.64it/s] 25%|##5 | 252/1000 [00:00<00:15, 47.67it/s] 29%|##9 | 291/1000 [00:01<00:10, 64.69it/s] 33%|###3 | 330/1000 [00:01<00:07, 86.21it/s] 37%|###6 | 367/1000 [00:01<00:05, 111.07it/s] 41%|#### | 407/1000 [00:01<00:04, 141.74it/s] 45%|####4 | 448/1000 [00:01<00:03, 176.30it/s] 49%|####8 | 488/1000 [00:01<00:02, 211.60it/s] 54%|#####3 | 536/1000 [00:01<00:01, 253.98it/s] 58%|#####8 | 581/1000 [00:01<00:01, 291.26it/s] 63%|######2 | 626/1000 [00:01<00:01, 325.20it/s] 67%|######7 | 671/1000 [00:01<00:00, 354.20it/s] 72%|#######2 | 721/1000 [00:02<00:00, 386.59it/s] 77%|#######6 | 767/1000 [00:02<00:00, 404.44it/s] 82%|########1 | 816/1000 [00:02<00:00, 423.69it/s] 86%|########6 | 864/1000 [00:02<00:00, 438.57it/s] 91%|#########1| 911/1000 [00:02<00:00, 443.34it/s] 96%|#########5| 958/1000 [00:02<00:00, 438.53it/s] 100%|##########| 1000/1000 [00:02<00:00, 376.22it/s]
[0.19914907217025757,0.27238166332244873,0.3973581790924072,0.9212682247161865,1.5233206562697887e-2]
import Language.Edward
-- Print the Edward (TensorFlow) program generated from the posterior:
-- 1000 samples, 10 leapfrog steps per HMC transition, step size 0.3;
-- Nothing means no initial values are supplied.
putStrLn $ edProgram 1000 10 0.3 posterior Nothing
-- Run the same model via the Edward HMC backend with those settings.
samples <- hmcEdward 1000 10 0.3 posterior Nothing
-- Show the last of the returned posterior samples.
print $ last samples
import sys import time from collections import OrderedDict import edward as ed import numpy as np import tensorflow as tf def all_equal(x,y): return tf.reduce_all(tf.equal(x,y)) def ascolumn(a): return tf.reshape(a, (-1, 1)) def asrow(a): return tf.reshape(a, ( 1,-1)) def samples(inference, latent): s = [] for x,q in latent.items(): try: x_unconstrained = inference.transformations[x] params = x_unconstrained.bijector.inverse(q.params) except KeyError, AttributeError: sys.stderr.write(str(x) + ' is already unconstrained\n') params = q.params s.append(params.eval().tolist()) return map(list, zip(*s)) with tf.Session().as_default(): x_ed_0_0 = tf.constant(np.load('x_ed_0_0.npy'), dtype=tf.int32) x_ed_0_0 = tf.constant(np.load('x_ed_0_0.npy'), dtype=tf.float32) x_ed_0_1 = tf.constant(np.load('x_ed_0_1.npy'), dtype=tf.float32) v_0_3 = 0.0 * tf.ones([int(x_ed_0_0.eval())], dtype=tf.float32) def v_0_4_fn(i_1_1, i_1_2): v_1_0 = x_ed_0_1[i_1_1-1] - x_ed_0_1[i_1_2-1] v_1_1 = v_1_0 * v_1_0 v_1_2 = v_1_1 / 2.0 v_1_3 = -(v_1_2) v_1_4 = tf.exp(v_1_3) v_1_5 = all_equal(x_ed_0_1[i_1_1-1], x_ed_0_1[i_1_2-1]) v_1_6 = tf.cond(v_1_5, lambda: 1.0e-6, lambda: 0.0) v_1_7 = v_1_4 + v_1_6 return v_1_7 v_0_4 = tf.stack([[v_0_4_fn(i_1_1, i_1_2) for i_1_2 in xrange(1, int(x_ed_0_0.eval())+1)] for i_1_1 in xrange(1, int(x_ed_0_0.eval())+1)]) x_ed_0_2 = ed.models.MultivariateNormalFullCovariance(loc=v_0_3, covariance_matrix=v_0_4) dim_x_ed_0_2 = [int(x_ed_0_0.eval())-1+1] latent = OrderedDict([(x_ed_0_2, ed.models.Empirical(params=tf.Variable(tf.zeros([1000] + dim_x_ed_0_2))))]) data = {} inference = ed.HMC(latent, data) stdout = sys.stdout; sys.stdout = sys.stderr t0 = time.time() inference.run(step_size=0.3,n_steps=10,auto_transform=True) print('Edward took %fs' % (time.time() - t0)) sys.stdout = stdout print(samples(inference, latent))
2019-09-05 16:35:53.145652: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations. 2019-09-05 16:35:53.145715: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations. 2019-09-05 16:35:53.145864: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations. 1/1000 [ 0%] ETA: 6943s | Acceptance Rate: nan 10/1000 [ 1%] ETA: 689s | Acceptance Rate: 1.000 20/1000 [ 2%] ETA: 342s | Acceptance Rate: 1.000 30/1000 [ 3%] ETA: 226s | Acceptance Rate: 1.000 40/1000 [ 4%] █ ETA: 168s | Acceptance Rate: 1.000 50/1000 [ 5%] █ ETA: 133s | Acceptance Rate: 1.000 60/1000 [ 6%] █ ETA: 110s | Acceptance Rate: 1.000 70/1000 [ 7%] ██ ETA: 93s | Acceptance Rate: 1.000 90/1000 [ 9%] ██ ETA: 71s | Acceptance Rate: 1.000 100/1000 [ 10%] ███ ETA: 63s | Acceptance Rate: 0.990 110/1000 [ 11%] ███ ETA: 57s | Acceptance Rate: 0.991 120/1000 [ 12%] ███ ETA: 52s | Acceptance Rate: 0.983 130/1000 [ 13%] ███ ETA: 47s | Acceptance Rate: 0.969 140/1000 [ 14%] ████ ETA: 43s | Acceptance Rate: 0.964 150/1000 [ 15%] ████ ETA: 40s | Acceptance Rate: 0.953 160/1000 [ 16%] ████ ETA: 37s | Acceptance Rate: 0.956 170/1000 [ 17%] █████ ETA: 35s | Acceptance Rate: 0.953 180/1000 [ 18%] █████ ETA: 32s | Acceptance Rate: 0.955 190/1000 [ 19%] █████ ETA: 30s | Acceptance Rate: 0.952 200/1000 [ 20%] ██████ ETA: 28s | Acceptance Rate: 0.955 210/1000 [ 21%] ██████ ETA: 27s | Acceptance Rate: 0.957 220/1000 [ 22%] ██████ ETA: 25s | Acceptance Rate: 0.954 230/1000 [ 23%] ██████ ETA: 24s | Acceptance Rate: 0.956 240/1000 [ 24%] ███████ ETA: 22s | Acceptance Rate: 0.958 250/1000 [ 25%] ███████ ETA: 21s | Acceptance 
Rate: 0.960 260/1000 [ 26%] ███████ ETA: 20s | Acceptance Rate: 0.950 270/1000 [ 27%] ████████ ETA: 19s | Acceptance Rate: 0.948 280/1000 [ 28%] ████████ ETA: 18s | Acceptance Rate: 0.950 290/1000 [ 28%] ████████ ETA: 17s | Acceptance Rate: 0.952 310/1000 [ 31%] █████████ ETA: 16s | Acceptance Rate: 0.951 320/1000 [ 32%] █████████ ETA: 15s | Acceptance Rate: 0.953 330/1000 [ 33%] █████████ ETA: 14s | Acceptance Rate: 0.951 340/1000 [ 34%] ██████████ ETA: 14s | Acceptance Rate: 0.953 350/1000 [ 35%] ██████████ ETA: 13s | Acceptance Rate: 0.954 360/1000 [ 36%] ██████████ ETA: 13s | Acceptance Rate: 0.955 370/1000 [ 37%] ███████████ ETA: 12s | Acceptance Rate: 0.954 380/1000 [ 38%] ███████████ ETA: 12s | Acceptance Rate: 0.955 400/1000 [ 40%] ████████████ ETA: 11s | Acceptance Rate: 0.955 410/1000 [ 41%] ████████████ ETA: 10s | Acceptance Rate: 0.956 420/1000 [ 42%] ████████████ ETA: 10s | Acceptance Rate: 0.955 430/1000 [ 43%] ████████████ ETA: 9s | Acceptance Rate: 0.956 440/1000 [ 44%] █████████████ ETA: 9s | Acceptance Rate: 0.957 450/1000 [ 45%] █████████████ ETA: 9s | Acceptance Rate: 0.958 460/1000 [ 46%] █████████████ ETA: 8s | Acceptance Rate: 0.959 470/1000 [ 47%] ██████████████ ETA: 8s | Acceptance Rate: 0.959 480/1000 [ 48%] ██████████████ ETA: 8s | Acceptance Rate: 0.958 490/1000 [ 49%] ██████████████ ETA: 7s | Acceptance Rate: 0.959 500/1000 [ 50%] ███████████████ ETA: 7s | Acceptance Rate: 0.958 510/1000 [ 51%] ███████████████ ETA: 7s | Acceptance Rate: 0.957 520/1000 [ 52%] ███████████████ ETA: 7s | Acceptance Rate: 0.958 530/1000 [ 53%] ███████████████ ETA: 6s | Acceptance Rate: 0.958 540/1000 [ 54%] ████████████████ ETA: 6s | Acceptance Rate: 0.959 550/1000 [ 55%] ████████████████ ETA: 6s | Acceptance Rate: 0.958 560/1000 [ 56%] ████████████████ ETA: 6s | Acceptance Rate: 0.959 570/1000 [ 56%] █████████████████ ETA: 5s | Acceptance Rate: 0.960 580/1000 [ 57%] █████████████████ ETA: 5s | Acceptance Rate: 0.960 590/1000 [ 59%] █████████████████ ETA: 5s 
| Acceptance Rate: 0.959 600/1000 [ 60%] ██████████████████ ETA: 5s | Acceptance Rate: 0.958 610/1000 [ 61%] ██████████████████ ETA: 4s | Acceptance Rate: 0.959 620/1000 [ 62%] ██████████████████ ETA: 4s | Acceptance Rate: 0.960 630/1000 [ 63%] ██████████████████ ETA: 4s | Acceptance Rate: 0.959 640/1000 [ 64%] ███████████████████ ETA: 4s | Acceptance Rate: 0.959 650/1000 [ 65%] ███████████████████ ETA: 4s | Acceptance Rate: 0.960 660/1000 [ 66%] ███████████████████ ETA: 4s | Acceptance Rate: 0.961 670/1000 [ 67%] ████████████████████ ETA: 3s | Acceptance Rate: 0.961 680/1000 [ 68%] ████████████████████ ETA: 3s | Acceptance Rate: 0.959 690/1000 [ 69%] ████████████████████ ETA: 3s | Acceptance Rate: 0.959 700/1000 [ 70%] █████████████████████ ETA: 3s | Acceptance Rate: 0.959 710/1000 [ 71%] █████████████████████ ETA: 3s | Acceptance Rate: 0.959 720/1000 [ 72%] █████████████████████ ETA: 3s | Acceptance Rate: 0.960 730/1000 [ 73%] █████████████████████ ETA: 2s | Acceptance Rate: 0.960 740/1000 [ 74%] ██████████████████████ ETA: 2s | Acceptance Rate: 0.961 750/1000 [ 75%] ██████████████████████ ETA: 2s | Acceptance Rate: 0.961 760/1000 [ 76%] ██████████████████████ ETA: 2s | Acceptance Rate: 0.962 770/1000 [ 77%] ███████████████████████ ETA: 2s | Acceptance Rate: 0.962 780/1000 [ 78%] ███████████████████████ ETA: 2s | Acceptance Rate: 0.961 790/1000 [ 79%] ███████████████████████ ETA: 2s | Acceptance Rate: 0.958 800/1000 [ 80%] ████████████████████████ ETA: 2s | Acceptance Rate: 0.957 810/1000 [ 81%] ████████████████████████ ETA: 1s | Acceptance Rate: 0.957 820/1000 [ 82%] ████████████████████████ ETA: 1s | Acceptance Rate: 0.957 830/1000 [ 83%] ████████████████████████ ETA: 1s | Acceptance Rate: 0.955 840/1000 [ 84%] █████████████████████████ ETA: 1s | Acceptance Rate: 0.955 850/1000 [ 85%] █████████████████████████ ETA: 1s | Acceptance Rate: 0.955 860/1000 [ 86%] █████████████████████████ ETA: 1s | Acceptance Rate: 0.955 870/1000 [ 87%] ██████████████████████████ 
ETA: 1s | Acceptance Rate: 0.955 880/1000 [ 88%] ██████████████████████████ ETA: 1s | Acceptance Rate: 0.953 890/1000 [ 89%] ██████████████████████████ ETA: 1s | Acceptance Rate: 0.954 900/1000 [ 90%] ███████████████████████████ ETA: 0s | Acceptance Rate: 0.954 910/1000 [ 91%] ███████████████████████████ ETA: 0s | Acceptance Rate: 0.954 920/1000 [ 92%] ███████████████████████████ ETA: 0s | Acceptance Rate: 0.953 930/1000 [ 93%] ███████████████████████████ ETA: 0s | Acceptance Rate: 0.954 940/1000 [ 94%] ████████████████████████████ ETA: 0s | Acceptance Rate: 0.953 950/1000 [ 95%] ████████████████████████████ ETA: 0s | Acceptance Rate: 0.954 960/1000 [ 96%] ████████████████████████████ ETA: 0s | Acceptance Rate: 0.954 970/1000 [ 97%] █████████████████████████████ ETA: 0s | Acceptance Rate: 0.954 980/1000 [ 98%] █████████████████████████████ ETA: 0s | Acceptance Rate: 0.953 990/1000 [ 99%] █████████████████████████████ ETA: 0s | Acceptance Rate: 0.953 1000/1000 [100%] ██████████████████████████████ Elapsed: 8s | Acceptance Rate: 0.954
[2.4720120429992676,2.3773159980773926,0.5680612325668335,-3.847859799861908e-2,1.0919506549835205]