In [1]:
import pymc3 as pm
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

%matplotlib inline
%config InlineBackend.figure_format = 'retina'
%qtconsole --colors=linux
plt.style.use('ggplot')

from matplotlib import gridspec
from theano import tensor as tt
from scipy import stats


# Chapter 15 - The SIMPLE model of memory

## 15.1 The SIMPLE model

Brown, Neath, and Chater (2007) proposed the SIMPLE (Scale-Invariant Memory, Perception, and LEarning) model, which, among other applications, has been applied to the basic memory phenomenon of free recall.

$$c_x \sim \text{Uniform}(0,100)$$
$$s_x \sim \text{Uniform}(0,100)$$
$$t_x \sim \text{Uniform}(0,1)$$

$$\eta_{ijx} = \exp(-c_x\,\lvert \log T_{ix} - \log T_{jx}\rvert)$$
$$d_{ijx} = \frac{\eta_{ijx}}{\sum_{k}\eta_{ikx}}$$
$$r_{ijx} = \frac{1}{1+\exp(-s_{x}(d_{ijx}-t_{x}))}$$
$$\theta_{ix} = \min\Big(1,\sum_{k}r_{ikx}\Big)$$
$$y_{ix} \sim \text{Binomial}(\theta_{ix}, n_{x})$$
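
To make these equations concrete before the Theano implementation, here is a minimal NumPy sketch of the deterministic part of the model for a single dataset. The presentation schedule mirrors the first condition loaded below (list length 10, lag 2, offset 15); the parameter values c, s, t are made up purely for illustration.

import numpy as np

T = 15 + np.arange(10 * 2, 0, -2)   # retention intervals: 35, 33, ..., 17
c, s, t = 20.0, 10.0, 0.5           # hypothetical parameter values, not estimates

logT = np.log(T)
eta = np.exp(-c * np.abs(logT[:, None] - logT[None, :]))  # pairwise similarities
d = eta / eta.sum(axis=1, keepdims=True)                  # discriminabilities
r = 1 / (1 + np.exp(-s * (d - t)))                        # retrieval probabilities
theta = np.minimum(1, r.sum(axis=1))                      # recall probability per position
print(theta.round(3))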

The SIMPLE model can be corrected by replacing $\theta_{ix} = \min(1,\sum_{k}r_{ikx})$ with $\theta_{ix} = 1-\prod_{k}(1-r_{ikx})$ (see Box 15.2 on page 200); a toy comparison of the two formulations follows.
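
The difference is easy to see numerically: the min formulation clips $\theta$ to exactly 1 as soon as the retrieval probabilities sum past 1, while the product form treats the $r_{ikx}$ as independent retrieval chances and stays strictly below 1. A quick check with made-up probabilities:

r_row = np.array([0.6, 0.3, 0.2, 0.1])  # hypothetical retrieval probabilities
theta_min = min(1, r_row.sum())         # -> 1.0 (hard ceiling)
theta_prod = 1 - np.prod(1 - r_row)     # -> 0.798 (at least one successful retrieval)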

In [2]:
y = pd.read_csv('data/k_M.txt', sep=',', header=None)
n = np.array([1440, 1280, 1520, 1520, 1200, 1280])
listlength = np.array([10, 15, 20, 20, 30, 40])
lagall = np.array([2, 2, 2, 1, 1, 1])
offset = np.array([15, 20, 25, 10, 15, 20])
dsets = 6
m = np.zeros(np.shape(y))
for i in range(dsets):
    m[i, 0:listlength[i]] = offset[i] + np.arange(listlength[i]*lagall[i], 0, -lagall[i])
pc = np.asarray(y) / n[:, np.newaxis]  # observed proportion correct per serial position
pcmat = pc.T
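
As a quick sanity check (not part of the original notebook), the first row of m should hold the retention intervals of the 10-item, lag-2 list, i.e. the offset 15 plus 20, 18, ..., 2, and pcmat should be items by datasets:

print(m[0, :listlength[0]])  # expected: [35. 33. 31. 29. 27. 25. 23. 21. 19. 17.]
print(pcmat.shape)           # (40, 6) if k_M.txt is padded to the longest list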

In [4]:
ymat = np.asarray(y)
nitem = m.shape[1]
m2 = m.copy()       # copy so the zero padding in m is preserved for later cells
m2[m2 == 0] = 1e-5  # to avoid NaN in ADVI
nmat = np.repeat(n[:, np.newaxis], nitem, axis=1)
mmat1 = np.repeat(m2[:, :, np.newaxis], nitem, axis=2)
mmat2 = np.transpose(mmat1, (0, 2, 1))

with pm.Model() as simple1:
    cx = pm.Uniform('cx', lower=0, upper=100, shape=dsets, testval=np.ones(dsets)*20)
    sx = pm.Uniform('sx', lower=0, upper=100, shape=dsets)
    tx = pm.Uniform('tx', lower=0, upper=1, shape=dsets)

    # Similarities
    eta = tt.exp(-cx[:, np.newaxis, np.newaxis]*abs(tt.log(mmat1)-tt.log(mmat2)))
    etasum = tt.reshape(tt.repeat(tt.sum(eta, axis=2), nitem), (dsets, nitem, nitem))

    # Discriminabilities
    disc = eta/etasum

    # Response Probabilities
    resp = 1/(1+tt.exp(-sx[:, np.newaxis, np.newaxis]*(disc-tx[:, np.newaxis, np.newaxis])))

    # Free Recall Overall Response Probability (product-form correction, Box 15.2)
    # theta = tt.clip(tt.sum(resp, axis=2), 0., .999)
    theta = 1-tt.prod(1-resp, axis=2)

    # Observed recall counts and sampling
    yobs = pm.Binomial('yobs', p=theta, n=nmat, observed=ymat)
    trace = pm.sample(3e3)

pm.traceplot(trace, varnames=['cx', 'sx', 'tx']);

Auto-assigning NUTS sampler...
Average Loss = 1,437.6:  18%|█▊        | 35485/200000 [01:32<07:10, 382.20it/s]
Convergence archived at 35500
Interrupted at 35,500 [17%]: Average Loss = 28,068
100%|██████████| 3500/3500.0 [20:53<00:00,  3.00it/s]
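
As a numerical companion to the trace plots, pm.summary reports posterior means, standard deviations, and credible intervals for the same variables (output omitted here):

pm.summary(trace, varnames=['cx', 'sx', 'tx'])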

In [5]:
ymat = np.asarray(y).T
mmat = m.T

with pm.Model() as simple2:
    cx = pm.Uniform('cx', lower=0, upper=100, shape=dsets, testval=np.ones(dsets)*20)
    sx = pm.Uniform('sx', lower=0, upper=100, shape=dsets)
    tx = pm.Uniform('tx', lower=0, upper=1, shape=dsets)

    yobs = []
    for x in range(dsets):
        sz = listlength[x]
        # Similarities
        m1 = np.array([mmat[0:sz, x], ]*sz).T
        m2 = np.array([mmat[0:sz, x], ]*sz)

        eta = tt.exp(-cx[x]*abs(tt.log(m1)-tt.log(m2)))
        etasum = tt.reshape(tt.repeat(tt.sum(eta, axis=1), sz), (sz, sz))
        # Discriminabilities
        disc = eta/etasum
        # Response Probabilities
        resp = 1/(1+tt.exp(-sx[x]*(disc-tx[x])))
        # Free Recall Overall Response Probability
        theta = tt.clip(tt.sum(resp, axis=1), 0, .999)
        # theta = 1-tt.prod(1-resp, axis=1)

        # One Binomial likelihood per dataset, over its valid list positions
        yobs.append(pm.Binomial('yobs_%x' % x, p=theta, n=n[x], observed=ymat[0:sz, x]))

    trace1 = pm.sample(3e3)


Auto-assigning NUTS sampler...
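
One way to inspect the fit (a sketch, not the notebook's original plotting code) is to push the posterior-mean parameters back through the deterministic part of the model and overlay the predicted serial-position curves on the observed proportion correct in pcmat; the min formulation is used here to match the clip applied in the loop model above.

def simple_theta(T, c, s, t):
    # deterministic part of SIMPLE for one dataset
    logT = np.log(T)
    eta = np.exp(-c * np.abs(logT[:, None] - logT[None, :]))
    d = eta / eta.sum(axis=1, keepdims=True)
    r = 1 / (1 + np.exp(-s * (d - t)))
    return np.minimum(1, r.sum(axis=1))

fig, axes = plt.subplots(2, 3, figsize=(12, 6))
for x, ax in enumerate(axes.flatten()):
    sz = listlength[x]
    th = simple_theta(m[x, :sz], trace1['cx'][:, x].mean(),
                      trace1['sx'][:, x].mean(), trace1['tx'][:, x].mean())
    ax.plot(np.arange(sz) + 1, pcmat[:sz, x], 'o')  # observed proportion correct
    ax.plot(np.arange(sz) + 1, th, '-')             # posterior-mean prediction
    ax.set_ylim(0, 1)
plt.tight_layout();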