%matplotlib inline
import importlib, utils2; importlib.reload(utils2)
from utils2 import *
Using TensorFlow backend.
np.set_printoptions(4)
cfg = K.tf.ConfigProto(gpu_options={'allow_growth': True})
K.set_session(K.tf.Session(config=cfg))
A memory network is a network that can retain information; it can be trained on a structured story and will learn how to answer questions about said story.
This notebook contains an implementation of an end-to-end memory network trained on the Babi tasks dataset.
The code in this section is mainly taken from the babi-memnn example in the Keras repo.
The Babi dataset is a collection of tasks (or stories) that detail events in a particular format. At the end of each task is a question with a labelled answer.
This section shows how to construct the dataset from the raw data.
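For reference, the raw files look roughly like this (illustrative lines, not taken from the actual download): each sentence line starts with an ID, and question lines contain a tab-separated question, answer, and supporting-fact ID.

1 Mary moved to the bathroom.
2 John went to the hallway.
3 Where is Mary?	bathroom	1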
def tokenize(sent):
    # split on runs of non-word characters, keeping the punctuation as tokens
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
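A quick sanity check: tokenize splits on runs of non-word characters and keeps punctuation as separate tokens.

tokenize('John moved to the bathroom.')

['John', 'moved', 'to', 'the', 'bathroom', '.']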
This parser formats the story into a time-order labelled sequence of sentences, followed by the question and the labelled answer.
def parse_stories(lines):
    data = []
    story = []
    for line in lines:
        line = line.decode('utf-8').strip()
        nid, line = line.split(' ', 1)
        if int(nid) == 1: story = []
        if '\t' in line:
            q, a, supporting = line.split('\t')
            q = tokenize(q)
            substory = [[str(i)+":"]+x for i,x in enumerate(story) if x]
            data.append((substory, q, a))
            story.append('')
        else: story.append(tokenize(line))
    return data
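Applied to the illustrative lines above (parse_stories expects bytes, since we will read straight from the tar file), this gives:

parse_stories([b'1 Mary moved to the bathroom.',
               b'2 John went to the hallway.',
               b'3 Where is Mary?\tbathroom\t1'])

[([['0:', 'Mary', 'moved', 'to', 'the', 'bathroom', '.'],
   ['1:', 'John', 'went', 'to', 'the', 'hallway', '.']],
  ['Where', 'is', 'Mary', '?'],
  'bathroom')]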
Next we download and parse the data set.
path = get_file('babi-tasks-v1-2.tar.gz',
origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
tar = tarfile.open(path)
challenges = {
# QA1 with 10,000 samples
'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
# QA2 with 10,000 samples
'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
'two_supporting_facts_1k': 'tasks_1-20_v1-2/en/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
# challenge_type = 'two_supporting_facts_10k'
challenge = challenges[challenge_type]
def get_stories(f):
    data = parse_stories(f.readlines())
    return [(story, q, answer) for story, q, answer in data]
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))
Here we calculate upper bounds for the corpus — the most words in a sentence, the most sentences in a story, and so on — which will be useful later.
stories = train_stories + test_stories
story_maxlen = max((len(s) for x, _, _ in stories for s in x))
story_maxsents = max((len(x) for x, _, _ in stories))
query_maxlen = max(len(x) for _, x, _ in stories)
def do_flatten(el):
    return isinstance(el, collections.Iterable) and not isinstance(el, (str, bytes))

def flatten(l):
    for el in l:
        if do_flatten(el): yield from flatten(el)
        else: yield el
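For example, flatten turns an arbitrarily nested structure of tokens into a flat stream, which is what lets us build the vocabulary directly from the (story, question, answer) tuples:

list(flatten([['Mary', 'moved'], ['to', ['the', 'office']]]))

['Mary', 'moved', 'to', 'the', 'office']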
Create the vocabulary for the corpus and find its size, including a padding element.
vocab = sorted(set(flatten(stories)))
vocab.insert(0, '<PAD>')
vocab_size = len(vocab)
story_maxsents, vocab_size, story_maxlen, query_maxlen, len(train_stories), len(test_stories)
(10, 32, 8, 4, 10000, 1000)
Now the dataset is in the correct format.
Each task in the dataset contains a list of tokenized sentences ordered in time, followed by a question about the story with a given answer.
In the example below, we can go backward through the sentences to find the answer to the question "Where is Daniel?": sentence 12 is the last sentence to mention Daniel.
This task structure is called a "one supporting fact" structure, which means that we only need to find one sentence in the story to answer our question.
test_stories[534]
([['0:', 'Mary', 'moved', 'to', 'the', 'office', '.'], ['1:', 'John', 'moved', 'to', 'the', 'garden', '.'], ['3:', 'Sandra', 'moved', 'to', 'the', 'bedroom', '.'], ['4:', 'Sandra', 'went', 'back', 'to', 'the', 'office', '.'], ['6:', 'John', 'went', 'to', 'the', 'bedroom', '.'], ['7:', 'John', 'journeyed', 'to', 'the', 'garden', '.'], ['9:', 'Daniel', 'went', 'back', 'to', 'the', 'hallway', '.'], ['10:', 'John', 'journeyed', 'to', 'the', 'bedroom', '.'], ['12:', 'Daniel', 'journeyed', 'to', 'the', 'bathroom', '.'], ['13:', 'John', 'travelled', 'to', 'the', 'garden', '.']], ['Where', 'is', 'Daniel', '?'], 'bathroom')
Create an index mapping for the vocabulary.
word_idx = dict((c, i) for i, c in enumerate(vocab))
Next we vectorize our dataset by mapping words to their indices. We enforce consistent dimensions by padding vectors up to the upper bounds we calculated earlier, using our pad element.
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
    X = []; Xq = []; Y = []
    for story, query, answer in data:
        x = [[word_idx[w] for w in s] for s in story]
        xq = [word_idx[w] for w in query]
        y = [word_idx[answer]]
        X.append(x); Xq.append(xq); Y.append(y)
    return ([pad_sequences(x, maxlen=story_maxlen) for x in X],
            pad_sequences(Xq, maxlen=query_maxlen), np.array(Y))
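Note that pad_sequences left-pads with zeros by default (index 0 is our '<PAD>' token); for example:

pad_sequences([[3, 7], [4, 5, 6, 2]], maxlen=5)

array([[0, 0, 0, 3, 7],
       [0, 4, 5, 6, 2]], dtype=int32)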
inputs_train, queries_train, answers_train = vectorize_stories(train_stories,
word_idx, story_maxlen, query_maxlen)
inputs_test, queries_test, answers_test = vectorize_stories(test_stories,
word_idx, story_maxlen, query_maxlen)
def stack_inputs(inputs):
    # pad each story with all-zero "sentences" up to story_maxsents, then stack into one array
    for i,it in enumerate(inputs):
        inputs[i] = np.concatenate([it,
            np.zeros((story_maxsents-it.shape[0],story_maxlen), 'int')])
    return np.stack(inputs)
inputs_train = stack_inputs(inputs_train)
inputs_test = stack_inputs(inputs_test)
inputs_train.shape, inputs_test.shape
((10000, 10, 8), (1000, 10, 8))
Our inputs for Keras:
inps = [inputs_train, queries_train]
val_inps = [inputs_test, queries_test]
The approach to solving this task relies not only on word embeddings, but also on sentence embeddings.
The authors of the end-to-end memory network paper constructed sentence embeddings by simply summing the word embeddings; this might seem naive, but given how short these sentences are, we can expect the sum to capture the relevant information.
emb_dim = 20
parms = {'verbose': 2, 'callbacks': [TQDMNotebookCallback(leave_inner=False)]}
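Before building the Keras layers, here is a rough numpy sketch of the idea, with hypothetical random weights standing in for the learned embedding matrix: a bag-of-words sentence embedding is just the sum of the sentence's word vectors.

rand_emb = np.random.randn(vocab_size, emb_dim)               # stand-in for the learned embedding matrix
sent = ['Mary', 'moved', 'to', 'the', 'office', '.']
sent_emb = rand_emb[[word_idx[w] for w in sent]].sum(axis=0)  # bag-of-words sentence embedding
sent_emb.shape

(20,)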
We use TimeDistributed here to apply the embedding to every sentence in the story; the Lambda layer then sums the word embeddings within each sentence.
def emb_sent_bow(inp):
    emb = TimeDistributed(Embedding(vocab_size, emb_dim))(inp)
    return Lambda(lambda x: K.sum(x, 2))(emb)
The embedding works as desired; the raw input has 10 sentences of 8 words, and the output has 10 sentence embeddings of length 20.
inp_story = Input((story_maxsents, story_maxlen))
emb_story = emb_sent_bow(inp_story)
inp_story.shape, emb_story.shape
(TensorShape([Dimension(None), Dimension(10), Dimension(8)]), TensorShape([Dimension(None), Dimension(10), Dimension(20)]))
We do the same for the queries, omitting TimeDistributed since there is only one query per story. We use Reshape to give the query embedding the same rank as the story embeddings.
inp_q = Input((query_maxlen,))
emb_q = Embedding(vocab_size, emb_dim)(inp_q)
emb_q = Lambda(lambda x: K.sum(x, 1))(emb_q)
emb_q = Reshape((1, emb_dim))(emb_q)
inp_q.shape, emb_q.shape
(TensorShape([Dimension(None), Dimension(4)]), TensorShape([Dimension(None), Dimension(1), Dimension(20)]))
The actual memory network is incredibly simple.
x = merge([emb_story, emb_q], mode='dot', dot_axes=2)
x = Reshape((story_maxsents,))(x)
x = Activation('softmax')(x)
match = Reshape((story_maxsents,1))(x)
match.shape
TensorShape([Dimension(None), Dimension(10), Dimension(1)])
emb_c = emb_sent_bow(inp_story)
x = merge([match, emb_c], mode='dot', dot_axes=1)
response = Reshape((emb_dim,))(x)
res = Dense(vocab_size, activation='softmax')(response)
answer = Model([inp_story, inp_q], res)
answer.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
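In other words: the first merge computes a dot-product match between each sentence embedding and the query embedding, the softmax turns those scores into attention weights over the sentences, and the second merge takes the weighted sum of a second set of sentence embeddings. A rough numpy sketch of the same computation for a single story, with random stand-ins for the learned embeddings:

A = np.random.randn(story_maxsents, emb_dim)      # sentence embeddings used for matching (emb_story)
u = np.random.randn(emb_dim)                      # query embedding (emb_q)
C = np.random.randn(story_maxsents, emb_dim)      # second set of sentence embeddings (emb_c)

scores = A @ u                                    # dot-product match, shape (10,)
weights = np.exp(scores - scores.max())
weights /= weights.sum()                          # softmax over sentences
response = weights @ C                            # weighted sum, shape (emb_dim,)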
And it works extremely well:
K.set_value(answer.optimizer.lr, 1e-2)
hist=answer.fit(inps, answers_train, **parms, nb_epoch=4, batch_size=32,
validation_data=(val_inps, answers_test))
Train on 10000 samples, validate on 1000 samples
Epoch 1/4
1s - loss: 0.4320 - acc: 0.8527 - val_loss: 1.3040e-04 - val_acc: 1.0000
Epoch 2/4
1s - loss: 0.0036 - acc: 0.9993 - val_loss: 2.7034e-05 - val_acc: 1.0000
Epoch 3/4
1s - loss: 0.0118 - acc: 0.9979 - val_loss: 0.0045 - val_acc: 0.9970
Epoch 4/4
1s - loss: 0.0132 - acc: 0.9977 - val_loss: 2.1098e-05 - val_acc: 1.0000
We can look inside our model to see how it's weighting the sentence embeddings.
f = Model([inp_story, inp_q], match)
qnum=6
l_st = len(train_stories[qnum][0])+1
train_stories[qnum]
([['0:', 'Sandra', 'travelled', 'to', 'the', 'office', '.'], ['1:', 'Sandra', 'went', 'to', 'the', 'bathroom', '.'], ['3:', 'Mary', 'went', 'to', 'the', 'bedroom', '.'], ['4:', 'Daniel', 'moved', 'to', 'the', 'hallway', '.']], ['Where', 'is', 'Sandra', '?'], 'bathroom')
Sure enough, for the question "Where is Sandra?", the largest weight (0.98) is on sentence 1, the last sentence that mentions Sandra.
The second-highest weight is, of course, on sentence 0, which also mentions Sandra. But the model has learned that the last occurring sentence is what matters; this is why we prepended a counter to each sentence.
np.squeeze(f.predict([inputs_train[qnum:qnum+1], queries_train[qnum:qnum+1]]))[:l_st]
array([ 1.8035e-02, 9.8090e-01, 7.1948e-05, 9.8939e-04, 3.5892e-11], dtype=float32)
answers_train[qnum:qnum+10,0]
array([19, 19, 27, 22, 19, 20, 19, 19, 20, 20])
np.argmax(answer.predict([inputs_train[qnum:qnum+10], queries_train[qnum:qnum+10]]), 1)
array([19, 19, 27, 22, 19, 20, 19, 19, 20, 20])
answer.predict([inputs_train[qnum:qnum+1], queries_train[qnum:qnum+1]])
array([[ 2.7342e-12, 6.6743e-12, 8.4153e-12, 5.0037e-12, 6.6239e-12, 7.5889e-12, 1.7636e-11, 2.0748e-11, 1.4377e-11, 1.1266e-11, 8.6431e-12, 1.0421e-11, 1.6382e-11, 8.8259e-12, 8.7487e-12, 1.2887e-11, 8.8348e-12, 1.1246e-11, 4.5876e-12, 1.0000e+00, 4.7602e-10, 3.3521e-10, 6.1148e-10, 1.3745e-11, 1.4630e-11, 4.9215e-09, 1.2494e-11, 1.2910e-08, 4.1088e-12, 8.2288e-12, 8.0250e-12, 9.1950e-12]], dtype=float32)
vocab[19]
'bathroom'
Next, let's look at an example of a two-supporting-fact story. (To get one, switch challenge_type to 'two_supporting_facts_10k' above and re-run the data-processing steps; note the new input shapes below.)
test_stories[534]
([['0:', 'Mary', 'went', 'to', 'the', 'hallway', '.'], ['1:', 'Daniel', 'went', 'back', 'to', 'the', 'bedroom', '.'], ['2:', 'Sandra', 'went', 'back', 'to', 'the', 'garden', '.'], ['3:', 'Mary', 'went', 'to', 'the', 'office', '.'], ['4:', 'Mary', 'journeyed', 'to', 'the', 'kitchen', '.'], ['5:', 'Sandra', 'moved', 'to', 'the', 'office', '.'], ['6:', 'Sandra', 'journeyed', 'to', 'the', 'hallway', '.'], ['7:', 'Daniel', 'journeyed', 'to', 'the', 'garden', '.'], ['8:', 'Mary', 'journeyed', 'to', 'the', 'bathroom', '.'], ['9:', 'John', 'went', 'back', 'to', 'the', 'bathroom', '.'], ['10:', 'Sandra', 'travelled', 'to', 'the', 'garden', '.'], ['11:', 'John', 'moved', 'to', 'the', 'office', '.'], ['12:', 'Daniel', 'went', 'back', 'to', 'the', 'kitchen', '.'], ['13:', 'Mary', 'moved', 'to', 'the', 'kitchen', '.'], ['14:', 'Mary', 'moved', 'to', 'the', 'hallway', '.'], ['15:', 'Mary', 'went', 'to', 'the', 'kitchen', '.'], ['16:', 'Sandra', 'went', 'back', 'to', 'the', 'bedroom', '.'], ['17:', 'Sandra', 'travelled', 'to', 'the', 'hallway', '.'], ['18:', 'Sandra', 'travelled', 'to', 'the', 'kitchen', '.'], ['19:', 'Sandra', 'moved', 'to', 'the', 'garden', '.'], ['20:', 'Daniel', 'went', 'to', 'the', 'garden', '.'], ['21:', 'Sandra', 'went', 'back', 'to', 'the', 'bathroom', '.'], ['22:', 'John', 'moved', 'to', 'the', 'garden', '.'], ['23:', 'Mary', 'went', 'to', 'the', 'bathroom', '.'], ['24:', 'Daniel', 'travelled', 'to', 'the', 'kitchen', '.'], ['25:', 'John', 'went', 'back', 'to', 'the', 'hallway', '.'], ['26:', 'Sandra', 'went', 'back', 'to', 'the', 'hallway', '.'], ['27:', 'Mary', 'went', 'to', 'the', 'hallway', '.'], ['28:', 'Daniel', 'went', 'back', 'to', 'the', 'garden', '.'], ['29:', 'Sandra', 'went', 'back', 'to', 'the', 'office', '.'], ['30:', 'Sandra', 'moved', 'to', 'the', 'kitchen', '.'], ['31:', 'Mary', 'travelled', 'to', 'the', 'garden', '.'], ['32:', 'Sandra', 'went', 'to', 'the', 'garden', '.'], ['33:', 'Daniel', 'journeyed', 'to', 'the', 'hallway', '.'], ['34:', 'Mary', 'went', 'back', 'to', 'the', 'hallway', '.'], ['35:', 'Daniel', 'travelled', 'to', 'the', 'garden', '.'], ['36:', 'John', 'journeyed', 'to', 'the', 'bathroom', '.'], ['37:', 'Daniel', 'travelled', 'to', 'the', 'hallway', '.'], ['38:', 'Daniel', 'travelled', 'to', 'the', 'bedroom', '.'], ['39:', 'Mary', 'went', 'back', 'to', 'the', 'kitchen', '.'], ['40:', 'Daniel', 'went', 'to', 'the', 'office', '.'], ['41:', 'John', 'journeyed', 'to', 'the', 'hallway', '.'], ['42:', 'John', 'went', 'to', 'the', 'kitchen', '.'], ['43:', 'Daniel', 'travelled', 'to', 'the', 'hallway', '.'], ['44:', 'Sandra', 'went', 'back', 'to', 'the', 'kitchen', '.'], ['45:', 'Mary', 'moved', 'to', 'the', 'office', '.'], ['46:', 'Sandra', 'went', 'back', 'to', 'the', 'garden', '.'], ['47:', 'Sandra', 'went', 'back', 'to', 'the', 'kitchen', '.'], ['48:', 'Sandra', 'moved', 'to', 'the', 'garden', '.'], ['49:', 'Sandra', 'moved', 'to', 'the', 'office', '.'], ['50:', 'John', 'went', 'back', 'to', 'the', 'hallway', '.'], ['51:', 'Daniel', 'went', 'to', 'the', 'garden', '.'], ['52:', 'Sandra', 'travelled', 'to', 'the', 'kitchen', '.'], ['53:', 'Sandra', 'moved', 'to', 'the', 'bathroom', '.'], ['54:', 'John', 'journeyed', 'to', 'the', 'garden', '.'], ['55:', 'Mary', 'moved', 'to', 'the', 'hallway', '.'], ['56:', 'John', 'went', 'back', 'to', 'the', 'office', '.'], ['57:', 'Mary', 'went', 'back', 'to', 'the', 'office', '.'], ['58:', 'Daniel', 'travelled', 'to', 'the', 'bathroom', '.'], ['59:', 'Sandra', 'travelled', 'to', 'the', 'hallway', '.'], ['60:', 
'Sandra', 'journeyed', 'to', 'the', 'bathroom', '.'], ['61:', 'Sandra', 'travelled', 'to', 'the', 'bedroom', '.'], ['62:', 'Mary', 'went', 'back', 'to', 'the', 'hallway', '.'], ['63:', 'Sandra', 'travelled', 'to', 'the', 'kitchen', '.'], ['64:', 'Daniel', 'travelled', 'to', 'the', 'garden', '.'], ['65:', 'Daniel', 'journeyed', 'to', 'the', 'bedroom', '.'], ['66:', 'Mary', 'journeyed', 'to', 'the', 'office', '.'], ['67:', 'Sandra', 'went', 'back', 'to', 'the', 'office', '.'], ['68:', 'John', 'travelled', 'to', 'the', 'hallway', '.'], ['69:', 'Daniel', 'picked', 'up', 'the', 'milk', 'there', '.'], ['70:', 'Daniel', 'picked', 'up', 'the', 'apple', 'there', '.'], ['71:', 'Sandra', 'moved', 'to', 'the', 'hallway', '.'], ['72:', 'John', 'journeyed', 'to', 'the', 'bedroom', '.'], ['73:', 'John', 'went', 'back', 'to', 'the', 'garden', '.'], ['74:', 'Sandra', 'journeyed', 'to', 'the', 'office', '.'], ['75:', 'Sandra', 'moved', 'to', 'the', 'bedroom', '.'], ['76:', 'Mary', 'moved', 'to', 'the', 'kitchen', '.'], ['77:', 'Mary', 'went', 'to', 'the', 'office', '.'], ['78:', 'Sandra', 'grabbed', 'the', 'football', 'there', '.'], ['79:', 'Sandra', 'discarded', 'the', 'football', '.'], ['81:', 'Daniel', 'took', 'the', 'football', 'there', '.'], ['82:', 'Daniel', 'put', 'down', 'the', 'apple', 'there', '.'], ['84:', 'Daniel', 'took', 'the', 'apple', 'there', '.'], ['85:', 'Daniel', 'travelled', 'to', 'the', 'hallway', '.'], ['87:', 'Sandra', 'journeyed', 'to', 'the', 'bathroom', '.'], ['88:', 'Daniel', 'left', 'the', 'milk', 'there', '.'], ['90:', 'Daniel', 'went', 'to', 'the', 'kitchen', '.'], ['91:', 'Daniel', 'went', 'back', 'to', 'the', 'bathroom', '.']], ['Where', 'is', 'the', 'milk', '?'], 'hallway')
We can see that the question "Where is the milk?" requires two supporting facts to answer: "Daniel travelled to the hallway" and "Daniel left the milk there".
inputs_train.shape, inputs_test.shape
((10000, 88, 8), (1000, 88, 8))
The approach is basically the same; we add more embedding dimensions to account for the increased task complexity.
parms = {'verbose': 2, 'callbacks': [TQDMNotebookCallback(leave_inner=False)]}
emb_dim = 30
def emb_sent_bow(inp):
    emb_op = TimeDistributed(Embedding(vocab_size, emb_dim))
    emb = emb_op(inp)
    emb = Lambda(lambda x: K.sum(x, 2))(emb)
#     return Elemwise(0, False)(emb), emb_op
    return emb, emb_op
inp_story = Input((story_maxsents, story_maxlen))
inp_q = Input((query_maxlen,))
emb_story, emb_story_op = emb_sent_bow(inp_story)
# reuse the story's Embedding layer (inside the TimeDistributed wrapper) for the query
emb_q = emb_story_op.layer(inp_q)
emb_q = Lambda(lambda x: K.sum(x, 1))(emb_q)
h = Dense(emb_dim)
The main difference is that we are going to do the same process twice. We define a "hop" as an operation that takes a query-like vector, attends over the story's sentence embeddings, and returns a new query-like vector built from their weighted average.
def one_hop(u, A):
    C, _ = emb_sent_bow(inp_story)
    x = Reshape((1, emb_dim))(u)
    x = merge([A, x], mode='dot', dot_axes=2)
    x = Reshape((story_maxsents,))(x)
    x = Activation('softmax')(x)
    match = Reshape((story_maxsents,1))(x)
    x = merge([match, C], mode='dot', dot_axes=1)
    x = Reshape((emb_dim,))(x)
    x = h(x)
    x = merge([x, emb_q], 'sum')
    return x, C
We do one hop, then repeat the process, using the hop's output as the new query vector.
This works because the first hop lets the model find the first fact relevant to the query, and that fact can then be used to find the next fact that answers the question. In our example, the model would first find the last sentence that mentions "milk", then use the information in that fact to know that it next has to find the last occurrence of "Daniel".
This is facilitated by generating a new embedding function for the input story on each hop: the first embedding learns features that help locate the first fact from the query, and the second learns features that help locate the second fact from the first.
This approach can be extended to n-supporting-fact problems by doing n hops.
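A rough numpy sketch of the hop loop, with random stand-ins for the learned pieces, just to make the data flow explicit:

def np_softmax(v):
    e = np.exp(v - v.max())
    return e / e.sum()

q = np.random.randn(emb_dim)                          # query embedding
h_w = np.random.randn(emb_dim, emb_dim)               # stand-in for the shared Dense layer h
u, A = q, np.random.randn(story_maxsents, emb_dim)    # the first hop attends with the query itself
for hop in range(2):                                  # two hops for two-supporting-fact tasks
    C = np.random.randn(story_maxsents, emb_dim)      # fresh sentence embeddings each hop
    weights = np_softmax(A @ u)                       # soft attention over the sentences
    u = (weights @ C) @ h_w + q                       # next "query": transformed response plus the query
    A = C                                             # the next hop matches against this hop's C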
response, emb_story = one_hop(emb_q, emb_story)
response, emb_story = one_hop(response, emb_story)
# response, emb_story = one_hop(response, emb_story)
res = Dense(vocab_size, activation='softmax')(response)
answer = Model([inp_story, inp_q], res)
answer.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
Fitting this model can be tricky.
K.set_value(answer.optimizer.lr, 5e-3)
hist=answer.fit(inps, answers_train, **parms, nb_epoch=8, batch_size=32,
validation_data=(val_inps, answers_test))
Train on 10000 samples, validate on 1000 samples
Epoch 1/8
2s - loss: 1.7092 - acc: 0.2788 - val_loss: 1.3558 - val_acc: 0.4790
Epoch 2/8
2s - loss: 0.9756 - acc: 0.6360 - val_loss: 0.7827 - val_acc: 0.7020
Epoch 3/8
2s - loss: 0.6668 - acc: 0.7574 - val_loss: 0.7171 - val_acc: 0.7650
Epoch 4/8
2s - loss: 0.5795 - acc: 0.7955 - val_loss: 0.6547 - val_acc: 0.7790
Epoch 5/8
2s - loss: 0.5244 - acc: 0.8205 - val_loss: 0.5644 - val_acc: 0.8050
Epoch 6/8
2s - loss: 0.4731 - acc: 0.8476 - val_loss: 0.5577 - val_acc: 0.8210
Epoch 7/8
2s - loss: 0.4471 - acc: 0.8563 - val_loss: 0.5086 - val_acc: 0.8500
Epoch 8/8
2s - loss: 0.4274 - acc: 0.8686 - val_loss: 0.4933 - val_acc: 0.8480
np.array(hist.history['val_acc'])
array([ 0.976, 0.993, 1. , 0.999, 0.998, 0.999, 0.992, 0.999])
Finally, here is the custom Elemwise layer referenced in the commented-out line in emb_sent_bow above. It learns a single vector along one axis of its input and either multiplies it in or adds it on elementwise; Elemwise(0, False) would add a learned per-sentence bias, similar in spirit to the temporal encoding in the end-to-end memory network paper.
class Elemwise(Layer):
    def __init__(self, axis, is_mult, init='glorot_uniform', **kwargs):
        self.init = initializations.get(init)
        self.axis = axis
        self.is_mult = is_mult
        super(Elemwise, self).__init__(**kwargs)

    def build(self, input_shape):
        input_dims = input_shape[1:]
        dims = [1] * len(input_dims)
        dims[self.axis] = input_dims[self.axis]
        self.b = self.add_weight(dims, self.init, '{}_bo'.format(self.name))
        self.built = True

    def call(self, x, mask=None):
        return x * self.b if self.is_mult else x + self.b

    def get_output_shape_for(self, input_shape):
        return input_shape

    def get_config(self):
        config = {'init': self.init.__name__, 'axis': self.axis}
        base_config = super(Elemwise, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
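In numpy terms the layer computes roughly the following (shapes assume the commented-out Elemwise(0, False) usage on the (sentences, emb_dim) sentence embeddings; b is the single learned weight):

x = np.random.randn(32, story_maxsents, emb_dim)   # (batch, sentences, emb_dim)
b = np.random.randn(story_maxsents, 1)             # one learned value per sentence position (axis 0 of the per-sample shape)
out_add = x + b                                    # is_mult=False: per-position bias, broadcast over batch and emb_dim
out_mult = x * b                                   # is_mult=True: per-position gain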