Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
Demo of a bidirectional RNN for sentiment classification (here: a binary classification problem with the two labels "positive" and "negative") using LSTM (Long Short-Term Memory) cells.
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
import torch
import torch.nn.functional as F
from torchtext import data
from torchtext import datasets
import time
import random
torch.backends.cudnn.deterministic = True
Sebastian Raschka
CPython 3.7.1
IPython 7.4.0
torch 1.0.1.post2
RANDOM_SEED = 123
torch.manual_seed(RANDOM_SEED)
VOCABULARY_SIZE = 20000
LEARNING_RATE = 1e-4
BATCH_SIZE = 128
NUM_EPOCHS = 15
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BIDIRECTIONAL = True
EMBEDDING_DIM = 128
NUM_LAYERS = 2
HIDDEN_DIM = 128
OUTPUT_DIM = 1
Load the IMDB Movie Review dataset:
TEXT = data.Field(tokenize='spacy',
                  include_lengths=True)  # necessary for packed_padded_sequence
LABEL = data.LabelField(dtype=torch.float)
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
train_data, valid_data = train_data.split(random_state=random.seed(RANDOM_SEED),
                                          split_ratio=0.8)
print(f'Num Train: {len(train_data)}')
print(f'Num Valid: {len(valid_data)}')
print(f'Num Test: {len(test_data)}')
Num Train: 20000
Num Valid: 5000
Num Test: 25000
Build the vocabulary based on the top "VOCABULARY_SIZE" words:
TEXT.build_vocab(train_data, max_size=VOCABULARY_SIZE)
LABEL.build_vocab(train_data)
print(f'Vocabulary size: {len(TEXT.vocab)}')
print(f'Number of classes: {len(LABEL.vocab)}')
Vocabulary size: 20002
Number of classes: 2
The TEXT.vocab dictionary contains the word counts and indices. The reason why the number of words is VOCABULARY_SIZE + 2 is that it contains two special tokens for unknown words and padding: <unk> and <pad>.
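For a quick sanity check, the vocabulary object can be inspected directly. This is only an illustrative sketch -- the exact tokens, counts, and index assignments depend on the tokenization and the random split:
# Inspect the vocabulary built by torchtext (printed values are illustrative):
print(TEXT.vocab.freqs.most_common(5))   # most frequent tokens with their raw counts
print(TEXT.vocab.itos[:5])               # index -> token; starts with '<unk>' and '<pad>'
print(TEXT.vocab.stoi['the'])            # token -> index
print(LABEL.vocab.stoi)                  # label -> index mapping, e.g. {'neg': 0, 'pos': 1}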
Make dataset iterators:
train_loader, valid_loader, test_loader = data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size=BATCH_SIZE,
    sort_within_batch=True,  # necessary for packed_padded_sequence
    device=DEVICE)
Testing the iterators (note that the number of rows depends on the longest document in the respective batch):
print('Train')
for batch in train_loader:
    print(f'Text matrix size: {batch.text[0].size()}')
    print(f'Target vector size: {batch.label.size()}')
    break

print('\nValid:')
for batch in valid_loader:
    print(f'Text matrix size: {batch.text[0].size()}')
    print(f'Target vector size: {batch.label.size()}')
    break

print('\nTest:')
for batch in test_loader:
    print(f'Text matrix size: {batch.text[0].size()}')
    print(f'Target vector size: {batch.label.size()}')
    break
Train
Text matrix size: torch.Size([132, 128])
Target vector size: torch.Size([128])

Valid:
Text matrix size: torch.Size([61, 128])
Target vector size: torch.Size([128])

Test:
Text matrix size: torch.Size([42, 128])
Target vector size: torch.Size([128])
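Because the TEXT field was created with include_lengths=True, each batch.text is a tuple of (padded token-index matrix, sequence lengths). A minimal check (output omitted):
# batch.text is a (padded_sequences, lengths) tuple because of include_lengths=True
for batch in train_loader:
    padded_sequences, seq_lengths = batch.text
    print(padded_sequences.size())  # [longest sequence in batch, BATCH_SIZE]
    print(seq_lengths.size())       # [BATCH_SIZE]
    break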
import torch.nn as nn
class RNN(nn.Module):

    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        self.rnn = nn.LSTM(embedding_dim,
                           hidden_dim,
                           num_layers=NUM_LAYERS,
                           bidirectional=BIDIRECTIONAL)
        self.fc = nn.Linear(hidden_dim*2, output_dim)

    def forward(self, text, text_length):
        # [sentence len, batch size] => [sentence len, batch size, embedding size]
        embedded = self.embedding(text)
        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, text_length)
        packed_output, (hidden, cell) = self.rnn(packed)
        # combine the final hidden states of both directions
        combined = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        return self.fc(combined.squeeze(0)).view(-1)
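The hidden tensor returned by nn.LSTM has shape [num layers * num directions, batch size, hidden dim], with the forward and backward states of the topmost layer stored last; this is why forward() concatenates hidden[-2] and hidden[-1]. A small standalone sketch (shapes only, random inputs, not part of the model):
# Illustration of the bidirectional hidden-state layout with NUM_LAYERS=2:
_lstm = nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM, num_layers=NUM_LAYERS, bidirectional=True)
_dummy = torch.randn(10, 3, EMBEDDING_DIM)  # [sequence length, batch size, embedding dim]
_, (_hidden, _cell) = _lstm(_dummy)
print(_hidden.size())                                        # [NUM_LAYERS*2, 3, HIDDEN_DIM] -> [4, 3, 128]
print(torch.cat((_hidden[-2], _hidden[-1]), dim=1).size())   # [3, HIDDEN_DIM*2] -> [3, 256]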
INPUT_DIM = len(TEXT.vocab)
torch.manual_seed(RANDOM_SEED)
model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)
model = model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
def compute_binary_accuracy(model, data_loader, device):
    model.eval()
    correct_pred, num_examples = 0, 0
    with torch.no_grad():
        for batch_idx, batch_data in enumerate(data_loader):
            text, text_lengths = batch_data.text
            logits = model(text, text_lengths)
            predicted_labels = (torch.sigmoid(logits) > 0.5).long()
            num_examples += batch_data.label.size(0)
            correct_pred += (predicted_labels == batch_data.label.long()).sum()
        return correct_pred.float()/num_examples * 100
start_time = time.time()

for epoch in range(NUM_EPOCHS):
    model.train()
    for batch_idx, batch_data in enumerate(train_loader):

        text, text_lengths = batch_data.text

        ### FORWARD AND BACK PROP
        logits = model(text, text_lengths)
        cost = F.binary_cross_entropy_with_logits(logits, batch_data.label)
        optimizer.zero_grad()
        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### LOGGING
        if not batch_idx % 50:
            print(f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d} | '
                  f'Batch {batch_idx:03d}/{len(train_loader):03d} | '
                  f'Cost: {cost:.4f}')

    with torch.set_grad_enabled(False):
        print(f'training accuracy: '
              f'{compute_binary_accuracy(model, train_loader, DEVICE):.2f}%'
              f'\nvalid accuracy: '
              f'{compute_binary_accuracy(model, valid_loader, DEVICE):.2f}%')

    print(f'Time elapsed: {(time.time() - start_time)/60:.2f} min')

print(f'Total Training Time: {(time.time() - start_time)/60:.2f} min')
print(f'Test accuracy: {compute_binary_accuracy(model, test_loader, DEVICE):.2f}%')
Epoch: 001/015 | Batch 000/157 | Cost: 0.6920
Epoch: 001/015 | Batch 050/157 | Cost: 0.6899
Epoch: 001/015 | Batch 100/157 | Cost: 0.6789
Epoch: 001/015 | Batch 150/157 | Cost: 0.6822
training accuracy: 59.57%
valid accuracy: 57.96%
Time elapsed: 0.33 min
Epoch: 002/015 | Batch 000/157 | Cost: 0.6753
Epoch: 002/015 | Batch 050/157 | Cost: 0.6222
Epoch: 002/015 | Batch 100/157 | Cost: 0.6967
Epoch: 002/015 | Batch 150/157 | Cost: 0.5783
training accuracy: 70.41%
valid accuracy: 68.04%
Time elapsed: 0.67 min
Epoch: 003/015 | Batch 000/157 | Cost: 0.5828
Epoch: 003/015 | Batch 050/157 | Cost: 0.5243
Epoch: 003/015 | Batch 100/157 | Cost: 0.4915
Epoch: 003/015 | Batch 150/157 | Cost: 0.5100
training accuracy: 76.81%
valid accuracy: 74.58%
Time elapsed: 1.00 min
Epoch: 004/015 | Batch 000/157 | Cost: 0.4957
Epoch: 004/015 | Batch 050/157 | Cost: 0.4526
Epoch: 004/015 | Batch 100/157 | Cost: 0.3544
Epoch: 004/015 | Batch 150/157 | Cost: 0.4816
training accuracy: 79.42%
valid accuracy: 77.06%
Time elapsed: 1.34 min
Epoch: 005/015 | Batch 000/157 | Cost: 0.4932
Epoch: 005/015 | Batch 050/157 | Cost: 0.3864
Epoch: 005/015 | Batch 100/157 | Cost: 0.3410
Epoch: 005/015 | Batch 150/157 | Cost: 0.4023
training accuracy: 83.50%
valid accuracy: 80.04%
Time elapsed: 1.68 min
Epoch: 006/015 | Batch 000/157 | Cost: 0.4139
Epoch: 006/015 | Batch 050/157 | Cost: 0.3480
Epoch: 006/015 | Batch 100/157 | Cost: 0.3831
Epoch: 006/015 | Batch 150/157 | Cost: 0.3973
training accuracy: 85.54%
valid accuracy: 81.82%
Time elapsed: 2.02 min
Epoch: 007/015 | Batch 000/157 | Cost: 0.3975
Epoch: 007/015 | Batch 050/157 | Cost: 0.3051
Epoch: 007/015 | Batch 100/157 | Cost: 0.3075
Epoch: 007/015 | Batch 150/157 | Cost: 0.3503
training accuracy: 85.21%
valid accuracy: 80.54%
Time elapsed: 2.35 min
Epoch: 008/015 | Batch 000/157 | Cost: 0.3074
Epoch: 008/015 | Batch 050/157 | Cost: 0.2903
Epoch: 008/015 | Batch 100/157 | Cost: 0.3141
Epoch: 008/015 | Batch 150/157 | Cost: 0.2595
training accuracy: 87.69%
valid accuracy: 82.68%
Time elapsed: 2.69 min
Epoch: 009/015 | Batch 000/157 | Cost: 0.2799
Epoch: 009/015 | Batch 050/157 | Cost: 0.2448
Epoch: 009/015 | Batch 100/157 | Cost: 0.2151
Epoch: 009/015 | Batch 150/157 | Cost: 0.2847
training accuracy: 88.93%
valid accuracy: 83.70%
Time elapsed: 3.03 min
Epoch: 010/015 | Batch 000/157 | Cost: 0.2497
Epoch: 010/015 | Batch 050/157 | Cost: 0.2540
Epoch: 010/015 | Batch 100/157 | Cost: 0.3835
Epoch: 010/015 | Batch 150/157 | Cost: 0.2845
training accuracy: 90.00%
valid accuracy: 84.10%
Time elapsed: 3.38 min
Epoch: 011/015 | Batch 000/157 | Cost: 0.2449
Epoch: 011/015 | Batch 050/157 | Cost: 0.1758
Epoch: 011/015 | Batch 100/157 | Cost: 0.1718
Epoch: 011/015 | Batch 150/157 | Cost: 0.2826
training accuracy: 91.14%
valid accuracy: 85.00%
Time elapsed: 3.71 min
Epoch: 012/015 | Batch 000/157 | Cost: 0.1856
Epoch: 012/015 | Batch 050/157 | Cost: 0.2359
Epoch: 012/015 | Batch 100/157 | Cost: 0.2082
Epoch: 012/015 | Batch 150/157 | Cost: 0.2608
training accuracy: 91.82%
valid accuracy: 85.04%
Time elapsed: 4.05 min
Epoch: 013/015 | Batch 000/157 | Cost: 0.2708
Epoch: 013/015 | Batch 050/157 | Cost: 0.2684
Epoch: 013/015 | Batch 100/157 | Cost: 0.1688
Epoch: 013/015 | Batch 150/157 | Cost: 0.2290
training accuracy: 92.83%
valid accuracy: 85.64%
Time elapsed: 4.39 min
Epoch: 014/015 | Batch 000/157 | Cost: 0.2259
Epoch: 014/015 | Batch 050/157 | Cost: 0.1845
Epoch: 014/015 | Batch 100/157 | Cost: 0.1361
Epoch: 014/015 | Batch 150/157 | Cost: 0.2182
training accuracy: 93.69%
valid accuracy: 85.96%
Time elapsed: 4.73 min
Epoch: 015/015 | Batch 000/157 | Cost: 0.2125
Epoch: 015/015 | Batch 050/157 | Cost: 0.1962
Epoch: 015/015 | Batch 100/157 | Cost: 0.2078
Epoch: 015/015 | Batch 150/157 | Cost: 0.5364
training accuracy: 91.02%
valid accuracy: 83.28%
Time elapsed: 5.06 min
Total Training Time: 5.06 min
Test accuracy: 83.21%
import spacy
nlp = spacy.load('en')
def predict_sentiment(model, sentence):
    # based on:
    # https://github.com/bentrevett/pytorch-sentiment-analysis/blob/
    # master/2%20-%20Upgraded%20Sentiment%20Analysis.ipynb
    model.eval()
    tokenized = [tok.text for tok in nlp.tokenizer(sentence)]
    indexed = [TEXT.vocab.stoi[t] for t in tokenized]
    length = [len(indexed)]
    tensor = torch.LongTensor(indexed).to(DEVICE)
    tensor = tensor.unsqueeze(1)
    length_tensor = torch.LongTensor(length)
    prediction = torch.sigmoid(model(tensor, length_tensor))
    return prediction.item()
print('Probability positive:')
predict_sentiment(model, "I really love this movie. This movie is so great!")
Probability positive:
0.9388696551322937
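For comparison, the same helper can be applied to a clearly negative review; with the trained model one would expect a probability close to 0 (output not shown here, and the example sentence is arbitrary):
print('Probability positive:')
predict_sentiment(model, "This movie was a complete waste of time. Terrible acting.")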