Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
Sebastian Raschka

CPython 3.6.8
IPython 7.2.0

torch 1.0.0
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True  # make cuDNN ops reproducible
##########################
### SETTINGS
##########################
# Device
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
# Hyperparameters
random_seed = 1
learning_rate = 0.05
num_epochs = 10
batch_size = 128
# Architecture
num_classes = 10
##########################
### MNIST DATASET
##########################
# Note: transforms.ToTensor() scales input images
# to the 0-1 range (checked below)
train_dataset = datasets.MNIST(root='data',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

test_dataset = datasets.MNIST(root='data',
                              train=False,
                              transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)
# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break
Image batch dimensions: torch.Size([128, 1, 28, 28])
Image label dimensions: torch.Size([128])
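As a quick check of the 0-1 scaling noted above, we can inspect the pixel range of one batch (a minimal sketch reusing the train_loader defined earlier):

# ToTensor() converts uint8 pixels in [0, 255] to float32 values in [0.0, 1.0]
images, labels = next(iter(train_loader))
print('Pixel min: %.1f | Pixel max: %.1f'
      % (images.min().item(), images.max().item()))
# expected: Pixel min: 0.0 | Pixel max: 1.0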
##########################
### MODEL
##########################
class ConvNet(torch.nn.Module):

    def __init__(self, num_classes):
        super(ConvNet, self).__init__()

        # calculate same padding:
        # (w - k + 2*p)/s + 1 = o
        # => p = (s(o-1) - w + k)/2
        # (a small helper implementing this formula is sketched after the class)

        # 28x28x1 => 28x28x4
        self.conv_1 = torch.nn.Conv2d(in_channels=1,
                                      out_channels=4,
                                      kernel_size=(3, 3),
                                      stride=(1, 1),
                                      padding=1)  # (1(28-1) - 28 + 3) / 2 = 1

        # 28x28x4 => 14x14x4
        self.pool_1 = torch.nn.MaxPool2d(kernel_size=(2, 2),
                                         stride=(2, 2),
                                         padding=0)  # (2(14-1) - 28 + 2) / 2 = 0

        # 14x14x4 => 14x14x8
        self.conv_2 = torch.nn.Conv2d(in_channels=4,
                                      out_channels=8,
                                      kernel_size=(3, 3),
                                      stride=(1, 1),
                                      padding=1)  # (1(14-1) - 14 + 3) / 2 = 1

        # 14x14x8 => 7x7x8
        self.pool_2 = torch.nn.MaxPool2d(kernel_size=(2, 2),
                                         stride=(2, 2),
                                         padding=0)  # (2(7-1) - 14 + 2) / 2 = 0

        self.linear_1 = torch.nn.Linear(7*7*8, num_classes)

        ###############################################
        # Reinitialize weights using He initialization
        ###############################################
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.detach())
                m.bias.detach().zero_()
            elif isinstance(m, torch.nn.Linear):
                nn.init.kaiming_normal_(m.weight.detach())
                m.bias.detach().zero_()

    def forward(self, x):
        out = self.conv_1(x)
        out = F.relu(out)
        out = self.pool_1(out)

        out = self.conv_2(out)
        out = F.relu(out)
        out = self.pool_2(out)

        logits = self.linear_1(out.view(-1, 7*7*8))
        probas = F.softmax(logits, dim=1)
        return logits, probas
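The same-padding formula derived in the comments above can also be wrapped in a small helper for reuse; the following is a minimal sketch (same_padding is our own illustrative function, not a torch API):

import math

def same_padding(w, k, s):
    # solve (w - k + 2*p)/s + 1 = o for p, with target output size o = ceil(w/s)
    o = math.ceil(w / s)
    return (s*(o - 1) - w + k) / 2

print(same_padding(w=28, k=3, s=1))  # 1.0 -> padding=1 for conv_1
print(same_padding(w=28, k=2, s=2))  # 0.0 -> padding=0 for pool_1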
torch.manual_seed(random_seed)
model = ConvNet(num_classes=num_classes)
model = model.to(device)
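The He reinitialization in the constructor can be spot-checked here: with default arguments, nn.init.kaiming_normal_ draws weights with standard deviation sqrt(2/fan_in). A minimal sketch for the linear layer, whose fan_in is 7*7*8 = 392:

# empirical std over the 392*10 linear weights should be close to sqrt(2/392) ≈ 0.0714
print('Empirical weight std: %.4f' % model.linear_1.weight.std().item())
print('Expected weight std:  %.4f' % ((2. / (7*7*8)) ** 0.5))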
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
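Before training, a dummy forward pass is a cheap sanity check of the shape bookkeeping in the architecture comments; a minimal sketch:

# a fake batch of 2 grayscale 28x28 images should yield logits of shape [2, num_classes]
with torch.no_grad():
    dummy = torch.randn(2, 1, 28, 28).to(device)
    dummy_logits, dummy_probas = model(dummy)
print(dummy_logits.shape)       # expected: torch.Size([2, 10])
print(dummy_probas.sum(dim=1))  # softmax rows each sum to ~1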
def compute_accuracy(model, data_loader):
    correct_pred, num_examples = 0, 0
    with torch.no_grad():  # no gradients needed during evaluation
        for features, targets in data_loader:
            features = features.to(device)
            targets = targets.to(device)
            logits, probas = model(features)
            _, predicted_labels = torch.max(probas, 1)
            num_examples += targets.size(0)
            correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float()/num_examples * 100
start_time = time.time()
for epoch in range(num_epochs):
    model = model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):

        features = features.to(device)
        targets = targets.to(device)

        ### FORWARD AND BACK PROP
        logits, probas = model(features)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()

        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### LOGGING
        if not batch_idx % 50:
            print('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f'
                  % (epoch+1, num_epochs, batch_idx,
                     len(train_loader), cost))

    model = model.eval()
    print('Epoch: %03d/%03d training accuracy: %.2f%%' % (
          epoch+1, num_epochs,
          compute_accuracy(model, train_loader)))

    print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))

print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
Epoch: 001/010 | Batch 000/469 | Cost: 2.4577
Epoch: 001/010 | Batch 050/469 | Cost: 1.1068
Epoch: 001/010 | Batch 100/469 | Cost: 0.6610
Epoch: 001/010 | Batch 150/469 | Cost: 0.5354
Epoch: 001/010 | Batch 200/469 | Cost: 0.4479
Epoch: 001/010 | Batch 250/469 | Cost: 0.3158
Epoch: 001/010 | Batch 300/469 | Cost: 0.4542
Epoch: 001/010 | Batch 350/469 | Cost: 0.4278
Epoch: 001/010 | Batch 400/469 | Cost: 0.1387
Epoch: 001/010 | Batch 450/469 | Cost: 0.1410
Epoch: 001/010 training accuracy: 91.97%
Time elapsed: 0.23 min
Epoch: 002/010 | Batch 000/469 | Cost: 0.2198
Epoch: 002/010 | Batch 050/469 | Cost: 0.1464
Epoch: 002/010 | Batch 100/469 | Cost: 0.2629
Epoch: 002/010 | Batch 150/469 | Cost: 0.1920
Epoch: 002/010 | Batch 200/469 | Cost: 0.1485
Epoch: 002/010 | Batch 250/469 | Cost: 0.1229
Epoch: 002/010 | Batch 300/469 | Cost: 0.1591
Epoch: 002/010 | Batch 350/469 | Cost: 0.1411
Epoch: 002/010 | Batch 400/469 | Cost: 0.1404
Epoch: 002/010 | Batch 450/469 | Cost: 0.1211
Epoch: 002/010 training accuracy: 95.21%
Time elapsed: 0.46 min
Epoch: 003/010 | Batch 000/469 | Cost: 0.1289
Epoch: 003/010 | Batch 050/469 | Cost: 0.2468
Epoch: 003/010 | Batch 100/469 | Cost: 0.1308
Epoch: 003/010 | Batch 150/469 | Cost: 0.1887
Epoch: 003/010 | Batch 200/469 | Cost: 0.1053
Epoch: 003/010 | Batch 250/469 | Cost: 0.1564
Epoch: 003/010 | Batch 300/469 | Cost: 0.1235
Epoch: 003/010 | Batch 350/469 | Cost: 0.1388
Epoch: 003/010 | Batch 400/469 | Cost: 0.1556
Epoch: 003/010 | Batch 450/469 | Cost: 0.1658
Epoch: 003/010 training accuracy: 96.45%
Time elapsed: 0.69 min
Epoch: 004/010 | Batch 000/469 | Cost: 0.1827
Epoch: 004/010 | Batch 050/469 | Cost: 0.0613
Epoch: 004/010 | Batch 100/469 | Cost: 0.1967
Epoch: 004/010 | Batch 150/469 | Cost: 0.1072
Epoch: 004/010 | Batch 200/469 | Cost: 0.1063
Epoch: 004/010 | Batch 250/469 | Cost: 0.0970
Epoch: 004/010 | Batch 300/469 | Cost: 0.0593
Epoch: 004/010 | Batch 350/469 | Cost: 0.1031
Epoch: 004/010 | Batch 400/469 | Cost: 0.1503
Epoch: 004/010 | Batch 450/469 | Cost: 0.1611
Epoch: 004/010 training accuracy: 96.62%
Time elapsed: 0.92 min
Epoch: 005/010 | Batch 000/469 | Cost: 0.0469
Epoch: 005/010 | Batch 050/469 | Cost: 0.0351
Epoch: 005/010 | Batch 100/469 | Cost: 0.1232
Epoch: 005/010 | Batch 150/469 | Cost: 0.0432
Epoch: 005/010 | Batch 200/469 | Cost: 0.1049
Epoch: 005/010 | Batch 250/469 | Cost: 0.1136
Epoch: 005/010 | Batch 300/469 | Cost: 0.2226
Epoch: 005/010 | Batch 350/469 | Cost: 0.1271
Epoch: 005/010 | Batch 400/469 | Cost: 0.1405
Epoch: 005/010 | Batch 450/469 | Cost: 0.0651
Epoch: 005/010 training accuracy: 97.22%
Time elapsed: 1.15 min
Epoch: 006/010 | Batch 000/469 | Cost: 0.0886
Epoch: 006/010 | Batch 050/469 | Cost: 0.1358
Epoch: 006/010 | Batch 100/469 | Cost: 0.1083
Epoch: 006/010 | Batch 150/469 | Cost: 0.0799
Epoch: 006/010 | Batch 200/469 | Cost: 0.0815
Epoch: 006/010 | Batch 250/469 | Cost: 0.1873
Epoch: 006/010 | Batch 300/469 | Cost: 0.1785
Epoch: 006/010 | Batch 350/469 | Cost: 0.1107
Epoch: 006/010 | Batch 400/469 | Cost: 0.1059
Epoch: 006/010 | Batch 450/469 | Cost: 0.0741
Epoch: 006/010 training accuracy: 97.22%
Time elapsed: 1.38 min
Epoch: 007/010 | Batch 000/469 | Cost: 0.1303
Epoch: 007/010 | Batch 050/469 | Cost: 0.0944
Epoch: 007/010 | Batch 100/469 | Cost: 0.0867
Epoch: 007/010 | Batch 150/469 | Cost: 0.1706
Epoch: 007/010 | Batch 200/469 | Cost: 0.0840
Epoch: 007/010 | Batch 250/469 | Cost: 0.0876
Epoch: 007/010 | Batch 300/469 | Cost: 0.0565
Epoch: 007/010 | Batch 350/469 | Cost: 0.0805
Epoch: 007/010 | Batch 400/469 | Cost: 0.0784
Epoch: 007/010 | Batch 450/469 | Cost: 0.1238
Epoch: 007/010 training accuracy: 97.47%
Time elapsed: 1.62 min
Epoch: 008/010 | Batch 000/469 | Cost: 0.0740
Epoch: 008/010 | Batch 050/469 | Cost: 0.0674
Epoch: 008/010 | Batch 100/469 | Cost: 0.1884
Epoch: 008/010 | Batch 150/469 | Cost: 0.0757
Epoch: 008/010 | Batch 200/469 | Cost: 0.0633
Epoch: 008/010 | Batch 250/469 | Cost: 0.1166
Epoch: 008/010 | Batch 300/469 | Cost: 0.0309
Epoch: 008/010 | Batch 350/469 | Cost: 0.0821
Epoch: 008/010 | Batch 400/469 | Cost: 0.1253
Epoch: 008/010 | Batch 450/469 | Cost: 0.0486
Epoch: 008/010 training accuracy: 97.53%
Time elapsed: 1.85 min
Epoch: 009/010 | Batch 000/469 | Cost: 0.0538
Epoch: 009/010 | Batch 050/469 | Cost: 0.1860
Epoch: 009/010 | Batch 100/469 | Cost: 0.0645
Epoch: 009/010 | Batch 150/469 | Cost: 0.0392
Epoch: 009/010 | Batch 200/469 | Cost: 0.0662
Epoch: 009/010 | Batch 250/469 | Cost: 0.0885
Epoch: 009/010 | Batch 300/469 | Cost: 0.1958
Epoch: 009/010 | Batch 350/469 | Cost: 0.0716
Epoch: 009/010 | Batch 400/469 | Cost: 0.0790
Epoch: 009/010 | Batch 450/469 | Cost: 0.0223
Epoch: 009/010 training accuracy: 97.89%
Time elapsed: 2.08 min
Epoch: 010/010 | Batch 000/469 | Cost: 0.0982
Epoch: 010/010 | Batch 050/469 | Cost: 0.0772
Epoch: 010/010 | Batch 100/469 | Cost: 0.1971
Epoch: 010/010 | Batch 150/469 | Cost: 0.0399
Epoch: 010/010 | Batch 200/469 | Cost: 0.0341
Epoch: 010/010 | Batch 250/469 | Cost: 0.0538
Epoch: 010/010 | Batch 300/469 | Cost: 0.1165
Epoch: 010/010 | Batch 350/469 | Cost: 0.1016
Epoch: 010/010 | Batch 400/469 | Cost: 0.1560
Epoch: 010/010 | Batch 450/469 | Cost: 0.1757
Epoch: 010/010 training accuracy: 97.80%
Time elapsed: 2.31 min
Total Training Time: 2.31 min
print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
Test accuracy: 97.67%
%watermark -iv
numpy 1.15.4
torch 1.0.0