Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
Sebastian Raschka CPython 3.7.3 IPython 7.6.1 torch 1.1.0
import time
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True
##########################
### SETTINGS
##########################
# Device
device = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
# Hyperparameters
random_seed = 1
learning_rate = 0.05
num_epochs = 10
batch_size = 128
# Architecture
num_classes = 10
##########################
### MNIST DATASET
##########################
# Note transforms.ToTensor() scales input images
# to 0-1 range
train_dataset = datasets.MNIST(root='data',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

test_dataset = datasets.MNIST(root='data',
                              train=False,
                              transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)
# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break
Image batch dimensions: torch.Size([128, 1, 28, 28])
Image label dimensions: torch.Size([128])
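As a quick sanity check on the note above, the following snippet (a minimal sketch added for illustration; it is not part of the original notebook) confirms that transforms.ToTensor() maps the raw 0-255 pixel values into the 0-1 range. It reuses the images tensor from the batch fetched in the loop above:

# verify that ToTensor() scaled the pixel values into [0, 1]
print('Min pixel value: %.1f | Max pixel value: %.1f'
      % (images.min().item(), images.max().item()))
# expected output (approximately): Min pixel value: 0.0 | Max pixel value: 1.0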
##########################
### MODEL
##########################
class ConvNet(torch.nn.Module):

    def __init__(self, num_classes):
        super(ConvNet, self).__init__()

        # calculate same padding:
        # (w - k + 2*p)/s + 1 = o
        # => p = (s(o-1) - w + k)/2

        # 28x28x1 => 28x28x8
        self.conv_1 = torch.nn.Conv2d(in_channels=1,
                                      out_channels=8,
                                      kernel_size=(3, 3),
                                      stride=(1, 1),
                                      padding=1)  # (1(28-1) - 28 + 3) / 2 = 1
        # 28x28x8 => 14x14x8
        self.pool_1 = torch.nn.MaxPool2d(kernel_size=(2, 2),
                                         stride=(2, 2),
                                         padding=0)  # (2(14-1) - 28 + 2) / 2 = 0
        # 14x14x8 => 14x14x16
        self.conv_2 = torch.nn.Conv2d(in_channels=8,
                                      out_channels=16,
                                      kernel_size=(3, 3),
                                      stride=(1, 1),
                                      padding=1)  # (1(14-1) - 14 + 3) / 2 = 1
        # 14x14x16 => 7x7x16
        self.pool_2 = torch.nn.MaxPool2d(kernel_size=(2, 2),
                                         stride=(2, 2),
                                         padding=0)  # (2(7-1) - 14 + 2) / 2 = 0

        self.linear_1 = torch.nn.Linear(7*7*16, num_classes)

        # optionally initialize weights from a Gaussian;
        # Gaussian weight init is not recommended and only for demonstration purposes
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Linear):
                m.weight.detach().normal_(0.0, 0.01)
                if m.bias is not None:
                    m.bias.detach().zero_()

    def forward(self, x):
        out = self.conv_1(x)
        out = F.relu(out)
        out = self.pool_1(out)

        out = self.conv_2(out)
        out = F.relu(out)
        out = self.pool_2(out)

        logits = self.linear_1(out.view(-1, 7*7*16))
        probas = F.softmax(logits, dim=1)
        return logits, probas
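The same-padding arithmetic in the comments above can also be checked programmatically. Below is a small helper, added here purely for illustration (the function name same_padding is not part of the original notebook), that solves p = (s(o-1) - w + k)/2 for the four layers used in this architecture:

def same_padding(w, k, s, o):
    # p = (s*(o - 1) - w + k) / 2, derived from o = (w - k + 2p)/s + 1
    return (s*(o - 1) - w + k) // 2

print(same_padding(w=28, k=3, s=1, o=28))  # conv_1 -> 1
print(same_padding(w=28, k=2, s=2, o=14))  # pool_1 -> 0
print(same_padding(w=14, k=3, s=1, o=14))  # conv_2 -> 1
print(same_padding(w=14, k=2, s=2, o=7))   # pool_2 -> 0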
torch.manual_seed(random_seed)
model = ConvNet(num_classes=num_classes)
model = model.to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
def compute_accuracy(model, data_loader):
    correct_pred, num_examples = 0, 0
    for features, targets in data_loader:
        features = features.to(device)
        targets = targets.to(device)
        logits, probas = model(features)
        _, predicted_labels = torch.max(probas, 1)
        num_examples += targets.size(0)
        correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float()/num_examples * 100
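Before training, a quick forward pass on the batch fetched earlier (a sketch added here for illustration, not part of the original notebook) confirms that the model returns logits of shape (batch_size, num_classes) and that each softmax row sums to 1:

with torch.no_grad():
    logits, probas = model(images.to(device))
print(logits.shape)           # expected: torch.Size([128, 10])
print(probas.sum(dim=1)[:3])  # each row of probas should sum to ~1.0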
start_time = time.time()
for epoch in range(num_epochs):
    model = model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):

        features = features.to(device)
        targets = targets.to(device)

        ### FORWARD AND BACK PROP
        logits, probas = model(features)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()

        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### LOGGING
        if not batch_idx % 50:
            print('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f'
                  % (epoch+1, num_epochs, batch_idx,
                     len(train_loader), cost))

    model = model.eval()
    print('Epoch: %03d/%03d training accuracy: %.2f%%' % (
          epoch+1, num_epochs,
          compute_accuracy(model, train_loader)))

    print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))

print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
Epoch: 001/010 | Batch 000/469 | Cost: 2.3026
Epoch: 001/010 | Batch 050/469 | Cost: 2.3036
Epoch: 001/010 | Batch 100/469 | Cost: 2.3001
Epoch: 001/010 | Batch 150/469 | Cost: 2.3050
Epoch: 001/010 | Batch 200/469 | Cost: 2.2984
Epoch: 001/010 | Batch 250/469 | Cost: 2.2986
Epoch: 001/010 | Batch 300/469 | Cost: 2.2983
Epoch: 001/010 | Batch 350/469 | Cost: 2.2941
Epoch: 001/010 | Batch 400/469 | Cost: 2.2962
Epoch: 001/010 | Batch 450/469 | Cost: 2.2265
Epoch: 001/010 training accuracy: 65.38%
Time elapsed: 0.24 min
Epoch: 002/010 | Batch 000/469 | Cost: 1.8989
Epoch: 002/010 | Batch 050/469 | Cost: 0.6029
Epoch: 002/010 | Batch 100/469 | Cost: 0.6099
Epoch: 002/010 | Batch 150/469 | Cost: 0.4786
Epoch: 002/010 | Batch 200/469 | Cost: 0.4518
Epoch: 002/010 | Batch 250/469 | Cost: 0.3553
Epoch: 002/010 | Batch 300/469 | Cost: 0.3167
Epoch: 002/010 | Batch 350/469 | Cost: 0.2241
Epoch: 002/010 | Batch 400/469 | Cost: 0.2259
Epoch: 002/010 | Batch 450/469 | Cost: 0.3056
Epoch: 002/010 training accuracy: 93.11%
Time elapsed: 0.47 min
Epoch: 003/010 | Batch 000/469 | Cost: 0.3313
Epoch: 003/010 | Batch 050/469 | Cost: 0.1042
Epoch: 003/010 | Batch 100/469 | Cost: 0.1328
Epoch: 003/010 | Batch 150/469 | Cost: 0.2803
Epoch: 003/010 | Batch 200/469 | Cost: 0.0975
Epoch: 003/010 | Batch 250/469 | Cost: 0.1839
Epoch: 003/010 | Batch 300/469 | Cost: 0.1774
Epoch: 003/010 | Batch 350/469 | Cost: 0.1143
Epoch: 003/010 | Batch 400/469 | Cost: 0.1753
Epoch: 003/010 | Batch 450/469 | Cost: 0.1543
Epoch: 003/010 training accuracy: 95.68%
Time elapsed: 0.70 min
Epoch: 004/010 | Batch 000/469 | Cost: 0.1057
Epoch: 004/010 | Batch 050/469 | Cost: 0.1035
Epoch: 004/010 | Batch 100/469 | Cost: 0.1851
Epoch: 004/010 | Batch 150/469 | Cost: 0.1608
Epoch: 004/010 | Batch 200/469 | Cost: 0.1458
Epoch: 004/010 | Batch 250/469 | Cost: 0.1913
Epoch: 004/010 | Batch 300/469 | Cost: 0.1295
Epoch: 004/010 | Batch 350/469 | Cost: 0.1518
Epoch: 004/010 | Batch 400/469 | Cost: 0.1717
Epoch: 004/010 | Batch 450/469 | Cost: 0.0792
Epoch: 004/010 training accuracy: 96.46%
Time elapsed: 0.93 min
Epoch: 005/010 | Batch 000/469 | Cost: 0.0905
Epoch: 005/010 | Batch 050/469 | Cost: 0.1622
Epoch: 005/010 | Batch 100/469 | Cost: 0.1934
Epoch: 005/010 | Batch 150/469 | Cost: 0.1874
Epoch: 005/010 | Batch 200/469 | Cost: 0.0742
Epoch: 005/010 | Batch 250/469 | Cost: 0.1056
Epoch: 005/010 | Batch 300/469 | Cost: 0.0997
Epoch: 005/010 | Batch 350/469 | Cost: 0.0948
Epoch: 005/010 | Batch 400/469 | Cost: 0.0575
Epoch: 005/010 | Batch 450/469 | Cost: 0.1157
Epoch: 005/010 training accuracy: 96.97%
Time elapsed: 1.16 min
Epoch: 006/010 | Batch 000/469 | Cost: 0.1326
Epoch: 006/010 | Batch 050/469 | Cost: 0.1549
Epoch: 006/010 | Batch 100/469 | Cost: 0.0784
Epoch: 006/010 | Batch 150/469 | Cost: 0.0898
Epoch: 006/010 | Batch 200/469 | Cost: 0.0991
Epoch: 006/010 | Batch 250/469 | Cost: 0.0965
Epoch: 006/010 | Batch 300/469 | Cost: 0.0477
Epoch: 006/010 | Batch 350/469 | Cost: 0.0712
Epoch: 006/010 | Batch 400/469 | Cost: 0.1109
Epoch: 006/010 | Batch 450/469 | Cost: 0.0325
Epoch: 006/010 training accuracy: 97.60%
Time elapsed: 1.39 min
Epoch: 007/010 | Batch 000/469 | Cost: 0.0665
Epoch: 007/010 | Batch 050/469 | Cost: 0.0868
Epoch: 007/010 | Batch 100/469 | Cost: 0.0427
Epoch: 007/010 | Batch 150/469 | Cost: 0.0385
Epoch: 007/010 | Batch 200/469 | Cost: 0.0611
Epoch: 007/010 | Batch 250/469 | Cost: 0.0484
Epoch: 007/010 | Batch 300/469 | Cost: 0.1288
Epoch: 007/010 | Batch 350/469 | Cost: 0.0309
Epoch: 007/010 | Batch 400/469 | Cost: 0.0359
Epoch: 007/010 | Batch 450/469 | Cost: 0.0139
Epoch: 007/010 training accuracy: 97.64%
Time elapsed: 1.62 min
Epoch: 008/010 | Batch 000/469 | Cost: 0.0939
Epoch: 008/010 | Batch 050/469 | Cost: 0.1478
Epoch: 008/010 | Batch 100/469 | Cost: 0.0769
Epoch: 008/010 | Batch 150/469 | Cost: 0.0713
Epoch: 008/010 | Batch 200/469 | Cost: 0.1272
Epoch: 008/010 | Batch 250/469 | Cost: 0.0446
Epoch: 008/010 | Batch 300/469 | Cost: 0.0525
Epoch: 008/010 | Batch 350/469 | Cost: 0.1729
Epoch: 008/010 | Batch 400/469 | Cost: 0.0672
Epoch: 008/010 | Batch 450/469 | Cost: 0.0754
Epoch: 008/010 training accuracy: 96.67%
Time elapsed: 1.85 min
Epoch: 009/010 | Batch 000/469 | Cost: 0.0988
Epoch: 009/010 | Batch 050/469 | Cost: 0.0409
Epoch: 009/010 | Batch 100/469 | Cost: 0.1046
Epoch: 009/010 | Batch 150/469 | Cost: 0.0523
Epoch: 009/010 | Batch 200/469 | Cost: 0.0815
Epoch: 009/010 | Batch 250/469 | Cost: 0.0811
Epoch: 009/010 | Batch 300/469 | Cost: 0.0416
Epoch: 009/010 | Batch 350/469 | Cost: 0.0747
Epoch: 009/010 | Batch 400/469 | Cost: 0.0467
Epoch: 009/010 | Batch 450/469 | Cost: 0.0669
Epoch: 009/010 training accuracy: 97.90%
Time elapsed: 2.08 min
Epoch: 010/010 | Batch 000/469 | Cost: 0.0257
Epoch: 010/010 | Batch 050/469 | Cost: 0.0357
Epoch: 010/010 | Batch 100/469 | Cost: 0.1469
Epoch: 010/010 | Batch 150/469 | Cost: 0.0170
Epoch: 010/010 | Batch 200/469 | Cost: 0.0493
Epoch: 010/010 | Batch 250/469 | Cost: 0.0489
Epoch: 010/010 | Batch 300/469 | Cost: 0.1348
Epoch: 010/010 | Batch 350/469 | Cost: 0.0815
Epoch: 010/010 | Batch 400/469 | Cost: 0.0552
Epoch: 010/010 | Batch 450/469 | Cost: 0.0422
Epoch: 010/010 training accuracy: 97.99%
Time elapsed: 2.31 min
Total Training Time: 2.31 min
with torch.set_grad_enabled(False):  # save memory during inference
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
Test accuracy: 97.97%
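To make the evaluation more concrete, the following sketch (added for illustration; it is not part of the original notebook) runs the trained model on a single test image and prints the predicted class next to the ground-truth label:

# pick the first test image; unsqueeze(0) adds the batch dimension
example_image, example_label = test_dataset[0]
with torch.no_grad():
    logits, probas = model(example_image.unsqueeze(0).to(device))
print('Predicted label: %d | True label: %d'
      % (torch.argmax(probas, dim=1).item(), example_label))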
%watermark -iv
torch       1.1.0
numpy       1.16.4
torchvision 0.3.0