Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch,pytorch_lightning
Author: Sebastian Raschka Python implementation: CPython Python version : 3.9.6 IPython version : 7.29.0 torch : 1.8.0 pytorch_lightning: 1.5.2
import pytorch_lightning as pl
import torch
import torch.nn as nn
from torchmetrics import Accuracy
class MultiLayerPerceptron(pl.LightningModule):
    """A fully-connected classifier implemented as a LightningModule.

    Flattens the input image, passes it through a stack of
    Linear+ReLU hidden layers, and emits raw (unnormalized) logits.

    Note: the model deliberately has NO final Softmax layer —
    ``nn.functional.cross_entropy`` applies log-softmax internally,
    so feeding it softmax outputs would apply softmax twice and
    hurt the gradients. ``argmax`` over logits gives the same
    predictions as ``argmax`` over probabilities.

    Args:
        image_shape: (channels, height, width) of the input images.
        hidden_units: sizes of the hidden layers, in order.
        num_classes: number of output classes (default 10 for MNIST).
    """

    def __init__(self, image_shape=(1, 28, 28), hidden_units=(32, 16),
                 num_classes=10):
        super().__init__()

        # Torchmetrics accumulators, one per stage so their running
        # state never mixes across train/val/test.
        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.test_acc = Accuracy()

        # Build Flatten -> [Linear -> ReLU]* -> Linear(logits).
        input_size = image_shape[0] * image_shape[1] * image_shape[2]
        all_layers = [nn.Flatten()]
        for hidden_unit in hidden_units:
            all_layers.append(nn.Linear(input_size, hidden_unit))
            all_layers.append(nn.ReLU())
            input_size = hidden_unit
        # Use the running `input_size` (not hidden_units[-1]) so an
        # empty `hidden_units` degenerates to plain logistic regression
        # instead of raising IndexError.
        all_layers.append(nn.Linear(input_size, num_classes))
        self.model = nn.Sequential(*all_layers)

    def forward(self, x):
        """Return raw class logits of shape (batch, num_classes)."""
        return self.model(x)

    def _shared_step(self, batch):
        """Run ONE forward pass and derive loss and predictions.

        Shared by the train/val/test steps; the single ``self(x)``
        call replaces the original code's duplicated forward pass.
        """
        x, y = batch
        logits = self(x)
        loss = nn.functional.cross_entropy(logits, y)
        preds = torch.argmax(logits, dim=1)
        return loss, preds, y

    def training_step(self, batch, batch_idx):
        loss, preds, y = self._shared_step(batch)
        self.train_acc.update(preds, y)
        self.log("train_loss", loss, prog_bar=True)
        return loss

    def training_epoch_end(self, outs):
        # Compute epoch-level accuracy from the accumulated state,
        # then reset so the next epoch starts fresh.
        self.log("train_acc", self.train_acc.compute())
        self.train_acc.reset()

    def validation_step(self, batch, batch_idx):
        loss, preds, y = self._shared_step(batch)
        self.valid_acc.update(preds, y)
        self.log("valid_loss", loss, prog_bar=True)
        return loss

    def validation_epoch_end(self, outs):
        self.log("valid_acc", self.valid_acc.compute(), prog_bar=True)
        self.valid_acc.reset()

    def test_step(self, batch, batch_idx):
        loss, preds, y = self._shared_step(batch)
        self.test_acc.update(preds, y)
        self.log("test_loss", loss, prog_bar=True)
        # Log the metric OBJECT: Lightning then calls compute()/reset()
        # at epoch end, yielding the true whole-set accuracy instead of
        # a mean over per-batch running accuracies.
        self.log("test_acc", self.test_acc, prog_bar=True)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
        return optimizer
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from torchvision.datasets import MNIST
from torchvision import transforms
class MnistDataModule(pl.LightningDataModule):
    """Lightning data module encapsulating the MNIST train/val/test splits."""

    def __init__(self, data_path='./'):
        super().__init__()
        self.data_path = data_path
        self.transform = transforms.Compose([transforms.ToTensor()])

    def prepare_data(self):
        # Download once (fetches both the train and test archives);
        # setup() can then load with download=False.
        MNIST(root=self.data_path, download=True)

    def setup(self, stage=None):
        # `stage` is one of 'fit', 'validate', 'test', or 'predict';
        # it is not relevant here — all splits are prepared every time.
        full_train = MNIST(
            root=self.data_path,
            train=True,
            transform=self.transform,
            download=False,
        )
        # Fixed-seed generator keeps the 55k/5k split reproducible.
        split_gen = torch.Generator().manual_seed(1)
        self.train, self.val = random_split(
            full_train, [55000, 5000], generator=split_gen
        )
        self.test = MNIST(
            root=self.data_path,
            train=False,
            transform=self.transform,
            download=False,
        )

    def train_dataloader(self):
        return DataLoader(self.train, batch_size=64, num_workers=4)

    def val_dataloader(self):
        return DataLoader(self.val, batch_size=64, num_workers=4)

    def test_dataloader(self):
        return DataLoader(self.test, batch_size=64, num_workers=4)
# Seed model-weight initialization for reproducibility.
torch.manual_seed(1)

mnist_dm = MnistDataModule()
mnistclassifier = MultiLayerPerceptron()

# `gpus=1` is the PL 1.5-era flag for single-GPU training.
if torch.cuda.is_available(): # if you have GPUs
    trainer = pl.Trainer(max_epochs=10, gpus=1)
else:
    trainer = pl.Trainer(max_epochs=10)

# The data module supplies train/val dataloaders; fit runs the
# training + validation loops for max_epochs.
trainer.fit(model=mnistclassifier, datamodule=mnist_dm)
GPU available: False, used: False TPU available: False, using: 0 TPU cores IPU available: False, using: 0 IPUs | Name | Type | Params ----------------------------------------- 0 | train_acc | Accuracy | 0 1 | valid_acc | Accuracy | 0 2 | test_acc | Accuracy | 0 3 | model | Sequential | 25.8 K ----------------------------------------- 25.8 K Trainable params 0 Non-trainable params 25.8 K Total params 0.103 Total estimated model params size (MB)
Validation sanity check: 0it [00:00, ?it/s]
Training: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
# Evaluate on the held-out MNIST test split; returns a list of metric dicts.
trainer.test(model=mnistclassifier, datamodule=mnist_dm)
Testing: 0it [00:00, ?it/s]
-------------------------------------------------------------------------------- DATALOADER:0 TEST RESULTS {'test_acc': 0.938988208770752, 'test_loss': 1.5173449516296387} --------------------------------------------------------------------------------
[{'test_loss': 1.5173449516296387, 'test_acc': 0.938988208770752}]
# Show a pre-rendered screenshot of the TensorBoard curves for this run.
from IPython.display import Image
Image(filename='lightning-mlp_images/1.png')
# Start tensorboard
%load_ext tensorboard
%tensorboard --logdir lightning_logs/
Reusing TensorBoard on port 6006 (pid 54170), started 2:14:59 ago. (Use '!kill 54170' to kill it.)
# Continue training from the epoch-10 checkpoint up to 15 total epochs.
if torch.cuda.is_available(): # if you have GPUs
    trainer = pl.Trainer(max_epochs=15, gpus=1)
else:
    trainer = pl.Trainer(max_epochs=15)

# `Trainer(resume_from_checkpoint=...)` is deprecated in PL 1.5 and
# removed in 1.7 (see the deprecation warning this cell emitted);
# the supported API is to pass `ckpt_path` directly to `fit`.
trainer.fit(
    model=mnistclassifier,
    datamodule=mnist_dm,
    ckpt_path='./lightning_logs/version_0/checkpoints/epoch=9-step=8599.ckpt',
)
/Users/sebastian/miniforge3/lib/python3.9/site-packages/pytorch_lightning/trainer/connectors/checkpoint_connector.py:45: LightningDeprecationWarning: Setting `Trainer(resume_from_checkpoint=)` is deprecated in v1.5 and will be removed in v1.7. Please pass `Trainer.fit(ckpt_path=)` directly instead. rank_zero_deprecation( GPU available: False, used: False TPU available: False, using: 0 TPU cores IPU available: False, using: 0 IPUs /Users/sebastian/miniforge3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:1906: LightningDeprecationWarning: `trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v1.7. Specify the fit checkpoint path with `trainer.fit(ckpt_path=)` instead. rank_zero_deprecation( /Users/sebastian/miniforge3/lib/python3.9/site-packages/pytorch_lightning/core/datamodule.py:469: LightningDeprecationWarning: DataModule.setup has already been called, so it will not be called again. In v1.6 this behavior will change to always call DataModule.setup. rank_zero_deprecation( Restoring states from the checkpoint path at ./lightning_logs/version_0/checkpoints/epoch=9-step=8599.ckpt /Users/sebastian/miniforge3/lib/python3.9/site-packages/pytorch_lightning/trainer/connectors/checkpoint_connector.py:247: UserWarning: You're resuming from a checkpoint that ended mid-epoch. Training will start from the beginning of the next epoch. This can cause unreliable results if further training is done, consider using an end of epoch checkpoint. rank_zero_warn( Restored all states from the checkpoint file at ./lightning_logs/version_0/checkpoints/epoch=9-step=8599.ckpt | Name | Type | Params ----------------------------------------- 0 | train_acc | Accuracy | 0 1 | valid_acc | Accuracy | 0 2 | test_acc | Accuracy | 0 3 | model | Sequential | 25.8 K ----------------------------------------- 25.8 K Trainable params 0 Non-trainable params 25.8 K Total params 0.103 Total estimated model params size (MB)
Validation sanity check: 0it [00:00, ?it/s]
Training: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
Validating: 0it [00:00, ?it/s]
/Users/sebastian/miniforge3/lib/python3.9/site-packages/pytorch_lightning/core/datamodule.py:469: LightningDeprecationWarning: DataModule.teardown has already been called, so it will not be called again. In v1.6 this behavior will change to always call DataModule.teardown. rank_zero_deprecation(
# Screenshot of the TensorBoard curves after resuming to 15 epochs.
from IPython.display import Image
Image(filename='lightning-mlp_images/2.png')
%tensorboard --logdir lightning_logs/
Reusing TensorBoard on port 6006 (pid 54170), started 2:15:51 ago. (Use '!kill 54170' to kill it.)
# Re-evaluate on the test split after the additional 5 epochs of training.
trainer.test(model=mnistclassifier, datamodule=mnist_dm)
Testing: 0it [00:00, ?it/s]
-------------------------------------------------------------------------------- DATALOADER:0 TEST RESULTS {'test_acc': 0.9453728795051575, 'test_loss': 1.5110704898834229} --------------------------------------------------------------------------------
[{'test_loss': 1.5110704898834229, 'test_acc': 0.9453728795051575}]