import torch
entrypoints = torch.hub.list('pytorch/vision') # , force_reload=True)
len(entrypoints), entrypoints[::20]
Using cache found in /home/buffet/.cache/torch/hub/pytorch_vision_main
(99, ['alexnet', 'efficientnet_v2_l', 'mvit_v2_s', 'resnet101', 'swin_t'])
# Load the ResNet18 model and inspect its parameters
resnet18 = torch.hub.load('pytorch/vision', 'resnet18', weights='DEFAULT')
print(torch.hub.help('pytorch/vision', 'resnet18', force_reload=True))
Using cache found in /home/buffet/.cache/torch/hub/pytorch_vision_main
Downloading: "https://github.com/pytorch/vision/zipball/main" to /home/buffet/.cache/torch/hub/main.zip
ResNet-18 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.

Args:
    weights (:class:`~torchvision.models.ResNet18_Weights`, optional): The
        pretrained weights to use. See
        :class:`~torchvision.models.ResNet18_Weights` below for
        more details, and possible values. By default, no pre-trained
        weights are used.
    progress (bool, optional): If True, displays a progress bar of the
        download to stderr. Default is True.
    **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
        base class. Please refer to the `source code
        <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
        for more details about this class.

.. autoclass:: torchvision.models.ResNet18_Weights
    :members:
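The hub-loaded resnet18 can be sanity-checked with a dummy forward pass. This is a minimal sketch; the 224x224 input size and the batch of one are illustrative assumptions, not part of the hub output above.
resnet18.eval()                        # disable dropout/batch-norm updates for inference
dummy = torch.randn(1, 3, 224, 224)    # one fake RGB image (assumed ImageNet-sized input)
with torch.no_grad():
    logits = resnet18(dummy)
logits.shape                           # torch.Size([1, 1000]) with the default ImageNet weights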
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
torch.manual_seed(4242)
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data/ch02/mnist', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=64, shuffle=True)
train_loader
<torch.utils.data.dataloader.DataLoader at 0x7fc4c57a4f70>
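Before training, it helps to confirm what one batch from the loader looks like; the shapes below follow from batch_size=64 and MNIST's 28x28 grayscale images.
data, target = next(iter(train_loader))   # pull a single batch
data.shape, target.shape                  # (torch.Size([64, 1, 28, 28]), torch.Size([64]))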
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)   # 1 input channel (grayscale) -> 10 feature maps
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)  # 10 -> 20 feature maps
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)                  # fully connected: 320 -> 50
        self.fc2 = nn.Linear(50, 10)                   # 10 output classes (digits 0-9)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)                            # flatten for the fully connected layers
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)                 # log-probabilities for nll_loss
model = Net()
model
Net(
  (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1))
  (conv2_drop): Dropout2d(p=0.5, inplace=False)
  (fc1): Linear(in_features=320, out_features=50, bias=True)
  (fc2): Linear(in_features=50, out_features=10, bias=True)
)
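The 320 input features of fc1 come from the tensor shape after the two conv/pool stages: 28x28 -> 24x24 -> 12x12 -> 8x8 -> 4x4 with 20 channels, i.e. 20 * 4 * 4 = 320. A quick sketch to count the trainable parameters:
sum(p.numel() for p in model.parameters() if p.requires_grad)   # 21840 for this architecture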
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
for epoch in range(10):
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()              # clear gradients from the previous step
        output = model(data)
        loss = F.nll_loss(output, target)  # negative log-likelihood on the log-softmax outputs
        loss.backward()
        optimizer.step()
    print(f'Epoch : {epoch} >>> Current loss', float(loss))  # loss of the last batch in the epoch
torch.save(model.state_dict(), 'data/ch02/mnist/mnist.pth')
pretrained_model = Net()
pretrained_model.load_state_dict(torch.load('data/ch02/mnist/mnist.pth'))
Epoch : 0 >>> Current loss 0.48367545008659363
Epoch : 1 >>> Current loss 0.13018010556697845
Epoch : 2 >>> Current loss 0.4380616843700409
Epoch : 3 >>> Current loss 0.44207993149757385
Epoch : 4 >>> Current loss 0.21662652492523193
Epoch : 5 >>> Current loss 0.3018296957015991
Epoch : 6 >>> Current loss 0.23590700328350067
Epoch : 7 >>> Current loss 0.2751865088939667
Epoch : 8 >>> Current loss 0.0252096988260746
Epoch : 9 >>> Current loss 0.06518728286027908
<All keys matched successfully>
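A minimal sketch of evaluating the restored weights on the MNIST test split; the test loader, its batch size of 1000, and the accuracy print are assumptions added here, not part of the original run.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data/ch02/mnist', train=False, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=1000, shuffle=False)

pretrained_model.eval()   # disable the dropout layers during evaluation
correct = 0
with torch.no_grad():
    for data, target in test_loader:
        pred = pretrained_model(data).argmax(dim=1)   # most likely class per sample
        correct += (pred == target).sum().item()
print(f'Test accuracy: {correct / len(test_loader.dataset):.4f}')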