Goal: Build a DCGAN to generate pictures of dogs.
import os
import zipfile

import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader  # dataset builders
import torchvision
!wget -P /content/dl_test/ 'https://cg.cs.tsinghua.edu.cn/ThuDogs/low-annotations.zip'
--2023-12-31 14:53:33--  https://cg.cs.tsinghua.edu.cn/ThuDogs/low-annotations.zip
Resolving cg.cs.tsinghua.edu.cn (cg.cs.tsinghua.edu.cn)... 101.6.6.219, 2402:f000:1:416:101:6:6:219
Connecting to cg.cs.tsinghua.edu.cn (cg.cs.tsinghua.edu.cn)|101.6.6.219|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 37326330 (36M) [application/zip]
Saving to: ‘/content/dl_test/low-annotations.zip’

low-annotations.zip 100%[===================>]  35.60M  10.8MB/s    in 4.0s

2023-12-31 14:53:39 (8.96 MB/s) - ‘/content/dl_test/low-annotations.zip’ saved [37326330/37326330]
with zipfile.ZipFile("/content/dl_test/low-annotations.zip","r") as zip_ref:
zip_ref.extractall("/content/dl_test/")
!wget -P /content/imgs/ 'https://cloud.tsinghua.edu.cn/seafhttp/files/817eeb66-a140-4903-a869-c59e12fb72c4/low-resolution.zip'
--2023-12-31 15:02:35--  https://cloud.tsinghua.edu.cn/seafhttp/files/817eeb66-a140-4903-a869-c59e12fb72c4/low-resolution.zip
Resolving cloud.tsinghua.edu.cn (cloud.tsinghua.edu.cn)... 166.111.6.101, 2402:f000:1:406:166:111:6:101
Connecting to cloud.tsinghua.edu.cn (cloud.tsinghua.edu.cn)|166.111.6.101|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 2664983287 (2.5G) [application/zip]
Saving to: ‘/content/imgs/low-resolution.zip’

low-resolution.zip 100%[===================>]   2.48G  12.3MB/s    in 3m 27s

2023-12-31 15:06:03 (12.3 MB/s) - ‘/content/imgs/low-resolution.zip’ saved [2664983287/2664983287]
with zipfile.ZipFile('/content/imgs/low-resolution.zip', 'r') as zip_ref:
    zip_ref.extractall('/content/imgs/')
# os.walk yields (dirpath, dirnames, filenames) per directory; flatten the
# per-breed filename lists into one list of image file names.
img_files = []
for _, _, filenames in os.walk('/content/imgs/low-resolution/'):
    img_files += filenames
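The Dataset and DataLoader imports above are not used yet in this section; below is a minimal sketch of how the downloaded image folder could be wrapped into a Dataset. The DogDataset name, the 64x64 resize, and the [-1, 1] normalization are my assumptions (the usual DCGAN preprocessing), not code from the original notebook.
from PIL import Image
import torchvision.transforms as T

class DogDataset(Dataset):
    # Hypothetical wrapper around the downloaded image folder (sketch only).
    def __init__(self, root='/content/imgs/low-resolution/'):
        # store full paths rather than bare filenames so images can be opened
        self.paths = [os.path.join(dirpath, fname)
                      for dirpath, _, fnames in os.walk(root)
                      for fname in fnames]
        # assumed preprocessing: resize to 64x64 and scale pixels to [-1, 1]
        self.transform = T.Compose([
            T.Resize((64, 64)),
            T.ToTensor(),                                   # uint8 [0, 255] -> float [0, 1]
            T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # [0, 1] -> [-1, 1]
        ])
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, idx):
        img = Image.open(self.paths[idx]).convert('RGB')
        return self.transform(img)

# e.g. loader = DataLoader(DogDataset(), batch_size=128, shuffle=True)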
class Discriminator(nn.Module):
    def __init__(self):
        super().__init__()
        self.name = 'Discriminator'
        self.out = 32
        # Spatial sizes in the comments assume 64x64 RGB inputs.
        self.model = nn.Sequential(
            nn.Conv2d(
                in_channels=3,
                out_channels=self.out,
                kernel_size=4,
                stride=2,
                padding=1),                               # 32x32
            nn.LeakyReLU(0.2),
            self._convlayer(self.out, 2 * self.out),      # 16x16
            self._convlayer(2 * self.out, 4 * self.out),  # 8x8
            self._convlayer(4 * self.out, 8 * self.out),  # 4x4
            nn.Conv2d(8 * self.out, 1, kernel_size=4, stride=2, padding=0),  # 1x1 score
            nn.Sigmoid()
        )

    def _convlayer(self, in_channels, out_channels):
        # strided conv + batch norm + LeakyReLU halves the spatial size
        return nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=4,
                stride=2,
                padding=1
            ),
            nn.BatchNorm2d(num_features=out_channels),
            nn.LeakyReLU(0.2)
        )

    def forward(self, x):
        return self.model(x)
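The Discriminator maps an image to a real/fake score; the DCGAN named in the goal also needs the mirrored Generator, which does not appear in this section. A sketch follows, assuming a 100-dimensional latent vector and 64x64 RGB output; the layer widths mirror the discriminator's and the names are placeholders, not the original notebook's code.
class Generator(nn.Module):
    # Mirrored DCGAN generator (assumed sketch).
    def __init__(self, latent_dim=100, base=32):
        super().__init__()
        self.model = nn.Sequential(
            nn.ConvTranspose2d(latent_dim, 8 * base, kernel_size=4, stride=1, padding=0),  # 1x1 -> 4x4
            nn.BatchNorm2d(8 * base),
            nn.ReLU(),
            self._upconvlayer(8 * base, 4 * base),   # 8x8
            self._upconvlayer(4 * base, 2 * base),   # 16x16
            self._upconvlayer(2 * base, base),       # 32x32
            nn.ConvTranspose2d(base, 3, kernel_size=4, stride=2, padding=1),                # 64x64
            nn.Tanh()                                # outputs in [-1, 1], matching normalized inputs
        )

    def _upconvlayer(self, in_channels, out_channels):
        # transposed conv + batch norm + ReLU doubles the spatial size
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )

    def forward(self, z):
        return self.model(z)

# e.g. Generator()(torch.randn(1, 100, 1, 1)).size() -> torch.Size([1, 3, 64, 64])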
class Discriminator_alt_1(nn.Module):
    # Alternative sketch: plain convolutions + max pooling instead of strided convs.
    # The layer arguments below are placeholders; the original cell left them blank.
    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.MaxPool2d(2),
            nn.LeakyReLU(0.2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.MaxPool2d(2),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.MaxPool2d(2),
            nn.LeakyReLU(0.2),
            # one fully connected layer would follow here to produce the score
        )
File "<ipython-input-34-542ba4d8878c>", line 2 def __init(self)__: ^ SyntaxError: expected ':'
model = Discriminator()
# a is a sample image tensor loaded earlier: uint8, shape [3, 218, 178]
model.forward(a)
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-48-9643b26f2404> in <cell line: 1>()
----> 1 model.forward(a)

...

/usr/local/lib/python3.10/dist-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight, bias)
    454                             weight, bias, self.stride,
    455                             _pair(0), self.dilation, self.groups)
--> 456         return F.conv2d(input, weight, bias, self.stride,
    457                         self.padding, self.dilation, self.groups)

RuntimeError: Input type (unsigned char) and bias type (float) should be the same
# cast the uint8 image to float32 to fix the dtype mismatch
model.forward(a.type(torch.float32))
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-61-a766de7fa834> in <cell line: 1>()
----> 1 model.forward(a.type(torch.float32))

...

/usr/local/lib/python3.10/dist-packages/torch/nn/modules/batchnorm.py in _check_input_dim(self, input)
    414     def _check_input_dim(self, input):
    415         if input.dim() != 4:
--> 416             raise ValueError(f"expected 4D input (got {input.dim()}D input)")

ValueError: expected 4D input (got 3D input)
a.type(torch.float32)
tensor([[[253., 253., 253.,  ..., 246., 255., 254.],
         [253., 253., 253.,  ..., 248., 255., 254.],
         [253., 253., 253.,  ..., 250., 255., 255.],
         ...,
         [140., 115., 146.,  ..., 122., 123., 122.],
         [130., 138., 166.,  ..., 118., 120., 118.],
         [168., 204., 245.,  ..., 118., 120., 120.]],

        [[231., 231., 231.,  ..., 228., 237., 238.],
         [231., 231., 231.,  ..., 230., 237., 238.],
         [231., 231., 231.,  ..., 232., 238., 239.],
         ...,
         [ 74.,  49.,  78.,  ...,  55.,  56.,  56.],
         [ 62.,  70.,  98.,  ...,  49.,  50.,  51.],
         [100., 136., 177.,  ...,  49.,  50.,  50.]],

        [[194., 194., 194.,  ..., 216., 223., 222.],
         [194., 194., 194.,  ..., 218., 223., 222.],
         [194., 194., 194.,  ..., 220., 224., 223.],
         ...,
         [ 26.,   1.,  33.,  ...,  28.,  30.,  30.],
         [ 15.,  23.,  53.,  ...,  20.,  24.,  24.],
         [ 53.,  89., 132.,  ...,  20.,  24.,  24.]]])
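The printout shows raw pixel values in [0, 255]. The usual DCGAN convention (and what a Tanh generator like the sketch above would produce) is inputs scaled to [-1, 1]; a one-line rescale for that, as an assumed preprocessing step rather than something from the original cells:
# scale uint8 pixel values from [0, 255] to [-1, 1] (assumed preprocessing)
a_norm = a.type(torch.float32) / 127.5 - 1.0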
# unsqueeze adds the batch dimension that BatchNorm2d expects (4D input)
a.unsqueeze(0).size()
torch.Size([1, 3, 218, 178])
model.forward(a.type(torch.float32).unsqueeze(0)).size()
torch.Size([1, 1, 5, 4])
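The [1, 1, 5, 4] output is a 5x4 grid of scores because the raw 218x178 image was fed in directly; with 64x64 inputs (the size the layer comments assume) the same network collapses each image to a single score, which is what a standard DCGAN's BCE loss expects. A quick sanity check, where the bilinear resize is my assumption standing in for the Dataset's Resize transform:
import torch.nn.functional as F

# resize the test image to 64x64 so the discriminator emits one score per image
resized = F.interpolate(a.type(torch.float32).unsqueeze(0), size=(64, 64), mode='bilinear')
model.forward(resized).size()   # expected: torch.Size([1, 1, 1, 1])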