#!/usr/bin/env python
# coding: utf-8

# # One Hundred Layers Tiramisu

# In[1]:

import torch
import torch.nn as nn
import torch.nn.functional as F

from collections import OrderedDict


# ### Initial Conv Block

# In[29]:

class _FirstConv(nn.Sequential):
    def __init__(self, num_input_features):
        super(_FirstConv, self).__init__()
        self.add_module('conv0', nn.Conv2d(3, num_input_features, kernel_size=7,
                                           stride=2, padding=3, bias=False))
        self.add_module('norm0', nn.BatchNorm2d(num_input_features))
        self.add_module('relu0', nn.ReLU(inplace=True))
        self.add_module('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))


# In[30]:

# Test
conv1 = _FirstConv(5)


# ### Dense Layer

# In[31]:

class _DenseLayer(nn.Sequential):
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_DenseLayer, self).__init__()
        # Module names may not contain '.' in current PyTorch, so 'norm.1' etc.
        # are written as 'norm1', 'relu1', 'conv1', ...
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate,
                                           kernel_size=1, stride=1, bias=False))
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                                           kernel_size=3, stride=1, padding=1, bias=False))
        self.drop_rate = drop_rate

    def forward(self, x):
        new_features = super(_DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate,
                                     training=self.training)
        # Dense connectivity: concatenate the input with the newly computed features.
        return torch.cat([x, new_features], 1)


# In[32]:

# Test
dense1 = _DenseLayer(5, 3, 1, .5)


# ### Dense Block

# In[33]:

class _DenseBlock(nn.Sequential):
    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            # Each layer sees the block input plus all features produced so far.
            layer = _DenseLayer(num_input_features + i * growth_rate,
                                growth_rate, bn_size, drop_rate)
            self.add_module('denselayer%d' % (i + 1), layer)


# In[34]:

# Test
denseBlock1 = _DenseBlock(4, 4, 1, 4, .5)


# ### Transition Up

# In[35]:

class _TransitionUp(nn.Sequential):
    def __init__(self, num_input_features, num_output_features):
        super(_TransitionUp, self).__init__()
        # The Tiramisu paper's transition up uses a 3x3 transposed convolution with
        # stride 2 to double the spatial resolution; the earlier cell had copied the
        # downsampling transition here, so the pooling is replaced accordingly.
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('upconv', nn.ConvTranspose2d(num_input_features, num_output_features,
                                                     kernel_size=3, stride=2, padding=1,
                                                     output_padding=1, bias=False))


# In[36]:

# Test
transUp = _TransitionUp(5, 10)


# ### Transition Down

# In[37]:

class _TransitionDown(nn.Sequential):
    def __init__(self, num_input_features, num_output_features):
        super(_TransitionDown, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
                                          kernel_size=1, stride=1, bias=False))
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))


# In[38]:

# Test
transDown = _TransitionDown(5, 10)
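
# A quick sanity check on the dense block built above: a dummy batch is pushed through
# `denseBlock1` to confirm the channel bookkeeping. The 32x32 spatial size is an
# arbitrary choice; only the 4 input channels must match the block. The output should
# carry num_input_features + num_layers * growth_rate = 4 + 4 * 4 = 20 channels.

# In[ ]:

denseBlock1.eval()  # use running BatchNorm stats and disable dropout for the check
x = torch.randn(1, 4, 32, 32)  # arbitrary spatial size; 4 channels to match the block
with torch.no_grad():
    out = denseBlock1(x)
print(out.shape)  # torch.Size([1, 20, 32, 32]): 4 input channels + 4 layers * growth rate 4
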
# ### Final Model

# In[45]:

class FCDenseNet(nn.Module):
    r"""FC-DenseNet model class, based on
    `"The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic
    Segmentation" <https://arxiv.org/abs/1611.09326>`_

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in the paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for the number of bottleneck layers
            (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """

    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        super(FCDenseNet, self).__init__()

        self.features = nn.Sequential()
        self.features.add_module('firstConv', _FirstConv(num_init_features))

        # Each dense block, followed by a downsampling transition (except after the last block)
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _TransitionDown(num_input_features=num_features,
                                        num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=7).view(features.size(0), -1)
        out = self.classifier(out)
        return out


# In[46]:

# Test
model = FCDenseNet()


# In[ ]:
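
# A quick sanity check on the assembled model: the forward pass ends with a 7x7 average
# pool followed by the linear classifier, so this assumes an ImageNet-style 224x224
# input, which reaches that pool as a 7x7 feature map and yields one score per class.

model.eval()  # running BatchNorm stats; no dropout with the default drop_rate=0
with torch.no_grad():
    dummy = torch.randn(1, 3, 224, 224)  # assumed ImageNet-style input size
    logits = model(dummy)
print(logits.shape)  # torch.Size([1, 1000])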