diff --git a/training/deeplab_resnet.py b/training/deeplab_resnet.py
index 61e3548..1d1decb 100644
--- a/training/deeplab_resnet.py
+++ b/training/deeplab_resnet.py
@@ -23,10 +23,10 @@
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 """
-
+import numpy as np
 import torch.nn as nn
 import torch
-import numpy as np
+
 
 affine_par = True
 
@@ -77,10 +77,10 @@ def forward(self, x):
 
 class Bottleneck(nn.Module):
     expansion = 4
-    def __init__(self, inplanes, planes, stride=1, dilation_ = 1, downsample=None):
+    def __init__(self, inplanes, planes, stride=1, dilation_=1, downsample=None):
         super(Bottleneck, self).__init__()
         self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
-        self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)
+        self.bn1 = nn.BatchNorm2d(planes,affine=affine_par)
         for i in self.bn1.parameters():
             i.requires_grad = False
         padding = 1
@@ -89,12 +89,12 @@ def __init__(self, inplanes, planes, stride=1, dilation_ = 1, downsample=None):
         elif dilation_ == 4:
             padding = 4
         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
-                       padding=padding, bias=False, dilation = dilation_)
-        self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)
+                       padding=padding, bias=False, dilation=dilation_)
+        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
         for i in self.bn2.parameters():
             i.requires_grad = False
         self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
-        self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)
+        self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par)
         for i in self.bn3.parameters():
             i.requires_grad = False
         self.relu = nn.ReLU(inplace=True)
@@ -130,7 +130,7 @@ def __init__(self,dilation_series,padding_series,NoLabels):
         super(Classifier_Module, self).__init__()
         self.conv2d_list = nn.ModuleList()
         for dilation,padding in zip(dilation_series,padding_series):
-            self.conv2d_list.append(nn.Conv2d(2048,NoLabels,kernel_size=3,stride=1, padding =padding, dilation = dilation,bias = True))
+            self.conv2d_list.append(nn.Conv2d(2048, NoLabels, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True))
         for m in self.conv2d_list:
             m.weight.data.normal_(0, 0.01)
 
@@ -155,9 +155,9 @@ def __init__(self, block, layers,NoLabels):
         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
         self.layer1 = self._make_layer(block, 64, layers[0])
         self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
-        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation__ = 2)
-        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation__ = 4)
-        self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],NoLabels)
+        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation__=2)
+        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation__=4)
+        self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24], [6,12,18,24], NoLabels)
 
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
@@ -169,21 +169,21 @@ def __init__(self, block, layers,NoLabels):
 #        for i in m.parameters():
 #            i.requires_grad = False
 
-    def _make_layer(self, block, planes, blocks, stride=1,dilation__ = 1):
+    def _make_layer(self, block, planes, blocks, stride=1,dilation__=1):
         downsample = None
         if stride != 1 or self.inplanes != planes * block.expansion or dilation__ == 2 or dilation__ == 4:
             downsample = nn.Sequential(
                 nn.Conv2d(self.inplanes, planes * block.expansion,
                           kernel_size=1, stride=stride, bias=False),
-                nn.BatchNorm2d(planes * block.expansion,affine = affine_par),
+                nn.BatchNorm2d(planes * block.expansion, affine=affine_par),
             )
         for i in downsample._modules['1'].parameters():
             i.requires_grad = False
 
         layers = []
-        layers.append(block(self.inplanes, planes, stride,dilation_=dilation__, downsample = downsample ))
+        layers.append(block(self.inplanes, planes, stride,dilation_=dilation__, downsample=downsample))
         self.inplanes = planes * block.expansion
         for i in range(1, blocks):
-            layers.append(block(self.inplanes, planes,dilation_=dilation__))
+            layers.append(block(self.inplanes, planes, dilation_=dilation__))
         return nn.Sequential(*layers)
 
@@ -213,16 +213,16 @@ def forward(self, x):
 class MS_Deeplab(nn.Module):
     def __init__(self,block,NoLabels):
         super(MS_Deeplab,self).__init__()
-        self.Scale = ResNet(block,[3, 4, 23, 3],NoLabels) #changed to fix #4
+        self.Scale = ResNet(block, [3, 4, 23, 3], NoLabels) #changed to fix #4
 
     def forward(self,x):
         input_size_1 = x.size()[2]
         input_size_2 = x.size()[3]
 
         #print(x.size())
-        self.interp1 = nn.UpsamplingBilinear2d(size = (int(input_size_1*0.75)+1, int(input_size_2*0.75)+1))
-        self.interp2 = nn.UpsamplingBilinear2d(size = (int(input_size_1*0.5)+1, int(input_size_2*0.5)+1))
-        self.interp3 = nn.UpsamplingBilinear2d(size = (outS(input_size_1), outS(input_size_2)))
+        self.interp1 = nn.UpsamplingBilinear2d(size=(int(input_size_1*0.75)+1, int(input_size_2*0.75)+1))
+        self.interp2 = nn.UpsamplingBilinear2d(size=(int(input_size_1*0.5)+1, int(input_size_2*0.5)+1))
+        self.interp3 = nn.UpsamplingBilinear2d(size=(outS(input_size_1), outS(input_size_2)))
 
         out = []
         x2 = self.interp1(x)
@@ -244,7 +244,7 @@ def forward(self,x):
 class Deeplab(nn.Module):
     def __init__(self,block,NoLabels):
         super(Deeplab,self).__init__()
-        self.Scale = ResNet(block,[3, 4, 23, 3],NoLabels) #changed to fix #4
+        self.Scale = ResNet(block, [3, 4, 23, 3], NoLabels) #changed to fix #4
 
     def forward(self,x):
         return self.Scale(x)