Maximum Classifier Discrepancy for Unsupervised Domain Adaptation

2019-04-03  蜉蝣之翼

Code repository

This post walks through the semantic-segmentation domain-adaptation code.

Train the model (source gta, target city): python adapt_trainer.py gta city --net drn_d_105

Reading adapt_trainer.py

from models.dilated_fcn import DRNSegBase, DRNSegPixelClassifier

model_g = DRNSegBase(model_name=net_name, n_class=n_class, input_ch=input_ch)  # feature generator G
model_f1 = DRNSegPixelClassifier(n_class=n_class)  # pixel-wise classifier F1
model_f2 = DRNSegPixelClassifier(n_class=n_class)  # pixel-wise classifier F2
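Here model_g is the feature generator G, and model_f1 / model_f2 are the two pixel-wise classifiers F1 and F2 from the MCD paper. Adaptation alternates three updates: train everything on labelled source data, then maximize the classifier discrepancy on target data with respect to F1/F2, then minimize it with respect to G. Below is a minimal sketch of one iteration, assuming cross-entropy for source supervision, the L1 discrepancy between softmax outputs, and classifier outputs already at label resolution; names such as mcd_step, opt_g, opt_f, src_img, tgt_img and the num_k default are illustrative, not the repository's actual code.

import torch
import torch.nn.functional as F

def discrepancy(out1, out2):
    # Mean absolute difference between the two classifiers' softmax outputs
    return torch.mean(torch.abs(F.softmax(out1, dim=1) - F.softmax(out2, dim=1)))

def mcd_step(model_g, model_f1, model_f2, opt_g, opt_f,
             src_img, src_lbl, tgt_img, num_k=4):
    # Step A: train G, F1, F2 on labelled source images
    opt_g.zero_grad(); opt_f.zero_grad()
    feat = model_g(src_img)
    loss_src = F.cross_entropy(model_f1(feat), src_lbl) + \
               F.cross_entropy(model_f2(feat), src_lbl)
    loss_src.backward()
    opt_g.step(); opt_f.step()

    # Step B: fix G, train F1/F2 to maximize their discrepancy on target images
    # (while keeping the source classification loss low)
    opt_g.zero_grad(); opt_f.zero_grad()
    feat_src = model_g(src_img)
    loss = F.cross_entropy(model_f1(feat_src), src_lbl) + \
           F.cross_entropy(model_f2(feat_src), src_lbl)
    feat_tgt = model_g(tgt_img)
    loss -= discrepancy(model_f1(feat_tgt), model_f2(feat_tgt))
    loss.backward()
    opt_f.step()

    # Step C: fix F1/F2, train G to minimize the discrepancy (num_k generator updates)
    for _ in range(num_k):
        opt_g.zero_grad(); opt_f.zero_grad()
        feat_tgt = model_g(tgt_img)
        loss_dis = discrepancy(model_f1(feat_tgt), model_f2(feat_tgt))
        loss_dis.backward()
        opt_g.step()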
class DRNSegBase(nn.Module):
    def __init__(self, model_name, n_class, pretrained_model=None, pretrained=True, input_ch=3):
        super(DRNSegBase, self).__init__()

        # Build the DRN backbone (e.g. drn_d_105) as an ImageNet classifier
        model = drn.__dict__.get(model_name)(
            pretrained=pretrained, num_classes=1000, input_ch=input_ch)
        pmodel = nn.DataParallel(model)
        if pretrained_model is not None:
            pmodel.load_state_dict(pretrained_model)
        # Drop the last two children (average pooling and the 1x1 fc conv)
        # and keep only the fully convolutional feature extractor
        self.base = nn.Sequential(*list(model.children())[:-2])

        # 1x1 convolution mapping backbone features to n_class score maps
        self.seg = nn.Conv2d(model.out_dim, n_class,
                             kernel_size=1, bias=True)
        # He (Kaiming) initialization for the segmentation head
        m = self.seg
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
        m.bias.data.zero_()

    def forward(self, x):
        x = self.base(x)
        x = self.seg(x)
        return x

    def optim_parameters(self, memo=None):
        for param in self.base.parameters():
            yield param
        for param in self.seg.parameters():
            yield param
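As a quick sanity check on the generator alone: the DRN-D backbone has an overall output stride of 8 (only layer2 through layer4 downsample, see the DRN definition below), so a 512x512 input yields a 64x64 score map before DRNSegPixelClassifier produces the final prediction. A hypothetical snippet, not taken from the repository:

import torch
from models.dilated_fcn import DRNSegBase

g = DRNSegBase(model_name='drn_d_105', n_class=20, pretrained=False)
g.eval()
x = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    scores = g(x)
print(scores.shape)  # expected roughly torch.Size([1, 20, 64, 64]) at output stride 8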

Network architecture

class DRN(nn.Module):
    def __init__(self, block, layers, num_classes=1000,
                 channels=(16, 32, 64, 128, 256, 512, 512, 512),
                 out_map=False, out_middle=False, pool_size=28, arch='D'):
        super(DRN, self).__init__()
        self.inplanes = channels[0]
        self.out_map = out_map
        self.out_dim = channels[-1]
        self.out_middle = out_middle
        self.arch = arch

        # Stem: 7x7 convolution with stride 1, so no early downsampling (arch 'D')
        self.layer0 = nn.Sequential(
                nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3,
                          bias=False),
                nn.BatchNorm2d(channels[0]),
                nn.ReLU(inplace=True)
        )

        # Plain convolutional stages; layer2 downsamples by 2
        self.layer1 = self._make_conv_layers(
                channels[0], layers[0], stride=1)
        self.layer2 = self._make_conv_layers(
                channels[1], layers[1], stride=2)

        # Residual stages; layer3 and layer4 each downsample by 2 (overall stride 8),
        # while layer5 and layer6 keep the resolution and use dilation 2 and 4 instead
        self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2)
        self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2)
        self.layer5 = self._make_layer(block, channels[4], layers[4], dilation=2,
                                       new_level=False)
        self.layer6 = None if layers[5] == 0 else \
            self._make_layer(block, channels[5], layers[5], dilation=4,
                             new_level=False)

        # Extra plain convolutional stages with decreasing dilation
        # (the "degridding" layers of DRN-D)
        self.layer7 = None if layers[6] == 0 else \
                self._make_conv_layers(channels[6], layers[6], dilation=2)
        self.layer8 = None if layers[7] == 0 else \
                self._make_conv_layers(channels[7], layers[7], dilation=1)

        # Classification head used for ImageNet pretraining:
        # global average pooling followed by a 1x1 convolution
        self.avgpool = nn.AvgPool2d(pool_size)
        self.fc = nn.Conv2d(self.out_dim, num_classes, kernel_size=1,
                                stride=1, padding=0, bias=True)
        # He initialization for convolutions, constant weight for BatchNorm
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
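The forward method is omitted from the excerpt above. Conceptually it chains layer0 through layer8 and then either returns the dense score map (out_map=True) or applies average pooling plus the 1x1 fc for image-level classification. A simplified sketch, not the exact drn.py code (the real implementation also collects intermediate features when out_middle=True):

    def forward(self, x):
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        if self.layer6 is not None:
            x = self.layer6(x)
        if self.layer7 is not None:
            x = self.layer7(x)
        if self.layer8 is not None:
            x = self.layer8(x)
        if self.out_map:
            return self.fc(x)            # dense per-pixel class scores
        x = self.avgpool(x)
        x = self.fc(x)
        return x.view(x.size(0), -1)     # image-level class scores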

Testing

>>> import drn
>>> print(drn.__dict__)            # lists all available DRN constructors
>>> print(drn.__dict__.get('drn_d_105'))
<function drn_d_105 at 0x7f2a25fe9aa0>
>>> model = drn.__dict__.get('drn_d_105')(pretrained=True, num_classes=1000, input_ch=3)
Downloading: "https://tigress-web.princeton.edu/~fy/drn/models/drn_d_105-12b40979.pth" to /home/<username>/.torch/models/drn_d_105-12b40979.pth
100.0%
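As a hypothetical continuation of the same session, we can check the feature dimension that DRNSegBase's 1x1 seg head consumes; it follows from the default channels tuple above (channels[-1] = 512):

>>> model.out_dim
512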