Python torch.nn module: Sequential() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.Sequential().
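As a quick orientation before the examples (a minimal sketch using only standard PyTorch APIs), nn.Sequential can be built in three ways, all of which appear below: positionally, from an OrderedDict of named submodules, or incrementally via add_module():

    import torch
    import torch.nn as nn
    from collections import OrderedDict

    # Positional: submodules run in the order they are given.
    net_a = nn.Sequential(nn.Conv2d(1, 20, 5), nn.ReLU(),
                          nn.Conv2d(20, 64, 5), nn.ReLU())

    # OrderedDict: same behavior, but each submodule gets a readable name.
    net_b = nn.Sequential(OrderedDict([
        ('conv1', nn.Conv2d(1, 20, 5)),
        ('relu1', nn.ReLU()),
    ]))

    # add_module: build incrementally; an empty Sequential is the identity.
    net_c = nn.Sequential()
    net_c.add_module('conv1', nn.Conv2d(1, 20, 5))

    out = net_a(torch.randn(1, 1, 28, 28))  # forward = apply each module in order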

Project: convNet.pytorch    Author: eladhoffer
def __init__(self):
        super(mnist_model, self).__init__()
        self.feats = nn.Sequential(
            nn.Conv2d(1, 32, 5, 1, 1),
            nn.MaxPool2d(2, 2),
            nn.ReLU(True),
            nn.BatchNorm2d(32),

            nn.Conv2d(32, 64, 3,  1, 1),
            nn.ReLU(True),
            nn.BatchNorm2d(64),

            nn.Conv2d(64, 64, 3,  1, 1),
            nn.MaxPool2d(2, 2),
            nn.ReLU(True),
            nn.BatchNorm2d(64),

            nn.Conv2d(64, 128, 3, 1, 1),
            nn.ReLU(True),
            nn.BatchNorm2d(128)
        )

        self.classifier = nn.Conv2d(128, 10, 1)
        self.avgpool = nn.AvgPool2d(6, 6)
        self.dropout = nn.Dropout(0.5)
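The snippet above shows only __init__; a forward pass consistent with these layers (a sketch, not the project's actual code) would chain them like so:

    def forward(self, x):
        out = self.feats(x)         # conv stack: 1x28x28 -> 128x6x6
        out = self.dropout(out)
        out = self.classifier(out)  # 1x1 conv -> 10x6x6
        out = self.avgpool(out)     # 6x6 average pool -> 10x1x1
        return out.view(out.size(0), -1)  # flatten to 10 logits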
Project: crowdcount-mcnn    Author: svishwa
def __init__(self, bn=False):
        super(MCNN, self).__init__()

        self.branch1 = nn.Sequential(Conv2d( 1, 16, 9, same_padding=True, bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(16, 32, 7, same_padding=True, bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(32, 16, 7, same_padding=True, bn=bn),
                                     Conv2d(16,  8, 7, same_padding=True, bn=bn))

        self.branch2 = nn.Sequential(Conv2d( 1, 20, 7, same_padding=True, bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(20, 40, 5, same_padding=True, bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(40, 20, 5, same_padding=True, bn=bn),
                                     Conv2d(20, 10, 5, same_padding=True, bn=bn))

        self.branch3 = nn.Sequential(Conv2d( 1, 24, 5, same_padding=True, bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(24, 48, 3, same_padding=True, bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(48, 24, 3, same_padding=True, bn=bn),
                                     Conv2d(24, 12, 3, same_padding=True, bn=bn))

        self.fuse = nn.Sequential(Conv2d( 30, 1, 1, same_padding=True, bn=bn))
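The branch widths are chosen so the outputs concatenate to exactly the 30 input channels of fuse (8 + 10 + 12); a sketch of the forward pass this implies:

    import torch

    def forward(self, im_data):
        x1 = self.branch1(im_data)  # 8 channels
        x2 = self.branch2(im_data)  # 10 channels
        x3 = self.branch3(im_data)  # 12 channels
        return self.fuse(torch.cat((x1, x2, x3), 1))  # 30 -> 1 density map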
Project: YellowFin_Pytorch    Author: JianGoForIt
def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
        super(Block, self).__init__()
        group_width = cardinality * bottleneck_width
        self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(group_width)
        self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
        self.bn2 = nn.BatchNorm2d(group_width)
        self.conv3 = nn.Conv2d(group_width, self.expansion*group_width, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*group_width)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*group_width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*group_width, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*group_width)
            )
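Note that an empty nn.Sequential() is an identity mapping, so the shortcut is a no-op unless the 1x1 projection above is installed; self.expansion is a class attribute defined outside this snippet. A hedged sketch of the usual residual forward:

    import torch.nn.functional as F

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)  # identity or 1x1 projection
        return F.relu(out)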
Project: pytorch-semseg    Author: meetshah1995
def __init__(self, in_channels, n_filters, k_size,  stride, padding, bias=True):
        super(conv2DBatchNorm, self).__init__()

        self.cb_unit = nn.Sequential(nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
                                               padding=padding, stride=stride, bias=bias),
                                 nn.BatchNorm2d(int(n_filters)),)
Project: DeepLearning_PlantDiseases    Author: MarkoArsenovic
def _augment_module_post(net: nn.Module, callback_dict: dict) -> (dict, list):
    backward_hook_remove_func_list = []

    vis_param_dict = dict()
    vis_param_dict['layer'] = None
    vis_param_dict['index'] = None
    vis_param_dict['method'] = GradType.NAIVE

    for x, y in net.named_modules():
        if not isinstance(y, nn.Sequential) and y is not net:
            # I should add hook to all layers, in case they will be needed.
            backward_hook_remove_func_list.append(
                y.register_backward_hook(
                    partial(_backward_hook, module_name=x, callback_dict=callback_dict, vis_param_dict=vis_param_dict)))

    def remove_handles():
        for x in backward_hook_remove_func_list:
            x.remove()

    return vis_param_dict, remove_handles
Project: convNet.pytorch    Author: eladhoffer
def conv_bn(in_planes, out_planes, kernel_size, stride=1, padding=0, bias=False):
    "convolution with batchnorm, relu"
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size, stride=stride,
                  padding=padding, bias=bias),  # honor the bias argument instead of hard-coding False
        nn.BatchNorm2d(out_planes, eps=1e-3),
        nn.ReLU()
    )
Project: convNet.pytorch    Author: eladhoffer
def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
Project: convNet.pytorch    Author: eladhoffer
def _make_layer(self, block, planes, blocks, stride=1,
                    batch_norm=True):
        downsample = None
        if self.shortcut == 'C' or \
                self.shortcut == 'B' and \
                (stride != 1 or self.inplanes != planes * block.expansion):
            downsample = [nn.Conv2d(self.inplanes, planes * block.expansion,
                                    kernel_size=1, stride=stride, bias=not batch_norm)]
            if batch_norm:
                downsample.append(nn.BatchNorm2d(planes * block.expansion))
            downsample = nn.Sequential(*downsample)
        else:
            downsample = PlainDownSample(
                self.inplanes, planes * block.expansion, stride)

        layers = []
        layers.append(block(self.inplanes, planes,
                            stride, downsample, batch_norm))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, batch_norm=batch_norm))

        return nn.Sequential(*layers)
Project: PaintsPytorch    Author: orashi
def __init__(self, in_channels=256, out_channels=256, stride=1, cardinality=32):
        """ Constructor
        Args:
            in_channels: input channel dimensionality
            out_channels: output channel dimensionality
            stride: conv stride. Replaces pooling layer.
            cardinality: num of convolution groups.
        """
        super(DResNeXtBottleneck, self).__init__()
        D = out_channels // 2
        self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
        self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut.add_module('shortcut_conv',
                                     nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0,
                                               bias=False))
Project: PaintsPytorch    Author: orashi
def def_netF():
    vgg19 = M.vgg19()
    vgg19.load_state_dict(torch.load('vgg19.pth'))
    vgg19.classifier = nn.Sequential(
        *list(vgg19.classifier.children())[:2]
    )
    for param in vgg19.parameters():
        param.requires_grad = False
    return vgg19
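Slicing classifier.children() to the first two entries keeps only the first Linear layer and its ReLU, so the frozen network emits 4096-dimensional features. A usage sketch (weights left random here, since 'vgg19.pth' is a local checkpoint):

    import torch
    import torch.nn as nn
    import torchvision.models as M

    vgg19 = M.vgg19()
    vgg19.classifier = nn.Sequential(*list(vgg19.classifier.children())[:2])
    feats = vgg19(torch.randn(1, 3, 224, 224))
    print(feats.shape)  # torch.Size([1, 4096])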
Project: ResNeXt-DenseNet    Author: D-X-Y
def _make_layer(self, block, planes, blocks, stride=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
      downsample = nn.Sequential(
        nn.Conv2d(self.inplanes, planes * block.expansion,
              kernel_size=1, stride=stride, bias=False),
        nn.BatchNorm2d(planes * block.expansion),
      )

    layers = []
    layers.append(block(self.inplanes, planes, self.cardinality, self.base_width, stride, downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
      layers.append(block(self.inplanes, planes, self.cardinality, self.base_width))

    return nn.Sequential(*layers)
Project: CycleGAN-Tensorflow-PyTorch-Simple    Author: LynnHo
def __init__(self, dim=64):
        super(Generator, self).__init__()

        conv_bn_relu = conv_norm_act
        dconv_bn_relu = dconv_norm_act

        self.ls = nn.Sequential(nn.ReflectionPad2d(3),
                                conv_bn_relu(3, dim * 1, 7, 1),
                                conv_bn_relu(dim * 1, dim * 2, 3, 2, 1),
                                conv_bn_relu(dim * 2, dim * 4, 3, 2, 1),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                dconv_bn_relu(dim * 4, dim * 2, 3, 2, 1, 1),
                                dconv_bn_relu(dim * 2, dim * 1, 3, 2, 1, 1),
                                nn.ReflectionPad2d(3),
                                nn.Conv2d(dim, 3, 7, 1),
                                nn.Tanh())
Project: speed    Author: keon
def __init__(self, n_layers=2, h_size=512):
        super(ResLSTM, self).__init__()
        print('Building ResNet + LSTM model...')
        self.h_size = h_size
        self.n_layers = n_layers

        resnet = models.resnet50(pretrained=True)
        self.conv = nn.Sequential(*list(resnet.children())[:-1])

        self.lstm = nn.LSTM(1280, h_size, dropout=0.2, num_layers=n_layers)
        self.fc = nn.Sequential(
            nn.Linear(h_size, 64),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(64, 1)
        )
Project: speed    Author: keon
def __init__(self, h_size=512, n_layers=3):
        super(DenseLSTM, self).__init__()
        print('Building DenseNet + LSTM model...')
        self.h_size = h_size
        self.n_layers = n_layers

        densenet = models.densenet201(pretrained=True)
        self.conv = nn.Sequential(*list(densenet.children())[:-1])

        self.lstm = nn.LSTM(23040, h_size, dropout=0.2, num_layers=n_layers)
        self.fc = nn.Sequential(
            nn.Linear(h_size, 256),  # use h_size rather than a hard-coded 512
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(256, 1)
        )
Project: speed    Author: keon
def __init__(self, n_layers=2, h_size=420):
        super(AlexLSTM, self).__init__()
        print('Building AlexNet + LSTM model...')
        self.h_size = h_size
        self.n_layers = n_layers

        alexnet = models.alexnet(pretrained=True)
        self.conv = nn.Sequential(*list(alexnet.children())[:-1])

        self.lstm = nn.LSTM(1280, h_size, dropout=0.2, num_layers=n_layers)
        self.fc = nn.Sequential(
            nn.Linear(h_size, 64),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(64, 1)
        )
Project: DistanceGAN    Author: sagiebenaim
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):
        conv_block = []
        p = 0
        # TODO: support padding types
        assert(padding_type == 'zero')
        p = 1

        # TODO: InstanceNorm
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                       norm_layer(dim, affine=True),
                       nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                       norm_layer(dim, affine=True)]

        return nn.Sequential(*conv_block)
Project: pytorch-dist    Author: apaszke
def test_parameters(self):
        def num_params(module):
            return len(list(module.parameters()))
        class Net(nn.Container):
            def __init__(self):
                super(Net, self).__init__(
                    l1=l,
                    l2=l
                )
                self.param = Parameter(torch.Tensor(3, 5))
        l = nn.Linear(10, 20)
        n = Net()
        s = nn.Sequential(n, n, n, n)
        self.assertEqual(num_params(l), 2)
        self.assertEqual(num_params(n), 3)
        self.assertEqual(num_params(s), 3)
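Note: parameters() de-duplicates shared modules, which is exactly what the assertions check: l contributes a weight and a bias (2); Net registers the same Linear under two names plus one extra Parameter (3); and wrapping the same Net four times in nn.Sequential still yields 3.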
Project: DenseNet    Author: kevinzakka
def __init__(self, num_layers, in_channels, growth_rate, bottleneck, p):
        """
        Initialize the different parts of the DenseBlock.

        Params
        ------
        - num_layers: the number of layers L in the dense block.
        - in_channels: the number of input channels feeding into the first 
          subblock.
        - growth_rate: the number of output feature maps produced by each subblock.
          This number is common across all subblocks.
        - bottleneck: whether each subblock uses a 1x1 bottleneck layer.
        - p: dropout rate applied inside each subblock.
        """
        super(DenseBlock, self).__init__()

        # create L subblocks
        layers = []
        for i in range(num_layers):
            cumul_channels = in_channels + i * growth_rate
            layers.append(SubBlock(cumul_channels, growth_rate, bottleneck, p))

        self.block = nn.Sequential(*layers)
        self.out_channels = cumul_channels + growth_rate
Project: KagglePlanetPytorch    Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return F.sigmoid(self.pretrained_model(x))

    return MyModel(torchvision.models.resnet50(pretrained=True))
Project: KagglePlanetPytorch    Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return F.sigmoid(self.pretrained_model(x))

    return MyModel(torchvision.models.resnet34(pretrained=True))
Project: KagglePlanetPytorch    Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return F.sigmoid(self.pretrained_model(x))

    return MyModel(torchvision.models.resnet34(pretrained=True))
Project: KagglePlanetPytorch    Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return F.sigmoid(self.pretrained_model(x))

    return MyModel(torchvision.models.resnet50(pretrained=True))
Project: KagglePlanetPytorch    Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return F.sigmoid(self.pretrained_model(x))

    return MyModel(torchvision.models.resnet101(pretrained=True))
Project: KagglePlanetPytorch    Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return self.pretrained_model(x)

    return MyModel(torchvision.models.resnet18(pretrained=True))
Project: MIL.pytorch    Author: gujiuxiang
def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
Project: MIL.pytorch    Author: gujiuxiang
def __init__(self, opt):
        super(resnet_mil, self).__init__()
        import model.resnet as resnet
        resnet = resnet.resnet101()
        resnet.load_state_dict(torch.load('/media/jxgu/d2tb/model/resnet/resnet101.pth'))
        self.conv = torch.nn.Sequential()
        self.conv.add_module("conv1", resnet.conv1)
        self.conv.add_module("bn1", resnet.bn1)
        self.conv.add_module("relu", resnet.relu)
        self.conv.add_module("maxpool", resnet.maxpool)
        self.conv.add_module("layer1", resnet.layer1)
        self.conv.add_module("layer2", resnet.layer2)
        self.conv.add_module("layer3", resnet.layer3)
        self.conv.add_module("layer4", resnet.layer4)
        self.l1 = nn.Sequential(nn.Linear(2048, 1000),
                                nn.ReLU(True),
                                nn.Dropout(0.5))
        self.att_size = 7
        self.pool_mil = nn.MaxPool2d(kernel_size=self.att_size, stride=0)
Project: Video-Classification-Action-Recognition    Author: qijiezhao
def __init__(self, out_size, gpu_id, num_seg):
        super(VC_inception_v4, self).__init__()
        sys.path.insert(0, '../tool/models_zoo/')
        from inceptionv4.pytorch_load import inceptionv4
        self.inception_v4 = inceptionv4(pretrained=True).cuda()
        mod = [nn.Dropout(p=0.8)]  # .cuda(self.gpu_id)
        mod.append(nn.Linear(1536, 101))  # .cuda(self.gpu_id)
        new_fc = nn.Sequential(*mod)  # .cuda(self.gpu_id)
        self.inception_v4.classif = new_fc
        self.num_seg = num_seg
        # self.resnet101.fc = nn.Linear(2048, 101).cuda(gpu_id)

        self.avg_pool2d = nn.AvgPool2d(kernel_size=(3, 1))  # .cuda(self.gpu_id)
        # for params in self.inception_v4.parameters():
        #     params.requires_grad=False
        # for params in self.inception_v4.features[21].parameters():
        #     params.requires_grad=True
Project: inferno    Author: inferno-pytorch
def _make_test_model():
        import torch.nn as nn
        from inferno.extensions.layers.reshape import AsMatrix

        toy_net = nn.Sequential(nn.Conv2d(3, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 256, 3, 1, 1),
                                nn.ELU(),
                                nn.AdaptiveAvgPool2d((1, 1)),
                                AsMatrix(),
                                nn.Linear(256, 10),
                                nn.Softmax(dim=1))  # explicit dim avoids the implicit-dim deprecation
        return toy_net
Project: inferno    Author: inferno-pytorch
def setUp(self):
        # Build a simple model
        model = Sequential(Conv2D(3, 32, 3, 'ReLU'),
                           MaxPool2d(2, 2),
                           Conv2D(32, 32, 3, 'ReLU'),
                           MaxPool2d(2, 2),
                           Conv2D(32, 32, 3, 'ReLU'),
                           MaxPool2d(2, 2),
                           Conv2D(32, 32, 3, 'ReLU'),
                           AdaptiveAvgPool2d((1, 1)),
                           AsMatrix(),
                           Linear(32, 10),
                           Softmax())
        train_dataloader = generate_random_dataloader(512, (3, 32, 32), 10, batch_size=16)
        validate_dataloader = generate_random_dataloader(32, (3, 32, 32), 10, batch_size=16)
        # Build trainer
        trainer = Trainer(model)\
            .bind_loader('train', train_dataloader)\
            .bind_loader('validate', validate_dataloader)\
            .save_to_directory(to_directory=join(self.WORKING_DIRECTORY, 'Weights'))\
            .build_criterion('CrossEntropyLoss').build_optimizer('RMSprop')
        self.trainer = trainer
Project: PyTorchDemystified    Author: hhsecond
def __init__(self, config):
        super(SNLIClassifier, self).__init__()
        self.config = config
        self.embed = nn.Embedding(config.n_embed, config.d_embed)
        self.projection = Linear(config.d_embed, config.d_proj)
        self.embed_bn = BatchNorm(config.d_proj)
        self.embed_dropout = nn.Dropout(p=config.embed_dropout)
        self.encoder = SPINN(config) if config.spinn else Encoder(config)
        feat_in_size = config.d_hidden * (
            2 if self.config.birnn and not self.config.spinn else 1)
        self.feature = Feature(feat_in_size, config.mlp_dropout)
        self.mlp_dropout = nn.Dropout(p=config.mlp_dropout)
        self.relu = nn.ReLU()
        mlp_in_size = 4 * feat_in_size
        mlp = [nn.Linear(mlp_in_size, config.d_mlp), self.relu,
               nn.BatchNorm1d(config.d_mlp), self.mlp_dropout]
        for i in range(config.n_mlp_layers - 1):
            mlp.extend([nn.Linear(config.d_mlp, config.d_mlp), self.relu,
                        nn.BatchNorm1d(config.d_mlp), self.mlp_dropout])
        mlp.append(nn.Linear(config.d_mlp, config.d_out))
        self.out = nn.Sequential(*mlp)
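The build-a-list-then-unpack idiom above generalizes into a small helper; a minimal sketch (names hypothetical):

    import torch.nn as nn

    def make_mlp(in_dim, hidden_dim, out_dim, n_layers, p=0.5):
        # Assemble the layer list imperatively, then unpack into Sequential.
        layers = [nn.Linear(in_dim, hidden_dim), nn.ReLU(),
                  nn.BatchNorm1d(hidden_dim), nn.Dropout(p)]
        for _ in range(n_layers - 1):
            layers += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
                       nn.BatchNorm1d(hidden_dim), nn.Dropout(p)]
        layers.append(nn.Linear(hidden_dim, out_dim))
        return nn.Sequential(*layers)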
Project: Tacotron_pytorch    Author: root20
def __init__(self, hidden_size, output_size, r_factor=2, dropout_p=0.5):
        super(AttnDecoderRNN, self).__init__()
        self.r_factor = r_factor

        self.prenet = nn.Sequential(
            nn.Linear(output_size, 2 * hidden_size),
            nn.ReLU(),
            nn.Dropout(dropout_p),
            nn.Linear(2 * hidden_size, hidden_size),
            nn.ReLU(),
            nn.Dropout(dropout_p)
        )
        self.linear_dec = nn.Linear(2 * hidden_size, 2 * hidden_size)
        self.gru_att = nn.GRU(hidden_size, 2 * hidden_size, batch_first=True)

        self.attn = nn.Linear(2 * hidden_size, 1)       # TODO: change name...

        self.short_cut = nn.Linear(4 * hidden_size, 2 * hidden_size)
        self.gru_dec1 = nn.GRU(4 * hidden_size, 2 * hidden_size, num_layers=1, batch_first=True)
        self.gru_dec2 = nn.GRU(2 * hidden_size, 2 * hidden_size, num_layers=1, batch_first=True)

        self.out = nn.Linear(2 * hidden_size, r_factor * output_size)
Project: open-reid    Author: Cysu
def __init__(self, in_planes, out_planes, pool_method, stride):
        super(Block, self).__init__()
        self.branches = nn.ModuleList([
            nn.Sequential(
                _make_conv(in_planes, out_planes, kernel_size=1, padding=0),
                _make_conv(out_planes, out_planes, stride=stride)
            ),
            nn.Sequential(
                _make_conv(in_planes, out_planes, kernel_size=1, padding=0),
                _make_conv(out_planes, out_planes),
                _make_conv(out_planes, out_planes, stride=stride))
        ])

        if pool_method == 'Avg':
            assert stride == 1
            self.branches.append(
                _make_conv(in_planes, out_planes, kernel_size=1, padding=0))
            self.branches.append(nn.Sequential(
                nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
                _make_conv(in_planes, out_planes, kernel_size=1, padding=0)))
        else:
            self.branches.append(
                nn.MaxPool2d(kernel_size=3, stride=stride, padding=1))
Project: pytorch-tutorials    Author: tfygg
def __init__(self):
        super(LeNetSeq, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(3, 6, 5),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )

        self.fc = nn.Sequential(
            nn.Linear(16*5*5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10)
        )
Project: faster_rcnn_pytorch    Author: longcw
def __init__(self, bn=False):
        super(VGG16, self).__init__()

        self.conv1 = nn.Sequential(Conv2d(3, 64, 3, same_padding=True, bn=bn),
                                   Conv2d(64, 64, 3, same_padding=True, bn=bn),
                                   nn.MaxPool2d(2))
        self.conv2 = nn.Sequential(Conv2d(64, 128, 3, same_padding=True, bn=bn),
                                   Conv2d(128, 128, 3, same_padding=True, bn=bn),
                                   nn.MaxPool2d(2))
        network.set_trainable(self.conv1, requires_grad=False)
        network.set_trainable(self.conv2, requires_grad=False)

        self.conv3 = nn.Sequential(Conv2d(128, 256, 3, same_padding=True, bn=bn),
                                   Conv2d(256, 256, 3, same_padding=True, bn=bn),
                                   Conv2d(256, 256, 3, same_padding=True, bn=bn),
                                   nn.MaxPool2d(2))
        self.conv4 = nn.Sequential(Conv2d(256, 512, 3, same_padding=True, bn=bn),
                                   Conv2d(512, 512, 3, same_padding=True, bn=bn),
                                   Conv2d(512, 512, 3, same_padding=True, bn=bn),
                                   nn.MaxPool2d(2))
        self.conv5 = nn.Sequential(Conv2d(512, 512, 3, same_padding=True, bn=bn),
                                   Conv2d(512, 512, 3, same_padding=True, bn=bn),
                                   Conv2d(512, 512, 3, same_padding=True, bn=bn))
Project: pytorch-arda    Author: corenel
def __init__(self):
        """Init LeNet encoder."""
        super(Generator, self).__init__()

        self.restored = False

        self.encoder = nn.Sequential(
            # 1st conv block
            # input [1 x 28 x 28]
            # output [64 x 12 x 12]
            nn.Conv2d(1, 64, 5, 1, 0, bias=False),
            nn.MaxPool2d(2),
            nn.ReLU(),
            # 2nd conv block
            # input [64 x 12 x 12]
            # output [50 x 4 x 4]
            nn.Conv2d(64, 50, 5, 1, 0, bias=False),
            nn.Dropout2d(),
            nn.MaxPool2d(2),
            nn.ReLU()
        )
        self.fc1 = nn.Linear(50 * 4 * 4, 500)
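A forward pass consistent with the shape comments above (a sketch; the 50 * 4 * 4 flattening matches fc1's input size):

    def forward(self, x):
        out = self.encoder(x)                       # [N, 50, 4, 4]
        return self.fc1(out.view(-1, 50 * 4 * 4))   # [N, 500]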
Project: FewShotLearning    Author: gitabcworld
def convLayer(opt, layer_pos, nInput, nOutput, k):
    "3x3 convolution with padding"
    #if 'BN_momentum' in opt.keys():
    #    batchNorm = nn.BatchNorm2d(nOutput,momentum=opt['BN_momentum'])
    #else:
    #    batchNorm = nn.BatchNorm2d(nOutput)

    seq = nn.Sequential(
        nn.Conv2d(nInput, nOutput, kernel_size=k,
                  stride=1, padding=1, bias=True),
        #batchNorm,
        opt['bnorm2d'][layer_pos],
        nn.ReLU(True),
        nn.MaxPool2d(kernel_size=2, stride=2)
    )
    if opt['useDropout']: # Add dropout module
        list_seq = list(seq.modules())[1:]
        list_seq.append(nn.Dropout(0.1))
        seq = nn.Sequential(*list_seq)
    return seq
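One caveat: modules() walks the module tree recursively and yields the Sequential itself first, so the [1:] slice only works because this block is flat. children() states the intent directly; an equivalent sketch:

    if opt['useDropout']:
        seq = nn.Sequential(*list(seq.children()), nn.Dropout(0.1))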
Project: pytorch-cifar    Author: kuangliu
def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU(inplace=True)]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
Project: YellowFin_Pytorch    Author: JianGoForIt
def _make_layer(self, num_blocks, stride):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, stride))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        # Double bottleneck_width after each stage.
        self.bottleneck_width *= 2
        return nn.Sequential(*layers)
Project: pytorch-semseg    Author: meetshah1995
def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
        super(pspnet, self).__init__()
        self.is_deconv = is_deconv
        self.in_channels = in_channels
        self.is_batchnorm = is_batchnorm
        self.feature_scale = feature_scale
        self.layers = [2, 2, 2, 2] # Currently hardcoded for ResNet-18

        filters = [64, 128, 256, 512]
        filters = [x // self.feature_scale for x in filters]  # integer division keeps channel counts ints

        self.inplanes = filters[0]


        # Encoder
        self.convbnrelu1 = conv2DBatchNormRelu(in_channels=3, k_size=7, n_filters=64,
                                               padding=3, stride=2, bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        block = residualBlock
        self.encoder1 = self._make_layer(block, filters[0], self.layers[0])
        self.encoder2 = self._make_layer(block, filters[1], self.layers[1], stride=2)
        self.encoder3 = self._make_layer(block, filters[2], self.layers[2], stride=2)
        self.encoder4 = self._make_layer(block, filters[3], self.layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)


        # Decoder
        self.decoder4 = linknetUp(filters[3], filters[2])
        self.decoder3 = linknetUp(filters[2], filters[1])
        self.decoder2 = linknetUp(filters[1], filters[0])
        self.decoder1 = linknetUp(filters[0], filters[0])

        # Final Classifier
        self.finaldeconvbnrelu1 = nn.Sequential(nn.ConvTranspose2d(filters[0], 32 // feature_scale, 3, 2, 1),
                                      nn.BatchNorm2d(32 // feature_scale),
                                      nn.ReLU(inplace=True),)
        self.finalconvbnrelu2 = conv2DBatchNormRelu(in_channels=32 // feature_scale, k_size=3, n_filters=32 // feature_scale, padding=1, stride=1)
        self.finalconv3 = nn.Conv2d(32 // feature_scale, n_classes, 2, 2, 0)
Project: pytorch-semseg    Author: meetshah1995
def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion,
                                                 kernel_size=1, stride=stride, bias=False),
                                       nn.BatchNorm2d(planes * block.expansion),)
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
Project: pytorch-semseg    Author: meetshah1995
def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
        super(linknet, self).__init__()
        self.is_deconv = is_deconv
        self.in_channels = in_channels
        self.is_batchnorm = is_batchnorm
        self.feature_scale = feature_scale
        self.layers = [2, 2, 2, 2] # Currently hardcoded for ResNet-18

        filters = [64, 128, 256, 512]
        filters = [x // self.feature_scale for x in filters]  # integer division keeps channel counts ints

        self.inplanes = filters[0]


        # Encoder
        self.convbnrelu1 = conv2DBatchNormRelu(in_channels=3, k_size=7, n_filters=64,
                                               padding=3, stride=2, bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        block = residualBlock
        self.encoder1 = self._make_layer(block, filters[0], self.layers[0])
        self.encoder2 = self._make_layer(block, filters[1], self.layers[1], stride=2)
        self.encoder3 = self._make_layer(block, filters[2], self.layers[2], stride=2)
        self.encoder4 = self._make_layer(block, filters[3], self.layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)


        # Decoder
        self.decoder4 = linknetUp(filters[3], filters[2])
        self.decoder3 = linknetUp(filters[2], filters[1])
        self.decoder2 = linknetUp(filters[1], filters[0])
        self.decoder1 = linknetUp(filters[0], filters[0])

        # Final Classifier
        self.finaldeconvbnrelu1 = nn.Sequential(nn.ConvTranspose2d(filters[0], 32 // feature_scale, 3, 2, 1),
                                      nn.BatchNorm2d(32 // feature_scale),
                                      nn.ReLU(inplace=True),)
        self.finalconvbnrelu2 = conv2DBatchNormRelu(in_channels=32 // feature_scale, k_size=3, n_filters=32 // feature_scale, padding=1, stride=1)
        self.finalconv3 = nn.Conv2d(32 // feature_scale, n_classes, 2, 2, 0)
Project: pytorch-semseg    Author: meetshah1995
def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion,
                                                 kernel_size=1, stride=stride, bias=False),
                                       nn.BatchNorm2d(planes * block.expansion),)
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
Project: pytorch-semseg    Author: meetshah1995
def __init__(self, in_channels, n_filters, k_size,  stride, padding, bias=True):
        super(conv2DBatchNormRelu, self).__init__()

        self.cbr_unit = nn.Sequential(nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
                                                padding=padding, stride=stride, bias=bias),
                                 nn.BatchNorm2d(int(n_filters)),
                                 nn.ReLU(inplace=True),)
Project: pytorch-semseg    Author: meetshah1995
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(deconv2DBatchNormRelu, self).__init__()

        self.dcbr_unit = nn.Sequential(nn.ConvTranspose2d(int(in_channels), int(n_filters), kernel_size=k_size,
                                                padding=padding, stride=stride, bias=bias),
                                 nn.BatchNorm2d(int(n_filters)),
                                 nn.ReLU(inplace=True),)
Project: pytorch-semseg    Author: meetshah1995
def __init__(self, in_size, out_size, is_batchnorm):
        super(unetConv2, self).__init__()

        if is_batchnorm:
            self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 0),
                                       nn.BatchNorm2d(out_size),
                                       nn.ReLU(),)
            self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 0),
                                       nn.BatchNorm2d(out_size),
                                       nn.ReLU(),)
        else:
            self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 0),
                                       nn.ReLU(),)
            self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 0),
                                       nn.ReLU(),)
Project: DeepLearning_PlantDiseases    Author: MarkoArsenovic
def _augment_module_pre(net: nn.Module) -> (dict, list):
    callback_dict = OrderedDict()  # not necessarily ordered, but this can help some readability.

    forward_hook_remove_func_list = []

    for x, y in net.named_modules():
        if not isinstance(y, nn.Sequential) and y is not net:
            if isinstance(y, nn.ReLU):
                callback_dict[x] = {}
                forward_hook_remove_func_list.append(
                    y.register_forward_hook(partial(_forward_hook, module_name=x, callback_dict=callback_dict)))

    def remove_handles():
        for x in forward_hook_remove_func_list:
            x.remove()

    return callback_dict, remove_handles
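The guard `not isinstance(y, nn.Sequential) and y is not net` skips container modules so hooks land only on leaf layers. A hedged usage sketch (the model choice is arbitrary, and _forward_hook comes from the same file):

    import torch
    import torchvision.models as models

    net = models.vgg16()
    callback_dict, remove_handles = _augment_module_pre(net)
    net(torch.randn(1, 3, 224, 224))  # forward hooks fire on each ReLU
    remove_handles()                  # detach all hooks when done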
Project: python-utils    Author: zhijian-liu
def __init__(self, num_layers, in_channels = 3, out_channels = 8, batch_norm = True):
        super(ConvEncoder2D, self).__init__()

        # set up number of layers
        if isinstance(num_layers, int):
            num_layers = [num_layers, 0]

        network = []

        # several 3x3 convolutional layers and max-pooling layers
        for k in range(num_layers[0]):
            # 2d convolution
            network.append(nn.Conv2d(in_channels, out_channels, 3, padding = 1))

            # batch normalization
            if batch_norm:
                network.append(nn.BatchNorm2d(out_channels))

            # non-linearity and max-pooling
            network.append(nn.LeakyReLU(0.2, True))
            network.append(nn.MaxPool2d(2))

            # double channel size
            in_channels = out_channels
            out_channels *= 2

        # several 1x1 convolutional layers
        for k in range(num_layers[1]):
            # 2d convolution
            network.append(nn.Conv2d(in_channels, in_channels, 1))

            # batch normalization
            if batch_norm:
                network.append(nn.BatchNorm2d(in_channels))

            # non-linearity
            network.append(nn.LeakyReLU(0.2, True))

        # set up modules for network
        self.network = nn.Sequential(*network)
        self.network.apply(weights_init)
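weights_init is defined elsewhere in the repo; with a stand-in initializer, a usage sketch of the assembled network (each conv+pool stage halves the spatial size while doubling the channel count):

    import torch
    import torch.nn as nn

    def weights_init(m):  # stand-in for the repo's initializer
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight)

    enc = ConvEncoder2D(num_layers=3)
    out = enc.network(torch.randn(1, 3, 32, 32))
    print(out.shape)  # torch.Size([1, 32, 4, 4])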
Project: python-utils    Author: zhijian-liu
def __init__(self, num_layers, in_channels = 3, out_channels = 8, batch_norm = True):
        super(ConvEncoder3D, self).__init__()

        # set up number of layers
        if isinstance(num_layers, int):
            num_layers = [num_layers, 0]

        network = []

        # several 3x3 convolutional layers and max-pooling layers
        for k in range(num_layers[0]):
            # 3d convolution
            network.append(nn.Conv3d(in_channels, out_channels, 3, padding = 1))

            # batch normalization
            if batch_norm:
                network.append(nn.BatchNorm3d(out_channels))

            # non-linearity and max-pooling
            network.append(nn.LeakyReLU(0.2, True))
            network.append(nn.MaxPool3d(2))

            # double channel size
            in_channels = out_channels
            out_channels *= 2

        # several 1x1 convolutional layers
        for k in range(num_layers[1]):
            # 3d convolution
            network.append(nn.Conv3d(in_channels, in_channels, 1))

            # batch normalization
            if batch_norm:
                network.append(nn.BatchNorm3d(in_channels))

            # non-linearity
            network.append(nn.LeakyReLU(0.2, True))

        # set up modules for network
        self.network = nn.Sequential(*network)
        self.network.apply(weights_init)
Project: convNet.pytorch    Author: eladhoffer
def __init__(self, in_planes, scale=1.0, activation=nn.ReLU(True)):
        super(block35, self).__init__(in_planes, scale, activation)
        self.Branch_0 = nn.Sequential(OrderedDict([
            ('Conv2d_1x1', conv_bn(in_planes, 32, 1))
        ]))
        self.Branch_1 = nn.Sequential(OrderedDict([
            ('Conv2d_0a_1x1', conv_bn(in_planes, 32, 1)),
            ('Conv2d_0b_3x3', conv_bn(32, 32, 3, padding=1))
        ]))
        self.Branch_2 = nn.Sequential(OrderedDict([
            ('Conv2d_0a_1x1', conv_bn(in_planes, 32, 1)),
            ('Conv2d_0b_3x3', conv_bn(32, 48, 3, padding=1)),
            ('Conv2d_0c_3x3', conv_bn(48, 64, 3, padding=1))
        ]))
        self.Conv2d_1x1 = conv_bn(128, in_planes, 1)
Project: convNet.pytorch    Author: eladhoffer
def __init__(self, in_planes, scale=1.0, activation=nn.ReLU(True)):
        super(block17, self).__init__(in_planes, scale, activation)

        self.Branch_0 = nn.Sequential(OrderedDict([
            ('Conv2d_1x1', conv_bn(in_planes, 192, 1))
        ]))
        self.Branch_1 = nn.Sequential(OrderedDict([
            ('Conv2d_0a_1x1', conv_bn(in_planes, 128, 1)),
            ('Conv2d_0b_1x7', conv_bn(128, 160, (1, 7), padding=(0, 3))),
            ('Conv2d_0c_7x1', conv_bn(160, 192, (7, 1), padding=(3, 0)))
        ]))
        self.Conv2d_1x1 = conv_bn(384, in_planes, 1)
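In both blocks the branch channels sum to Conv2d_1x1's input width (32 + 32 + 64 = 128 for block35; 192 + 192 = 384 for block17), which suggests the parent class concatenates the branches, projects back to in_planes, and adds a scaled residual. A hedged sketch of that mixing step (scale and activation are assumed attributes set by the parent):

    import torch

    def forward(self, x):
        branches = [self.Branch_0(x), self.Branch_1(x)]
        if hasattr(self, 'Branch_2'):
            branches.append(self.Branch_2(x))
        out = self.Conv2d_1x1(torch.cat(branches, 1))
        return self.activation(x + self.scale * out)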