Python torch.nn module: BatchNorm1d() usage examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.BatchNorm1d().
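
Before the project excerpts, here is a minimal standalone sketch (not taken from any of the projects below) showing the two input shapes nn.BatchNorm1d() accepts: (N, C) tensors, e.g. the output of an nn.Linear layer, and (N, C, L) tensors, e.g. the output of an nn.Conv1d layer. The layer should be switched to eval() mode at inference time so that running statistics are used instead of per-batch statistics.

import torch
import torch.nn as nn

# Normalize the 64 features of a (N, 64) tensor, e.g. the output of nn.Linear
fc = nn.Linear(32, 64)
bn_fc = nn.BatchNorm1d(64)
y = bn_fc(fc(torch.randn(8, 32)))          # output shape: (8, 64)

# Normalize the 16 channels of a (N, 16, L) tensor, e.g. the output of nn.Conv1d
conv = nn.Conv1d(3, 16, kernel_size=1)
bn_conv = nn.BatchNorm1d(16)
w = bn_conv(conv(torch.randn(8, 3, 100)))  # output shape: (8, 16, 100)

bn_fc.eval()                               # use running mean/var at inference time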

Project: e2c-pytorch    Author: ethanluoyc    | project source | file source
def __init__(self, dim_in, dim_z, config='pendulum'):
        super(AE, self).__init__()
        _, _, dec = load_config(config)

        # TODO, refactor encoder to allow output of dim_z instead of dim_z * 2
        self.encoder = nn.Sequential(
            nn.Linear(dim_in, 800),
            nn.BatchNorm1d(800),
            nn.ReLU(),
            nn.Linear(800, 800),
            nn.BatchNorm1d(800),
            nn.ReLU(),
            nn.Linear(800, dim_z),
            nn.BatchNorm1d(dim_z),
            nn.Sigmoid()
        )

        self.decoder = dec(dim_z, dim_in)
Project: pointnet2.pytorch    Author: eriche2016    | project source | file source
def __init__(self, num_points = 2500):
        super(STN3d, self).__init__()
        self.num_points = num_points
        self.conv1 = nn.Conv1d(3, 64, 1)
        self.conv2 = nn.Conv1d(64, 128, 1)
        self.conv3 = nn.Conv1d(128, 1024, 1)
        self.mp1 = nn.MaxPool1d(num_points)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()

        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)
Project: intel-cervical-cancer    Author: wangg12    | project source | file source
def __init__(self, num_classes, pretrained=False,
              bn_after_act=False, bn_before_act=False):
    super(Vgg19, self).__init__()

    self.pretrained = pretrained
    self.bn_before_act = bn_before_act
    self.bn_after_act = bn_after_act

    model = models.vgg19(pretrained = pretrained)
    self.features = model.features


    self.fc17 = nn.Linear(512 * 7 * 7, 4096)
    self.bn17 = nn.BatchNorm1d(4096)
    self.fc18 = nn.Linear(4096, 4096)
    self.bn18 = nn.BatchNorm1d(4096)
    self.fc19 = nn.Linear(4096, num_classes)

    self._initialize_weights()
Project: colorNet-pytorch    Author: shufanwu    | project source | file source
def __init__(self):
        super(GlobalFeatNet, self).__init__()
        self.conv1 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
        self.bn1 = nn.BatchNorm2d(512)
        self.conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(512)
        self.conv3 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
        self.bn3 = nn.BatchNorm2d(512)
        self.conv4 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(512)
        self.fc1 = nn.Linear(25088, 1024)
        self.bn5 = nn.BatchNorm1d(1024)
        self.fc2 = nn.Linear(1024, 512)
        self.bn6 = nn.BatchNorm1d(512)
        self.fc3 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
Project: PyTorchDemystified    Author: hhsecond    | project source | file source
def __init__(self, config):
        super(SNLIClassifier, self).__init__()
        self.config = config
        self.embed = nn.Embedding(config.n_embed, config.d_embed)
        self.projection = Linear(config.d_embed, config.d_proj)
        self.embed_bn = BatchNorm(config.d_proj)
        self.embed_dropout = nn.Dropout(p=config.embed_dropout)
        self.encoder = SPINN(config) if config.spinn else Encoder(config)
        feat_in_size = config.d_hidden * (
            2 if self.config.birnn and not self.config.spinn else 1)
        self.feature = Feature(feat_in_size, config.mlp_dropout)
        self.mlp_dropout = nn.Dropout(p=config.mlp_dropout)
        self.relu = nn.ReLU()
        mlp_in_size = 4 * feat_in_size
        mlp = [nn.Linear(mlp_in_size, config.d_mlp), self.relu,
               nn.BatchNorm1d(config.d_mlp), self.mlp_dropout]
        for i in range(config.n_mlp_layers - 1):
            mlp.extend([nn.Linear(config.d_mlp, config.d_mlp), self.relu,
                        nn.BatchNorm1d(config.d_mlp), self.mlp_dropout])
        mlp.append(nn.Linear(config.d_mlp, config.d_out))
        self.out = nn.Sequential(*mlp)
Project: MMdnn    Author: Microsoft    | project source | file source
def _layer_BatchNorm(self):
        self.add_body(0, """
    @staticmethod
    def __batch_normalization(dim, name, **kwargs):
        if   dim == 1:  layer = nn.BatchNorm1d(**kwargs)
        elif dim == 2:  layer = nn.BatchNorm2d(**kwargs)
        elif dim == 3:  layer = nn.BatchNorm3d(**kwargs)
        else:           raise NotImplementedError()

        if 'scale' in __weights_dict[name]:
            layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
        else:
            layer.weight.data.fill_(1)

        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        else:
            layer.bias.data.fill_(0)

        layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
        layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
        return layer""")
Project: seq_tagger    Author: OSU-slatelab    | project source | file source
def setUttEncoder(module):  # set utterance encoder to the module
    if SharedModel.args.utt_enc_noise == True:
        module.uttEncNoise = Variable(torch.FloatTensor(), volatile=True)
        if SharedModel.args.no_cuda == False:
            module.uttEncNoise = module.uttEncNoise.cuda()

    if SharedModel.args.utt_enc_type >= 2:
        module.uttEncoder = nn.ModuleList()
        for i in [int(x) for x in SharedModel.args.conv_filters.split('_')]:
            module.uttEncoder.append( nn.Conv1d(2*SharedModel.args.hid_dim * (2 if SharedModel.args.attn == 2 else 1), SharedModel.args.conv_out_dim, i, 1, int(math.ceil((i-1)/2))) )

    if SharedModel.args.utt_enc_bn == True:
        uttEncOutSize = 2 * SharedModel.args.hid_dim
        if SharedModel.args.utt_enc_type >= 2:
            uttEncOutSize = 3 * SharedModel.args.conv_out_dim
        elif SharedModel.args.attn == 2:
            uttEncOutSize = 4 * SharedModel.args.hid_dim
        module.uttBn = nn.BatchNorm1d(uttEncOutSize)
Project: Tacotron_pytorch    Author: root20    | project source | file source
def __init__(self, input_dim, conv_bank_dim, conv_dim1, conv_dim2, gru_dim, num_filters, is_masked):
        super(CBHG, self).__init__()
        self.num_filters = num_filters

        bank_out_dim = num_filters * conv_bank_dim
        self.conv_bank = nn.ModuleList()
        for i in range(num_filters):
            self.conv_bank.append(nn.Conv1d(input_dim, conv_bank_dim, i + 1, stride=1, padding=int(np.ceil(i / 2))))

        # define the batch normalization layers; we use BatchNorm1d since the sequence length is not fixed
        self.bn_list = nn.ModuleList()
        self.bn_list.append(nn.BatchNorm1d(bank_out_dim))
        self.bn_list.append(nn.BatchNorm1d(conv_dim1))
        self.bn_list.append(nn.BatchNorm1d(conv_dim2))

        self.conv1 = nn.Conv1d(bank_out_dim, conv_dim1, 3, stride=1, padding=1)
        self.conv2 = nn.Conv1d(conv_dim1, conv_dim2, 3, stride=1, padding=1)

        if input_dim != conv_dim2:
            self.residual_proj = nn.Linear(input_dim, conv_dim2)

        self.highway = Highway(conv_dim2, 4)
        self.BGRU = nn.GRU(input_size=conv_dim2, hidden_size=gru_dim, num_layers=1, batch_first=True, bidirectional=True)
Project: clevr-iep    Author: facebookresearch    | project source | file source
def build_mlp(input_dim, hidden_dims, output_dim,
              use_batchnorm=False, dropout=0):
  layers = []
  D = input_dim
  if dropout > 0:
    layers.append(nn.Dropout(p=dropout))
  if use_batchnorm:
    layers.append(nn.BatchNorm1d(input_dim))
  for dim in hidden_dims:
    layers.append(nn.Linear(D, dim))
    if use_batchnorm:
      layers.append(nn.BatchNorm1d(dim))
    if dropout > 0:
      layers.append(nn.Dropout(p=dropout))
    layers.append(nn.ReLU(inplace=True))
    D = dim
  layers.append(nn.Linear(D, output_dim))
  return nn.Sequential(*layers)
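
A quick usage sketch of the build_mlp helper above (the dimensions are illustrative, not taken from the project):

mlp = build_mlp(input_dim=512, hidden_dims=[1024, 1024], output_dim=28,
                use_batchnorm=True, dropout=0.1)
scores = mlp(torch.randn(16, 512))   # output shape: (16, 28)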
Project: e2e-model-learning    Author: locuslab    | project source | file source
def __init__(self, X, Y, hidden_layer_sizes):
        super(Net, self).__init__()

        # Initialize linear layer with least squares solution
        X_ = np.hstack([X, np.ones((X.shape[0],1))])
        Theta = np.linalg.solve(X_.T.dot(X_), X_.T.dot(Y))

        self.lin = nn.Linear(X.shape[1], Y.shape[1])
        W,b = self.lin.parameters()
        W.data = torch.Tensor(Theta[:-1,:].T)
        b.data = torch.Tensor(Theta[-1,:])

        # Set up non-linear network of 
        # Linear -> BatchNorm -> ReLU -> Dropout layers
        layer_sizes = [X.shape[1]] + hidden_layer_sizes
        layers = reduce(operator.add, 
            [[nn.Linear(a,b), nn.BatchNorm1d(b), nn.ReLU(), nn.Dropout(p=0.2)] 
                for a,b in zip(layer_sizes[0:-1], layer_sizes[1:])])
        layers += [nn.Linear(layer_sizes[-1], Y.shape[1])]
        self.net = nn.Sequential(*layers)
        self.sig = Parameter(torch.ones(1, Y.shape[1]).cuda())
Project: pytorch    Author: tylergenter    | project source | file source
def test_batchnorm_eval(self):
        types = (torch.FloatTensor,)
        if TEST_CUDA:
            types += (torch.cuda.FloatTensor,)
        for tp in types:
            module = nn.BatchNorm1d(3).type(tp)
            module.eval()

            data = Variable(torch.rand(4, 3).type(tp), requires_grad=True)
            grad = torch.rand(4, 3).type(tp)

            # 1st pass
            res1 = module(data)
            res1.backward(grad)
            grad1 = data.grad.data.clone()

            # 2nd pass
            if data.grad is not None:
                data.grad.data.zero_()

            res2 = module(data)
            res2.backward(grad)
            grad2 = data.grad.data.clone()
            self.assertEqual(res1, res2)
            self.assertEqual(grad1, grad2)
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self,opt):
        super(RNN, self).__init__()
        if opt.type_=='word':pass
        self.lstm = nn.LSTM(input_size = opt.embedding_dim,\
                            hidden_size = opt.hidden_size,
                            num_layers = opt.num_layers,
                            bias = True,
                            batch_first = False,
                            # dropout = 0.5,
                            bidirectional = True
                            )

        self.fc = nn.Sequential(
            nn.Linear((opt.hidden_size*2*2),opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self,opt):
        super(RCNN, self).__init__()
        kernel_size = 2 if opt.type_=='word' else 3
        self.conv = nn.Sequential(
                                nn.Conv1d(in_channels = opt.hidden_size*2 + opt.embedding_dim,
                                        out_channels = opt.title_dim*3,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim*3),
                                nn.ReLU(inplace=True),
                                nn.Conv1d(in_channels = opt.title_dim*3,
                                        out_channels = opt.title_dim*3,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim*3),
                                nn.ReLU(inplace=True),
                                # nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size + 1))
                            ) 
        self.fc=nn.Linear((opt.title_dim*3*2),opt.num_classes)
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(StackLayer2, self).__init__()
        self.model_name = 'StackLayer2'
        self.opt=opt
        #self.fc=nn.Sequential(
        #    nn.Linear(opt.model_num*opt.num_classes,opt.linear_hidden_size),
        #    nn.BatchNorm1d(opt.linear_hidden_size),
        #    nn.ReLU(inplace=True),
        #    nn.Linear(opt.linear_hidden_size,opt.num_classes)
        #)
        # self.weights = nn.Parameter(t.zeros(opt.num_classes,opt.model_num))
        self.weights=nn.Parameter(t.ones(opt.model_num)/opt.model_num)   
        #self.fc=nn.Linear(opt.model_num*opt.num_classes,opt.num_classes)
        #weights=np.zeros((opt.num_classes,opt.model_num*opt.num_classes),dtype=np.float32)
        #for i in range(opt.model_num):
        #    weights[range(1999),range(i*1999,i*1999+1999)]=0.125
        #self.fc.weight.data=t.from_numpy(weights)
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(CNNTextInception, self).__init__()
        incept_dim=opt.inception_dim
        self.model_name = 'CNNTextInception'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
        self.title_conv=nn.Sequential(
            Inception(opt.embedding_dim,incept_dim),#(batch_size,64,opt.title_seq_len)->(batch_size,32,(opt.title_seq_len)/2)
            Inception(incept_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.title_seq_len)
        )
        self.content_conv=nn.Sequential(
            Inception(opt.embedding_dim,incept_dim),#(batch_size,64,opt.content_seq_len)->(batch_size,64,(opt.content_seq_len)/2)
            #Inception(incept_dim,incept_dim),#(batch_size,64,opt.content_seq_len/2)->(batch_size,32,(opt.content_seq_len)/4)
            Inception(incept_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.content_seq_len)
        )
        self.fc = nn.Sequential(
            nn.Linear(incept_dim*2,opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(FastText2, self).__init__()
        self.model_name = 'FastText2'
        self.opt=opt
        # self.pre = nn.Sequential(
        #     nn.Linear(opt.embedding_dim,opt.embedding_dim),
        #     nn.BatchNorm1d(opt.embedding_dim),
        #     # nn.ReLU(True)
        # )
        self.pre_fc = nn.Linear(opt.embedding_dim,opt.embedding_dim*2)
        self.bn = nn.BatchNorm1d(opt.embedding_dim*2)
        self.pre_fc2 = nn.Linear(opt.embedding_dim,opt.embedding_dim*2)
        self.bn2 = nn.BatchNorm1d(opt.embedding_dim*2) 

        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
        self.fc = nn.Sequential(
            nn.Linear(opt.embedding_dim*4,opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )
        if opt.embedding_path:
            print('load embedding')
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
Project: covfefe    Author: deepnn    | project source | file source
def batch_norm(num_features, eps=1e-05, momentum=0.1, affine=True, dim=2):

    in_dim = dim
    if in_dim == 1:
        return nn.BatchNorm1d(num_features=num_features, 
                        eps=eps, 
                        momentum=momentum, 
                        affine=affine)

    elif in_dim == 2:
        return nn.BatchNorm2d(num_features=num_features, 
                        eps=eps, 
                        momentum=momentum, 
                        affine=affine)

    elif in_dim == 3:
        return nn.BatchNorm3d(num_features=num_features, 
                        eps=eps, 
                        momentum=momentum, 
                        affine=affine)

# flatten
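
A minimal usage sketch of the batch_norm() dispatcher above (the values are illustrative):

bn1d = batch_norm(num_features=128, dim=1)   # returns nn.BatchNorm1d(128)
bn2d = batch_norm(num_features=64)           # dim defaults to 2, returns nn.BatchNorm2d(64)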
Project: pytorch-coriander    Author: hughperkins    | project source | file source
def test_batchnorm_eval(self):
        types = (torch.FloatTensor,)
        if TEST_CUDA:
            types += (torch.cuda.FloatTensor,)
        for tp in types:
            module = nn.BatchNorm1d(3).type(tp)
            module.eval()

            data = Variable(torch.rand(4, 3).type(tp), requires_grad=True)
            grad = torch.rand(4, 3).type(tp)

            # 1st pass
            res1 = module(data)
            res1.backward(grad)
            grad1 = data.grad.data.clone()

            # 2nd pass
            if data.grad is not None:
                data.grad.data.zero_()

            res2 = module(data)
            res2.backward(grad)
            grad2 = data.grad.data.clone()
            self.assertEqual(res1, res2)
            self.assertEqual(grad1, grad2)
Project: pointnet.pytorch    Author: fxia22    | project source | file source
def __init__(self, num_points = 2500):
        super(STN3d, self).__init__()
        self.num_points = num_points
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.mp1 = torch.nn.MaxPool1d(num_points)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()

        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)
Project: seq2seq.pytorch    Author: eladhoffer    | project source | file source
def __init__(self, input_size, hidden_size, kernel_size=3,
                 num_layers=4, bias=True,
                 dropout=0, causal=True):
        super(StackedConv, self).__init__()
        self.convs = nn.ModuleList()
        size = input_size
        for l in range(num_layers):
            self.convs.append(GatedConv1d(size, hidden_size, 1, bias=bias,
                                          causal=False))
            self.convs.append(nn.BatchNorm1d(hidden_size))
            self.convs.append(MaskedConv1d(hidden_size, hidden_size,
                                           kernel_size, bias=bias,
                                           groups=hidden_size,
                                           causal=causal))
            self.convs.append(nn.BatchNorm1d(hidden_size))
            size = hidden_size
Project: pytorch-misc    Author: Jiaming-Liu    | project source | file source
def _initialize_weights(self):
        print('initializing')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                receptive_field_size = m.kernel_size[0] * m.kernel_size[1]
                fansum = (m.out_channels + m.in_channels) * receptive_field_size
                scale = 1. / max(1., float(fansum) / 2.)
                stdv = math.sqrt(3. * scale)
                m.weight.data.uniform_(-stdv, stdv)

                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                fansum = m.weight.size(1) + m.weight.size(0)
                scale = 1. / max(1., float(fansum) / 2.)
                stdv = math.sqrt(3. * scale)
                m.weight.data.uniform_(-stdv, stdv)
                if m.bias is not None:
                    m.bias.data.zero_()
Project: pytorch-geometric-gan    Author: lim0606    | project source | file source
def __init__(self, isize, nz, nc, ngf, ngpu):
        super(MLP_G, self).__init__()
        self.ngpu = ngpu

        main = nn.Sequential(
            # Z goes into a linear of size: ngf
            nn.Linear(nz, ngf, bias=False),
            nn.BatchNorm1d(ngf),
            nn.ReLU(True),
            nn.Linear(ngf, ngf, bias=False),
            nn.BatchNorm1d(ngf),
            nn.ReLU(True),
            nn.Linear(ngf, nc * isize * isize),
        )
        self.main = main
        self.nc = nc
        self.isize = isize
        self.nz = nz
Project: pytorch-geometric-gan    Author: lim0606    | project source | file source
def __init__(self, isize, nz, nc, ngf, ngpu):
        super(MLP_G, self).__init__()
        self.ngpu = ngpu

        main = nn.Sequential(
            # Z goes into a linear of size: ngf
            nn.Linear(nz, ngf, bias=False),
            nn.BatchNorm1d(ngf),
            nn.ReLU(True),
            nn.Linear(ngf, ngf, bias=False),
            nn.BatchNorm1d(ngf),
            nn.ReLU(True),
            nn.Linear(ngf, ngf, bias=False),
            nn.BatchNorm1d(ngf),
            nn.ReLU(True),
            nn.Linear(ngf, nc * isize * isize),
        )
        self.main = main
        self.nc = nc
        self.isize = isize
        self.nz = nz
Project: pytorch    Author: ezyang    | project source | file source
def _test_batchnorm_eval(self, test_type=torch.FloatTensor):
        module = nn.BatchNorm1d(3).type(test_type)
        module.eval()

        data = Variable(torch.rand(4, 3).type(test_type), requires_grad=True)
        grad = torch.rand(4, 3).type(test_type)

        # 1st pass
        res1 = module(data)
        res1.backward(grad)
        grad1 = data.grad.data.clone()

        # 2nd pass
        if data.grad is not None:
            data.grad.data.zero_()

        res2 = module(data)
        res2.backward(grad)
        grad2 = data.grad.data.clone()
        self.assertEqual(res1, res2)
        self.assertEqual(grad1, grad2)
Project: pointGAN    Author: fxia22    | project source | file source
def __init__(self, num_points = 2500, global_feat = True, trans = True):
        super(PointNetfeat, self).__init__()
        self.stn = STN3d(num_points = num_points)
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)

        self.bn1 = torch.nn.BatchNorm1d(64)
        self.bn2 = torch.nn.BatchNorm1d(128)
        self.bn3 = torch.nn.BatchNorm1d(1024)
        self.trans = trans


        #self.mp1 = torch.nn.MaxPool1d(num_points)
        self.num_points = num_points
        self.global_feat = global_feat
Project: sourceseparation_misc    Author: ycemsubakan    | project source | file source
def __init__(self, ngpu, **kwargs):
        super(netD_images, self).__init__()
        self.ngpu = ngpu
        self.L = kwargs['L']
        self.K = kwargs['K']
        self.arguments = kwargs['arguments']

        self.l1 = nn.Linear(self.L, self.K, bias=True)
        initializationhelper(self.l1, 'tanh')
        self.l1_bn = nn.BatchNorm1d(self.K)

        self.l2 = nn.Linear(self.K, self.K, bias=True) 
        initializationhelper(self.l2, 'relu')
        #self.l2_bn = nn.BatchNorm1d(self.K)

        self.l3 = nn.Linear(self.K, 1, bias=True)
        initializationhelper(self.l3, 'relu')
Project: generative_zoo    Author: DL-IT    | project source | file source
def __init__(self, n_z, n_hidden, depth, ngpu):
        super(Code_Discriminator, self).__init__()

        self.n_z    = n_z
        self.ngpu   = ngpu
        main        = nn.Sequential()
        layer       = 1

        # Convert the n_z vector representing the prior distribution / encoding of the image using an MLP, as instructed in the paper

        main.add_module('linear_{0}-{1}-{2}'.format(layer, n_z, n_hidden), nn.Linear(n_z, n_hidden))
        main.add_module('batchnorm_{0}-{1}'.format(layer, n_hidden), nn.BatchNorm1d(n_hidden))
        main.add_module('LeakyReLU_{0}'.format(layer), nn.LeakyReLU(0.2, inplace=True))

        for layer in range(2, depth):
            main.add_module('linear_{0}-{1}-{2}'.format(layer, n_hidden, n_hidden), nn.Linear(n_hidden, n_hidden))
            main.add_module('batchnorm_{0}-{1}'.format(layer, n_hidden), nn.BatchNorm1d(n_hidden))
            main.add_module('LeakyReLU_{0}'.format(layer), nn.LeakyReLU(0.2, inplace=True))

        layer       = layer + 1
        main.add_module('linear_{0}-{1}-{2}'.format(layer, n_hidden, 1), nn.Linear(n_hidden, 1))
        main.add_module('Sigmoid_{0}'.format(layer), nn.Sigmoid())

        self.code_dis   = main
Project: pytorch    Author: pytorch    | project source | file source
def _test_batchnorm_eval(self, test_type=torch.FloatTensor):
        module = nn.BatchNorm1d(3).type(test_type)
        module.eval()

        data = Variable(torch.rand(4, 3).type(test_type), requires_grad=True)
        grad = torch.rand(4, 3).type(test_type)

        # 1st pass
        res1 = module(data)
        res1.backward(grad)
        grad1 = data.grad.data.clone()

        # 2nd pass
        if data.grad is not None:
            data.grad.data.zero_()

        res2 = module(data)
        res2.backward(grad)
        grad2 = data.grad.data.clone()
        self.assertEqual(res1, res2)
        self.assertEqual(grad1, grad2)
Project: keita    Author: iwasaki-kenta    | project source | file source
def __init__(self, embed_dim, hidden_dim=64):
        super(HierarchialNetwork1D, self).__init__()
        self.layers = nn.ModuleList()

        first_block = nn.Sequential(
            nn.Conv1d(in_channels=embed_dim, out_channels=hidden_dim, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(hidden_dim)
        )
        self.layers.append(first_block)

        for layer_index in range(4):
            conv_block = nn.Sequential(
                nn.Conv1d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
                nn.BatchNorm1d(hidden_dim)
            )
            self.layers.append(conv_block)
Project: MachineLearning    Author: timomernick    | project source | file source
def __init__(self):
        super(Discriminator, self).__init__()

        self.conv0 = nn.Conv1d(nc, ndf, 4, 2, 1, bias=False)
        self.conv1 = nn.Conv1d(ndf, ndf * 2, 4, 2, 1, bias=False)
        self.conv2 = nn.Conv1d(ndf * 2, ndf * 4, 4, 2, 1, bias=False)
        self.conv3 = nn.Conv1d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)

        self.fc0_size = 512 * 128
        self.fc0 = nn.Linear(self.fc0_size, 100)

        self.relu = nn.LeakyReLU(0.2, inplace=True)

        self.bn1 = nn.BatchNorm1d(ndf * 2)
        self.bn2 = nn.BatchNorm1d(ndf * 4)
        self.bn3 = nn.BatchNorm1d(ndf * 8)

        self.sigmoid = nn.Sigmoid()

        self.apply(weights_init)

        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
        #self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate, alpha=beta_2)
Project: intel-cervical-cancer    Author: wangg12    | project source | file source
def _initialize_weights(self):
    if not self.pretrained:
      for m in self.modules():
        if isinstance(m, nn.Conv2d):
          n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
          m.weight.data.normal_(0, math.sqrt(2. / n))
          if m.bias is not None:
            m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm1d):
          m.weight.data.fill_(1)
          m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
          n = m.weight.size(1)
          m.weight.data.normal_(0, 0.01)
          m.bias.data.zero_()
    else:
      for m in self.modules():
        if isinstance(m, nn.BatchNorm1d):
          m.weight.data.fill_(1)
          m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
          n = m.weight.size(1)
          m.weight.data.normal_(0, 0.01)
          m.bias.data.zero_()
Project: DisentangleVAE    Author: Jueast    | project source | file source
def __init__(self, input, output, zdim, batchnorm, activacation):
        super(EncodeLayer, self).__init__()
        if activacation == "lrelu":
            self.act = nn.LeakyReLU()
        else:
            self.act = nn.ReLU()
        if batchnorm:
            main = nn.Sequential(
                nn.Linear(input, output),
                nn.BatchNorm1d(output),
                self.act,
            )
        else:
            main = nn.Sequential(
                nn.Linear(input, output),
                self.act,
            )
        self.main = main
        self.fc1 = nn.Linear(output, zdim)
        self.fc2 = nn.Linear(output, zdim)
Project: DisentangleVAE    Author: Jueast    | project source | file source
def __init__(self, input, output, zdim, batchnorm, activacation):
        super(DecodeLayer, self).__init__()

        if activacation == "lrelu":
            self.act = nn.LeakyReLU()
        else:
            self.act = nn.ReLU()
        if input == 0:
            input = output
            self.fc = nn.Linear(zdim, input)
        else:
            self.fc = nn.Linear(zdim, input)
            input *= 2
        if batchnorm:
            main = nn.Sequential(
                nn.Linear(input, output),
                nn.BatchNorm1d(output),
                self.act,
            )
        else:
            main = nn.Sequential(
                nn.Linear(input, output),
                self.act,
            )
        self.main = main
Project: DisentangleVAE    Author: Jueast    | project source | file source
def __init__(self, input, output, zdim, batchnorm, activacation):
        super(EncodeLayer, self).__init__()
        if activacation == "lrelu":
            self.act = nn.LeakyReLU()
        else:
            self.act = nn.ReLU()
        if batchnorm:
            main = nn.Sequential(
                nn.Linear(input, output),
                nn.BatchNorm1d(output),
                self.act,
            )
        else:
            main = nn.Sequential(
                nn.Linear(input, output),
                self.act,
            )
        self.main = main
        self.fc1 = nn.Linear(output, zdim)
        self.fc2 = nn.Linear(output, zdim)
Project: DisentangleVAE    Author: Jueast    | project source | file source
def __init__(self, input, output, zdim, batchnorm, activacation):
        super(DecodeLayer, self).__init__()

        if activacation == "lrelu":
            self.act = nn.LeakyReLU()
        else:
            self.act = nn.ReLU()
        if input == 0:
            input = output
            self.fc = nn.Linear(zdim, input)
        else:
            self.fc = nn.Linear(zdim, input)
            input *= 2
        if batchnorm:
            main = nn.Sequential(
                nn.Linear(input, output),
                nn.BatchNorm1d(output),
                self.act,
            )
        else:
            main = nn.Sequential(
                nn.Linear(input, output),
                self.act,
            )
        self.main = main
Project: BuboQA    Author: castorini    | project source | file source
def __init__(self, config):
        super(RelationClassifier, self).__init__()
        self.config = config
        self.embed = nn.Embedding(config.n_embed, config.d_embed)
        self.encoder = Encoder(config)
        self.dropout = nn.Dropout(p=config.dropout_prob)
        self.relu = nn.ReLU()
        seq_in_size = config.d_hidden
        if self.config.birnn:
            seq_in_size *= 2

        self.out = nn.Sequential(
                        nn.Linear(seq_in_size, seq_in_size), # can apply batch norm after this - add later
                        nn.BatchNorm1d(seq_in_size),
                        self.relu,
                        self.dropout,
                        nn.Linear(seq_in_size, config.d_out)
        )
Project: age    Author: ly015    | project source | file source
def __init__(self, opts):

        super(Generator, self).__init__()

        cnn_feat_map = {'resnet18': 512, 'resnet50': 2048, 'vgg16': 2048}
        self.cnn_feat_size = cnn_feat_map[opts.cnn]
        self.noise_dim = opts.noise_dim


        hidden_lst = [self.cnn_feat_size + self.noise_dim] + opts.G_hidden + [self.cnn_feat_size]
        layers = OrderedDict()
        if opts.input_relu== 1:
            layers['relu'] = nn.ReLU()
        for n, (dim_in, dim_out) in enumerate(zip(hidden_lst, hidden_lst[1::])):
            layers['fc%d' % n] = nn.Linear(dim_in, dim_out, bias = False)
            if n < len(hidden_lst) - 2:
                layers['bn%d' % n] = nn.BatchNorm1d(dim_out)
                if opts.G_nonlinear == 'elu':
                    layers['elu%d' % n] = nn.ELU()
                elif opts.G_nonlinear == 'lrelu':
                    layers['leaky_relu%d'%n] = nn.LeakyReLU(0.2)


        self.net = nn.Sequential(layers)
Project: age    Author: ly015    | project source | file source
def __init__(self, opts):

        super(ID_Generator, self).__init__()

        cnn_feat_map = {'resnet18': 512, 'resnet50': 2048, 'vgg16': 2048}
        self.cnn_feat_size = cnn_feat_map[opts.cnn]
        self.noise_dim = opts.noise_dim


        hidden_lst = [self.cnn_feat_size*2 + self.noise_dim] + opts.G_hidden + [self.cnn_feat_size]
        layers = OrderedDict()
        if opts.input_relu== 1:
            layers['relu'] = nn.ReLU()
        for n, (dim_in, dim_out) in enumerate(zip(hidden_lst, hidden_lst[1::])):
            layers['fc%d' % n] = nn.Linear(dim_in, dim_out, bias = False)
            if n < len(hidden_lst) - 2:
                layers['bn%d' % n] = nn.BatchNorm1d(dim_out)
                if opts.G_nonlinear == 'elu':
                    layers['elu%d' % n] = nn.ELU()
                elif opts.G_nonlinear == 'lrelu':
                    layers['leaky_relu%d'%n] = nn.LeakyReLU(0.2)


        self.net = nn.Sequential(layers)
Project: age    Author: ly015    | project source | file source
def __init__(self, opts):

        super(Discriminator, self).__init__()

        cnn_feat_map = {'resnet18': 512, 'resnet50': 2048, 'vgg16': 2048}
        self.cnn_feat_size = cnn_feat_map[opts.cnn]

        hidden_lst = [self.cnn_feat_size] + opts.D_hidden + [1]
        layers = OrderedDict()
        if opts.input_relu== 1:
            layers['relu'] = nn.ReLU()

        for n, (dim_in, dim_out) in enumerate(zip(hidden_lst, hidden_lst[1::])):
            layers['fc%d' % n] = nn.Linear(dim_in, dim_out, bias = False)
            if n < len(hidden_lst) - 2:
                layers['bn%d' % n] = nn.BatchNorm1d(dim_out)
                layers['leaky_relu%d' % n] = nn.LeakyReLU(0.2)
        layers['sigmoid'] = nn.Sigmoid()

        self.net = nn.Sequential(layers)
Project: python-utils    Author: zhijian-liu    | project source | file source
def weights_init(m):
    if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
        m.weight.data.normal_(0, 0.02)
        m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d):
        m.weight.data.normal_(1, 0.02)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        m.bias.data.zero_()
Project: colorNet-pytorch    Author: shufanwu    | project source | file source
def __init__(self):
        super(ClassificationNet, self).__init__()
        self.fc1 = nn.Linear(512, 256)
        self.bn1 = nn.BatchNorm1d(256)
        self.fc2 = nn.Linear(256, 205)
        self.bn2 = nn.BatchNorm1d(205)
Project: colorNet-pytorch    Author: shufanwu    | project source | file source
def __init__(self):
        super(ColorizationNet, self).__init__()
        self.fc1 = nn.Linear(512, 256)
        self.bn1 = nn.BatchNorm1d(256)
        self.conv1 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.conv2 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1)
        self.bn3 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.conv4 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.conv5 = nn.Conv2d(32, 2, kernel_size=3, stride=1, padding=1)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
Project: PyTorchDemystified    Author: hhsecond    | project source | file source
def __init__(self, size, dropout):
        super(Feature, self).__init__()
        self.bn = nn.BatchNorm1d(size * 4)
        self.dropout = nn.Dropout(p=dropout)
Project: pytorch-tutorial    Author: yunjey    | project source | file source
def __init__(self, embed_size):
        """Load the pretrained ResNet-152 and replace top fc layer."""
        super(EncoderCNN, self).__init__()
        resnet = models.resnet152(pretrained=True)
        modules = list(resnet.children())[:-1]      # delete the last fc layer.
        self.resnet = nn.Sequential(*modules)
        self.linear = nn.Linear(resnet.fc.in_features, embed_size)
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
        self.init_weights()
Project: open-reid    Author: Cysu    | project source | file source
def __init__(self, cut_at_pooling=False, num_features=256, norm=False,
                 dropout=0, num_classes=0):
        super(InceptionNet, self).__init__()
        self.cut_at_pooling = cut_at_pooling

        self.conv1 = _make_conv(3, 32)
        self.conv2 = _make_conv(32, 32)
        self.conv3 = _make_conv(32, 32)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.in_planes = 32
        self.inception4a = self._make_inception(64, 'Avg', 1)
        self.inception4b = self._make_inception(64, 'Max', 2)
        self.inception5a = self._make_inception(128, 'Avg', 1)
        self.inception5b = self._make_inception(128, 'Max', 2)
        self.inception6a = self._make_inception(256, 'Avg', 1)
        self.inception6b = self._make_inception(256, 'Max', 2)

        if not self.cut_at_pooling:
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes

            self.avgpool = nn.AdaptiveAvgPool2d(1)

            if self.has_embedding:
                self.feat = nn.Linear(self.in_planes, self.num_features)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
            else:
                # Change the num_features to CNN output channels
                self.num_features = self.in_planes
            if self.dropout > 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_classes > 0:
                self.classifier = nn.Linear(self.num_features, self.num_classes)

        self.reset_params()
Project: GlottGAN    Author: bajibabu    | project source | file source
def __init__(self, noise_input_size, cond_input_size):
        super(_netG, self).__init__()
        self.noise_input_size = noise_input_size
        self.cond_input_size = cond_input_size

        # first dense block
        # input shape [batch_size x 147]
        self.fc1 = nn.Sequential(
            nn.Linear(self.noise_input_size + self.cond_input_size, 100 * 10),
            nn.BatchNorm1d(100 * 10),
            nn.LeakyReLU(0.2, inplace=True)
        )
        # Convolutional block
        self.conv1 = nn.Sequential(
            # input shape [batch_size x 10 x 100]
            nn.ConvTranspose1d(10, 250, 13, stride=2, padding=6,
                              output_padding=1, bias=True),
            nn.BatchNorm1d(250),
            nn.LeakyReLU(0.2, inplace=True),

            # input shape [batch_size x 250 x 200]
            nn.ConvTranspose1d(250, 100, 13, stride=2, padding=6,
                              output_padding=1, bias=True),
            nn.BatchNorm1d(100),
            nn.LeakyReLU(0.2, inplace=True),

             # input shape [batch_size x 100 x 400]
            nn.ConvTranspose1d(100, 1, 13, stride=1, padding=6,
                              bias=True),
            nn.BatchNorm1d(1),
            # input shape [batch_size x 1 x 400]
            nn.Tanh()
        )
Project: GlottGAN    Author: bajibabu    | project source | file source
def __init__(self, cond_input_size):
        super(_netD, self).__init__()
        self.cond_input_size = cond_input_size
        # Convolutional block
        self.conv1 = nn.Sequential(
            # input shape batch_size x 1 (number of channels) x 400 (length of pulse)
            nn.Conv1d(1, 100, 13, stride=5, padding=6, bias=True),
            nn.BatchNorm1d(100),
            nn.LeakyReLU(0.2, inplace=True),

            # shape [batch_size x 100 x 80]
            nn.Conv1d(100, 250, 13, stride=5, padding=6, bias=True),
            nn.BatchNorm1d(250),
            nn.LeakyReLU(0.2, inplace=True),

            # shape [batch_size x 250 x 16]
            nn.Conv1d(250, 300, 13, stride=4, padding=6, bias=True),
            nn.BatchNorm1d(300),
            nn.LeakyReLU(0.2, inplace=True)
            # shape [batch_size x 300 x 4]
        )
        # after flatten 300 * 4 + 47 (conditional input size)
        # Dense block
        self.fc1 = nn.Sequential(
            nn.Linear(1200 + self.cond_input_size, 200),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(200,1),
            nn.Sigmoid()
        )
Project: FreezeOut    Author: ajbrock    | project source | file source
def __init__(self,growthRate, depth, nClasses, epochs, t_0, scale_lr=True, how_scale = 'cubic',const_time=False, cfg=cfg['E'],batch_norm=True):
        super(DenseNet, self).__init__()

        self.epochs = epochs
        self.t_0 = t_0
        self.scale_lr = scale_lr
        self.how_scale = how_scale
        self.const_time = const_time

        self.layer_index = 0
        self.features = self.make_layers(cfg,batch_norm)

        self.classifier = nn.Sequential(
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.BatchNorm1d(512),
            nn.Dropout(),
            nn.Linear(512, nClasses),
        )
        self.classifier.layer_index = self.layer_index
        self.classifier.active = True
        self._initialize_weights()

        # Optimizer
        self.optim = optim.SGD([{'params':m.parameters(), 'lr':m.lr, 'layer_index':m.layer_index} for m in self.modules() if hasattr(m,'active')],  
                         nesterov=True,momentum=0.9, weight_decay=1e-4)
        # Iteration Counter            
        self.j = 0  

        # A simple dummy variable that indicates we are using an iteration-wise
        # annealing scheme as opposed to epoch-wise. 
        self.lr_sched = {'itr':0}
Project: clevr-iep    Author: facebookresearch    | project source | file source
def build_classifier(module_C, module_H, module_W, num_answers,
                     fc_dims=[], proj_dim=None, downsample='maxpool2',
                     with_batchnorm=True, dropout=0):
  layers = []
  prev_dim = module_C * module_H * module_W
  if proj_dim is not None and proj_dim > 0:
    layers.append(nn.Conv2d(module_C, proj_dim, kernel_size=1))
    if with_batchnorm:
      layers.append(nn.BatchNorm2d(proj_dim))
    layers.append(nn.ReLU(inplace=True))
    prev_dim = proj_dim * module_H * module_W
  if downsample == 'maxpool2':
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    prev_dim //= 4
  elif downsample == 'maxpool4':
    layers.append(nn.MaxPool2d(kernel_size=4, stride=4))
    prev_dim //= 16
  layers.append(Flatten())
  for next_dim in fc_dims:
    layers.append(nn.Linear(prev_dim, next_dim))
    if with_batchnorm:
      layers.append(nn.BatchNorm1d(next_dim))
    layers.append(nn.ReLU(inplace=True))
    if dropout > 0:
      layers.append(nn.Dropout(p=dropout))
    prev_dim = next_dim
  layers.append(nn.Linear(prev_dim, num_answers))
  return nn.Sequential(*layers)
Project: bigBatch    Author: eladhoffer    | project source | file source
def __init__(self):
        super(mnist_model, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Linear(512, 10),
        )
        self.regime = {
            0: {'optimizer': 'SGD', 'lr':  1e-1,
                'weight_decay': 1e-4, 'momentum': 0.9},
            10: {'lr': 1e-2},
            20: {'lr':  1e-3},
            30: {'lr':  1e-4}
        }