Python torch.nn module: Conv1d() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.Conv1d().
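Before the project excerpts, a minimal standalone sketch (not taken from any project below, and assuming a recent PyTorch release where plain tensors can be passed to modules): nn.Conv1d expects input of shape (batch, in_channels, length) and convolves along the last dimension.

import torch
import torch.nn as nn

# Map 16 input channels to 33 output channels with a kernel of width 3;
# padding=1 with kernel_size=3 preserves the sequence length.
conv = nn.Conv1d(in_channels=16, out_channels=33, kernel_size=3, padding=1)

x = torch.randn(8, 16, 100)   # (batch, in_channels, length)
y = conv(x)
print(y.size())               # torch.Size([8, 33, 100])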

Project: pointnet2.pytorch    Author: eriche2016
def __init__(self, num_points = 2500):
        super(STN3d, self).__init__()
        self.num_points = num_points
        self.conv1 = nn.Conv1d(3, 64, 1)
        self.conv2 = nn.Conv1d(64, 128, 1)
        self.conv3 = nn.Conv1d(128, 1024, 1)
        self.mp1 = nn.MaxPool1d(num_points)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()

        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)
Project: seq_tagger    Author: OSU-slatelab
def setUttEncoder(module):  # set utterance encoder to the module
    if SharedModel.args.utt_enc_noise == True:
        module.uttEncNoise = Variable(torch.FloatTensor(), volatile=True)
        if SharedModel.args.no_cuda == False:
            module.uttEncNoise = module.uttEncNoise.cuda()

    if SharedModel.args.utt_enc_type >= 2:
        module.uttEncoder = nn.ModuleList()
        for i in [int(x) for x in SharedModel.args.conv_filters.split('_')]:
            module.uttEncoder.append( nn.Conv1d(2*SharedModel.args.hid_dim * (2 if SharedModel.args.attn == 2 else 1), SharedModel.args.conv_out_dim, i, 1, int(math.ceil((i-1)/2))) )

    if SharedModel.args.utt_enc_bn == True:
        uttEncOutSize = 2 * SharedModel.args.hid_dim
        if SharedModel.args.utt_enc_type >= 2:
            uttEncOutSize = 3 * SharedModel.args.conv_out_dim
        elif SharedModel.args.attn == 2:
            uttEncOutSize = 4 * SharedModel.args.hid_dim
        module.uttBn = nn.BatchNorm1d(uttEncOutSize)
Project: Tacotron_pytorch    Author: root20
def __init__(self, input_dim, conv_bank_dim, conv_dim1, conv_dim2, gru_dim, num_filters, is_masked):
        super(CBHG, self).__init__()
        self.num_filters = num_filters

        bank_out_dim = num_filters * conv_bank_dim
        self.conv_bank = nn.ModuleList()
        for i in range(num_filters):
            self.conv_bank.append(nn.Conv1d(input_dim, conv_bank_dim, i + 1, stride=1, padding=int(np.ceil(i / 2))))

        # define batch normalization layer, we use BN1D since the sequence length is not fixed
        self.bn_list = nn.ModuleList()
        self.bn_list.append(nn.BatchNorm1d(bank_out_dim))
        self.bn_list.append(nn.BatchNorm1d(conv_dim1))
        self.bn_list.append(nn.BatchNorm1d(conv_dim2))

        self.conv1 = nn.Conv1d(bank_out_dim, conv_dim1, 3, stride=1, padding=1)
        self.conv2 = nn.Conv1d(conv_dim1, conv_dim2, 3, stride=1, padding=1)

        if input_dim != conv_dim2:
            self.residual_proj = nn.Linear(input_dim, conv_dim2)

        self.highway = Highway(conv_dim2, 4)
        self.BGRU = nn.GRU(input_size=conv_dim2, hidden_size=gru_dim, num_layers=1, batch_first=True, bidirectional=True)
Project: PyTorchText    Author: chenyuntc
def __init__(self,opt):
        super(RCNN, self).__init__()
        kernel_size = 2 if opt.type_=='word' else 3
        self.conv = nn.Sequential(
                                nn.Conv1d(in_channels = opt.hidden_size*2 + opt.embedding_dim,
                                        out_channels = opt.title_dim*3,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim*3),
                                nn.ReLU(inplace=True),
                                nn.Conv1d(in_channels = opt.title_dim*3,
                                        out_channels = opt.title_dim*3,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim*3),
                                nn.ReLU(inplace=True),
                                # nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size + 1))
                            ) 
        self.fc=nn.Linear((opt.title_dim*3*2),opt.num_classes)
Project: baseline    Author: dpressel
def _init_pool(self, dsz, **kwargs):
        filtsz = kwargs['filtsz']
        cmotsz = kwargs['cmotsz']
        convs = []
        for i, fsz in enumerate(filtsz):
            pad = fsz//2
            conv = nn.Sequential(
                nn.Conv1d(dsz, cmotsz, fsz, padding=pad),
                pytorch_activation("relu")
            )
            convs.append(conv)
            # Add the module so it's managed correctly
        self.convs = nn.ModuleList(convs)
        # Width of concat of parallel convs
        self.conv_drop = nn.Dropout(self.pdrop)

        return cmotsz * len(filtsz)
Project: speech    Author: awni
def __init__(self, kernel_size=11, log_t=False):
        """
        Module which performs a single attention step along the
        second axis of a given encoded input. The module uses
        both 'content' and 'location' based attention.

        The 'content' based attention is an inner product of the
        decoder hidden state with each time-step of the encoder
        state.

        The 'location' based attention performs a 1D convolution
        on the previous attention vector and adds this into the
        next attention vector prior to normalization.

        *NB* Should compute attention differently if using cuda or cpu
        based on performance. See
        https://gist.github.com/awni/9989dd31642d42405903dec8ab91d1f0
        """
        super(Attention, self).__init__()
        assert kernel_size % 2 == 1, \
            "Kernel size should be odd for 'same' conv."
        padding = (kernel_size - 1) // 2
        self.conv = nn.Conv1d(1, 1, kernel_size, padding=padding)
        self.log_t = log_t
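The excerpt shows only the constructor; a rough sketch of how the 'content' and 'location' terms described in the docstring could be combined (a hedged illustration with hypothetical tensor names and shapes, assuming a recent PyTorch API, not the project's actual forward()):

import torch
import torch.nn as nn
import torch.nn.functional as F

# Hedged sketch: add a 'location' term (same-padded 1D conv over the previous
# attention weights) to the 'content' scores, then normalize.
kernel_size = 11
conv = nn.Conv1d(1, 1, kernel_size, padding=(kernel_size - 1) // 2)

eh = torch.randn(4, 50, 256)   # hypothetical encoder states: (batch, time, dim)
dhx = torch.randn(4, 256)      # hypothetical decoder hidden state: (batch, dim)
ax = torch.randn(4, 1, 50)     # hypothetical previous attention: (batch, 1, time)

pax = conv(ax).squeeze(1)                             # (batch, time), length preserved
scores = torch.bmm(eh, dhx.unsqueeze(2)).squeeze(2)   # 'content' inner products
alpha = F.softmax(scores + pax, dim=1)                # normalized attention weights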
Project: keita    Author: iwasaki-kenta
def __init__(self, embed_dim, hidden_dim=64):
        super(HierarchialNetwork1D, self).__init__()
        self.layers = nn.ModuleList()

        first_block = nn.Sequential(
            nn.Conv1d(in_channels=embed_dim, out_channels=hidden_dim, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(hidden_dim)
        )
        self.layers.append(first_block)

        for layer_index in range(4):
            conv_block = nn.Sequential(
                nn.Conv1d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
                nn.BatchNorm1d(hidden_dim)
            )
            self.layers.append(conv_block)
Project: MachineLearning    Author: timomernick
def __init__(self):
        super(Discriminator, self).__init__()

        self.conv0 = nn.Conv1d(nc, ndf, 4, 2, 1, bias=False)
        self.conv1 = nn.Conv1d(ndf, ndf * 2, 4, 2, 1, bias=False)
        self.conv2 = nn.Conv1d(ndf * 2, ndf * 4, 4, 2, 1, bias=False)
        self.conv3 = nn.Conv1d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)

        self.fc0_size = 512 * 128
        self.fc0 = nn.Linear(self.fc0_size, 100)

        self.relu = nn.LeakyReLU(0.2, inplace=True)

        self.bn1 = nn.BatchNorm1d(ndf * 2)
        self.bn2 = nn.BatchNorm1d(ndf * 4)
        self.bn3 = nn.BatchNorm1d(ndf * 8)

        self.sigmoid = nn.Sigmoid()

        self.apply(weights_init)

        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
        #self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate, alpha=beta_2)
Project: python-utils    Author: zhijian-liu
def weights_init(m):
    if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
        m.weight.data.normal_(0, 0.02)
        m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d):
        m.weight.data.normal_(1, 0.02)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        m.bias.data.zero_()
Project: allennlp    Author: allenai
def __init__(self,
                 embedding_dim: int,
                 num_filters: int,
                 ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),  # pylint: disable=bad-whitespace
                 conv_layer_activation: Activation = Activation.by_name('relu')(),
                 output_dim: Optional[int] = None) -> None:
        super(CnnEncoder, self).__init__()
        self._embedding_dim = embedding_dim
        self._num_filters = num_filters
        self._ngram_filter_sizes = ngram_filter_sizes
        self._activation = conv_layer_activation
        self._output_dim = output_dim

        self._convolution_layers = [Conv1d(in_channels=self._embedding_dim,
                                           out_channels=self._num_filters,
                                           kernel_size=ngram_size)
                                    for ngram_size in self._ngram_filter_sizes]
        for i, conv_layer in enumerate(self._convolution_layers):
            self.add_module('conv_layer_%d' % i, conv_layer)

        maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)
        if self._output_dim:
            self.projection_layer = Linear(maxpool_output_dim, self._output_dim)
        else:
            self.projection_layer = None
            self._output_dim = maxpool_output_dim
Project: torch_light    Author: ne7ermore
def __init__(self, d_model, d_ff, dropout):
        super().__init__()

        self.seq = nn.Sequential(
                nn.Conv1d(d_model, d_ff, 1),
                nn.ReLU(),
                nn.Conv1d(d_ff, d_model, 1),
                nn.Dropout(dropout)
            )
        self.lm = LayerNorm(d_model)
Project: ParlAI    Author: facebookresearch
def test_convtbc(self):
        # ksz, in_channels, out_channels
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # out_channels, in_channels, ksz
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)

        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)

        input_tbc = Variable(torch.randn(7, 2, 4), requires_grad=True)
        input1d = Variable(input_tbc.data.transpose(0, 1).transpose(1, 2), requires_grad=True)

        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)

        self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)

        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()

        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)

        self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)
Project: attention-is-all-you-need-pytorch    Author: jadore801120
def __init__(self, d_hid, d_inner_hid, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1) # position-wise
        self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1) # position-wise
        self.layer_norm = LayerNormalization(d_hid)
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()
Project: MMdnn    Author: Microsoft
def _layer_Conv(self):
        self.add_body(0, """
    @staticmethod
    def __conv(dim, name, **kwargs):
        if   dim == 1:  layer = nn.Conv1d(**kwargs)
        elif dim == 2:  layer = nn.Conv2d(**kwargs)
        elif dim == 3:  layer = nn.Conv3d(**kwargs)
        else:           raise NotImplementedError()

        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer""")
Project: fairseq-py    Author: facebookresearch
def test_convtbc(self):
        # ksz, in_channels, out_channels
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # out_channels, in_channels, ksz
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)

        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)

        input_tbc = Variable(torch.randn(7, 2, 4), requires_grad=True)
        input1d = Variable(input_tbc.data.transpose(0, 1).transpose(1, 2), requires_grad=True)

        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)

        self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)

        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()

        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)

        self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)
Project: GlottGAN    Author: bajibabu
def __init__(self, cond_input_size):
        super(_netD, self).__init__()
        self.cond_input_size = cond_input_size
        # Convolutional block
        self.conv1 = nn.Sequential(
            # input shape batch_size x 1 (number of channels) x 400 (length of pulse)
            nn.Conv1d(1, 100, 13, stride=5, padding=6, bias=True),
            nn.BatchNorm1d(100),
            nn.LeakyReLU(0.2, inplace=True),

            # shape [batch_size x 100 x 80]
            nn.Conv1d(100, 250, 13, stride=5, padding=6, bias=True),
            nn.BatchNorm1d(250),
            nn.LeakyReLU(0.2, inplace=True),

            # shape [batch_size x 250 x 16]
            nn.Conv1d(250, 300, 13, stride=4, padding=6, bias=True),
            nn.BatchNorm1d(300),
            nn.LeakyReLU(0.2, inplace=True)
            # shape [batch_size x 300 x 4]
        )
        # after flatten 300 * 4 + 47 (conditional input size)
        # Dense block
        self.fc1 = nn.Sequential(
            nn.Linear(1200 + self.cond_input_size, 200),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(200,1),
            nn.Sigmoid()
        )
Project: SeqMatchSeq    Author: pcgreat
def __init__(self, cov_dim, mem_dim, window_size):
        super(MyTemporalConvoluation, self).__init__()
        self.conv1 = nn.Conv1d(cov_dim, mem_dim, window_size)
Project: pointnet2.pytorch    Author: eriche2016
def __init__(self, num_points = 2500):    
        super(Feats_STN3d, self).__init__()
        self.conv1 = nn.Conv1d(128, 256, 1)
        self.conv2 = nn.Conv1d(256, 1024, 1)
        self.mp1 = nn.MaxPool1d(num_points) 

        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 128*128)

        self.bn1 = nn.BatchNorm1d(256)
        self.bn2 = nn.BatchNorm1d(1024)
        self.bn3 = nn.BatchNorm1d(512)
        self.bn4 = nn.BatchNorm1d(256)
Project: pointnet2.pytorch    Author: eriche2016
def __init__(self, num_points = 2500, global_feat = True):
        super(PointNetfeat, self).__init__()
        self.stn = STN3d(num_points = num_points) # bz x 3 x 3 
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.mp1 = torch.nn.MaxPool1d(num_points)
        self.num_points = num_points
        self.global_feat = global_feat
Project: pointnet2.pytorch    Author: eriche2016
def __init__(self, num_points = 2500, k = 2):
        super(PointNetPartDenseCls, self).__init__()
        self.num_points = num_points
        self.k = k
        # T1 
        self.stn1 = STN3d(num_points = num_points) # bz x 3 x 3, after transform => bz x 2048 x 3 

        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 128, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(128)

        # T2 
        self.stn2 = Feats_STN3d(num_points = num_points)

        self.conv4 = torch.nn.Conv1d(128, 128, 1)
        self.conv5 = torch.nn.Conv1d(128, 512, 1)
        self.conv6 = torch.nn.Conv1d(512, 2048, 1)
        self.bn4 = nn.BatchNorm1d(128)
        self.bn5 = nn.BatchNorm1d(512)
        self.bn6 = nn.BatchNorm1d(2048)
        # pool layer 
        self.mp1 = torch.nn.MaxPool1d(num_points) 

        # MLP(256, 256, 128)
        self.conv7 = torch.nn.Conv1d(3024-16, 256, 1)
        self.conv8 = torch.nn.Conv1d(256, 256, 1)
        self.conv9 = torch.nn.Conv1d(256, 128, 1)
        self.bn7 = nn.BatchNorm1d(256)
        self.bn8 = nn.BatchNorm1d(256)
        self.bn9 = nn.BatchNorm1d(128)
        # last layer 
        self.conv10 = torch.nn.Conv1d(128, self.k, 1) # 50 
        self.bn10 = nn.BatchNorm1d(self.k)
Project: pointnet2.pytorch    Author: eriche2016
def __init__(self, num_points = 2500, k = 2):
        super(PointNetDenseCls, self).__init__()
        self.num_points = num_points
        self.k = k
        self.feat = PointNetfeat(num_points, global_feat=False)
        self.conv1 = torch.nn.Conv1d(1088, 512, 1)
        self.conv2 = torch.nn.Conv1d(512, 256, 1)
        self.conv3 = torch.nn.Conv1d(256, 128, 1)
        self.conv4 = torch.nn.Conv1d(128, self.k, 1)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.bn3 = nn.BatchNorm1d(128)
Project: pyprob    Author: probprog
def __init__(self, input_example_non_batch, output_dim, dropout=0):
        super(ObserveEmbeddingCNN1D2C, self).__init__()
        self.input_dim = input_example_non_batch.nelement()
        self.input_sample = input_example_non_batch.view(1, -1).cpu()
        self.output_dim = output_dim
        self.conv1 = nn.Conv1d(1, 64, 3, padding=1)
        self.conv2 = nn.Conv1d(64, 64, 3, padding=1)
        self.drop = nn.Dropout(dropout)
Project: PyTorchText    Author: chenyuntc
def __init__(self,cin,co,relu=True,norm=True):
        super(Inception, self).__init__()
        assert(co%4==0)
        cos=[co/4]*4
        self.activa=nn.Sequential()
        if norm:self.activa.add_module('norm',nn.BatchNorm1d(co))
        if relu:self.activa.add_module('relu',nn.ReLU(True))
        self.branch1 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[0], 1,stride=1)),
            ])) 
        self.branch2 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[1], 1)),
            ('norm1', nn.BatchNorm1d(cos[1])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[1],cos[1], 3,stride=1,padding=1)),
            ]))
        self.branch3 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[2], 3,padding=1)),
            ('norm1', nn.BatchNorm1d(cos[2])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[2],cos[2], 5,stride=1,padding=2)),
            ]))
        self.branch4 =nn.Sequential(OrderedDict([
            #('pool',nn.MaxPool1d(2)),
            ('conv3', nn.Conv1d(cin,cos[3], 3,stride=1,padding=1)),
            ]))
Project: PyTorchText    Author: chenyuntc
def __init__(self, opt ):
        super(CNNText_tmp, self).__init__()
        incept_dim=opt.inception_dim
        self.model_name = 'CNNText_tmp'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
        self.title_conv=nn.Sequential(
            nn.Conv1d(in_channels = opt.embedding_dim,out_channels = incept_dim,kernel_size = 3,padding=1),
            nn.BatchNorm1d(incept_dim),
            nn.ReLU(inplace=True),
            Inception(incept_dim,incept_dim),#(batch_size,64,opt.title_seq_len)->(batch_size,32,(opt.title_seq_len)/2)
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.title_seq_len)
        )
        self.content_conv=nn.Sequential(
            #(batch_size,256,opt.content_seq_len)->(batch_size,64,opt.content_seq_len)
            nn.Conv1d(in_channels = opt.embedding_dim,out_channels = incept_dim,kernel_size = 3,padding=1),
            nn.BatchNorm1d(incept_dim),
            nn.ReLU(inplace=True),
            Inception(incept_dim,incept_dim),#(batch_size,64,opt.content_seq_len)->(batch_size,64,(opt.content_seq_len)/2)
            #Inception(incept_dim,incept_dim),#(batch_size,64,opt.content_seq_len/2)->(batch_size,32,(opt.content_seq_len)/4)
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.content_seq_len)
        )
        self.fc = nn.Sequential(
            nn.Linear(incept_dim*2,opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )
Project: PyTorchText    Author: chenyuntc
def __init__(self,cin,co,relu=True,norm=True):
        super(Inception, self).__init__()
        assert(co%4==0)
        cos=[co/4]*4
        self.activa=nn.Sequential()
        if norm:self.activa.add_module('norm',nn.BatchNorm1d(co))
        if relu:self.activa.add_module('relu',nn.ReLU(True))
        self.branch1 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[0], 1,stride=2)),
            ])) 
        self.branch2 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[1], 1)),
            ('norm1', nn.BatchNorm1d(cos[1])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[1],cos[1], 3,stride=2)),
            ]))
        self.branch3 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[2], 3)),
            ('norm1', nn.BatchNorm1d(cos[2])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[2],cos[2], 5,stride=2)),
            ]))
        self.branch4 =nn.Sequential(OrderedDict([
            #('pool',nn.MaxPool1d(2)),
            ('conv3', nn.Conv1d(cin,cos[3], 3,stride=2)),
            ]))
Project: PyTorchText    Author: chenyuntc
def __init__(self,opt):
        super(CNN, self).__init__()
        if opt.type_=='word':
            kernel_sizes1=[1,2,3,4,4]
            kernel_sizes2=[1,2,2,2,3]
        else:
            kernel_sizes1=[2,3,5,6,8]
            kernel_sizes2=[1,2,3,3,4]
        self.convs=[ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.title_dim,
                                        kernel_size = kernel_size1),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),

                                nn.Conv1d(in_channels = opt.title_dim,
                                out_channels = opt.title_dim,
                                kernel_size = kernel_size2),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),
                            )
         for kernel_size1,kernel_size2  in zip(kernel_sizes1,kernel_sizes2)]
        self.convs=nn.ModuleList(self.convs)

        self.fc = nn.Sequential(
            nn.Linear(len(kernel_sizes1)*(opt.title_dim*2),opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )
Project: PyTorchText    Author: chenyuntc
def __init__(self,cin,co,relu=True,norm=True):
        super(Inception, self).__init__()
        assert(co%5==0)
        cos=int(co/5)
        self.activa = nn.Sequential(nn.BatchNorm1d(co),nn.ReLU(True))
        self.branch1 = nn.Conv1d(cin,cos,1)
        self.branch2 = nn.Conv1d(cin,cos,2,padding=1)
        self.branch3 = nn.Conv1d(cin,cos,3,padding=1)
        self.branch4 = nn.Conv1d(cin,cos,4,padding=2)
        self.branch5 = nn.Conv1d(cin,cos,5,padding=2)
Project: PyTorchText    Author: chenyuntc
def __init__(self, opt ):
        super(MultiCNNTextBN, self).__init__()
        self.model_name = 'MultiCNNTextBN'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)

        title_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.title_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size + 1))
                            )
         for kernel_size in kernel_sizes]

        content_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.content_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.content_seq_len - kernel_size + 1))
                            )
            for kernel_size in kernel_sizes ]

        self.title_convs = nn.ModuleList(title_convs)
        self.content_convs = nn.ModuleList(content_convs)

        self.fc = nn.Sequential(
            nn.Linear(len(kernel_sizes)*(opt.title_dim+opt.content_dim),opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )


        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
Project: PyTorchText    Author: chenyuntc
def __init__(self, opt ):
        super(MultiCNNText, self).__init__()
        self.model_name = 'MultiCNNText'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)

        title_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.title_dim,
                                        kernel_size = kernel_size),
                                nn.ReLU(),
                                nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size + 1))
                            )
         for kernel_size in [3,4,5,6]]

        content_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size),
                                nn.ReLU(),
                                nn.MaxPool1d(kernel_size = (opt.content_seq_len - kernel_size + 1))
                            )
            for kernel_size in [3,4,5,6]
        ]
        self.title_convs = nn.ModuleList(title_convs)
        self.content_convs = nn.ModuleList(content_convs)

        self.fc = nn.Linear((opt.title_dim+opt.content_dim)*4, opt.num_classes)
        self.drop = nn.Dropout(0.5)

        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
Project: PyTorchText    Author: chenyuntc
def __init__(self,cin,co,relu=True,norm=True):
        super(Inception, self).__init__()
        assert(co%4==0)
        cos=[co/4]*4
        self.activa=nn.Sequential()
        if norm:self.activa.add_module('norm',nn.BatchNorm1d(co))
        if relu:self.activa.add_module('relu',nn.ReLU(True))
        self.branch1 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[0], 1,stride=1)),
            ])) 
        self.branch2 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[1], 1)),
            ('norm1', nn.BatchNorm1d(cos[1])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[1],cos[1], 3,stride=1,padding=1)),
            ]))
        self.branch3 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[2], 3,padding=1)),
            ('norm1', nn.BatchNorm1d(cos[2])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[2],cos[2], 5,stride=1,padding=2)),
            ]))
        self.branch4 =nn.Sequential(OrderedDict([
            #('pool',nn.MaxPool1d(2)),
            ('conv3', nn.Conv1d(cin,cos[3], 3,stride=1,padding=1)),
            ]))
Project: PyTorchText    Author: chenyuntc
def __init__(self,cin,co,dim_size=None,relu=True,norm=True):
        super(Inception, self).__init__()
        assert(co%4==0)
        cos=[co/4]*4
        self.dim_size=dim_size
        self.activa=nn.Sequential()
        if norm:self.activa.add_module('norm',nn.BatchNorm1d(co))
        if relu:self.activa.add_module('relu',nn.ReLU(True))
        self.branch1 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[0], 1,stride=2)),
            ])) 
        self.branch2 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[1], 1)),
            ('norm1', nn.BatchNorm1d(cos[1])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[1],cos[1], 3,stride=2,padding=1)),
            ]))
        self.branch3 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[2], 3,padding=1)),
            ('norm1', nn.BatchNorm1d(cos[2])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[2],cos[2], 5,stride=2,padding=2)),
            ]))
        self.branch4 =nn.Sequential(OrderedDict([
            ('pool',nn.MaxPool1d(2)),
            ('conv3', nn.Conv1d(cin,cos[3], 5,stride=1,padding=2)),
            ]))
        if self.dim_size is not None:
            self.maxpool=nn.MaxPool1d(self.dim_size/2)
Project: PyTorchText    Author: chenyuntc
def __init__(self, opt ):
        super(CNNText_tmp, self).__init__()
        self.model_name = 'CNNText_tmp'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
        self.title_conv=nn.Sequential(
            #(batch_size,256,opt.title_seq_len)->(batch_size,64,opt.title_seq_len)
            nn.Conv1d(in_channels = opt.embedding_dim,out_channels = 128,kernel_size = 3),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            Inception(128,128),#(batch_size,64,opt.title_seq_len)->(batch_size,32,(opt.title_seq_len)/2)
            Inception(128,opt.title_dim,opt.title_seq_len/2),
        )
        self.content_conv=nn.Sequential(
            #(batch_size,256,opt.content_seq_len)->(batch_size,64,opt.content_seq_len)
            nn.Conv1d(in_channels = opt.embedding_dim,out_channels = 128,kernel_size = 3),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            Inception(128,128),#(batch_size,64,opt.content_seq_len)->(batch_size,64,(opt.content_seq_len)/2)
            Inception(128,128),#(batch_size,64,opt.content_seq_len/2)->(batch_size,32,(opt.content_seq_len)/4)
            Inception(128,opt.content_dim,opt.content_seq_len/4),
        )
        self.fc = nn.Sequential(
            nn.Linear(opt.title_dim+opt.content_dim,opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )
Project: covfefe    Author: deepnn
def conv(in_ch, out_ch, kernel_size,
            stride=1, padding=0, dilation=1, groups=1, bias=True, dim=2):

    #TODO: in the future some preprocessing goes here
    in_dim = dim
    if in_dim == 1:
        return nn.Conv1d(in_ch, out_ch, kernel_size,
                        stride=stride,
                        padding=padding,
                        dilation=dilation,
                        groups=groups,
                        bias=bias)

    elif in_dim == 2:
        return nn.Conv2d(in_ch, out_ch, kernel_size,
                        stride=stride,
                        padding=padding,
                        dilation=dilation,
                        groups=groups,
                        bias=bias)

    elif in_dim == 3:
        return nn.Conv3d(in_ch, out_ch, kernel_size,
                        stride=stride,
                        padding=padding,
                        dilation=dilation,
                        groups=groups,
                        bias=bias)
# Transposed Convolution
Project: ml-utils    Author: LinxiFan
def is_conv_layer(layer, dim=None):
    if dim is None:
        cls = _ConvNd
    elif dim == 1:
        cls = nn.Conv1d
    elif dim == 2:
        cls = nn.Conv2d
    elif dim == 3:
        cls = nn.Conv3d
    return isinstance(layer, cls)
Project: seq2seq.pytorch    Author: eladhoffer
def __init__(self, in_channels, interm_channels=None, out_channels=None, kernel_size=3, dilation=1, causal=True):
        super(ResidualBlock, self).__init__()
        out_channels = out_channels or in_channels
        interm_channels = interm_channels or in_channels // 2
        self.layernorm1 = LayerNorm1d(in_channels)
        self.layernorm2 = LayerNorm1d(interm_channels)
        self.layernorm3 = LayerNorm1d(interm_channels)
        self.conv1 = nn.Conv1d(in_channels, interm_channels, 1)
        self.conv2 = MaskedConv1d(
            interm_channels, interm_channels, kernel_size, dilation=dilation, causal=causal)
        self.conv3 = nn.Conv1d(interm_channels, out_channels, 1)
        self.relu = nn.ReLU(True)
Project: pytorch    Author: ezyang
def test_conv_modules_raise_error_on_incorrect_input_size(self):
        modules = [nn.Conv1d(3, 8, 3), nn.ConvTranspose1d(3, 8, 3),
                   nn.Conv2d(3, 8, 3), nn.ConvTranspose2d(3, 8, 3),
                   nn.Conv3d(3, 8, 3), nn.ConvTranspose3d(3, 8, 3)]

        invalid_input_dims = [(2, 4), (2, 4),
                              (3, 5), (3, 5),
                              (4, 6), (4, 6)]

        for invalid_dims, module in zip(invalid_input_dims, modules):
            for dims in invalid_dims:
                input = Variable(torch.Tensor(torch.Size((3, ) * dims)))
                self.assertRaises(ValueError, lambda: module(input))
Project: NeuroNLP2    Author: XuezheMax
def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size,
                 rnn_mode, hidden_size, num_layers, num_labels, tag_space=0,
                 embedd_word=None, embedd_char=None, p_in=0.2, p_rnn=0.5):
        super(BiRecurrentConv, self).__init__()

        self.word_embedd = Embedding(num_words, word_dim, init_embedding=embedd_word)
        self.char_embedd = Embedding(num_chars, char_dim, init_embedding=embedd_char)
        self.conv1d = nn.Conv1d(char_dim, num_filters, kernel_size, padding=kernel_size - 1)
        self.dropout_in = nn.Dropout(p=p_in)
        self.dropout_rnn = nn.Dropout(p_rnn)

        if rnn_mode == 'RNN':
            RNN = nn.RNN
        elif rnn_mode == 'LSTM':
            RNN = nn.LSTM
        elif rnn_mode == 'GRU':
            RNN = nn.GRU
        else:
            raise ValueError('Unknown RNN mode: %s' % rnn_mode)

        self.rnn = RNN(word_dim + num_filters, hidden_size, num_layers=num_layers,
                       batch_first=True, bidirectional=True, dropout=p_rnn)

        self.dense = None
        out_dim = hidden_size * 2
        if tag_space:
            self.dense = nn.Linear(out_dim, tag_space)
            out_dim = tag_space
        self.dense_softmax = nn.Linear(out_dim, num_labels)

        # TODO set dim for log_softmax and set reduce=False to NLLLoss
        self.logsoftmax = nn.LogSoftmax()
        self.nll_loss = nn.NLLLoss(size_average=False)
Project: NeuroNLP2    Author: XuezheMax
def __init__(self, word_dim, num_words, char_dim, num_chars, pos_dim, num_pos, num_filters, kernel_size,
                 rnn_mode, hidden_size, num_layers, num_labels, arc_space, type_space,
                 embedd_word=None, embedd_char=None, embedd_pos=None,
                 p_in=0.2, p_out=0.5, p_rnn=(0.5, 0.5), biaffine=True):
        super(BiRecurrentConvBiAffine, self).__init__()

        self.word_embedd = Embedding(num_words, word_dim, init_embedding=embedd_word)
        self.char_embedd = Embedding(num_chars, char_dim, init_embedding=embedd_char)
        self.pos_embedd = Embedding(num_pos, pos_dim, init_embedding=embedd_pos)
        self.conv1d = nn.Conv1d(char_dim, num_filters, kernel_size, padding=kernel_size - 1)
        self.dropout_in = nn.Dropout2d(p=p_in)
        self.dropout_out = nn.Dropout2d(p=p_out)
        self.num_labels = num_labels

        if rnn_mode == 'RNN':
            RNN = VarMaskedRNN
        elif rnn_mode == 'LSTM':
            RNN = VarMaskedLSTM
        elif rnn_mode == 'FastLSTM':
            RNN = VarMaskedFastLSTM
        elif rnn_mode == 'GRU':
            RNN = VarMaskedGRU
        else:
            raise ValueError('Unknown RNN mode: %s' % rnn_mode)

        self.rnn = RNN(word_dim + num_filters + pos_dim, hidden_size, num_layers=num_layers,
                       batch_first=True, bidirectional=True, dropout=p_rnn)

        out_dim = hidden_size * 2
        self.arc_h = nn.Linear(out_dim, arc_space)
        self.arc_c = nn.Linear(out_dim, arc_space)
        self.attention = BiAAttention(arc_space, arc_space, 1, biaffine=biaffine)

        self.type_h = nn.Linear(out_dim, type_space)
        self.type_c = nn.Linear(out_dim, type_space)
        self.bilinear = BiLinear(type_space, type_space, self.num_labels)
        self.logsoftmax = nn.LogSoftmax()
Project: baseline    Author: dpressel
def _pool(self, btc):
        embeddings = btc.transpose(1, 2).contiguous()
        mots = []
        for conv in self.convs:
            # In Conv1d, data BxCxT, max over time
            conv_out = conv(embeddings)
            mot, _ = conv_out.max(2)
            mots.append(mot)
            #  Not required/working in latest pytorch
            #mots.append(mot.squeeze(2))

        mots = torch.cat(mots, 1)
        return self.conv_drop(mots)
Project: baseline    Author: dpressel
def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0):
    c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding)
    if unif > 0:
        c.weight.data.uniform_(-unif, unif)
    return c
Project: pytorch-planet-amazon    Author: rwightman
def is_sparseable(m):
    return True if hasattr(m, 'weight') and isinstance(m, (
            nn.Conv1d, nn.Conv2d, nn.Conv3d,
            nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d,
            nn.Linear)) else False
Project: speech    Author: awni
def __init__(self, n_channels, kernel_size=15, log_t=False):
        super(NNAttention, self).__init__()
        assert kernel_size % 2 == 1, \
            "Kernel size should be odd for 'same' conv."
        padding = (kernel_size - 1) // 2
        self.conv = nn.Conv1d(1, n_channels, kernel_size, padding=padding)
        self.nn = nn.Sequential(
                     nn.ReLU(),
                     model.LinearND(n_channels, 1))
        self.log_t = log_t
Project: neural-combinatorial-rl-pytorch    Author: pemami4911
def __init__(self, dim, use_tanh=False, C=10, use_cuda=True):
        super(Attention, self).__init__()
        self.use_tanh = use_tanh
        self.project_query = nn.Linear(dim, dim)
        self.project_ref = nn.Conv1d(dim, dim, 1, 1)
        self.C = C  # tanh exploration
        self.tanh = nn.Tanh()

        v = torch.FloatTensor(dim)
        if use_cuda:
            v = v.cuda()  
        self.v = nn.Parameter(v)
        self.v.data.uniform_(-(1. / math.sqrt(dim)) , 1. / math.sqrt(dim))
Project: pytorch    Author: pytorch
def test_conv_modules_raise_error_on_incorrect_input_size(self):
        modules = [nn.Conv1d(3, 8, 3), nn.ConvTranspose1d(3, 8, 3),
                   nn.Conv2d(3, 8, 3), nn.ConvTranspose2d(3, 8, 3),
                   nn.Conv3d(3, 8, 3), nn.ConvTranspose3d(3, 8, 3)]

        invalid_input_dims = [(2, 4), (2, 4),
                              (3, 5), (3, 5),
                              (4, 6), (4, 6)]

        for invalid_dims, module in zip(invalid_input_dims, modules):
            for dims in invalid_dims:
                input = Variable(torch.Tensor(torch.Size((3, ) * dims)))
                self.assertRaises(ValueError, lambda: module(input))
Project: keita    Author: iwasaki-kenta
def __init__(self, feature_dim, num_classes=3):
        super(TCML, self).__init__()

        self.dilations = [1, 2, 4, 8, 16, 1, 2, 4, 8, 16]
        self.dense_blocks = nn.ModuleList(
            [TemporalDenseBlock(feature_dim + 128 * index, hidden_size=128, dilation=dilation) for
             index, dilation in
             enumerate(self.dilations)])

        self.conv1 = nn.Conv1d(in_channels=feature_dim + 128 * len(self.dilations), out_channels=512, kernel_size=1,
                               stride=1)
        self.conv2 = nn.Conv1d(in_channels=512, out_channels=num_classes, kernel_size=1, stride=1)
Project: kdnet.pytorch    Author: fxia22
def __init__(self, k = 16):
        super(KDNet_Batch, self).__init__()
        self.conv1 = nn.Conv1d(3,8 * 3,1,1)
        self.conv2 = nn.Conv1d(8*2,32 * 3,1,1)
        self.conv3 = nn.Conv1d(32*2,64 * 3,1,1)
        self.conv4 = nn.Conv1d(64*2,64 * 3,1,1)
        self.conv5 = nn.Conv1d(64*2,64 * 3,1,1)
        self.conv6 = nn.Conv1d(64*2,128 * 3,1,1)
        self.conv7 = nn.Conv1d(128*2,256 * 3,1,1)
        self.conv8 = nn.Conv1d(256*2,512 * 3,1,1)
        self.conv9 = nn.Conv1d(512*2,512 * 3,1,1)
        self.conv10 = nn.Conv1d(512*2,512 * 3,1,1)
        self.conv11 = nn.Conv1d(512*2,1024 * 3,1,1)   
        self.bn1 = nn.BatchNorm1d(8*3)
        self.bn2 = nn.BatchNorm1d(32*3)
        self.bn3 = nn.BatchNorm1d(64*3)
        self.bn4 = nn.BatchNorm1d(64*3)
        self.bn5 = nn.BatchNorm1d(64*3)
        self.bn6 = nn.BatchNorm1d(128*3)
        self.bn7 = nn.BatchNorm1d(256*3)
        self.bn8 = nn.BatchNorm1d(512*3)
        self.bn9 = nn.BatchNorm1d(512*3)
        self.bn10 = nn.BatchNorm1d(512*3)
        self.bn11 = nn.BatchNorm1d(1024*3)


        self.fc = nn.Linear(1024 * 2, k)
Project: kdnet.pytorch    Author: fxia22
def __init__(self, k = 16):
        super(KDNet_Batch, self).__init__()
        self.conv1 = nn.Conv1d(4,8 * 3,1,1)
        self.conv2 = nn.Conv1d(8,32 * 3,1,1)
        self.conv3 = nn.Conv1d(32,64 * 3,1,1)
        self.conv4 = nn.Conv1d(64,64 * 3,1,1)
        self.conv5 = nn.Conv1d(64,64 * 3,1,1)
        self.conv6 = nn.Conv1d(64,128 * 3,1,1)
        self.conv7 = nn.Conv1d(128,256 * 3,1,1)
        self.conv8 = nn.Conv1d(256,512 * 3,1,1)
        self.conv9 = nn.Conv1d(512,512 * 3,1,1)
        self.conv10 = nn.Conv1d(512,512 * 3,1,1)
        self.conv11 = nn.Conv1d(512,1024 * 3,1,1)
        self.bn1 = nn.BatchNorm1d(8*3)
        self.bn2 = nn.BatchNorm1d(32*3)
        self.bn3 = nn.BatchNorm1d(64*3)
        self.bn4 = nn.BatchNorm1d(64*3)
        self.bn5 = nn.BatchNorm1d(64*3)
        self.bn6 = nn.BatchNorm1d(128*3)
        self.bn7 = nn.BatchNorm1d(256*3)
        self.bn8 = nn.BatchNorm1d(512*3)
        self.bn9 = nn.BatchNorm1d(512*3)
        self.bn10 = nn.BatchNorm1d(512*3)
        self.bn11 = nn.BatchNorm1d(1024*3)


        self.fc = nn.Linear(1024, k)
Project: kdnet.pytorch    Author: fxia22
def __init__(self, k = 16):
        super(KDNet_Batch, self).__init__()
        self.conv1 = nn.Conv1d(3,8 * 3,1,1)
        self.conv2 = nn.Conv1d(8,32 * 3,1,1)
        self.conv3 = nn.Conv1d(32,64 * 3,1,1)
        self.conv4 = nn.Conv1d(64,64 * 3,1,1)
        self.conv5 = nn.Conv1d(64,64 * 3,1,1)
        self.conv6 = nn.Conv1d(64,128 * 3,1,1)
        self.conv7 = nn.Conv1d(128,256 * 3,1,1)
        self.conv8 = nn.Conv1d(256,512 * 3,1,1)
        self.conv9 = nn.Conv1d(512,512 * 3,1,1)
        self.conv10 = nn.Conv1d(512,512 * 3,1,1)
        self.conv11 = nn.Conv1d(512,1024 * 3,1,1)
        self.bn1 = nn.BatchNorm1d(8*3)
        self.bn2 = nn.BatchNorm1d(32*3)
        self.bn3 = nn.BatchNorm1d(64*3)
        self.bn4 = nn.BatchNorm1d(64*3)
        self.bn5 = nn.BatchNorm1d(64*3)
        self.bn6 = nn.BatchNorm1d(128*3)
        self.bn7 = nn.BatchNorm1d(256*3)
        self.bn8 = nn.BatchNorm1d(512*3)
        self.bn9 = nn.BatchNorm1d(512*3)
        self.bn10 = nn.BatchNorm1d(512*3)
        self.bn11 = nn.BatchNorm1d(1024*3)


        self.fc = nn.Linear(1024, k)
Project: kdnet.pytorch    Author: fxia22
def __init__(self, k = 16):
        super(KDNet, self).__init__()
        self.conv1 = nn.Conv1d(3,8 * 3,1,1)
        self.conv2 = nn.Conv1d(8,32 * 3,1,1)
        self.conv3 = nn.Conv1d(32,64 * 3,1,1)
        self.conv4 = nn.Conv1d(64,64 * 3,1,1)
        self.conv5 = nn.Conv1d(64,64 * 3,1,1)
        self.conv6 = nn.Conv1d(64,128 * 3,1,1)
        self.conv7 = nn.Conv1d(128,256 * 3,1,1)
        self.conv8 = nn.Conv1d(256,512 * 3,1,1)
        self.conv9 = nn.Conv1d(512,512 * 3,1,1)
        self.conv10 = nn.Conv1d(512,512 * 3,1,1)
        self.conv11 = nn.Conv1d(512,1024 * 3,1,1)
        self.fc = nn.Linear(1024, k)
Project: kdnet.pytorch    Author: fxia22
def __init__(self, k = 16):
        super(KDNet, self).__init__()
        self.conv1 = nn.Conv1d(3,8 * 3,1,1)
        self.conv2 = nn.Conv1d(8,32 * 3,1,1)
        self.conv3 = nn.Conv1d(32,64 * 3,1,1)
        self.conv4 = nn.Conv1d(64,64 * 3,1,1)
        self.conv5 = nn.Conv1d(64,64 * 3,1,1)
        self.conv6 = nn.Conv1d(64,128 * 3,1,1)
        self.conv7 = nn.Conv1d(128,256 * 3,1,1)
        self.conv8 = nn.Conv1d(256,512 * 3,1,1)
        self.conv9 = nn.Conv1d(512,512 * 3,1,1)
        self.conv10 = nn.Conv1d(512,512 * 3,1,1)
        self.conv11 = nn.Conv1d(512,1024 * 3,1,1)      
        self.fc = nn.Linear(1024, k)