Python torch.nn module: MaxPool1d() example source code

The following 25 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.MaxPool1d().

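Before the project excerpts, here is a minimal self-contained sketch of what the layer does (the tensor sizes are arbitrary illustrations): nn.MaxPool1d slides a window of kernel_size over the last dimension of a (batch, channels, length) tensor and keeps the maximum in each window. With kernel_size equal to the full length it degenerates to global max pooling, the pattern most of the examples below rely on.

import torch
import torch.nn as nn

pool = nn.MaxPool1d(kernel_size=2)   # stride defaults to kernel_size
x = torch.randn(4, 16, 50)           # (batch, channels, length)
print(pool(x).shape)                 # torch.Size([4, 16, 25]): length halved

# A kernel spanning the whole length yields one global max per channel:
print(nn.MaxPool1d(kernel_size=50)(x).shape)   # torch.Size([4, 16, 1])
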
Project: pointnet2.pytorch    Author: eriche2016    | project source | file source
def __init__(self, num_points = 2500):
        super(STN3d, self).__init__()
        self.num_points = num_points
        self.conv1 = nn.Conv1d(3, 64, 1)
        self.conv2 = nn.Conv1d(64, 128, 1)
        self.conv3 = nn.Conv1d(128, 1024, 1)
        self.mp1 = nn.MaxPool1d(num_points)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()

        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)
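The forward pass is not part of this excerpt. As a hedged shape walk-through (input layout bz x 3 x num_points, as the PointNet convention suggests): the 1x1 convolutions keep the point dimension, and mp1 = nn.MaxPool1d(num_points) then aggregates over every point at once, which is what makes the resulting feature order-invariant.

import torch
import torch.nn as nn

bz, N = 8, 2500
feats = torch.randn(bz, 1024, N)   # e.g. the output of conv3 above
mp1 = nn.MaxPool1d(N)              # kernel spans all N points
print(mp1(feats).shape)            # torch.Size([8, 1024, 1]): one global feature per cloud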
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self,opt):
        super(RCNN, self).__init__()
        kernel_size = 2 if opt.type_=='word' else 3
        self.conv = nn.Sequential(
                                nn.Conv1d(in_channels = opt.hidden_size*2 + opt.embedding_dim,
                                        out_channels = opt.title_dim*3,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim*3),
                                nn.ReLU(inplace=True),
                                nn.Conv1d(in_channels = opt.title_dim*3,
                                        out_channels = opt.title_dim*3,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim*3),
                                nn.ReLU(inplace=True),
                                # nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size + 1))
                            ) 
        self.fc=nn.Linear((opt.title_dim*3*2),opt.num_classes)
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(CNNTextInception, self).__init__()
        incept_dim=opt.inception_dim
        self.model_name = 'CNNTextInception'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
        self.title_conv=nn.Sequential(
            Inception(opt.embedding_dim,incept_dim),#(batch_size,64,opt.title_seq_len)->(batch_size,32,(opt.title_seq_len)/2)
            Inception(incept_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.title_seq_len)
        )
        self.content_conv=nn.Sequential(
            Inception(opt.embedding_dim,incept_dim),#(batch_size,64,opt.content_seq_len)->(batch_size,64,(opt.content_seq_len)/2)
            #Inception(incept_dim,incept_dim),#(batch_size,64,opt.content_seq_len/2)->(batch_size,32,(opt.content_seq_len)/4)
            Inception(incept_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.content_seq_len)
        )
        self.fc = nn.Sequential(
            nn.Linear(incept_dim*2,opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )
Project: pointnet2.pytorch    Author: eriche2016    | project source | file source
def __init__(self, num_points = 2500):    
        super(Feats_STN3d, self).__init__()
        self.conv1 = nn.Conv1d(128, 256, 1)
        self.conv2 = nn.Conv1d(256, 1024, 1)
        self.mp1 = nn.MaxPool1d(num_points) 

        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 128*128)

        self.bn1 = nn.BatchNorm1d(256)
        self.bn2 = nn.BatchNorm1d(1024)
        self.bn3 = nn.BatchNorm1d(512)
        self.bn4 = nn.BatchNorm1d(256)
Project: pointnet2.pytorch    Author: eriche2016    | project source | file source
def __init__(self, num_points = 2500, global_feat = True):
        super(PointNetfeat, self).__init__()
        self.stn = STN3d(num_points = num_points) # bz x 3 x 3 
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.mp1 = torch.nn.MaxPool1d(num_points)
        self.num_points = num_points
        self.global_feat = global_feat
Project: pointnet2.pytorch    Author: eriche2016    | project source | file source
def __init__(self, num_points = 2500, k = 2):
        super(PointNetPartDenseCls, self).__init__()
        self.num_points = num_points
        self.k = k
        # T1 
        self.stn1 = STN3d(num_points = num_points) # bz x 3 x 3, after transform => bz x 2048 x 3 

        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 128, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(128)

        # T2 
        self.stn2 = Feats_STN3d(num_points = num_points)

        self.conv4 = torch.nn.Conv1d(128, 128, 1)
        self.conv5 = torch.nn.Conv1d(128, 512, 1)
        self.conv6 = torch.nn.Conv1d(512, 2048, 1)
        self.bn4 = nn.BatchNorm1d(128)
        self.bn5 = nn.BatchNorm1d(512)
        self.bn6 = nn.BatchNorm1d(2048)
        # pool layer 
        self.mp1 = torch.nn.MaxPool1d(num_points) 

        # MLP(256, 256, 128)
        self.conv7 = torch.nn.Conv1d(3024-16, 256, 1)
        self.conv8 = torch.nn.Conv1d(256, 256, 1)
        self.conv9 = torch.nn.Conv1d(256, 128, 1)
        self.bn7 = nn.BatchNorm1d(256)
        self.bn8 = nn.BatchNorm1d(256)
        self.bn9 = nn.BatchNorm1d(128)
        # last layer 
        self.conv10 = torch.nn.Conv1d(128, self.k, 1) # 50 
        self.bn10 = nn.BatchNorm1d(self.k)
Project: pyprob    Author: probprog    | project source | file source
def forward_cnn(self, x):
        x = F.relu(self.conv1(x))
        x = nn.MaxPool1d(2)(x)
        x = F.relu(self.conv2(x))
        x = nn.MaxPool1d(2)(x)
        return x
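Instantiating nn.MaxPool1d(2)(x) inside forward works because the module is parameter-free, but the functional form avoids the per-call allocation and is the more common idiom; a behavior-equivalent sketch:

import torch.nn.functional as F

def forward_cnn(self, x):
    x = F.relu(self.conv1(x))
    x = F.max_pool1d(x, 2)    # same result as nn.MaxPool1d(2)(x)
    x = F.relu(self.conv2(x))
    x = F.max_pool1d(x, 2)
    return x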
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self,cin,co,relu=True,norm=True):
        super(Inception, self).__init__()
        assert(co%4==0)
        cos=[co//4]*4  # integer division: co/4 yields floats under Python 3, which Conv1d rejects
        self.activa=nn.Sequential()
        if norm:self.activa.add_module('norm',nn.BatchNorm1d(co))
        if relu:self.activa.add_module('relu',nn.ReLU(True))
        self.branch1 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[0], 1,stride=1)),
            ])) 
        self.branch2 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[1], 1)),
            ('norm1', nn.BatchNorm1d(cos[1])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[1],cos[1], 3,stride=1,padding=1)),
            ]))
        self.branch3 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[2], 3,padding=1)),
            ('norm1', nn.BatchNorm1d(cos[2])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[2],cos[2], 5,stride=1,padding=2)),
            ]))
        self.branch4 =nn.Sequential(OrderedDict([
            #('pool',nn.MaxPool1d(2)),
            ('conv3', nn.Conv1d(cin,cos[3], 3,stride=1,padding=1)),
            ]))
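Only the branches are defined in this excerpt; the forward pass is not shown. A hedged sketch of how the module presumably combines them: all four branches preserve the sequence length here (stride 1 with matching padding), so their outputs can be concatenated on the channel dimension, giving 4 * (co//4) == co channels for the shared norm/ReLU in self.activa.

import torch

def forward(self, x):
    # assumed forward, not part of the excerpt
    out = torch.cat([self.branch1(x), self.branch2(x),
                     self.branch3(x), self.branch4(x)], dim=1)
    return self.activa(out)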
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(CNNText_tmp, self).__init__()
        incept_dim=opt.inception_dim
        self.model_name = 'CNNText_tmp'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
        self.title_conv=nn.Sequential(
            nn.Conv1d(in_channels = opt.embedding_dim,out_channels = incept_dim,kernel_size = 3,padding=1),
            nn.BatchNorm1d(incept_dim),
            nn.ReLU(inplace=True),
            Inception(incept_dim,incept_dim),#(batch_size,64,opt.title_seq_len)->(batch_size,32,(opt.title_seq_len)/2)
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.title_seq_len)
        )
        self.content_conv=nn.Sequential(
            #(batch_size,256,opt.content_seq_len)->(batch_size,64,opt.content_seq_len)
            nn.Conv1d(in_channels = opt.embedding_dim,out_channels = incept_dim,kernel_size = 3,padding=1),
            nn.BatchNorm1d(incept_dim),
            nn.ReLU(inplace=True),
            Inception(incept_dim,incept_dim),#(batch_size,64,opt.content_seq_len)->(batch_size,64,(opt.content_seq_len)/2)
            #Inception(incept_dim,incept_dim),#(batch_size,64,opt.content_seq_len/2)->(batch_size,32,(opt.content_seq_len)/4)
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.content_seq_len)
        )
        self.fc = nn.Sequential(
            nn.Linear(incept_dim*2,opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self,cin,co,relu=True,norm=True):
        super(Inception, self).__init__()
        assert(co%4==0)
        cos=[co//4]*4  # integer division keeps channel counts int under Python 3
        self.activa=nn.Sequential()
        if norm:self.activa.add_module('norm',nn.BatchNorm1d(co))
        if relu:self.activa.add_module('relu',nn.ReLU(True))
        self.branch1 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[0], 1,stride=2)),
            ])) 
        self.branch2 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[1], 1)),
            ('norm1', nn.BatchNorm1d(cos[1])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[1],cos[1], 3,stride=2)),
            ]))
        self.branch3 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[2], 3)),
            ('norm1', nn.BatchNorm1d(cos[2])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[2],cos[2], 5,stride=2)),
            ]))
        self.branch4 =nn.Sequential(OrderedDict([
            #('pool',nn.MaxPool1d(2)),
            ('conv3', nn.Conv1d(cin,cos[3], 3,stride=2)),
            ]))
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(InceptionText, self).__init__()
        incept_dim=opt.inception_dim
        self.model_name = 'InceptionText'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
        self.title_conv=nn.Sequential(
            Inception(opt.embedding_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.title_seq_len)
        )
        self.content_conv=nn.Sequential(
            Inception(opt.embedding_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.content_seq_len)
        )
        self.fc = nn.Sequential(
            nn.Linear(incept_dim*2,opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )

        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(MultiCNNTextBN, self).__init__()
        self.model_name = 'MultiCNNTextBN'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)

        title_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.title_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size + 1))
                            )
         for kernel_size in kernel_sizes]  # kernel_sizes is defined at module level in the original file

        content_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.content_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.content_seq_len - kernel_size + 1))
                            )
            for kernel_size in kernel_sizes ]

        self.title_convs = nn.ModuleList(title_convs)
        self.content_convs = nn.ModuleList(content_convs)

        self.fc = nn.Sequential(
            nn.Linear(len(kernel_sizes)*(opt.title_dim+opt.content_dim),opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )


        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(MultiCNNText, self).__init__()
        self.model_name = 'MultiCNNText'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)

        title_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.title_dim,
                                        kernel_size = kernel_size),
                                nn.ReLU(),
                                nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size + 1))
                            )
         for kernel_size in [3,4,5,6]]

        content_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size),
                                nn.ReLU(),
                                nn.MaxPool1d(kernel_size = (opt.content_seq_len - kernel_size + 1))
                            )
            for kernel_size in [3,4,5,6]
        ]
        self.title_convs = nn.ModuleList(title_convs)
        self.content_convs = nn.ModuleList(content_convs)

        self.fc = nn.Linear((opt.title_dim+opt.content_dim)*4, opt.num_classes)
        self.drop = nn.Dropout(0.5)

        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
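Each branch follows the classic Kim-style text-CNN pattern: a valid convolution with window kernel_size shortens a length-seq_len input to seq_len - kernel_size + 1, so a MaxPool1d with exactly that kernel is global max pooling over time. A quick shape check with illustrative sizes:

import torch
import torch.nn as nn

seq_len, kernel_size, emb, dim = 50, 3, 64, 100
block = nn.Sequential(
    nn.Conv1d(emb, dim, kernel_size),                     # (B, 64, 50) -> (B, 100, 48)
    nn.ReLU(),
    nn.MaxPool1d(kernel_size=seq_len - kernel_size + 1)   # (B, 100, 48) -> (B, 100, 1)
)
print(block(torch.randn(2, emb, seq_len)).shape)          # torch.Size([2, 100, 1])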
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self,cin,co,relu=True,norm=True):
        super(Inception, self).__init__()
        assert(co%4==0)
        cos=[co//4]*4  # integer division keeps channel counts int under Python 3
        self.activa=nn.Sequential()
        if norm:self.activa.add_module('norm',nn.BatchNorm1d(co))
        if relu:self.activa.add_module('relu',nn.ReLU(True))
        self.branch1 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[0], 1,stride=1)),
            ])) 
        self.branch2 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[1], 1)),
            ('norm1', nn.BatchNorm1d(cos[1])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[1],cos[1], 3,stride=1,padding=1)),
            ]))
        self.branch3 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[2], 3,padding=1)),
            ('norm1', nn.BatchNorm1d(cos[2])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[2],cos[2], 5,stride=1,padding=2)),
            ]))
        self.branch4 =nn.Sequential(OrderedDict([
            #('pool',nn.MaxPool1d(2)),
            ('conv3', nn.Conv1d(cin,cos[3], 3,stride=1,padding=1)),
            ]))
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(CNNText_inception, self).__init__()
        incept_dim=opt.inception_dim
        self.model_name = 'CNNText_inception'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
        self.title_conv=nn.Sequential(
            Inception(opt.embedding_dim,incept_dim),#(batch_size,64,opt.title_seq_len)->(batch_size,32,(opt.title_seq_len)/2)
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.title_seq_len)
        )
        self.content_conv=nn.Sequential(
            Inception(opt.embedding_dim,incept_dim),#(batch_size,64,opt.content_seq_len)->(batch_size,64,(opt.content_seq_len)/2)
            #Inception(incept_dim,incept_dim),#(batch_size,64,opt.content_seq_len/2)->(batch_size,32,(opt.content_seq_len)/4)
            Inception(incept_dim,incept_dim),
            nn.MaxPool1d(opt.content_seq_len)
        )
        self.fc = nn.Sequential(
            nn.Linear(incept_dim*2,opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )
        if opt.embedding_path:
            print('load embedding')
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self,cin,co,dim_size=None,relu=True,norm=True):
        super(Inception, self).__init__()
        assert(co%4==0)
        cos=[co//4]*4  # integer division keeps channel counts int under Python 3
        self.dim_size=dim_size
        self.activa=nn.Sequential()
        if norm:self.activa.add_module('norm',nn.BatchNorm1d(co))
        if relu:self.activa.add_module('relu',nn.ReLU(True))
        self.branch1 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[0], 1,stride=2)),
            ])) 
        self.branch2 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[1], 1)),
            ('norm1', nn.BatchNorm1d(cos[1])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[1],cos[1], 3,stride=2,padding=1)),
            ]))
        self.branch3 =nn.Sequential(OrderedDict([
            ('conv1', nn.Conv1d(cin,cos[2], 3,padding=1)),
            ('norm1', nn.BatchNorm1d(cos[2])),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv1d(cos[2],cos[2], 5,stride=2,padding=2)),
            ]))
        self.branch4 =nn.Sequential(OrderedDict([
            ('pool',nn.MaxPool1d(2)),
            ('conv3', nn.Conv1d(cin,cos[3], 5,stride=1,padding=2)),
            ]))
        if self.dim_size is not None:
            self.maxpool=nn.MaxPool1d(self.dim_size//2)  # // keeps the kernel size an int
Project: pytorch    Author: ezyang    | project source | file source
def test_maxpool(self):
        x = Variable(torch.randn(20, 16, 50))
        self.assertONNXExpected(export_to_string(nn.MaxPool1d(3, stride=2), x))
Project: MachineLearning    Author: timomernick    | project source | file source
def __init__(self):
        super(FFTModel, self).__init__()

        self.test_idx = 0
        self.test_frequencies, self.test_samples, self.test_ffts = get_batch(1)

        def init_conv(conv):
            conv.weight.data.normal_(0.0, 0.02)

        def init_linear(linear):
            linear.weight.data.normal_(0.0, 0.1)
            linear.bias.data.zero_()

        ndf = 1
        self.conv0 = nn.Conv1d(1, ndf, 9, bias=False)
        init_conv(self.conv0)

        self.maxPool = nn.MaxPool1d(2)

        self.relu = nn.ReLU(inplace=True)

        self.fc0_size = 5135 * ndf
        self.fc0 = nn.Linear(self.fc0_size, fft_size)

        init_linear(self.fc0)

        self.loss = nn.SmoothL1Loss(size_average=False)

        learning_rate = 0.0005
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)

        self.cuda()
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt):
        super(RNNText2, self).__init__()
        self.model_name = 'RNNText2'
        self.opt = opt

        # kernel_size = opt.kernel_size
        self.encoder = nn.Embedding(opt.vocab_size, opt.embedding_dim)

        self.title_lstm = nn.LSTM(input_size=opt.embedding_dim,
                                  hidden_size=opt.hidden_size,
                                  num_layers=1,
                                  bias=True,
                                  batch_first=False,
                                #   dropout=0.5,
                                  bidirectional=True
                                  )
        self.title_conv = nn.ModuleList([nn.Sequential(
                        nn.Conv1d(in_channels=opt.hidden_size * 2 + opt.embedding_dim,
                                out_channels=opt.title_dim,
                                kernel_size=kernel_size),
                        nn.ReLU(),
                        nn.MaxPool1d(kernel_size=(opt.title_seq_len - kernel_size + 1))
                    ) for kernel_size in opt.kernel_sizes])

        self.content_lstm = nn.LSTM(input_size=opt.embedding_dim,
                                    hidden_size=opt.hidden_size,
                                    num_layers=1,
                                    bias=True,
                                    batch_first=False,
                                    # dropout=0.5,
                                    bidirectional=True
                                    )

        self.content_conv = nn.ModuleList([nn.Sequential(
                                nn.Conv1d(in_channels=opt.hidden_size * 2 + opt.embedding_dim,
                                        out_channels=opt.content_dim,
                                        kernel_size=kernel_size),
                                nn.ReLU(),
                                nn.MaxPool1d(kernel_size=(opt.content_seq_len - kernel_size + 1))
                            )
                                for kernel_size in opt.kernel_sizes  ])

        self.dropout = nn.Dropout()
        self.fc = nn.Linear(len(opt.kernel_sizes) *
                            (opt.title_dim + opt.content_dim), opt.num_classes)

        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(
                np.load(opt.embedding_path)['vector']))
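The in_channels of opt.hidden_size * 2 + opt.embedding_dim implies that the forward pass (not shown here) concatenates the bidirectional LSTM output with the original embeddings, RCNN-style, before the convolutions. A hedged standalone sketch of that concatenation with illustrative sizes:

import torch
import torch.nn as nn

seq_len, batch, emb, hidden = 50, 4, 64, 128
title = torch.randn(seq_len, batch, emb)          # batch_first=False layout
lstm = nn.LSTM(emb, hidden, num_layers=1, bidirectional=True)
out, _ = lstm(title)                              # (50, 4, hidden*2)
feat = torch.cat([out, title], dim=2)             # (50, 4, hidden*2 + emb)
feat = feat.permute(1, 2, 0)                      # (batch, channels, seq_len) for Conv1d
print(feat.shape)                                 # torch.Size([4, 320, 50])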
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(RNNText, self).__init__()
        self.model_name = 'RNNText'
        self.opt=opt

        kernel_size = opt.kernel_size
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)

        self.title_lstm = nn.LSTM(  input_size = opt.embedding_dim,\
                            hidden_size = opt.hidden_size,
                            num_layers = 1,
                            bias = True,
                            batch_first = False,
                            # dropout = 0.5,
                            bidirectional = True
                            )
        self.title_conv = nn.Sequential(
                                nn.Conv1d(in_channels = opt.hidden_size*2 + opt.embedding_dim,
                                        out_channels = opt.title_dim,
                                        kernel_size = kernel_size),
                                nn.ReLU(),
                                # nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size + 1))
                            )

        self.content_lstm =nn.LSTM(  input_size = opt.embedding_dim,\
                            hidden_size = opt.hidden_size,
                            num_layers = 1,
                            bias = True,
                            batch_first = False,
                            # dropout = 0.5,
                            bidirectional = True
                            )

        self.content_conv = nn.Sequential(
            nn.Conv1d(in_channels = opt.hidden_size*2 + opt.embedding_dim,
                      out_channels = opt.content_dim,
                      kernel_size =  kernel_size),
            nn.ReLU(),
            # nn.MaxPool1d(kernel_size = (opt.content_seq_len - opt.kernel_size + 1))
        )
        self.dropout = nn.Dropout()
        self.fc = nn.Linear(3 * (opt.title_dim+opt.content_dim), opt.num_classes)
        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(MultiCNNTextBNDeep, self).__init__()
        self.model_name = 'MultiCNNTextBNDeep'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)

        title_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.title_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),

                                nn.Conv1d(in_channels = opt.title_dim,
                                out_channels = opt.title_dim,
                                kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size*2 + 2))
                            )
         for kernel_size in kernel_sizes]

        content_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.content_dim),
                                nn.ReLU(inplace=True),

                                nn.Conv1d(in_channels = opt.content_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.content_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.content_seq_len - kernel_size*2 + 2))
                            )
            for kernel_size in kernel_sizes ]

        self.title_convs = nn.ModuleList(title_convs)
        self.content_convs = nn.ModuleList(content_convs)

        self.fc = nn.Sequential(
            nn.Linear(len(kernel_sizes)*(opt.title_dim+opt.content_dim),opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )


        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
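The pool kernel opt.title_seq_len - kernel_size*2 + 2 follows from stacking two valid convolutions: each removes kernel_size - 1 positions, leaving seq_len - 2*(kernel_size - 1) = seq_len - kernel_size*2 + 2, which the pool then spans entirely. A check with illustrative sizes (BatchNorm omitted, as it does not change shapes):

import torch
import torch.nn as nn

seq_len, k, dim = 100, 3, 64
block = nn.Sequential(
    nn.Conv1d(64, dim, k), nn.ReLU(),        # length 100 -> 98
    nn.Conv1d(dim, dim, k), nn.ReLU(),       # length  98 -> 96
    nn.MaxPool1d(seq_len - k*2 + 2)          # length  96 -> 1
)
print(block(torch.randn(2, 64, seq_len)).shape)   # torch.Size([2, 64, 1])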
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(DeepText, self).__init__()
        self.model_name = 'DeepText'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)

        title_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.title_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),

                                nn.Conv1d(in_channels = opt.title_dim,
                                out_channels = opt.title_dim,
                                kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.title_seq_len - kernel_size*2 + 2))
                            )
         for kernel_size in kernel_sizes]

        content_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.content_dim),
                                nn.ReLU(inplace=True),

                                nn.Conv1d(in_channels = opt.content_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size),
                                nn.BatchNorm1d(opt.content_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.content_seq_len - kernel_size*2 + 2))
                            )
            for kernel_size in kernel_sizes ]

        self.title_convs = nn.ModuleList(title_convs)
        self.content_convs = nn.ModuleList(content_convs)

        self.fc = nn.Sequential(
            nn.Linear(len(kernel_sizes)*(opt.title_dim+opt.content_dim),opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )


        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
Project: PyTorchText    Author: chenyuntc    | project source | file source
def __init__(self, opt ):
        super(MultiCNNTextBNDeep, self).__init__()
        self.model_name = 'MultiCNNTextBNDeep-copy-1'
        self.opt=opt
        self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)

        title_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.title_dim,
                                        kernel_size = kernel_size[0]),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),

                                nn.Conv1d(in_channels = opt.title_dim,
                                out_channels = opt.title_dim,
                                kernel_size = kernel_size[1]),
                                nn.BatchNorm1d(opt.title_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.title_seq_len - (kernel_size[0]+kernel_size[1]) + 2))
                            )
         for kernel_size in kernel_sizes]

        content_convs = [ nn.Sequential(
                                nn.Conv1d(in_channels = opt.embedding_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size[0]),
                                nn.BatchNorm1d(opt.content_dim),
                                nn.ReLU(inplace=True),

                                nn.Conv1d(in_channels = opt.content_dim,
                                        out_channels = opt.content_dim,
                                        kernel_size = kernel_size[1]),
                                nn.BatchNorm1d(opt.content_dim),
                                nn.ReLU(inplace=True),
                                nn.MaxPool1d(kernel_size = (opt.content_seq_len - (kernel_size[0]+kernel_size[1])+ 2))
                            )
            for kernel_size in kernel_sizes ]

        self.title_convs = nn.ModuleList(title_convs)
        self.content_convs = nn.ModuleList(content_convs)

        self.fc = nn.Sequential(
            nn.Linear(len(kernel_sizes)*(opt.title_dim+opt.content_dim),opt.linear_hidden_size),
            nn.BatchNorm1d(opt.linear_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(opt.linear_hidden_size,opt.num_classes)
        )


        if opt.embedding_path:
            self.encoder.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path)['vector']))
Project: covfefe    Author: deepnn    | project source | file source
def pool(kernel_size, power=2, output_size=None, 
            out_ratio=None, stride=None, padding=0, 
            dilation=1, return_indices=False, ceil_mode=False, 
            mode='max', count_include_pad=True, _random_samples=None, dim=2):

    in_dim = dim
    if mode == 'max':
        if in_dim == 1:
            return nn.MaxPool1d(kernel_size=kernel_size, stride=stride, 
                        padding=padding, dilation=dilation, 
                        return_indices=return_indices, 
                        ceil_mode=ceil_mode)

        elif in_dim == 2:
            return nn.MaxPool2d(kernel_size=kernel_size, stride=stride, 
                        padding=padding, dilation=dilation, 
                        return_indices=return_indices, 
                        ceil_mode=ceil_mode)

        elif in_dim == 3:
            return nn.MaxPool3d(kernel_size=kernel_size, stride=stride, 
                        padding=padding, dilation=dilation, 
                        return_indices=return_indices, 
                        ceil_mode=ceil_mode)

    elif mode=='ave':
        if in_dim == 1:
            return nn.AvgPool1d(kernel_size=kernel_size, stride=stride, 
                        padding=padding, ceil_mode=ceil_mode, 
                        count_include_pad=count_include_pad)

        elif in_dim == 2:
            return nn.AvgPool2d(kernel_size=kernel_size, stride=stride, 
                        padding=padding, ceil_mode=ceil_mode, 
                        count_include_pad=count_include_pad)

        elif in_dim == 3:
            return nn.AvgPool3d(kernel_size=kernel_size, stride=stride)

    elif mode=='fractional_max':
        return nn.FractionalMaxPool2d(kernel_size=kernel_size, 
                        output_size=output_size,  # fixed: out_size was an undefined name
                        output_ratio=out_ratio, 
                        return_indices=return_indices, 
                        _random_samples=_random_samples)

    elif mode=='power':
        return nn.LPPool2d(norm_type=power, kernel_size=kernel_size, 
                        stride=stride, ceil_mode=ceil_mode)

# normalization
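A hedged usage sketch of the factory above (these calls assume the pool() definition is in scope; note that output_size and out_ratio are only consulted in the 'fractional_max' branch):

mp1 = pool(kernel_size=3, stride=2, dim=1)         # -> nn.MaxPool1d(3, stride=2)
ap2 = pool(kernel_size=2, mode='ave', dim=2)       # -> nn.AvgPool2d(2)
lp2 = pool(kernel_size=2, mode='power', power=4)   # -> nn.LPPool2d(norm_type=4, kernel_size=2)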
Project: char-cnn-pytorch    Author: srviest    | project source | file source
def __init__(self, args):
        super(CharCNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv1d(args.num_features, 256, kernel_size=7, stride=1),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3, stride=3)
        )

        self.conv2 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=7, stride=1),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3, stride=3)
        )            

        self.conv3 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=3, stride=1),
            nn.ReLU()
        )

        self.conv4 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=3, stride=1),
            nn.ReLU()    
        )

        self.conv5 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=3, stride=1),
            nn.ReLU()
        )

        self.conv6 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=3, stride=1),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3, stride=3)
        )


        self.fc1 = nn.Sequential(
            nn.Linear(8704, 1024),
            nn.ReLU(),
            nn.Dropout(p=args.dropout)
        )

        self.fc2 = nn.Sequential(
            nn.Linear(1024, 1024),
            nn.ReLU(),
            nn.Dropout(p=args.dropout)
        )

        self.fc3 = nn.Linear(1024, 4)
        self.log_softmax = nn.LogSoftmax(dim=1)  # dim is required by current PyTorch; dim=1 matches the old 2-D default
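The 8704 input size of fc1 is consistent with a 1014-character input, the length used by the Zhang et al. character-level CNN this model follows (an assumption; the input length is not shown in this excerpt): six valid convolutions and three pool-by-3 stages leave 34 positions, and 256 channels * 34 = 8704. The arithmetic:

# shape walk-through, assuming input of shape (batch, num_features, 1014)
L = 1014
L = (L - 7 + 1) // 3   # conv1 (k=7) + MaxPool1d(3, 3): 1008 // 3 = 336
L = (L - 7 + 1) // 3   # conv2: 330 // 3 = 110
L = L - 3 + 1          # conv3: 108
L = L - 3 + 1          # conv4: 106
L = L - 3 + 1          # conv5: 104
L = (L - 3 + 1) // 3   # conv6 + MaxPool1d(3, 3): 102 // 3 = 34
print(256 * L)         # 8704, the in_features of fc1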