Python torch.nn.functional module: max_pool1d() example source code

We extracted the following 35 code examples from open-source Python projects to illustrate how to use torch.nn.functional.max_pool1d().
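Before diving into the project snippets, here is a minimal sketch (assuming a recent PyTorch; tensor sizes are illustrative) of the idiom almost all of them share: when the pooling kernel spans the whole temporal dimension, F.max_pool1d reduces to global max-over-time pooling.

import torch
import torch.nn.functional as F

x = torch.randn(4, 100, 37)                 # (batch, channels, length)
pooled = F.max_pool1d(x, x.size(2))         # kernel = full length -> (4, 100, 1)
pooled = pooled.squeeze(2)                  # (4, 100): one max per channel
assert torch.equal(pooled, x.max(dim=2).values)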

Project: cnn-text-classification-pytorch    Author: Shawn1993
def forward(self, x):
        x = self.embed(x) # (N,W,D)

        if self.args.static:
            x = Variable(x.data)  # wrap the underlying tensor so the static embedding is not updated

        x = x.unsqueeze(1) # (N,Ci,W,D)

        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)


        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)

        x = torch.cat(x, 1)

        '''
        x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
        x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
        x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
        x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
        '''
        x = self.dropout(x) # (N,len(Ks)*Co)
        logit = self.fc1(x) # (N,C)
        return logit
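The forward pass above is the standard Kim (2014) text-CNN pipeline: embed, add a channel axis, convolve with several kernel widths, max-pool each feature map over time, then concatenate. A self-contained sketch of the same pipeline (class name and hyperparameters are illustrative, not taken from the project):

import torch
import torch.nn as nn
import torch.nn.functional as F

class TextCNN(nn.Module):
    def __init__(self, vocab_size=5000, embed_dim=128, num_classes=2,
                 kernel_sizes=(3, 4, 5), num_filters=100, dropout=0.5):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        # one Conv2d per kernel width; each kernel spans the full embedding dim
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, num_filters, (k, embed_dim)) for k in kernel_sizes])
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(len(kernel_sizes) * num_filters, num_classes)

    def forward(self, x):                       # x: (N, W) token ids
        x = self.embed(x).unsqueeze(1)          # (N, 1, W, D)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs]  # [(N, Co, W'), ...]
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]   # [(N, Co), ...]
        x = self.dropout(torch.cat(x, 1))       # (N, len(Ks)*Co)
        return self.fc(x)                       # (N, C)

logits = TextCNN()(torch.randint(0, 5000, (8, 40)))  # torch.Size([8, 2])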
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        x = self.embed(x)  # (N,W,D)
        x = self.dropout_embed(x)
        x = x.unsqueeze(1)  # (N,Ci,W,D)
        if self.args.batch_normalizations is True:
            x = [self.convs1_bn(F.tanh(conv(x))).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        else:
            # x = [self.dropout(F.relu(conv(x)).squeeze(3)) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            # x = [self.dropout(F.tanh(conv(x)).squeeze(3)) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            # x = [F.tanh(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        x = torch.cat(x, 1)
        x = self.dropout(x)  # (N,len(Ks)*Co)
        if self.args.batch_normalizations is True:
            x = self.fc1_bn(self.fc1(x))
            logit = self.fc2_bn(self.fc2(F.tanh(x)))
        else:
            logit = self.fc(x)
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        x = self.embed(x)
        x = self.dropout_embed(x)
        # x = x.view(len(x), x.size(1), -1)
        # x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)
        # print(self.hidden)

        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        bilstm_out = F.tanh(bilstm_out)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        bilstm_out = F.tanh(bilstm_out)
        # bilstm_out = self.dropout(bilstm_out)

        # bilstm_out = self.hidden2label1(bilstm_out)
        # logit = self.hidden2label2(F.tanh(bilstm_out))

        logit = self.hidden2label(bilstm_out)

        return logit
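The two transposes above move the BiLSTM output from (W, N, 2*hidden) to (N, 2*hidden, W) so that max_pool1d can pool over time; tanh is applied before and after pooling. A minimal equivalence check for the pooling step (shapes are illustrative):

import torch
import torch.nn.functional as F

out = torch.randn(37, 8, 600)                # (W, N, 2*hidden) as the BiLSTM returns it
out = out.permute(1, 2, 0)                   # (N, 2*hidden, W): same as the two transposes
pooled = F.max_pool1d(out, out.size(2)).squeeze(2)
assert torch.equal(pooled, out.max(dim=2).values)  # i.e. a global max over time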
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        embed = self.embed(x)
        embed = self.dropout(embed)
        x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)
        # print("bbb {}".format(self.hidden[0]))
        hidden = torch.cat(self.hidden, 0)
        # print("ccc {}".format(hidden.size()))
        hidden = torch.cat(hidden, 1)
        # print("ddd {}".format(hidden.size()))
        # bilstm_out = torch.transpose(bilstm_out, 0, 1)
        # bilstm_out = torch.transpose(bilstm_out, 1, 2)
        # print("aaa {}".format(bilstm_out.size()))
        # bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        # bilstm_out = F.avg_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        # print("sss {}".format(bilstm_out.size()))
        # print("Hidden {} ".format(hidden))
        logit = self.hidden2label(F.tanh(hidden))
        # print("Logit {} ".format(logit))
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        # print("aa", x)
        x = self.embed(x) # (N,W,D)
        # print("embed", x)

        if self.args.static:
            x = Variable(x.data)
        # print("var", x)

        x = x.unsqueeze(1) # (N,Ci,W,D)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        x = torch.cat(x, 1)
        '''
        x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
        x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
        x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
        x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
        '''
        x = self.dropout(x) # (N,len(Ks)*Co)
        logit = self.fc1(x) # (N,C)
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        embed = self.embed(x)
        # CNN
        embed = self.dropout(embed)
        cnn_x = embed
        cnn_x = cnn_x.unsqueeze(1)
        cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        cnn_x = torch.cat(cnn_x, 0)
        cnn_x = torch.transpose(cnn_x, 1, 2)

        # BiLSTM
        bilstm_out, self.hidden = self.bilstm(cnn_x, self.hidden)
        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)

        # linear
        cnn_bilstm_out = self.hidden2label1(F.tanh(bilstm_out))
        cnn_bilstm_out = self.hidden2label2(F.tanh(cnn_bilstm_out))

        # dropout
        logit = self.dropout(cnn_bilstm_out)

        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, input):
        embed = self.embed(input)
        embed = self.dropout(embed)  # note: adding this dropout reduced the accuracy
        input = embed.view(len(input), embed.size(1), -1)
        # gru
        gru_out, hidden = self.bigru(input, self.hidden)
        gru_out = torch.transpose(gru_out, 0, 1)
        gru_out = torch.transpose(gru_out, 1, 2)
        # pooling
        # gru_out = F.tanh(gru_out)
        gru_out = F.max_pool1d(gru_out, gru_out.size(2)).squeeze(2)
        gru_out = F.tanh(gru_out)
        # linear
        y = self.hidden2label(gru_out)
        logit = y
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        one_layer = self.embed(x)  # (N,W,D) #  torch.Size([64, 43, 300])
        # one_layer = self.dropout(one_layer)
        one_layer = one_layer.unsqueeze(1)  # (N,Ci,W,D)  #  torch.Size([64, 1, 43, 300])
        # one layer
        one_layer = [torch.transpose(F.relu(conv(one_layer)).squeeze(3), 1, 2) for conv in self.convs1] # torch.Size([64, 100, 36])
        # two layer
        two_layer = [F.relu(conv(layer.unsqueeze(1))).squeeze(3) for (conv, layer) in zip(self.convs2, one_layer)]  # avoid shadowing the one_layer list
        # print("two_layer {}".format(two_layer[0].size()))
        # pooling
        output = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in two_layer]   #  torch.Size([64, 100]) torch.Size([64, 100])
        output = torch.cat(output, 1)  # torch.Size([64, 300])
        # dropout
        output = self.dropout(output)
        # linear
        output = self.fc1(F.relu(output))
        logit = self.fc2(F.relu(output))
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        embed = self.embed(x)
        # CNN
        cnn_x = embed
        cnn_x = self.dropout(cnn_x)
        cnn_x = cnn_x.unsqueeze(1)
        cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        cnn_x = torch.cat(cnn_x, 0)
        cnn_x = torch.transpose(cnn_x, 1, 2)
        # GRU
        lstm_out, self.hidden = self.gru(cnn_x, self.hidden)
        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)
        lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
        # linear
        cnn_lstm_out = self.hidden2label1(F.tanh(lstm_out))
        cnn_lstm_out = self.hidden2label2(F.tanh(cnn_lstm_out))
        # output
        logit = cnn_lstm_out

        return logit
Project: semanaly    Author: zqhZY
def forward(self, x):
        x = self.embeding(x)

        x = x.unsqueeze(1)
        x = [F.relu(conv(x).squeeze(3)) for conv in self.convs]
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]
        x = torch.cat(x, 1)

        '''
                x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
                x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
                x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
                x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
        '''
        x = self.dropout(x)
        out = self.fc(x)
        return out
Project: char-cnn-pytorch    Author: srviest
def forward(self, x):
        x = self.embed(x) # (N,W,D)

        if self.args.static:
            x = Variable(x.data)  # wrap the underlying tensor so the static embedding is not updated

        x = x.unsqueeze(1) # (N,Ci,W,D)

        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)


        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)

        x = torch.cat(x, 1)

        '''
        x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
        x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
        x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
        x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
        '''
        x = self.dropout(x) # (N,len(Ks)*Co)
        logit = self.fc1(x) # (N,C)
        return logit
Project: generals_a3c    Author: yilundu
def forward(self, input):
        x = F.elu(self.conv1(input))
        x = F.elu(self.conv2(x))
        x = F.elu(self.conv3(x))

        # Next flatten the output to be batched into LSTM layers
        # The shape of x is batch_size, channels, height, width
        x = self.pre_lstm_bn(x)

        x = torch.transpose(x, 1, 3)
        x = torch.transpose(x, 1, 2)
        x = x.contiguous()

        x = x.view(x.size(0), self.batch, self.hidden_dim)
        x, hidden = self.lstm(x, (self.hidden_state, self.cell_state))
        self.hidden_state, self.cell_state = hidden

        x = torch.transpose(x, 2, 1)
        x = x.contiguous()
        x = x.view(x.size(0), self.hidden_dim, self.height, self.width)

        x = self.lstm_batch_norm(x)

        x = F.elu(self.conv4(x))
        x = F.elu(self.conv5(x))

        logit = self.move_conv(x)
        logit = logit.view(logit.size(0), -1)

        x = self.value_conv(x)
        x = x.view(x.size(0), self.hidden_dim, self.batch)
        x = F.max_pool1d(x, self.batch)
        x = x.squeeze(2)  # squeeze only the pooled dim; a bare squeeze() would also drop a batch dim of 1
        val = self.value_linear(x)

        return val, logit
Project: sru    Author: taolei87
def forward(self, x):
        # x is (batch, len, d)
        x = x.unsqueeze(1) # (batch, Ci, len, d)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(batch, Co, len), ...]
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]
        x = torch.cat(x, 1)
        return x
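Because the window always equals the current length, newer PyTorch can express the same reduction with F.adaptive_max_pool1d(x, 1), which is length-agnostic. A sketch (assuming PyTorch >= 0.4; tensors are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(8, 100, 37)                  # (batch, Co, len)
a = F.max_pool1d(x, x.size(2)).squeeze(2)    # explicit kernel = current length
b = F.adaptive_max_pool1d(x, 1).squeeze(2)   # same values, no size bookkeeping
assert torch.equal(a, b)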
Project: MP-CNN-Variants    Author: tuzhucheng
def _get_blocks_for_sentence(self, sent):
        block_a = {}
        block_b = {}
        for ws in self.filter_widths:
            if np.isinf(ws):
                sent_flattened, sent_flattened_size = sent.contiguous().view(sent.size(0), 1, -1), sent.size(1) * sent.size(2)
                block_a[ws] = {
                    'max': F.max_pool1d(sent_flattened, sent_flattened_size).view(sent.size(0), -1),
                    'min': F.max_pool1d(-1 * sent_flattened, sent_flattened_size).view(sent.size(0), -1),
                    'mean': F.avg_pool1d(sent_flattened, sent_flattened_size).view(sent.size(0), -1)
                }
                continue

            holistic_conv_out = self.holistic_conv_layers[ws - 1](sent)
            block_a[ws] = {
                'max': F.max_pool1d(holistic_conv_out, holistic_conv_out.size(2)).contiguous().view(-1, self.n_holistic_filters),
                'min': F.max_pool1d(-1 * holistic_conv_out, holistic_conv_out.size(2)).contiguous().view(-1, self.n_holistic_filters),
                'mean': F.avg_pool1d(holistic_conv_out, holistic_conv_out.size(2)).contiguous().view(-1, self.n_holistic_filters)
            }

            per_dim_conv_out = self.per_dim_conv_layers[ws - 1](sent)
            block_b[ws] = {
                'max': F.max_pool1d(per_dim_conv_out, per_dim_conv_out.size(2)).contiguous().view(-1, self.n_word_dim, self.n_per_dim_filters),
                'min': F.max_pool1d(-1 * per_dim_conv_out, per_dim_conv_out.size(2)).contiguous().view(-1, self.n_word_dim, self.n_per_dim_filters)
            }
        return block_a, block_b
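Note how the 'min' entries are computed: F.max_pool1d(-1 * t, k) is the max of the negated tensor, i.e. the negative of the per-channel minimum, so what is stored is -min(t) rather than min(t) itself. The underlying identity, as a quick check (tensors are illustrative):

import torch
import torch.nn.functional as F

t = torch.randn(8, 100, 37)
neg_min = F.max_pool1d(-1 * t, t.size(2)).squeeze(2)  # max over time of -t
assert torch.equal(-neg_min, t.min(dim=2).values)     # min(t) == -max(-t)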
Project: cnn-text-classification-pytorch    Author: Shawn1993
def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3) #(N,Co,W)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x
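This helper expects the 4-D tensor produced by unsqueeze(1) after embedding and collapses it to one feature vector per convolution. A usage sketch (sizes are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(1, 100, (3, 128))            # Ci=1, Co=100, kernel (K, D)
x = torch.randn(8, 1, 40, 128)                # (N, Ci, W, D)
h = F.relu(conv(x)).squeeze(3)                # (N, Co, W-K+1) = (8, 100, 38)
feat = F.max_pool1d(h, h.size(2)).squeeze(2)  # (8, 100)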
Project: seq_tagger    Author: OSU-slatelab
def fwdUttEnc(module, rnnOut):  # forward utterance encoder to get the utterance representation
    uttEncOut = None
    if SharedModel.args.utt_enc_type == 0:  # Encoding by summation
        uttEncOut = rnnOut.sum(0).squeeze(0)
    elif SharedModel.args.utt_enc_type == 1:    # Encoding by mean
        uttEncOut = rnnOut.mean(0).squeeze(0)
    else:   # Encoding by CNN
        uttEncOut = []
        for i, curConv in enumerate(module.uttEncoder):
            curConvInput = rnnOut.permute(1, 2, 0)
            curConvOut = curConv(curConvInput)
            curPoolOut = None
            if SharedModel.args.utt_enc_type == 2:  # using average pooling
                curPoolOut = F.avg_pool1d(curConvOut, curConvOut.data.size(2))
            else:   # using max pooling
                curPoolOut = F.max_pool1d(curConvOut, curConvOut.data.size(2))
            uttEncOut.append(curPoolOut)
        uttEncOut = torch.cat(uttEncOut, 1)
        uttEncOut = uttEncOut.squeeze(2)
        uttEncOut = F.tanh(uttEncOut)

    if SharedModel.args.utt_enc_noise == True:
        module.uttEncNoise.data.resize_(uttEncOut.size()).normal_(0, 0.1)   # Add white noises to the utterance encoding
        uttEncOut.add_(module.uttEncNoise)

    if SharedModel.args.utt_enc_bn == True:
        uttEncOut = module.uttBn(uttEncOut)
    return uttEncOut
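The three utterance-encoder types all reduce a (T, N, H) RNN output to a single vector per utterance: summation, mean, or a bank of 1-D convolutions followed by average or max pooling over time. A compact sketch of the two pooled CNN branches (shapes and the single conv are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

rnn_out = torch.randn(37, 8, 256)                    # (T, N, H)
conv = nn.Conv1d(256, 64, kernel_size=3)
conv_out = conv(rnn_out.permute(1, 2, 0))            # (N, 64, T-2)
max_enc = F.max_pool1d(conv_out, conv_out.size(2)).squeeze(2)  # (N, 64)
avg_enc = F.avg_pool1d(conv_out, conv_out.size(2)).squeeze(2)  # (N, 64)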
Project: Tacotron_pytorch    Author: root20
def forward(self, input, lengths):
        N, T = input.size(0), input.size(1)

        conv_bank_out = []
        input_t = input.transpose(1, 2)  # NxTxH -> NxHxT
        for i in range(self.num_filters):
            tmp_input = input_t
            if i % 2 == 0:
                tmp_input = tmp_input.unsqueeze(-1)
                tmp_input = F.pad(tmp_input, (0,0,0,1)).squeeze(-1)   # NxHxT
            conv_bank_out.append(self.conv_bank[i](tmp_input))

        residual = torch.cat(conv_bank_out, dim=1)                  # NxHFxT
        residual = F.relu(self.bn_list[0](residual))
        residual = F.max_pool1d(residual, 2, stride=1)
        residual = self.conv1(residual)                             # NxHxT
        residual = F.relu(self.bn_list[1](residual))
        residual = self.conv2(residual)                             # NxHxT
        residual = self.bn_list[2](residual).transpose(1,2)         # NxHxT -> NxTxH

        rnn_input = input
        if rnn_input.size() != residual.size():
            rnn_input = self.residual_proj(rnn_input)
        rnn_input = rnn_input + residual
        rnn_input = self.highway(rnn_input).view(N, T, -1)

        output = rnn.pack_padded_sequence(rnn_input, lengths, True)
        output, _ = self.BGRU(output)                               # zero h_0 is used by default
        output, _ = rnn.pad_packed_sequence(output, True)           # NxTx2H
        return output
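Note that F.max_pool1d(residual, 2, stride=1) on its own shortens the time axis from T to T-1, while the CBHG design calls for stride-1 pooling that preserves the original time resolution, which takes one extra frame of padding. A length-preserving sketch (the explicit pad is an assumption, not taken from this file):

import torch
import torch.nn.functional as F

x = torch.randn(8, 128, 100)                     # (N, H, T)
y = F.max_pool1d(F.pad(x, (0, 1)), 2, stride=1)  # right-pad T by one frame
assert y.size(2) == x.size(2)                    # output length == input length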
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        # print("fffff",x)
        embed = self.embed(x)

        # CNN
        cnn_x = embed
        cnn_x = torch.transpose(cnn_x, 0, 1)
        cnn_x = cnn_x.unsqueeze(1)
        cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        cnn_x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
        cnn_x = torch.cat(cnn_x, 1)
        cnn_x = self.dropout(cnn_x)

        # LSTM
        lstm_x = embed.view(len(x), embed.size(1), -1)
        lstm_out, self.hidden = self.lstm(lstm_x, self.hidden)
        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)
        # lstm_out = F.tanh(lstm_out)
        lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)

        # CNN and LSTM cat
        cnn_x = torch.transpose(cnn_x, 0, 1)
        lstm_out = torch.transpose(lstm_out, 0, 1)
        cnn_lstm_out = torch.cat((cnn_x, lstm_out), 0)
        cnn_lstm_out = torch.transpose(cnn_lstm_out, 0, 1)

        # linear
        cnn_lstm_out = self.hidden2label1(F.tanh(cnn_lstm_out))
        cnn_lstm_out = self.hidden2label2(F.tanh(cnn_lstm_out))

        # output
        logit = cnn_lstm_out
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        x_no_static = self.embed_no_static(x)
        # x_no_static = self.dropout(x_no_static)
        x_static = self.embed_static(x)
        # fix the embedding
        x_static = Variable(x_static.data)
        # x_static = self.dropout(x_static)
        x = torch.stack([x_static, x_no_static], 1)
        one_layer = x  # (N,W,D) #  torch.Size([64, 43, 300])
        # print("one_layer {}".format(one_layer.size()))
        # one_layer = self.dropout(one_layer)
        # one_layer = one_layer.unsqueeze(1)  # (N,Ci,W,D)  #  torch.Size([64, 1, 43, 300])
        # one layer
        one_layer = [torch.transpose(F.relu(conv(one_layer)).squeeze(3), 1, 2).unsqueeze(1) for conv in self.convs1] # torch.Size([64, 100, 36])
        # one_layer = [F.relu(conv(one_layer)).squeeze(3).unsqueeze(1) for conv in self.convs1] # torch.Size([64, 100, 36])
        # print(one_layer[0].size())
        # print(one_layer[1].size())
        # two layer
        two_layer = [F.relu(conv(one_layer)).squeeze(3) for (conv, one_layer) in zip(self.convs2, one_layer)]
        # print("two_layer {}".format(two_layer[0].size()))
        # print("two_layer {}".format(two_layer[1].size()))
        # pooling
        output = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in two_layer]   #  torch.Size([64, 100]) torch.Size([64, 100])
        output = torch.cat(output, 1)  # torch.Size([64, 300])
        # dropout
        output = self.dropout(output)
        # linear
        output = self.fc1(output)
        logit = self.fc2(F.relu(output))
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3) #(N,Co,W)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        # print("aaaaa")
        x_no_static = self.embed_no_static(x)
        # x_no_static = self.dropout(x_no_static)
        x_static = self.embed_static(x)
        # fix the embedding
        # x_static = Variable(x_static.data)
        # x_static = self.dropout(x_static)
        x = torch.stack([x_static, x_no_static], 1)
        # x = x.unsqueeze(1) # (N,Ci,W,D)
        x = self.dropout(x)
        if self.args.batch_normalizations is True:
            x = [F.relu(self.convs1_bn(conv(x))).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        else:
            x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
        x = torch.cat(x, 1)
        '''
        x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
        x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
        x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
        x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
        '''
        x = self.dropout(x)  # (N,len(Ks)*Co)

        # both branches were identical, so the batch_normalizations flag is a no-op here
        x = self.fc1(x)
        logit = self.fc2(F.relu(x))
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        embed = self.embed(x)

        # CNN
        cnn_x = embed
        cnn_x = torch.transpose(cnn_x, 0, 1)
        cnn_x = cnn_x.unsqueeze(1)
        # cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        cnn_x = [conv(cnn_x).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        # cnn_x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
        cnn_x = [F.tanh(F.max_pool1d(i, i.size(2)).squeeze(2)) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
        cnn_x = torch.cat(cnn_x, 1)
        cnn_x = self.dropout(cnn_x)

        # BiLSTM
        bilstm_x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(bilstm_x, self.hidden)
        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        # bilstm_out = F.tanh(bilstm_out)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        bilstm_out = F.tanh(bilstm_out)

        # CNN and BiLSTM CAT
        cnn_x = torch.transpose(cnn_x, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        cnn_bilstm_out = torch.cat((cnn_x, bilstm_out), 0)
        cnn_bilstm_out = torch.transpose(cnn_bilstm_out, 0, 1)

        # linear
        cnn_bilstm_out = self.hidden2label1(F.tanh(cnn_bilstm_out))
        # cnn_bilstm_out = F.tanh(self.hidden2label1(cnn_bilstm_out))
        cnn_bilstm_out = self.hidden2label2(F.tanh(cnn_bilstm_out))
        # cnn_bilstm_out = self.hidden2label2(cnn_bilstm_out)

        # output
        logit = cnn_bilstm_out
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3) #(N,Co,W)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        # print("source x {} ".format(x.size()))
        x = self.embed(x)  # (N,W,D)
        x = self.dropout(x)
        x = x.unsqueeze(1)  # (N,Ci,W,D)
        if self.args.batch_normalizations is True:
            x = [self.convs1_bn(F.tanh(conv(x))).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        else:
            # x = [self.dropout(F.relu(conv(x)).squeeze(3)) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            # x = [F.tanh(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            # x = [conv(x).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        x = torch.cat(x, 1)
        # x = self.dropout(x)  # (N,len(Ks)*Co)
        if self.args.batch_normalizations is True:
            x = self.fc1_bn(self.fc1(x))
            fc = self.fc2_bn(self.fc2(F.tanh(x)))
        else:
            fc = self.fc1(x)
            # fc = self.fc2(F.relu(x))

        # print("xxx {} ".format(x.size()))

        gate_layer = F.sigmoid(self.gate_layer(x))

        # calculate highway layer values
        # print(" fc_size {} gate_layer_size {}".format(fc.size(), gate_layer.size()))
        gate_fc_layer = torch.mul(fc, gate_layer)
        # print("gate_layer {} ".format(gate_layer))
        # print("1 - gate_layer size {} ".format((1 - gate_layer).size()))
        # written this way it runs, but it does not match the Highway Networks formula
        # gate_input = torch.mul((1 - gate_layer), fc)
        gate_input = torch.mul((1 - gate_layer), x)
        highway_output = torch.add(gate_fc_layer, gate_input)

        logit = self.logit_layer(highway_output)

        return logit
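The comments above already flag that gating x instead of fc deviates from the Highway Networks formula y = g * H(x) + (1 - g) * x, which also requires H(x) and x to have the same width. A minimal faithful highway layer, as a sketch (module name and sizes are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

class Highway(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.transform = nn.Linear(dim, dim)  # H(x); same width as x
        self.gate = nn.Linear(dim, dim)       # produces the gate g

    def forward(self, x):
        g = torch.sigmoid(self.gate(x))
        h = F.relu(self.transform(x))
        return g * h + (1.0 - g) * x          # y = g*H(x) + (1-g)*x

y = Highway(300)(torch.randn(8, 300))         # torch.Size([8, 300])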
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        embed = self.embed(x)
        x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)

        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        bilstm_out = F.tanh(bilstm_out)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        y = self.hidden2label1(bilstm_out)
        y = self.hidden2label2(y)
        logit = y
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        embed = self.embed(x)

        embed = self.dropout(embed)

        # CNN
        cnn_x = embed
        cnn_x = torch.transpose(cnn_x, 0, 1)
        cnn_x = cnn_x.unsqueeze(1)
        # cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        cnn_x = [conv(cnn_x).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        # cnn_x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
        cnn_x = [F.tanh(F.max_pool1d(i, i.size(2)).squeeze(2)) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
        cnn_x = torch.cat(cnn_x, 1)
        cnn_x = self.dropout(cnn_x)

        # BiGRU
        bigru_x = embed.view(len(x), embed.size(1), -1)
        bigru_x, self.hidden = self.bigru(bigru_x, self.hidden)
        bigru_x = torch.transpose(bigru_x, 0, 1)
        bigru_x = torch.transpose(bigru_x, 1, 2)
        # bilstm_out = F.tanh(bilstm_out)
        bigru_x = F.max_pool1d(bigru_x, bigru_x.size(2)).squeeze(2)
        bigru_x = F.tanh(bigru_x)

        # CNN and BiGRU CAT
        cnn_x = torch.transpose(cnn_x, 0, 1)
        bigru_x = torch.transpose(bigru_x, 0, 1)
        cnn_bigru_out = torch.cat((cnn_x, bigru_x), 0)
        cnn_bigru_out = torch.transpose(cnn_bigru_out, 0, 1)

        # linear
        cnn_bigru_out = self.hidden2label1(F.tanh(cnn_bigru_out))
        logit = self.hidden2label2(F.tanh(cnn_bigru_out))

        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        x = self.embed(x)
        x = self.dropout(x)
        # x = x.view(len(x), x.size(1), -1)
        # x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)

        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        # bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2))
        bilstm_out = bilstm_out.squeeze(2)

        hidden2label = self.hidden2label1(F.tanh(bilstm_out))

        gate_layer = F.sigmoid(self.gate_layer(bilstm_out))
        # calculate highway layer values
        gate_hidden_layer = torch.mul(hidden2label, gate_layer)
        # written this way it runs, but it does not match the Highway Networks formula
        # gate_input = torch.mul((1 - gate_layer), hidden2label)
        gate_input = torch.mul((1 - gate_layer), bilstm_out)
        highway_output = torch.add(gate_hidden_layer, gate_input)

        logit = self.logit_layer(highway_output)

        return logit
Project: semanaly    Author: zqhZY
def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)  # (N,Co,W)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x
Project: Atari-Game-with-DQN    Author: tonyabracadabra
def forward(self, x):
        embed = self.embed(x)
        x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)

        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)

        bilstm_out = F.tanh(bilstm_out)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        y = self.hidden2label1(bilstm_out)
        y = self.hidden2label2(y)
        logit = y
        return logit
Project: keita    Author: iwasaki-kenta
def forward(self, x):
        # Transpose to the shape (sentence_batch size, word dim, sequence length)
        x = x.transpose(0, 1).transpose(1, 2)

        feature_maps = []

        for layer in self.layers:
            x = layer(x)
            feature_maps.append(F.max_pool1d(x, kernel_size=x.size(2)).squeeze(2))  # squeeze(2): a bare squeeze() would also drop a batch dim of 1

        features = torch.cat(feature_maps, dim=1)
        return features
Project: keita    Author: iwasaki-kenta
def forward(self, x):
        """
        A bidirectional RNN encoder. Has support for global max/average pooling.

        :param x: A tuple of Variable's representing padded sentence tensor batch
            [seq. length, batch size, embed. size] and sentence lengths.
        :return: Global max/average pooled embedding from bidirectional RNN encoder of [batch_size, hidden_size]
        """

        sentences, sentence_lengths = x

        # Sort sentences by descending length.
        sorted_sentence_lengths, sort_indices = torch.sort(sentence_lengths, dim=0, descending=True)
        _, unsort_indices = torch.sort(sort_indices, dim=0)

        sorted_sentence_lengths = sorted_sentence_lengths.data
        sorted_sentences = sentences.index_select(1, sort_indices)

        # Handle padding for RNN's.
        packed_sentences = nn.utils.rnn.pack_padded_sequence(sorted_sentences, sorted_sentence_lengths.clone().cpu().numpy())

        # [seq. length, sentence_batch size, 2 * num. layers * num. hidden]
        encoder_outputs = self.encoder(packed_sentences)[0]
        encoder_outputs = nn.utils.rnn.pad_packed_sequence(encoder_outputs)[0]

        # Unsort outputs.
        encoder_outputs = encoder_outputs.index_select(1, unsort_indices)

        # Apply global max/average pooling 1D.
        encoder_outputs = encoder_outputs.transpose(0, 2).transpose(0, 1)
        if self.pooling_mode == "max":
            encoder_outputs = F.max_pool1d(encoder_outputs, kernel_size=encoder_outputs.size(2))
        elif self.pooling_mode == "avg":
            encoder_outputs = F.avg_pool1d(encoder_outputs, kernel_size=encoder_outputs.size(2))

        encoder_outputs = encoder_outputs.squeeze(2)  # squeeze only the pooled dim to keep a batch of size 1 intact

        return encoder_outputs
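Because pooling here runs over the padded length, a max can land on a padded (all-zero) position whenever every real activation in a channel is negative. A masked variant, as a sketch (same (T, N, H) layout; the masking is an addition, not from the project):

import torch
import torch.nn.functional as F

out = torch.randn(37, 8, 512)                   # (T, N, 2*hidden), zero-padded
lengths = torch.randint(5, 38, (8,))            # true length of each sentence
mask = torch.arange(37).unsqueeze(1) < lengths  # (T, N): True on real tokens
out = out.masked_fill(~mask.unsqueeze(2), float('-inf'))
out = out.permute(1, 2, 0)                      # (N, 2*hidden, T)
pooled = F.max_pool1d(out, out.size(2)).squeeze(2)  # pads can no longer win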
Project: SeqGAN-PyTorch    Author: ZiJianZhao
def forward(self, x):
        """
        Args:
            x: (batch_size * seq_len)
        """
        emb = self.emb(x).unsqueeze(1)  # batch_size * 1 * seq_len * emb_dim
        convs = [F.relu(conv(emb)).squeeze(3) for conv in self.convs]  # [batch_size * num_filter * length]
        pools = [F.max_pool1d(conv, conv.size(2)).squeeze(2) for conv in convs] # [batch_size * num_filter]
        pred = torch.cat(pools, 1)  # batch_size * num_filters_sum
        highway = self.highway(pred)
        g = F.sigmoid(highway)                                # compute the gate once
        pred = g * F.relu(highway) + (1. - g) * pred          # y = g*H(x) + (1-g)*x
        pred = self.softmax(self.lin(self.dropout(pred)))
        return pred
Project: char-cnn-pytorch    Author: srviest
def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3) #(N,Co,W)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x
Project: pytorch2c    Author: lantiga
def base_test():

    fc1 = nn.Linear(10,20)
    fc1.weight.data.normal_(0.0,1.0)
    fc1.bias.data.normal_(0.0,1.0)

    fc2 = nn.Linear(20,2)
    fc2.weight.data.normal_(0.0,1.0)
    fc2.bias.data.normal_(0.0,1.0)

    fc3 = nn.Linear(10,2)
    fc3.weight.data.normal_(0.0,1.0)
    fc3.bias.data.normal_(0.0,1.0)

    fc4 = nn.Linear(10,2)
    fc4.weight.data.normal_(0.0,1.0)
    fc4.bias.data.normal_(0.0,1.0)

    softmax = nn.Softmax()

    model0 = lambda x: F.log_softmax(fc2(F.relu(fc1(x))))
    model1 = lambda x: F.softmax(F.elu(fc3(x)))
    model2 = lambda x: F.softmax(F.tanh(fc3(x)))
    model3 = lambda x: F.softmax(F.sigmoid(fc3(x)))
    model4 = lambda x: softmax(F.leaky_relu(fc4(x))).clone()
    model5 = lambda x: softmax(F.logsigmoid(fc4(x.transpose(0,1))))
    model6 = lambda x: fc3(F.max_pool2d(x.unsqueeze(dim=0),2).squeeze())
    model7 = lambda x: fc3(F.max_pool2d(x.unsqueeze(dim=0),2).squeeze(dim=0))
    model8 = lambda x: fc3(F.max_pool3d(x.unsqueeze(0),2).squeeze())
    model9 = lambda x: fc3(F.max_pool1d(x.abs().view(1,1,-1),4).squeeze().view(10,10))
    #model10 = lambda x: fc3(x.double())
    #model10 = lambda x: fc3(x.view(1,10,10).select(0,0))
    model10 = lambda x, y: F.softmax(F.tanh(fc3(torch.cat((x,y),1))))

    data = Variable(torch.rand(10,10))
    data2 = Variable(torch.rand(20,20))
    data1a = Variable(torch.rand(10,5))
    data1b = Variable(torch.rand(10,5))
    data3 = Variable(torch.rand(2,20,20))

    out = model0(data) + \
          model1(data) * model2(data) / model3(data) / 2.0 + \
          2.0 * model4(data) + model5(data) + 1 - 2.0 + \
          model6(data2) + model7(data2) + model8(data3) + model9(data2) + model10(data1a,data1b)

    out_path = 'out'
    if not os.path.isdir(out_path):
        os.mkdir(out_path)
    uid = str(uuid.uuid4())

    torch2c.compile(out,'base',os.path.join(out_path,uid),compile_test=True)
Project: BuboQA    Author: castorini
def forward(self, x):
        # x = (sequence length, batch_size, dimension of embedding)
        text = x.text
        batch_size = text.size()[1]
        x = self.embed(text)
        if self.config.relation_prediction_mode.upper() == "LSTM":
            # h0 / c0 = (layer*direction, batch_size, hidden_dim)
            if self.config.cuda:
                h0 = Variable(torch.zeros(self.config.num_layer * 2, batch_size,
                                          self.config.hidden_size).cuda())
                c0 = Variable(torch.zeros(self.config.num_layer * 2, batch_size,
                                          self.config.hidden_size).cuda())
            else:
                h0 = Variable(torch.zeros(self.config.num_layer * 2, batch_size,
                                          self.config.hidden_size))
                c0 = Variable(torch.zeros(self.config.num_layer * 2, batch_size,
                                          self.config.hidden_size))
            # output = (sentence length, batch_size, hidden_size * num_direction)
            # ht = (layer*direction, batch, hidden_dim)
            # ct = (layer*direction, batch, hidden_dim)
            outputs, (ht, ct) = self.lstm(x, (h0, c0))
            tags = self.hidden2tag(ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1))
            scores = F.log_softmax(tags)
            return scores
        elif self.config.relation_prediction_mode.upper() == "GRU":
            if self.config.cuda:
                h0 = Variable(torch.zeros(self.config.num_layer * 2, batch_size,
                                          self.config.hidden_size).cuda())
            else:
                h0 = Variable(torch.zeros(self.config.num_layer * 2, batch_size,
                                          self.config.hidden_size))
            outputs, ht = self.gru(x, h0)

            tags = self.hidden2tag(ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1))
            scores = F.log_softmax(tags)
            return scores
        elif self.config.relation_prediction_mode.upper() == "CNN":
            x = x.transpose(0, 1).contiguous().unsqueeze(1)  # (batch, channel_input, sent_len, embed_dim)
            x = [F.relu(self.conv1(x)).squeeze(3), F.relu(self.conv2(x)).squeeze(3), F.relu(self.conv3(x)).squeeze(3)]
            # (batch, channel_output, ~=sent_len) * Ks
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # max-over-time pooling
            # (batch, channel_output) * Ks
            x = torch.cat(x, 1)  # (batch, channel_output * Ks)
            x = self.dropout(x)
            logit = self.fc1(x)  # (batch, target_size)
            scores = F.log_softmax(logit)
            return scores
        else:
            print("Unknown Mode")
            exit(1)
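Most snippets on this page predate PyTorch 0.4: F.log_softmax is called without a dim argument, and F.tanh / F.sigmoid are used where current releases expect torch.tanh / torch.sigmoid. When porting, pass dim explicitly; for the CNN branch above that means (a sketch, not the project's code):

import torch
import torch.nn.functional as F

logit = torch.randn(8, 19)            # (batch, target_size), as produced by self.fc1
scores = F.log_softmax(logit, dim=1)  # current PyTorch requires an explicit dim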