Python torch.nn.functional module: relu() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.functional.relu().
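As a quick reference before the project excerpts: F.relu(input, inplace=False) clamps every negative entry to zero, and the inplace flag overwrites the input's storage instead of allocating a new tensor. A minimal, self-contained sketch (the values are invented for illustration):

import torch
import torch.nn.functional as F

x = torch.tensor([[-1.0, 0.0, 2.5]])
print(F.relu(x))         # tensor([[0., 0., 2.5]]): negatives clamped to zero
F.relu(x, inplace=True)  # same result, written directly into x's storage
print(x)                 # x itself is now rectified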

Project: open-reid    Author: Cysu
def forward(self, x):
        for name, module in self.base._modules.items():
            if name == 'avgpool':
                break
            x = module(x)

        if self.cut_at_pooling:
            return x

        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(x.size(0), -1)

        if self.has_embedding:
            x = self.feat(x)
            x = self.feat_bn(x)
        if self.norm:
            x = F.normalize(x)
        elif self.has_embedding:
            x = F.relu(x)
        if self.dropout > 0:
            x = self.drop(x)
        if self.num_classes > 0:
            x = self.classifier(x)
        return x
Project: pointnet2.pytorch    Author: eriche2016
def __init__(self, num_points = 2500):
        super(STN3d, self).__init__()
        self.num_points = num_points
        self.conv1 = nn.Conv1d(3, 64, 1)
        self.conv2 = nn.Conv1d(64, 128, 1)
        self.conv3 = nn.Conv1d(128, 1024, 1)
        self.mp1 = nn.MaxPool1d(num_points)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()

        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)
Project: ResNeXt-DenseNet    Author: D-X-Y
def forward(self, x):
    residual = x

    bottleneck = self.conv_reduce(x)
    bottleneck = F.relu(self.bn_reduce(bottleneck), inplace=True)

    bottleneck = self.conv_conv(bottleneck)
    bottleneck = F.relu(self.bn(bottleneck), inplace=True)

    bottleneck = self.conv_expand(bottleneck)
    bottleneck = self.bn_expand(bottleneck)

    if self.downsample is not None:
      residual = self.downsample(x)

    return F.relu(residual + bottleneck, inplace=True)
Project: ResNeXt-DenseNet    Author: D-X-Y
def forward(self, x):
    if isinstance(x, list):
      x, is_list, features = x[0], True, x[1:]
    else:
      is_list, features = False, None
    residual = x

    conv_a = self.conv_a(x)
    bn_a = self.bn_a(conv_a)
    relu_a = F.relu(bn_a, inplace=True)

    conv_b = self.conv_b(relu_a)
    bn_b = self.bn_b(conv_b)

    if self.downsample is not None:
      residual = self.downsample(x)

    output = F.relu(residual + bn_b, inplace=True)

    if is_list:
      return [output] + features + [bn_a, bn_b]
    else:
      return output
Project: ResNeXt-DenseNet    Author: D-X-Y
def forward(self, x):
    if isinstance(x, list):
      assert len(x) == 1, 'The length of inputs must be one vs {}'.format(len(x))
      x, is_list = x[0], True
    else:
      x, is_list = x, False
    x = self.conv_1_3x3(x)
    x = F.relu(self.bn_1(x), inplace=True)

    if is_list: x = [x]
    x = self.stage_1(x)
    x = self.stage_2(x)
    x = self.stage_3(x)
    if is_list:
      x, features = x[0], x[1:]
    else:
      features = None
    x = self.avgpool(x)
    x = x.view(x.size(0), -1)
    cls = self.classifier(x)

    if is_list: return cls, features
    else:       return cls
Project: RetinaNet    Author: c0nn3r
def __init__(self, mode, anchors=9, classes=80, depth=4,
                 base_activation=F.relu,
                 output_activation=F.sigmoid):
        super(SubNet, self).__init__()
        self.anchors = anchors
        self.classes = classes
        self.depth = depth
        self.base_activation = base_activation
        self.output_activation = output_activation

        self.subnet_base = nn.ModuleList([conv3x3(256, 256, padding=1)
                                          for _ in range(depth)])

        if mode == 'boxes':
            self.subnet_output = conv3x3(256, 4 * self.anchors, padding=1)
        elif mode == 'classes':
            # add an extra dim for confidence
            self.subnet_output = conv3x3(256, (1 + self.classes) * self.anchors, padding=1)

        self._output_layer_init(self.subnet_output.bias.data)
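The conv3x3 helper used above is not included in this excerpt; a minimal sketch, assuming it is the usual thin wrapper around nn.Conv2d with a 3x3 kernel (the padding=1 passed at the call sites keeps the spatial size unchanged):

def conv3x3(in_channels, out_channels, **kwargs):
    # Hypothetical helper: a plain 3x3 convolution; kwargs forward padding, stride, etc.
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, **kwargs)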
Project: colorNet-pytorch    Author: shufanwu
def forward(self, x1, x2):
        x1 = F.relu(self.bn1(self.conv1(x1)))
        x1 = F.relu(self.bn2(self.conv2(x1)))
        x1 = F.relu(self.bn3(self.conv3(x1)))
        x1 = F.relu(self.bn4(self.conv4(x1)))
        x1 = F.relu(self.bn5(self.conv5(x1)))
        x1 = F.relu(self.bn6(self.conv6(x1)))
        if self.training:
            x2 = x1.clone()
        else:
            x2 = F.relu(self.bn1(self.conv1(x2)))
            x2 = F.relu(self.bn2(self.conv2(x2)))
            x2 = F.relu(self.bn3(self.conv3(x2)))
            x2 = F.relu(self.bn4(self.conv4(x2)))
            x2 = F.relu(self.bn5(self.conv5(x2)))
            x2 = F.relu(self.bn6(self.conv6(x2)))
        return x1, x2
Project: colorNet-pytorch    Author: shufanwu
def forward(self, mid_input, global_input):
        w = mid_input.size()[2]
        h = mid_input.size()[3]
        global_input = global_input.unsqueeze(2).unsqueeze(2).expand_as(mid_input)
        fusion_layer = torch.cat((mid_input, global_input), 1)
        fusion_layer = fusion_layer.permute(2, 3, 0, 1).contiguous()
        fusion_layer = fusion_layer.view(-1, 512)
        fusion_layer = self.bn1(self.fc1(fusion_layer))
        fusion_layer = fusion_layer.view(w, h, -1, 256)

        x = fusion_layer.permute(2, 3, 0, 1).contiguous()
        x = F.relu(self.bn2(self.conv1(x)))
        x = self.upsample(x)
        x = F.relu(self.bn3(self.conv2(x)))
        x = F.relu(self.bn4(self.conv3(x)))
        x = self.upsample(x)
        x = F.sigmoid(self.bn5(self.conv4(x)))
        x = self.upsample(self.conv5(x))
        return x
Project: DenseNet    Author: kevinzakka
def forward(self, x):
        """
        Compute the forward pass of the composite transformation H(x),
        where x is the concatenation of the current and all preceding
        feature maps.
        """
        if self.bottleneck:
            out = self.conv1(F.relu(self.bn1(x)))
            if self.p > 0:
                out = F.dropout(out, p=self.p, training=self.training)
            out = self.conv2(F.relu(self.bn2(out)))
            if self.p > 0:
                out = F.dropout(out, p=self.p, training=self.training)
        else:
            out = self.conv2(F.relu(self.bn2(x)))
            if self.p > 0:
                out = F.dropout(out, p=self.p, training=self.training)  
        return torch.cat((x, out), 1)
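The torch.cat((x, out), 1) on the last line is what makes the block "dense": each layer appends its newly computed feature maps to everything it received, so the channel count grows by the growth rate at every layer. A minimal sketch of that accumulation (shapes invented for illustration):

import torch

x = torch.randn(8, 64, 32, 32)    # concatenation of all preceding feature maps
out = torch.randn(8, 12, 32, 32)  # this layer's new features (growth rate k = 12)
print(torch.cat((x, out), 1).size())  # torch.Size([8, 76, 32, 32])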
Project: torch_light    Author: ne7ermore
def forward(self, x):
        n_idx = 0
        c_idx = 1
        h_idx = 2
        w_idx = 3

        x = self.lookup_table(x)
        x = x.unsqueeze(c_idx)

        enc_outs = []
        for encoder in self.encoders:
            enc_ = F.relu(encoder(x))
            k_h = enc_.size()[h_idx]
            enc_ = F.max_pool2d(enc_, kernel_size=(k_h, 1))
            enc_ = enc_.squeeze(w_idx)
            enc_ = enc_.squeeze(h_idx)
            enc_outs.append(enc_)

        encoding = self.dropout(torch.cat(enc_outs, 1))
        return F.log_softmax(self.logistic(encoding))
Project: FastNeuralStyle    Author: bengxy
def forward(self, X):
        h = F.relu(self.conv1_1(X))
        h = F.relu(self.conv1_2(h))
        relu1_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        relu2_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        relu3_3 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        relu4_3 = h

        return [relu1_2,relu2_2,relu3_3,relu4_3]
Project: simple-pix2pix-pytorch    Author: Eiji-Kb
def forward(self, x):           
        en0 = self.c0(x)
        en1 = self.bnc1(self.c1(F.leaky_relu(en0, negative_slope=0.2)))
        en2 = self.bnc2(self.c2(F.leaky_relu(en1, negative_slope=0.2)))
        en3 = self.bnc3(self.c3(F.leaky_relu(en2, negative_slope=0.2)))
        en4 = self.bnc4(self.c4(F.leaky_relu(en3, negative_slope=0.2)))
        en5 = self.bnc5(self.c5(F.leaky_relu(en4, negative_slope=0.2)))
        en6 = self.bnc6(self.c6(F.leaky_relu(en5, negative_slope=0.2)))
        en7 = self.c7(F.leaky_relu(en6, negative_slope=0.2))

        de7 = self.bnd7(self.d7(F.relu(en7)))
        de6 = F.dropout(self.bnd6(self.d6(F.relu(torch.cat((en6, de7),1)))))
        de5 = F.dropout(self.bnd5(self.d5(F.relu(torch.cat((en5, de6),1)))))

        de4 = F.dropout(self.bnd4(self.d4(F.relu(torch.cat((en4, de5),1)))))
        de3 = self.bnd3(self.d3(F.relu(torch.cat((en3, de4),1))))
        de2 = self.bnd2(self.d2(F.relu(torch.cat((en2, de3),1))))
        de1 = self.bnd1(self.d1(F.relu(torch.cat((en1, de2),1))))

        de0 = F.tanh(self.d0(F.relu(torch.cat((en0, de1),1))))       

        return de0
Project: tiny-style-transfer    Author: ggsonic
def vgg(inputs, model):
    '''VGG definition with style and content outputs.
    '''
    style, content = [], []

    def block(x, ids):
        for i in ids:
            x = F.relu(F.conv2d(x, Variable(model.features[i].weight.data.cuda()),Variable(model.features[i].bias.data.cuda()), 1, 1), inplace=True)
            if i in style_layers:
                style.append(gram(x))
            if i in content_layers:
                content.append(x)
        return F.max_pool2d(x, 2, 2)

    o = block(inputs, [0, 2])
    o = block(o, [5, 7])
    o = block(o, [10, 12, 14])
    o = block(o, [17, 19, 21])
    o = block(o, [24, 26, 28])
    return style, content
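The gram function called inside block is not defined in this excerpt; a minimal sketch, assuming the standard style-transfer Gram matrix (per-sample channel correlations, normalized by the number of activations):

def gram(x):
    # Hypothetical implementation: (N, C, H, W) feature map -> (N, C, C) channel correlations.
    n, c, h, w = x.size()
    f = x.view(n, c, h * w)
    return f.bmm(f.transpose(1, 2)) / (c * h * w)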
Project: cnn-text-classification-pytorch    Author: Shawn1993
def forward(self, x):
        x = self.embed(x) # (N,W,D)

        if self.args.static:
            x = Variable(x)

        x = x.unsqueeze(1) # (N,Ci,W,D)

        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)


        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)

        x = torch.cat(x, 1)

        '''
        x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
        x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
        x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
        x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
        '''
        x = self.dropout(x) # (N,len(Ks)*Co)
        logit = self.fc1(x) # (N,C)
        return logit
Project: pytorch-a2c-ppo-acktr    Author: ikostrikov
def reset_parameters(self):
        self.apply(weights_init)

        relu_gain = nn.init.calculate_gain('relu')
        self.conv1.weight.data.mul_(relu_gain)
        self.conv2.weight.data.mul_(relu_gain)
        self.conv3.weight.data.mul_(relu_gain)
        self.linear1.weight.data.mul_(relu_gain)

        if hasattr(self, 'gru'):
            orthogonal(self.gru.weight_ih.data)
            orthogonal(self.gru.weight_hh.data)
            self.gru.bias_ih.data.fill_(0)
            self.gru.bias_hh.data.fill_(0)

        if self.dist.__class__.__name__ == "DiagGaussian":
            self.dist.fc_mean.weight.data.mul_(0.01)
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        x = self.embed(x)  # (N,W,D)
        x = self.dropout_embed(x)
        x = x.unsqueeze(1)  # (N,Ci,W,D)
        if self.args.batch_normalizations is True:
            x = [self.convs1_bn(F.tanh(conv(x))).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        else:
            # x = [self.dropout(F.relu(conv(x)).squeeze(3)) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            # x = [self.dropout(F.tanh(conv(x)).squeeze(3)) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            # x = [F.tanh(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        x = torch.cat(x, 1)
        x = self.dropout(x)  # (N,len(Ks)*Co)
        if self.args.batch_normalizations is True:
            x = self.fc1_bn(self.fc1(x))
            logit = self.fc2_bn(self.fc2(F.tanh(x)))
        else:
            logit = self.fc(x)
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        # print("aa", x)
        x = self.embed(x) # (N,W,D)
        # print("embed", x)

        if self.args.static:
            x = Variable(x.data)
        # print("var", x)

        x = x.unsqueeze(1) # (N,Ci,W,D)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        x = torch.cat(x, 1)
        '''
        x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
        x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
        x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
        x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
        '''
        x = self.dropout(x) # (N,len(Ks)*Co)
        logit = self.fc1(x) # (N,C)
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        embed = self.embed(x)
        # CNN
        cnn_x = embed
        cnn_x = self.dropout(cnn_x)
        cnn_x = cnn_x.unsqueeze(1)
        cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        cnn_x = torch.cat(cnn_x, 0)
        cnn_x = torch.transpose(cnn_x, 1, 2)
        # LSTM
        lstm_out, self.hidden = self.lstm(cnn_x, self.hidden)
        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)
        lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
        # linear
        cnn_lstm_out = self.hidden2label1(F.tanh(lstm_out))
        cnn_lstm_out = self.hidden2label2(F.tanh(cnn_lstm_out))
        # output
        logit = cnn_lstm_out

        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        embed = self.embed(x)
        # CNN
        embed = self.dropout(embed)
        cnn_x = embed
        cnn_x = cnn_x.unsqueeze(1)
        cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        cnn_x = torch.cat(cnn_x, 0)
        cnn_x = torch.transpose(cnn_x, 1, 2)

        # BiLSTM
        bilstm_out, self.hidden = self.bilstm(cnn_x, self.hidden)
        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)

        # linear
        cnn_bilstm_out = self.hidden2label1(F.tanh(bilstm_out))
        cnn_bilstm_out = self.hidden2label2(F.tanh(cnn_bilstm_out))

        # dropout
        logit = self.dropout(cnn_bilstm_out)

        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        one_layer = self.embed(x)  # (N,W,D) #  torch.Size([64, 43, 300])
        # one_layer = self.dropout(one_layer)
        one_layer = one_layer.unsqueeze(1)  # (N,Ci,W,D)  #  torch.Size([64, 1, 43, 300])
        # one layer
        one_layer = [torch.transpose(F.relu(conv(one_layer)).squeeze(3), 1, 2) for conv in self.convs1] # torch.Size([64, 100, 36])
        # two layer
        two_layer = [F.relu(conv(one_layer.unsqueeze(1))).squeeze(3) for (conv, one_layer) in zip(self.convs2, one_layer)]
        print("two_layer {}".format(two_layer[0].size()))
        # pooling
        output = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in two_layer]   #  torch.Size([64, 100]) torch.Size([64, 100])
        output = torch.cat(output, 1)  # torch.Size([64, 300])
        # dropout
        output = self.dropout(output)
        # linear
        output = self.fc1(F.relu(output))
        logit = self.fc2(F.relu(output))
        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        embed = self.embed(x)
        # CNN
        cnn_x = embed
        cnn_x = self.dropout(cnn_x)
        cnn_x = cnn_x.unsqueeze(1)
        cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        cnn_x = torch.cat(cnn_x, 0)
        cnn_x = torch.transpose(cnn_x, 1, 2)
        # GRU
        lstm_out, self.hidden = self.gru(cnn_x, self.hidden)
        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)
        lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
        # linear
        cnn_lstm_out = self.hidden2label1(F.tanh(lstm_out))
        cnn_lstm_out = self.hidden2label2(F.tanh(cnn_lstm_out))
        # output
        logit = cnn_lstm_out

        return logit
Project: fast-neural-style    Author: abhiskk
def forward(self, X):
        h = F.relu(self.conv1_1(X))
        h = F.relu(self.conv1_2(h))
        relu1_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        relu2_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        relu3_3 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        relu4_3 = h

        return [relu1_2, relu2_2, relu3_3, relu4_3]
Project: FreezeOut    Author: ajbrock
def forward(self, x):

        # If we're not training this layer, set to eval mode so that we use
        # running batchnorm stats (both for time-saving and to avoid updating
        # said stats).
        if not self.active:
            self.eval()

        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        out = torch.cat((x, out), 1)

        # If we're not active, return a detached output to prevent backprop.
        if self.active: 
            return out
        else:
            return out.detach()
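The detach() on the inactive path is what actually freezes the layer: the returned tensor shares storage with out but has no edge back into the autograd graph, so no gradient reaches this block's parameters. A minimal sketch of that behavior, in current PyTorch idiom:

import torch

w = torch.randn(3, requires_grad=True)
frozen = (w * 2).detach()   # same values, but severed from the graph
loss = (frozen * w).sum()
loss.backward()
print(w.grad)               # equals frozen: nothing flowed through the detached branch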
Project: ShuffleNet    Author: jaxony
def _make_grouped_conv1x1(self, in_channels, out_channels, groups,
        batch_norm=True, relu=False):

        modules = OrderedDict()

        conv = conv1x1(in_channels, out_channels, groups=groups)
        modules['conv1x1'] = conv

        if batch_norm:
            modules['batch_norm'] = nn.BatchNorm2d(out_channels)
        if relu:
            modules['relu'] = nn.ReLU()
        if len(modules) > 1:
            return nn.Sequential(modules)
        else:
            return conv
Project: ShuffleNet    Author: jaxony
def forward(self, x):
        # save for combining later with output
        residual = x

        if self.combine == 'concat':
            residual = F.avg_pool2d(residual, kernel_size=3, 
                stride=2, padding=1)

        out = self.g_conv_1x1_compress(x)
        out = channel_shuffle(out, self.groups)
        out = self.depthwise_conv3x3(out)
        out = self.bn_after_depthwise(out)
        out = self.g_conv_1x1_expand(out)

        out = self._combine_func(residual, out)
        return F.relu(out)
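channel_shuffle is not shown in this excerpt; a minimal sketch, assuming the standard ShuffleNet reshape-transpose-flatten trick that interleaves channels across groups so the next grouped convolution sees inputs from every group:

def channel_shuffle(x, groups):
    # Hypothetical implementation: split C into (groups, C // groups),
    # swap those two axes, then flatten back to (N, C, H, W).
    n, c, h, w = x.size()
    x = x.view(n, groups, c // groups, h, w)
    x = x.transpose(1, 2).contiguous()
    return x.view(n, c, h, w)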
Project: samplernn-pytorch    Author: deepsound-project
def forward(self, prev_samples, upper_tier_conditioning):
        (batch_size, _, _) = upper_tier_conditioning.size()

        prev_samples = self.embedding(
            prev_samples.contiguous().view(-1)
        ).view(
            batch_size, -1, self.q_levels
        )

        prev_samples = prev_samples.permute(0, 2, 1)
        upper_tier_conditioning = upper_tier_conditioning.permute(0, 2, 1)

        x = F.relu(self.input(prev_samples) + upper_tier_conditioning)
        x = F.relu(self.hidden(x))
        x = self.output(x).permute(0, 2, 1).contiguous()

        return F.log_softmax(x.view(-1, self.q_levels)) \
                .view(batch_size, -1, self.q_levels)
Project: pointnet2.pytorch    Author: eriche2016
def forward(self, x):
        # x (bz x 3 x 2048) -> conv(3, 64) -> conv(64, 128) -> conv(128, 1024) -> max_pool(2048) -> 1024 -> fc(1024, 512)
        #   -> fc(512, 256) -> fc(256, 9)

        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = self.mp1(x)
        x = x.view(-1, 1024)

        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x) # bz x 9 
        # identity transform
        # bz x 9 
        iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1,9).repeat(batchsize,1)
        if x.is_cuda:
            iden = iden.cuda()
        x = x + iden
        x = x.view(-1, 3, 3) # bz x 3 x 3 
        return x
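Paired with the STN3d.__init__ shown earlier on this page, the module can be exercised end to end; a minimal sketch (batch size invented for illustration):

import torch

stn = STN3d(num_points=2500)
points = torch.randn(4, 3, 2500)  # 4 clouds, xyz channels first
trans = stn(points)
print(trans.size())               # torch.Size([4, 3, 3]): one 3x3 transform per cloud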

# 128 x 128 transform
Project: pointnet2.pytorch    Author: eriche2016
def forward(self, x):
        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x))) # bz x 256 x 2048 
        x = F.relu(self.bn2(self.conv2(x))) # bz x 1024 x 2048
        x = self.mp1(x) # bz x 1024 x 1
        x = x.view(-1, 1024)

        x = F.relu(self.bn3(self.fc1(x))) # bz x 512 
        x = F.relu(self.bn4(self.fc2(x))) # bz x 256
        x = self.fc3(x) # bz x (128*128) 
        # identity transform
        # bz x (128*128)
        iden = Variable(torch.from_numpy(np.eye(128).astype(np.float32))).view(1,128*128).repeat(batchsize,1)
        if x.is_cuda:
            iden = iden.cuda()
        x = x + iden
        x = x.view(-1, 128, 128) # bz x 128 x 128
        return x
Project: pointnet2.pytorch    Author: eriche2016
def forward(self, x):
        batchsize = x.size()[0]
        trans = self.stn(x) # regressing the transforming parameters using STN 
        x = x.transpose(2,1) # bz x 2048 x 3 
        x = torch.bmm(x, trans) # (bz x 2048 x 3) x (bz x 3 x 3) 
        x = x.transpose(2,1) # bz x 3 x 2048
        x = F.relu(self.bn1(self.conv1(x)))
        pointfeat = x # bz x 64 x 2048
        x = F.relu(self.bn2(self.conv2(x))) # bz x 128 x 2048
        x = self.bn3(self.conv3(x)) # bz x 1024 x 2048
        x = self.mp1(x)
        x = x.view(-1, 1024) # bz x 1024
        if self.global_feat: # using global feats for classification
            return x, trans
        else:
            x = x.view(-1, 1024, 1).repeat(1, 1, self.num_points)
            return torch.cat([x, pointfeat], 1), trans
Project: cnn-graph-classification    Author: giannisnik
def forward(self, x_in):
        out = F.relu(F.max_pool3d(self.conv(x_in), (1, self.max_document_length,1)))
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.dropout(out, training=self.training)
        out = self.fc2(out)
        return F.log_softmax(out)
Project: drl.pth    Author: seba-1511
def forward(self, x, *args, **kwargs):
        x = F.relu(self.affine1(x))
        return x
Project: YellowFin_Pytorch    Author: JianGoForIt
def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
Project: YellowFin_Pytorch    Author: JianGoForIt
def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # out = self.layer4(out)
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
Project: pytorch_tutorial    Author: soravux
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
Project: pytorch_tutorial    Author: soravux
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)

        # Register a backward hook
        x.register_hook(myGradientHook)

        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
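myGradientHook is not defined in this excerpt. register_hook accepts any callable that receives the gradient flowing into the tensor and may return a replacement gradient (or None to leave it untouched); a minimal sketch of such a hook:

def myGradientHook(grad):
    # Hypothetical hook: log the gradient norm and clip extreme values.
    print('grad norm: %.4f' % grad.norm().item())
    return grad.clamp(-1.0, 1.0)  # the returned tensor replaces the gradient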
Project: pytorch_tutorial    Author: soravux
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
Project: pytorch_tutorial    Author: soravux
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
Project: pytorch_tutorial    Author: soravux
def forward(self, x):
        x = F.elu(F.max_pool2d(self.conv1(x), 2))
        x = F.elu(F.max_pool2d(self.bn2(self.conv2(x)), 2))
        x = F.elu(F.max_pool2d(self.bn3(self.conv3(x)), 2))
        x = F.elu(F.max_pool2d(self.bn4(self.conv4(x)), 2))

        x = x.view(-1, 750)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
Project: PaintsPytorch    Author: orashi
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.relu(bottleneck, inplace=True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.relu(bottleneck, inplace=True)
        bottleneck = self.conv_expand.forward(bottleneck)
        return x + bottleneck
Project: PaintsPytorch    Author: orashi
def forward(self, input):
        return self.model(input)

        # TODO: fix relu bug
Project: PaintsPytorch    Author: orashi
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.relu(bottleneck, inplace=True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.relu(bottleneck, inplace=True)
        bottleneck = self.conv_expand.forward(bottleneck)
        return x + bottleneck
Project: PaintsPytorch    Author: orashi
def forward(self, input):
        return self.model(input)

        # TODO: fix relu bug
Project: PaintsPytorch    Author: orashi
def forward(self, input, inputV):
        x1 = F.leaky_relu(self.down1(input), 0.2, True)
        x2 = F.leaky_relu(self.down2(x1), 0.2, True)
        x3 = F.leaky_relu(self.down3(x2), 0.2, True)
        x4 = F.leaky_relu(self.down4(x3), 0.2, True)
        x5 = F.leaky_relu(self.down5(x4), 0.2, True)
        x6 = F.leaky_relu(self.down6(x5), 0.2, True)
        x7 = F.leaky_relu(self.down7(x6), 0.2, True)
        x8 = F.relu(self.down8(x7), True)

        v1 = F.leaky_relu(self.downV1(inputV), 0.2, True)
        v2 = F.leaky_relu(self.downV2(v1), 0.2, True)
        v3 = F.leaky_relu(self.downV3(v2), 0.2, True)
        v4 = F.leaky_relu(self.downV4(v3), 0.2, True)
        v5 = F.leaky_relu(self.downV5(v4), 0.2, True)
        v6 = F.leaky_relu(self.downV6(v5), 0.2, True)
        v7 = F.leaky_relu(self.downV7(v6), 0.2, True)
        v8 = F.relu(self.downV8(v7), True)

        x = F.relu(self.up8(torch.cat([x8, v8], 1)), True)
        x = F.relu(self.up7(torch.cat([x, x7, v7], 1)), True)
        x = F.relu(self.up6(torch.cat([x, x6, v6], 1)), True)
        x = F.relu(self.up5(torch.cat([x, x5, v5], 1)), True)
        x = F.relu(self.up4(torch.cat([x, x4, v4], 1)), True)
        x = F.relu(self.up3(torch.cat([x, x3, v3], 1)), True)
        x = F.relu(self.up2(torch.cat([x, x2], 1)), True)
        x = F.tanh(self.up1(torch.cat([x, x1], 1)))
        return x


############################
# D network
###########################
Project: ResNeXt-DenseNet    Author: D-X-Y
def forward(self, x):
    out = self.conv1(F.relu(self.bn1(x)))
    out = self.conv2(F.relu(self.bn2(out)))
    out = torch.cat((x, out), 1)
    return out
Project: ResNeXt-DenseNet    Author: D-X-Y
def forward(self, x):
    out = self.conv1(F.relu(self.bn1(x)))
    out = torch.cat((x, out), 1)
    return out
Project: ResNeXt-DenseNet    Author: D-X-Y
def forward(self, x):
    out = self.conv1(F.relu(self.bn1(x)))
    out = F.avg_pool2d(out, 2)
    return out
Project: ResNeXt-DenseNet    Author: D-X-Y
def forward(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
    out = F.log_softmax(self.fc(out))
    return out
Project: ResNeXt-DenseNet    Author: D-X-Y
def forward(self, x):
    residual = x

    basicblock = self.conv_a(x)
    basicblock = self.bn_a(basicblock)
    basicblock = F.relu(basicblock, inplace=True)

    basicblock = self.conv_b(basicblock)
    basicblock = self.bn_b(basicblock)

    if self.downsample is not None:
      residual = self.downsample(x)

    return F.relu(residual + basicblock, inplace=True)
Project: ResNeXt-DenseNet    Author: D-X-Y
def forward(self, x):
    x = self.conv_1_3x3(x)
    x = F.relu(self.bn_1(x), inplace=True)
    x = self.stage_1(x)
    x = self.stage_2(x)
    x = self.stage_3(x)
    x = self.avgpool(x)
    x = x.view(x.size(0), -1)
    return self.classifier(x)
Project: RetinaNet    Author: c0nn3r
def forward(self, x):

        # don't need resnet_feature_2 as it is too large
        _, resnet_feature_3, resnet_feature_4, resnet_feature_5 = self.resnet(x)

        pyramid_feature_6 = self.pyramid_transformation_6(resnet_feature_5)
        pyramid_feature_7 = self.pyramid_transformation_7(F.relu(pyramid_feature_6))

        pyramid_feature_5 = self.pyramid_transformation_5(resnet_feature_5)
        pyramid_feature_4 = self.pyramid_transformation_4(resnet_feature_4)
        upsampled_feature_5 = self._upsample(pyramid_feature_5, pyramid_feature_4)

        pyramid_feature_4 = self.upsample_transform_1(
            torch.add(upsampled_feature_5, pyramid_feature_4)
        )

        pyramid_feature_3 = self.pyramid_transformation_3(resnet_feature_3)
        upsampled_feature_4 = self._upsample(pyramid_feature_4, pyramid_feature_3)

        pyramid_feature_3 = self.upsample_transform_2(
            torch.add(upsampled_feature_4, pyramid_feature_3)
        )

        return (pyramid_feature_3,
                pyramid_feature_4,
                pyramid_feature_5,
                pyramid_feature_6,
                pyramid_feature_7)
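The _upsample helper is not part of this excerpt; a minimal sketch, assuming it simply resizes one pyramid level to the spatial size of the level it will be added to (bilinear interpolation is one common choice):

def _upsample(self, feature, reference):
    # Hypothetical implementation: match `reference`'s H x W so torch.add works.
    _, _, h, w = reference.size()
    return F.interpolate(feature, size=(h, w), mode='bilinear', align_corners=False)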