Python torch.nn.functional module: leaky_relu() example source code

The following 46 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.functional.leaky_relu().
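Before the project examples, here is a minimal, self-contained sketch of calling leaky_relu; the tensor shape and the 0.2 slope below are illustrative choices, not taken from any project. The function computes max(0, x) + negative_slope * min(0, x) elementwise, with signature F.leaky_relu(input, negative_slope=0.01, inplace=False). Note also that several snippets below call F.tanh and F.sigmoid, which newer PyTorch releases deprecate in favor of torch.tanh and torch.sigmoid; the excerpts are reproduced as written.

    import torch
    import torch.nn.functional as F

    x = torch.randn(4, 8)                                # hypothetical input batch
    y = F.leaky_relu(x, negative_slope=0.2)              # out-of-place; x is unchanged
    F.leaky_relu(x, negative_slope=0.2, inplace=True)    # overwrites x to save memory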

Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, input, VGG):
        x1 = F.leaky_relu(self.down1(input), 0.2, True)
        x2 = F.leaky_relu(self.down2(x1), 0.2, True)
        x3 = F.leaky_relu(self.down3(x2), 0.2, True)
        x4 = F.leaky_relu(self.down4(x3), 0.2, True)
        x5 = F.leaky_relu(self.down5(x4), 0.2, True)
        x6 = F.leaky_relu(self.down6(x5), 0.2, True)
        x7 = F.leaky_relu(self.down7(x6), 0.2, True)
        x8 = F.relu(self.down8(x7), True)

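        # Decoder: the bottleneck x8 is concatenated with a linearly projected
        # VGG feature vector, and each up stage concatenates the matching
        # encoder activation (U-Net-style skip connections).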
        VGG = F.relu(self.linear(VGG), True)
        x = F.relu(self.up8(torch.cat([x8, VGG.view(-1, 2048, 1, 1)], 1)), True)
        x = F.relu(self.up7(torch.cat([x, x7], 1)), True)
        x = F.relu(self.up6(torch.cat([x, x6], 1)), True)
        x = F.relu(self.up5(torch.cat([x, x5], 1)), True)
        x = F.relu(self.up4(torch.cat([x, x4], 1)), True)
        x = F.relu(self.up3(torch.cat([x, x3], 1)), True)
        x = F.relu(self.up2(torch.cat([x, x2], 1)), True)
        x = F.tanh(self.up1(torch.cat([x, x1], 1)))
        return x


############################
# D network
############################
Project: simple-pix2pix-pytorch    Author: Eiji-Kb    | Project source | File source
def forward(self, x):           
        en0 = self.c0(x)
        en1 = self.bnc1(self.c1(F.leaky_relu(en0, negative_slope=0.2)))
        en2 = self.bnc2(self.c2(F.leaky_relu(en1, negative_slope=0.2)))
        en3 = self.bnc3(self.c3(F.leaky_relu(en2, negative_slope=0.2)))
        en4 = self.bnc4(self.c4(F.leaky_relu(en3, negative_slope=0.2)))
        en5 = self.bnc5(self.c5(F.leaky_relu(en4, negative_slope=0.2)))
        en6 = self.bnc6(self.c6(F.leaky_relu(en5, negative_slope=0.2)))
        en7 = self.c7(F.leaky_relu(en6, negative_slope=0.2))

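        # Decoder with U-Net skip connections: each stage concatenates the
        # matching encoder feature map along the channel dimension.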
        de7 = self.bnd7(self.d7(F.relu(en7)))
        de6 = F.dropout(self.bnd6(self.d6(F.relu(torch.cat((en6, de7),1)))))
        de5 = F.dropout(self.bnd5(self.d5(F.relu(torch.cat((en5, de6),1)))))

        de4 = F.dropout(self.bnd4(self.d4(F.relu(torch.cat((en4, de5),1)))))
        de3 = self.bnd3(self.d3(F.relu(torch.cat((en3, de4),1))))
        de2 = self.bnd2(self.d2(F.relu(torch.cat((en2, de3),1))))
        de1 = self.bnd1(self.d1(F.relu(torch.cat((en1, de2),1))))

        de0 = F.tanh(self.d0(F.relu(torch.cat((en0, de1),1))))       

        return de0
Project: self-driving-truck    Author: aleju    | Project source | File source
def forward(self, embeddings_supervised, speeds, is_reverse, steering_wheel, steering_wheel_raw, multiactions_vecs):
        def act(x):
            return F.leaky_relu(x, negative_slope=0.2, inplace=True)

        x_emb_sup = embeddings_supervised # 512x3x5
        x_emb_sup = act(self.emb_sup_c1_sd(self.emb_sup_c1_bn(self.emb_sup_c1(x_emb_sup)))) # 1024x1x3
        x_emb_sup = x_emb_sup.view(-1, 1024*1*3)
        x_emb_sup = add_white_noise(x_emb_sup, 0.005, self.training)

        x_emb_add = torch.cat([speeds, is_reverse, steering_wheel, steering_wheel_raw, multiactions_vecs], 1)
        x_emb_add = act(self.emb_add_fc1_bn(self.emb_add_fc1(x_emb_add)))
        x_emb_add = add_white_noise(x_emb_add, 0.005, self.training)

        x_emb = torch.cat([x_emb_sup, x_emb_add], 1)
        x_emb = F.dropout(x_emb, p=0.05, training=self.training)

        embs = F.relu(self.emb_fc1_bn(self.emb_fc1(x_emb)))
        # this is currently always on, to decrease the likelihood of systematic
        # errors that are repeated over many frames
        embs = add_white_noise(embs, 0.005, True)

        return embs
Project: self-driving-truck    Author: aleju    | Project source | File source
def forward(self, embeddings, return_v_adv=False):
        def act(x):
            return F.leaky_relu(x, negative_slope=0.2, inplace=True)

        B, _ = embeddings.size()

        x = act(self.fc1_bn(self.fc1(embeddings)))
        x = add_white_noise(x, 0.005, self.training)
        x = F.dropout(x, p=0.1, training=self.training)

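        # Dueling-network head: a scalar state value is broadcast over the
        # 9 actions and combined with mean-centered advantages.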
        x_v = self.fc_v(x)
        x_v_expanded = x_v.expand(B, 9)

        x_adv = self.fc_advantage(x)
        x_adv_mean = x_adv.mean(dim=1)
        x_adv_mean = x_adv_mean.expand(B, 9)
        x_adv = x_adv - x_adv_mean

        x = x_v_expanded + x_adv

        if return_v_adv:
            return x, (x_v, x_adv)
        else:
            return x
Project: deeplearning    Author: zxjzxj9    | Project source | File source
def forward(self, prior):
        prior = prior.cuda()
        fc_layer = leaky_relu(self.linear1(prior).view(-1, 512, 4, 4), negative_slope = 0.2)
        deconv_layer1 = self.bn1(leaky_relu(self.deconv1(fc_layer), negative_slope = 0.2))
        deconv_layer2 = self.bn2(leaky_relu(self.deconv2(deconv_layer1), negative_slope = 0.2))
        deconv_layer3 = tanh(self.deconv3(deconv_layer2))
        return deconv_layer3

#    Inference without batch normalization does not improve image quality
#    def infer(self, prior):
#        prior = prior.cuda()
#        fc_layer = leaky_relu(self.linear1(prior).view(-1, 512, 4, 4), negative_slope = 0.2)
#        deconv_layer1 = leaky_relu(self.deconv1(fc_layer), negative_slope = 0.2)
#        deconv_layer2 = leaky_relu(self.deconv2(deconv_layer1), negative_slope = 0.2)
#        deconv_layer3 = tanh(self.deconv3(deconv_layer2))
#        return deconv_layer3
Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        residual = self.shortcut.forward(x)
        return residual + bottleneck
Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        residual = self.shortcut.forward(x)
        return residual + bottleneck
Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, input, VGG):
        x = self.model(input)
        VGG = F.leaky_relu(self.linear(VGG), 0.2, True)
        return self.final(x + VGG.view(-1, self.ndf * 8, 1, 1))


############################
# VGG feature
############################
Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
Project: PaintsPytorch    Author: orashi    | Project source | File source
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
Project: DistanceGAN    Author: sagiebenaim    | Project source | File source
def forward(self, x):
        out_1 = F.leaky_relu(self.conv1(x), 0.05)      # (?, 64, 16, 16)
        out_2 = F.leaky_relu(self.conv2(out_1), 0.05)    # (?, 128, 8, 8)

        out_3 = F.leaky_relu(self.conv3(out_2), 0.05)    # ( " )
        out_4 = F.leaky_relu(self.conv4(out_3), 0.05)    # ( " )

        out_5 = F.leaky_relu(self.deconv1(out_4), 0.05)  # (?, 64, 16, 16)
        out = F.tanh(self.deconv2(out_5))              # (?, 3, 32, 32)

        return out
Project: DistanceGAN    Author: sagiebenaim    | Project source | File source
def forward(self, x):
        out_1 = F.leaky_relu(self.conv1(x), 0.05)      # (?, 64, 16, 16)
        out_2 = F.leaky_relu(self.conv2(out_1), 0.05)    # (?, 128, 8, 8)

        out_3 = F.leaky_relu(self.conv3(out_2), 0.05)    # ( " )
        out_4 = F.leaky_relu(self.conv4(out_3), 0.05)    # ( " )

        out_5 = F.leaky_relu(self.deconv1(out_4), 0.05)  # (?, 64, 16, 16)
        out = F.tanh(self.deconv2(out_5))              # (?, 1, 32, 32)

        return out
Project: DistanceGAN    Author: sagiebenaim    | Project source | File source
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out
Project: DistanceGAN    Author: sagiebenaim    | Project source | File source
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out
Project: mnist-svhn-transfer    Author: yunjey    | Project source | File source
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)      # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)    # (?, 128, 8, 8)

        out = F.leaky_relu(self.conv3(out), 0.05)    # ( " )
        out = F.leaky_relu(self.conv4(out), 0.05)    # ( " )

        out = F.leaky_relu(self.deconv1(out), 0.05)  # (?, 64, 16, 16)
        out = F.tanh(self.deconv2(out))              # (?, 3, 32, 32)
        return out
Project: mnist-svhn-transfer    Author: yunjey    | Project source | File source
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)      # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)    # (?, 128, 8, 8)

        out = F.leaky_relu(self.conv3(out), 0.05)    # ( " )
        out = F.leaky_relu(self.conv4(out), 0.05)    # ( " )

        out = F.leaky_relu(self.deconv1(out), 0.05)  # (?, 64, 16, 16)
        out = F.tanh(self.deconv2(out))              # (?, 1, 32, 32)
        return out
Project: mnist-svhn-transfer    Author: yunjey    | Project source | File source
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out
Project: mnist-svhn-transfer    Author: yunjey    | Project source | File source
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out
Project: simple-pix2pix-pytorch    Author: Eiji-Kb    | Project source | File source
def forward(self, x1, x2):
        h = self.c0(torch.cat((x1, x2),1))
        h = self.bnc1(self.c1(F.leaky_relu(h, negative_slope=0.2)))
        h = self.bnc2(self.c2(F.leaky_relu(h, negative_slope=0.2)))
        h = self.bnc3(self.c3(F.leaky_relu(h, negative_slope=0.2)))
        h = self.c4(F.leaky_relu(h, negative_slope=0.2))
        h = F.sigmoid(h)

        return h
Project: pytorch-tutorial    Author: yunjey    | Project source | File source
def forward(self, z):
        z = z.view(z.size(0), z.size(1), 1, 1)      # If image_size is 64, output shape is as below.
        out = self.fc(z)                            # (?, 512, 4, 4)
        out = F.leaky_relu(self.deconv1(out), 0.05)  # (?, 256, 8, 8)
        out = F.leaky_relu(self.deconv2(out), 0.05)  # (?, 128, 16, 16)
        out = F.leaky_relu(self.deconv3(out), 0.05)  # (?, 64, 32, 32)
        out = F.tanh(self.deconv4(out))             # (?, 3, 64, 64)
        return out
Project: pytorch-tutorial    Author: yunjey    | Project source | File source
def forward(self, x):                         # If image_size is 64, output shape is as below.
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 32, 32)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 16, 16)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 8, 8)
        out = F.leaky_relu(self.conv4(out), 0.05)  # (?, 512, 4, 4)
        out = self.fc(out).squeeze()
        return out
Project: Recognizing-Textual-Entailment    Author: codedecde    | Project source | File source
def forward(self, premise, hypothesis, training=False):
        '''
        inputs:
            premise : batch x T
            hypothesis : batch x T
        outputs :
            pred : batch x num_classes
        '''
        self.train(training)
        batch_size = premise.size(0)

        mask_p = torch.ne(premise, 0).type(dtype)
        mask_h = torch.ne(hypothesis, 0).type(dtype)

        encoded_p = self.embedding(premise)  # batch x T x n_embed
        encoded_p = F.dropout(encoded_p, p=self.options['DROPOUT'], training=training)

        encoded_h = self.embedding(hypothesis)  # batch x T x n_embed
        encoded_h = F.dropout(encoded_h, p=self.options['DROPOUT'], training=training)

        encoded_p = encoded_p.transpose(1, 0)  # T x batch x n_embed
        encoded_h = encoded_h.transpose(1, 0)  # T x batch x n_embed

        mask_p = mask_p.transpose(1, 0)  # T x batch
        mask_h = mask_h.transpose(1, 0)  # T x batch

        h_p_0, h_n_0 = self.init_hidden(batch_size)  # 1 x batch x n_dim
        o_p, h_n = self._gru_forward(self.p_gru, encoded_p, mask_p, h_p_0)  # o_p : T x batch x n_dim
                                                                            # h_n : 1 x batch x n_dim

        o_h, h_n = self._gru_forward(self.h_gru, encoded_h, mask_h, h_n_0)  # o_h : T x batch x n_dim
                                                            # h_n : 1 x batch x n_dim

        r_0 = self.attn_gru_init_hidden(batch_size)
        h_star, alpha_vec = self._attn_gru_forward(o_h, mask_h, r_0, o_p, mask_p)

        h_star = self.out(h_star)  # batch x num_classes
        if self.options['LAST_NON_LINEAR']:
            h_star = F.leaky_relu(h_star)  # Non linear projection
        pred = F.log_softmax(h_star)
        return pred
Project: dong_iccv_2017    Author: woozzu    | Project source | File source
def forward(self, img, txt_feat):
        img_feat = self.encoder(img)
        img_feat = F.leaky_relu(img_feat + self.residual_branch(img_feat), 0.2)
        txt_feat = self.compression(txt_feat)

        txt_feat = txt_feat.unsqueeze(-1).unsqueeze(-1)
        txt_feat = txt_feat.repeat(1, 1, img_feat.size(2), img_feat.size(3))
        fusion = torch.cat((img_feat, txt_feat), dim=1)
        output = self.classifier(fusion)
        return output.squeeze()
Project: self-driving-truck    Author: aleju    | Project source | File source
def forward(self, embeddings, softmax):
        def act(x):
            return F.leaky_relu(x, negative_slope=0.2, inplace=True)

        x = act(self.fc1_bn(self.fc1(embeddings)))
        x = add_white_noise(x, 0.005, self.training)
        x = F.dropout(x, p=0.1, training=self.training)
        x = self.fc2(x)
        if softmax:
            return F.softmax(x)
        else:
            return x
Project: deeplearning    Author: zxjzxj9    | Project source | File source
def forward(self, image):
        image = image.cuda()
        conv_layer1 = self.bn1(leaky_relu(self.conv1(image), negative_slope = 0.2))
        conv_layer2 = self.bn2(leaky_relu(self.conv2(conv_layer1), negative_slope = 0.2))
        conv_layer3 = leaky_relu(self.conv3(conv_layer2), negative_slope = 0.2)
        fc_layer1 = self.linear1(conv_layer3.view(-1, 4*4*512))
        return fc_layer1
Project: deeplearning    Author: zxjzxj9    | Project source | File source
def forward(self, prior):
        prior = prior.cuda()
        fc_layer = leaky_relu(self.linear1(prior).view(-1, 512, 4, 4), negative_slope = 0.2)
        deconv_layer1 = self.bn1(leaky_relu(self.deconv1(fc_layer), negative_slope = 0.2))
        deconv_layer2 = self.bn2(leaky_relu(self.deconv2(deconv_layer1), negative_slope = 0.2))
        deconv_layer3 = tanh(self.deconv3(deconv_layer2))
        return deconv_layer3
Project: deeplearning    Author: zxjzxj9    | Project source | File source
def forward(self, image):
        image = image.cuda()
        conv_layer1 = self.bn1(leaky_relu(self.conv1(image), negative_slope = 0.2))
        conv_layer2 = self.bn2(leaky_relu(self.conv2(conv_layer1), negative_slope = 0.2))
        conv_layer3 = leaky_relu(self.conv3(conv_layer2), negative_slope = 0.2)
        fc_layer1 = self.linear1(conv_layer3.view(-1, 4*4*512))
        return sigmoid(fc_layer1)
Project: pytorch-model-zoo    Author: sdhnshu    | Project source | File source
def forward(self, z):
        z = z.view(z.size(0), z.size(1), 1, 1)
        out = self.fc(z)                            # (?, 512, 4, 4)
        out = F.leaky_relu(self.deconv1(out), 0.05)  # (?, 256, 8, 8)
        out = F.leaky_relu(self.deconv2(out), 0.05)  # (?, 128, 16, 16)
        out = F.leaky_relu(self.deconv3(out), 0.05)  # (?, 64, 32, 32)
        out = F.tanh(self.deconv4(out))             # (?, 3, 64, 64)
        return out
Project: pytorch-model-zoo    Author: sdhnshu    | Project source | File source
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 32, 32)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 16, 16)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 8, 8)
        out = F.leaky_relu(self.conv4(out), 0.05)  # (?, 512, 4, 4)
        out = F.sigmoid(self.conv5(out)).squeeze()
        return out
Project: dlcv_for_beginners    Author: frombeijingwithlove    | Project source | File source
def forward(self, x):
        x = F.leaky_relu(self.map1(x), 0.1)
        return F.sigmoid(self.map2(x))
Project: dlcv_for_beginners    Author: frombeijingwithlove    | Project source | File source
def forward(self, x):
        x = F.leaky_relu(self.map1(x), 0.1)
        x = F.leaky_relu(self.map2(x), 0.1)
        return F.sigmoid(self.map3(x))
Project: dlcv_for_beginners    Author: frombeijingwithlove    | Project source | File source
def __init__(self, input_nch, output_nch, kernel_size=3, activation=F.leaky_relu, use_bn=True, same_conv=True):
        super(UNetConvBlock, self).__init__()
        padding = kernel_size // 2 if same_conv else 0  # only support odd kernel
        self.conv0 = nn.Conv2d(input_nch, output_nch, kernel_size, padding=padding)
        self.conv1 = nn.Conv2d(output_nch, output_nch, kernel_size, padding=padding)
        self.act = activation
        self.batch_norm = nn.BatchNorm2d(output_nch) if use_bn else None
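The excerpt above defines only the block's layers; its forward pass is not shown. A plausible sketch, assuming each convolution is followed by the (optional) batch norm and then the activation, might look like this; the ordering is an assumption, not taken from the project:

    # Hypothetical forward for UNetConvBlock, inferred from the constructor above.
    def forward(self, x):
        x = self.conv0(x)
        if self.batch_norm is not None:
            x = self.batch_norm(x)
        x = self.act(x)
        x = self.conv1(x)
        if self.batch_norm is not None:
            x = self.batch_norm(x)
        return self.act(x)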
Project: yolo-pytorch    Author: makora9143    | Project source | File source
def forward(self, x):
        feature = self.features(x)
        output = self.yolo(feature)
        output = self.flatten(output)
        output = F.leaky_relu(self.fc1(output), 0.1)
        output = self.fc2(output)

        return output
Project: pytorch-MNIST-CelebA-GAN-DCGAN    Author: znxlwm    | Project source | File source
def forward(self, input):
        x = F.leaky_relu(self.fc1(input), 0.2)
        x = F.leaky_relu(self.fc2(x), 0.2)
        x = F.leaky_relu(self.fc3(x), 0.2)
        x = F.tanh(self.fc4(x))

        return x
Project: pytorch-MNIST-CelebA-GAN-DCGAN    Author: znxlwm    | Project source | File source
def forward(self, input):
        x = F.leaky_relu(self.fc1(input), 0.2)
        x = F.dropout(x, 0.3)
        x = F.leaky_relu(self.fc2(x), 0.2)
        x = F.dropout(x, 0.3)
        x = F.leaky_relu(self.fc3(x), 0.2)
        x = F.dropout(x, 0.3)
        x = F.sigmoid(self.fc4(x))

        return x
Project: pytorch-MNIST-CelebA-GAN-DCGAN    Author: znxlwm    | Project source | File source
def forward(self, input):
        x = F.leaky_relu(self.conv1(input), 0.2)
        x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)
        x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)
        x = F.leaky_relu(self.conv4_bn(self.conv4(x)), 0.2)
        x = F.sigmoid(self.conv5(x))

        return x
Project: pytorch-MNIST-CelebA-GAN-DCGAN    Author: znxlwm    | Project source | File source
def forward(self, input):
        x = F.leaky_relu(self.conv1(input), 0.2)
        x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)
        x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)
        x = F.leaky_relu(self.conv4_bn(self.conv4(x)), 0.2)
        x = F.sigmoid(self.conv5(x))

        return x
Project: pytorch-caffe-darknet-convert    Author: marvis    | Project source | File source
def forward(self, x):
        if self.has_mean:
            batch_size = x.data.size(0)
            x = x - torch.autograd.Variable(self.mean_img.repeat(batch_size, 1, 1, 1))

        ind = -2
        self.loss = None
        outputs = dict()
        for block in self.blocks:
            ind = ind + 1
            #if ind > 14:
            #    return x

            if block['type'] == 'net':
                continue
            elif block['type'] == 'convolutional' or block['type'] == 'maxpool' or block['type'] == 'reorg' or block['type'] == 'avgpool' or block['type'] == 'softmax' or block['type'] == 'connected' or block['type'] == 'dropout':
                x = self.models[ind](x)
                outputs[ind] = x
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
                if len(layers) == 1:
                    x = outputs[layers[0]]
                    outputs[ind] = x
                elif len(layers) == 2:
                    x1 = outputs[layers[0]]
                    x2 = outputs[layers[1]]
                    x = torch.cat((x1,x2),1)
                    outputs[ind] = x
            elif block['type'] == 'shortcut':
                from_layer = int(block['from'])
                activation = block['activation']
                from_layer = from_layer if from_layer > 0 else from_layer + ind
                x1 = outputs[from_layer]
                x2 = outputs[ind-1]
                x  = x1 + x2
                if activation == 'leaky':
                    x = F.leaky_relu(x, 0.1, inplace=True)
                elif activation == 'relu':
                    x = F.relu(x, inplace=True)
                outputs[ind] = x
            elif block['type'] == 'cost':
                continue
            elif block['type'] == 'region':
                continue
            else:
                print('unknown type %s' % (block['type']))
        return x
Project: biaffineparser    Author: chantera    | Project source | File source
def __init__(self,
                 embeddings,
                 n_labels,
                 n_blstm_layers=3,
                 lstm_hidden_size=400,
                 use_gru=False,
                 n_arc_mlp_layers=1,
                 n_arc_mlp_units=500,
                 n_label_mlp_layers=1,
                 n_label_mlp_units=100,
                 mlp_activation=F.leaky_relu,
                 embeddings_dropout=0.33,
                 lstm_dropout=0.33,
                 arc_mlp_dropout=0.33,
                 label_mlp_dropout=0.33,
                 pad_id=0):
        super(DeepBiaffine, self).__init__()
        self._pad_id = pad_id
        blstm_cls = nn.GRU if use_gru else nn.LSTM
        self.embed = Embed(*embeddings, dropout=embeddings_dropout,
                           padding_idx=pad_id)
        embed_size = self.embed.size - self.embed[0].weight.data.shape[1]
        self.blstm = blstm_cls(
            num_layers=n_blstm_layers,
            input_size=embed_size,
            hidden_size=(lstm_hidden_size
                         if lstm_hidden_size is not None else embed_size),
            batch_first=True,
            dropout=lstm_dropout,
            bidirectional=True
        )
        layers = [MLP.Layer(lstm_hidden_size * 2, n_arc_mlp_units,
                            mlp_activation, arc_mlp_dropout)
                  for i in range(n_arc_mlp_layers)]
        self.mlp_arc_head = MLP(layers)
        layers = [MLP.Layer(lstm_hidden_size * 2, n_arc_mlp_units,
                            mlp_activation, arc_mlp_dropout)
                  for i in range(n_arc_mlp_layers)]
        self.mlp_arc_dep = MLP(layers)
        layers = [MLP.Layer(lstm_hidden_size * 2, n_label_mlp_units,
                            mlp_activation, label_mlp_dropout)
                  for i in range(n_label_mlp_layers)]
        self.mlp_label_head = MLP(layers)
        layers = [MLP.Layer(lstm_hidden_size * 2, n_label_mlp_units,
                            mlp_activation, label_mlp_dropout)
                  for i in range(n_label_mlp_layers)]
        self.mlp_label_dep = MLP(layers)
        self.arc_biaffine = \
            Biaffine(n_arc_mlp_units, n_arc_mlp_units, 1,
                     bias=(True, False, False))
        self.label_biaffine = \
            Biaffine(n_label_mlp_units, n_label_mlp_units, n_labels,
                     bias=(True, True, True))
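Note that this constructor passes F.leaky_relu itself as mlp_activation: functional activations are ordinary Python callables, so they can be stored and applied later. A minimal generic sketch of that pattern (nn.Linear stands in for the project's MLP.Layer):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    act = F.leaky_relu                 # activation held as a first-class function
    layer = nn.Linear(8, 8)
    y = act(layer(torch.randn(2, 8)))  # apply the stored activation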
Project: Recognizing-Textual-Entailment    Author: codedecde    | Project source | File source
def forward(self, premise, hypothesis, training=False):
        '''
        inputs:
            premise : batch x T
            hypothesis : batch x T
        outputs :
            pred : batch x num_classes
        '''
        self.train(training)
        batch_size = premise.size(0)

        mask_p = torch.ne(premise, 0).type(dtype)
        mask_h = torch.ne(hypothesis, 0).type(dtype)

        encoded_p = self.embedding(premise)  # batch x T x n_embed
        encoded_p = F.dropout(encoded_p, p=self.options['DROPOUT'], training=training)

        encoded_h = self.embedding(hypothesis)  # batch x T x n_embed
        encoded_h = F.dropout(encoded_h, p=self.options['DROPOUT'], training=training)

        encoded_p = encoded_p.transpose(1, 0)  # T x batch x n_embed
        encoded_h = encoded_h.transpose(1, 0)  # T x batch x n_embed

        mask_p = mask_p.transpose(1, 0)  # T x batch
        mask_h = mask_h.transpose(1, 0)  # T x batch

        h_0 = self.init_hidden(batch_size)  # 1 x batch x n_dim
        o_p, h_n = self._gru_forward(self.p_gru, encoded_p, mask_p, h_0)  # o_p : T x batch x n_dim
                                                                          # h_n : 1 x batch x n_dim

        o_h, h_n = self._gru_forward(self.h_gru, encoded_h, mask_h, h_n)  # o_h : T x batch x n_dim
                                                                          # h_n : 1 x batch x n_dim

        if self.options['WBW_ATTN']:
            r_0 = self.attn_rnn_init_hidden(batch_size)  # batch x n_dim
            r, alpha_vec = self._attn_rnn_forward(o_h, mask_h, r_0, o_p, mask_p)  # r : batch x n_dim
                                                                                  # alpha_vec : T x batch x T         
        else:
            r, alpha = self._attention_forward(o_p, mask_p, o_h[-1])  # r : batch x n_dim
                                                                      # alpha : batch x T

        h_star = self._combine_last(r, o_h[-1])  # batch x n_dim
        h_star = self.out(h_star)  # batch x num_classes
        if self.options['LAST_NON_LINEAR']:
            h_star = F.leaky_relu(h_star)  # Non linear projection
        pred = F.log_softmax(h_star)
        return pred
Project: pytorch2c    Author: lantiga    | Project source | File source
def base_test():

    fc1 = nn.Linear(10,20)
    fc1.weight.data.normal_(0.0,1.0)
    fc1.bias.data.normal_(0.0,1.0)

    fc2 = nn.Linear(20,2)
    fc2.weight.data.normal_(0.0,1.0)
    fc2.bias.data.normal_(0.0,1.0)

    fc3 = nn.Linear(10,2)
    fc3.weight.data.normal_(0.0,1.0)
    fc3.bias.data.normal_(0.0,1.0)

    fc4 = nn.Linear(10,2)
    fc4.weight.data.normal_(0.0,1.0)
    fc4.bias.data.normal_(0.0,1.0)

    softmax = nn.Softmax()

    model0 = lambda x: F.log_softmax(fc2(F.relu(fc1(x))))
    model1 = lambda x: F.softmax(F.elu(fc3(x)))
    model2 = lambda x: F.softmax(F.tanh(fc3(x)))
    model3 = lambda x: F.softmax(F.sigmoid(fc3(x)))
    model4 = lambda x: softmax(F.leaky_relu(fc4(x))).clone()
    model5 = lambda x: softmax(F.logsigmoid(fc4(x.transpose(0,1))))
    model6 = lambda x: fc3(F.max_pool2d(x.unsqueeze(dim=0),2).squeeze())
    model7 = lambda x: fc3(F.max_pool2d(x.unsqueeze(dim=0),2).squeeze(dim=0))
    model8 = lambda x: fc3(F.max_pool3d(x.unsqueeze(0),2).squeeze())
    model9 = lambda x: fc3(F.max_pool1d(x.abs().view(1,1,-1),4).squeeze().view(10,10))
    #model10 = lambda x: fc3(x.double())
    #model10 = lambda x: fc3(x.view(1,10,10).select(0,0))
    model10 = lambda x, y: F.softmax(F.tanh(fc3(torch.cat((x,y),1))))

    data = Variable(torch.rand(10,10))
    data2 = Variable(torch.rand(20,20))
    data1a = Variable(torch.rand(10,5))
    data1b = Variable(torch.rand(10,5))
    data3 = Variable(torch.rand(2,20,20))

    out = model0(data) + \
          model1(data) * model2(data) / model3(data) / 2.0 + \
          2.0 * model4(data) + model5(data) + 1 - 2.0 + \
          model6(data2) + model7(data2) + model8(data3) + model9(data2) + model10(data1a,data1b)

    out_path = 'out'
    if not os.path.isdir(out_path):
        os.mkdir(out_path)
    uid = str(uuid.uuid4())

    torch2c.compile(out,'base',os.path.join(out_path,uid),compile_test=True)
Project: pytorch-yolo2    Author: marvis    | Project source | File source
def forward(self, x):
        ind = -2
        self.loss = None
        outputs = dict()
        for block in self.blocks:
            ind = ind + 1
            #if ind > 0:
            #    return x

            if block['type'] == 'net':
                continue
            elif block['type'] == 'convolutional' or block['type'] == 'maxpool' or block['type'] == 'reorg' or block['type'] == 'avgpool' or block['type'] == 'softmax' or block['type'] == 'connected':
                x = self.models[ind](x)
                outputs[ind] = x
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
                if len(layers) == 1:
                    x = outputs[layers[0]]
                    outputs[ind] = x
                elif len(layers) == 2:
                    x1 = outputs[layers[0]]
                    x2 = outputs[layers[1]]
                    x = torch.cat((x1,x2),1)
                    outputs[ind] = x
            elif block['type'] == 'shortcut':
                from_layer = int(block['from'])
                activation = block['activation']
                from_layer = from_layer if from_layer > 0 else from_layer + ind
                x1 = outputs[from_layer]
                x2 = outputs[ind-1]
                x  = x1 + x2
                if activation == 'leaky':
                    x = F.leaky_relu(x, 0.1, inplace=True)
                elif activation == 'relu':
                    x = F.relu(x, inplace=True)
                outputs[ind] = x
            elif block['type'] == 'region':
                continue
                if self.loss:
                    self.loss = self.loss + self.models[ind](x)
                else:
                    self.loss = self.models[ind](x)
                outputs[ind] = None
            elif block['type'] == 'cost':
                continue
            else:
                print('unknown type %s' % (block['type']))
        return x