Python torch.autograd module: grad() source-code examples

The following code examples, extracted from open-source Python projects, illustrate how to use torch.autograd.grad(). They predate PyTorch 0.4 (where Variable was merged into Tensor), and each excerpt assumes its project's surrounding imports (torch, grad/autograd, Variable) and globals such as opt or params.
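As a primer, here is a minimal, self-contained sketch of calling grad() directly. It is not taken from any of the projects below and is written against the modern tensor API (the excerpts use the older Variable wrapper); the values and names are illustrative only.

import torch
from torch.autograd import grad

x = torch.arange(1.0, 4.0, requires_grad=True)  # tensor([1., 2., 3.])
y = (x ** 2).sum()  # scalar output

# grad() returns a tuple with one gradient per input
(dy_dx,) = grad(outputs=y, inputs=x)
print(dy_dx)  # tensor([2., 4., 6.])

# A non-scalar output needs explicit grad_outputs (the vector in the
# vector-Jacobian product), as in the WGAN-GP excerpts below
z = x ** 2
(dz_dx,) = grad(outputs=z, inputs=x, grad_outputs=torch.ones_like(z))
print(dz_dx)  # tensor([2., 4., 6.])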

Project: DeblurGAN    Author: KupynOrest
def calc_gradient_penalty(self, netD, real_data, fake_data):
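        # WGAN-GP: penalize the critic's gradient norm at random interpolates
        # between real and fake samples for deviating from 1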
        alpha = torch.rand(1, 1)
        alpha = alpha.expand(real_data.size())
        alpha = alpha.cuda()

        interpolates = alpha * real_data + ((1 - alpha) * fake_data)

        interpolates = interpolates.cuda()
        interpolates = Variable(interpolates, requires_grad=True)

        disc_interpolates = netD(interpolates)

        gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                  grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
                                  create_graph=True, retain_graph=True, only_inputs=True)[0]

        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.LAMBDA
        return gradient_penalty
Project: PaintsPytorch    Author: orashi
def calc_gradient_penalty(netD, real_data, fake_data, sketch):
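    # WGAN-GP penalty for a sketch-conditioned critic: netD scores the
    # interpolates together with the conditioning sketch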
    alpha = torch.rand(opt.batchSize, 1, 1, 1)
    alpha = alpha.cuda() if opt.cuda else alpha

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    if opt.cuda:
        interpolates = interpolates.cuda()
    interpolates = Variable(interpolates, requires_grad=True)

    disc_interpolates = netD(interpolates, Variable(sketch))[0]

    ones = torch.ones(disc_interpolates.size())
    gradients = grad(outputs=disc_interpolates, inputs=interpolates,
                     grad_outputs=ones.cuda() if opt.cuda else ones,
                     create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.gpW
    return gradient_penalty
Project: PaintsPytorch    Author: orashi
def calc_gradient_penalty(netD, real_data, fake_data):
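    # WGAN-GP penalty for an unconditional critic; alpha has shape
    # (N, 1, 1, 1) and broadcasts against the image batch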
    # print "real_data: ", real_data.size(), fake_data.size()
    alpha = torch.rand(opt.batchSize, 1, 1, 1)
    # alpha = alpha.expand(opt.batchSize, real_data.nelement() / opt.batchSize).contiguous().view(opt.batchSize, 3, 64,
    #                                                                                             64)
    alpha = alpha.cuda() if opt.cuda else alpha

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    if opt.cuda:
        interpolates = interpolates.cuda()
    interpolates = Variable(interpolates, requires_grad=True)

    disc_interpolates = netD(interpolates)

    ones = torch.ones(disc_interpolates.size())
    gradients = grad(outputs=disc_interpolates, inputs=interpolates,
                     grad_outputs=ones.cuda() if opt.cuda else ones,
                     create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.gpW
    return gradient_penalty
Project: PaintsPytorch    Author: orashi
def calc_gradient_penalty(netD, real_data, fake_data, sketch):
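    # Variant of the sketch-conditioned penalty above, where netD appears
    # to return the score directly rather than a tuple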
    alpha = torch.rand(opt.batchSize, 1, 1, 1)
    alpha = alpha.cuda() if opt.cuda else alpha

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    if opt.cuda:
        interpolates = interpolates.cuda()
    interpolates = Variable(interpolates, requires_grad=True)

    disc_interpolates = netD(interpolates, Variable(sketch))

    ones = torch.ones(disc_interpolates.size())
    gradients = grad(outputs=disc_interpolates, inputs=interpolates,
                     grad_outputs=ones.cuda() if opt.cuda else ones,
                     create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.gpW
    return gradient_penalty
Project: pytorch-arda    Author: corenel
def calc_gradient_penalty(D, real_data, fake_data):
    """Calculatge gradient penalty for WGAN-GP."""
    alpha = torch.rand(params.batch_size, 1)
    alpha = alpha.expand(real_data.size())
    alpha = make_cuda(alpha)

    interpolates = make_variable(alpha * real_data + ((1 - alpha) * fake_data))
    interpolates.requires_grad = True

    disc_interpolates = D(interpolates)

    gradients = grad(outputs=disc_interpolates,
                     inputs=interpolates,
                     grad_outputs=make_cuda(
                         torch.ones(disc_interpolates.size())),
                     create_graph=True,
                     retain_graph=True,
                     only_inputs=True)[0]

    gradient_penalty = params.penalty_lambda * \
        ((gradients.norm(2, dim=1) - 1) ** 2).mean()

    return gradient_penalty
Project: GAN-Zoo    Author: corenel
def calc_gradient_penalty(D, real_data, fake_data):
    """Calculatge gradient penalty for WGAN-GP."""
    alpha = torch.rand(params.batch_size, 1)
    alpha = alpha.expand(real_data.size())
    alpha = make_cuda(alpha)

    interpolates = make_variable(alpha * real_data + ((1 - alpha) * fake_data))
    interpolates.requires_grad = True

    disc_interpolates = D(interpolates)

    gradients = grad(outputs=disc_interpolates,
                     inputs=interpolates,
                     grad_outputs=make_cuda(
                         torch.ones(disc_interpolates.size())),
                     create_graph=True,
                     retain_graph=True,
                     only_inputs=True)[0]

    gradient_penalty = params.penalty_lambda * \
        ((gradients.norm(2, dim=1) - 1) ** 2).mean()

    return gradient_penalty
Project: DisentangleVAE    Author: Jueast
def GAN_loss(self, x):
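        # WGAN-style critic loss with a gradient penalty on interpolates
        # between real data and decoded prior samples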
        x = x.view(x.size(0), -1)
        use_cuda = x.data.is_cuda  # x is a Variable, so check its underlying tensor
        eps = torch.FloatTensor(x.size(0), self.nz).normal_()
        alpha = torch.FloatTensor(x.size(0), 1).uniform_(0, 1)
        alpha = alpha.expand(x.size(0), x.size(1))
        if use_cuda:
            eps, alpha = eps.cuda(), alpha.cuda()
        recon_pz = self.decode(Variable(eps))
        interpolates = alpha * x.data + (1 - alpha) * recon_pz.data
        interpolates = Variable(interpolates, requires_grad=True)
        D_interpolates = self.D(interpolates)
        # grad() needs explicit grad_outputs for a non-scalar output
        gradients = grad(D_interpolates, interpolates,
                         grad_outputs=torch.ones(D_interpolates.size()).cuda() if use_cuda
                         else torch.ones(D_interpolates.size()),
                         create_graph=True)[0]
        slopes = torch.sum(gradients ** 2, 1).sqrt()
        gradient_penalty = torch.mean((slopes - 1.) ** 2)  # E[(||grad|| - 1)^2]
        return self.D(x) - self.D(recon_pz) - 10 * gradient_penalty
Project: SGAN    Author: YuhangSong
def calc_gradient_reward(noise_v, prediction_v_before_deconv):
    '''
        Compute the additional (noise-encouragement) loss for G.
    '''

    def get_grad_norm(inputs, outputs):

        gradients = autograd.grad(
            outputs=outputs,
            inputs=inputs,
            grad_outputs=torch.ones(outputs.size()).cuda(),
            create_graph=True,
            retain_graph=True,
            only_inputs=True
        )[0]
        gradients = gradients.contiguous()
        gradients_fl = gradients.view(gradients.size()[0], -1)
        # RMS gradient magnitude per sample: L2 norm scaled by sqrt(dimension)
        gradients_norm = gradients_fl.norm(2, dim=1) / (gradients_fl.size()[1] ** 0.5)

        return gradients_norm

    gradients_norm_noise = get_grad_norm(noise_v, prediction_v_before_deconv)

    logger.plot('gradients_norm_noise', [gradients_norm_noise.data.mean()])

    gradients_reward = (gradients_norm_noise + 1.0).log().mean() * params['NOISE_ENCOURAGE_FACTOR']

    return gradients_reward
Project: pytorch-rl    Author: jingweiz
def _1st_order_trpo(self, detached_policy_loss_vb, detached_policy_vb, detached_avg_policy_vb, detached_splitted_policy_vb=None):
        on_policy = detached_splitted_policy_vb is None
        # KL divergence: k = \nabla_{\phi_\theta} D_{KL}[ \pi(\cdot|\phi_{\theta_a}) || \pi(\cdot|\phi_\theta) ]
        # kl_div_vb = F.kl_div(detached_policy_vb.log(), detached_avg_policy_vb, size_average=False) # NOTE: the built-in one does not work on batch
        kl_div_vb = categorical_kl_div(detached_policy_vb, detached_avg_policy_vb)
        # NOTE: k & g are wll w.r.t. the network output, which is detached_policy_vb
        # NOTE: gradient from this part will not flow back into the model
        # NOTE: that's why we are only using detached policy variables here
        if on_policy:
            k_vb = grad(outputs=kl_div_vb,               inputs=detached_policy_vb, retain_graph=False, only_inputs=True)[0]
            g_vb = grad(outputs=detached_policy_loss_vb, inputs=detached_policy_vb, retain_graph=False, only_inputs=True)[0]
        else:
            # NOTE (important): this is why we cannot simply detach and then split policy_vb, but must split before detaching:
            # NOTE: otherwise the split pieces could not backtrace the grads computed in this later part of the graph,
            # NOTE: so they would have no way to connect back to the graphs in the model
            k_vb = grad(outputs=(kl_div_vb.split(1, 0)),               inputs=(detached_splitted_policy_vb), retain_graph=False, only_inputs=True)
            g_vb = grad(outputs=(detached_policy_loss_vb.split(1, 0)), inputs=(detached_splitted_policy_vb), retain_graph=False, only_inputs=True)
            k_vb = torch.cat(k_vb, 0)
            g_vb = torch.cat(g_vb, 0)

        kg_dot_vb = (k_vb * g_vb).sum(1, keepdim=True)
        kk_dot_vb = (k_vb * k_vb).sum(1, keepdim=True)
        z_star_vb = g_vb - ((kg_dot_vb - self.master.clip_1st_order_trpo) / kk_dot_vb).clamp(min=0) * k_vb

        return z_star_vb