Python torch module: sign() code examples

The following code examples, collected from open-source Python projects, illustrate how to use torch.sign().

Project: audio    Author: pytorch
def __call__(self, x):
        """

        Args:
            x (FloatTensor/LongTensor or ndarray)

        Returns:
            x_mu (LongTensor or ndarray)

        """
        mu = self.qc - 1.
        if isinstance(x, np.ndarray):
            x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
            x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
        elif isinstance(x, (torch.Tensor, torch.LongTensor)):
            if isinstance(x, torch.LongTensor):
                x = x.float()
            mu = torch.FloatTensor([mu])
            x_mu = torch.sign(x) * torch.log1p(mu *
                                               torch.abs(x)) / torch.log1p(mu)
            x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()
        return x_mu
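The encoder implements mu-law companding followed by uniform quantization: F(x) = sign(x) * log(1 + mu*|x|) / log(1 + mu), with the companded value mapped from [-1, 1] onto the integer codes {0, ..., qc - 1}. A worked example, assuming qc = 256 (so mu = 255): for x = 0.5, F(x) = ln(128.5) / ln(256) ≈ 0.8757, and (0.8757 + 1) / 2 * 255 + 0.5 ≈ 239.6, which truncates to code 239.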
Project: audio    Author: pytorch
def __call__(self, x_mu):
        """

        Args:
            x_mu (FloatTensor/LongTensor or ndarray)

        Returns:
            x (FloatTensor or ndarray)

        """
        mu = self.qc - 1.
        if isinstance(x_mu, np.ndarray):
            x = ((x_mu) / mu) * 2 - 1.
            x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
        elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
            if isinstance(x_mu, torch.LongTensor):
                x_mu = x_mu.float()
            mu = torch.FloatTensor([mu])
            x = ((x_mu) / mu) * 2 - 1.
            x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.) / mu
        return x
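Together, the two examples above form an encode/decode pair. A minimal self-contained round trip in plain NumPy (qc = 256 is an assumption, the usual 8-bit choice; the torch branch computes the same thing):

import numpy as np

qc = 256                       # quantization channels (assumed 8-bit mu-law)
mu = qc - 1.0

x = np.linspace(-1.0, 1.0, 5)  # the transform assumes input scaled to [-1, 1]
# encode: compand, then map [-1, 1] onto the integer codes {0, ..., mu}
x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
# decode: map codes back to [-1, 1], then expand (expm1(t) == exp(t) - 1)
y = (x_mu / mu) * 2 - 1.0
y = np.sign(y) * np.expm1(np.abs(y) * np.log1p(mu)) / mu
assert np.allclose(x, y, atol=1.0 / mu)  # exact up to quantization error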
Project: pyro    Author: uber
def _test_jacobian(self, input_dim, hidden_dim, multiplier):
        jacobian = torch.zeros(input_dim, input_dim)
        arn = AutoRegressiveNN(input_dim, hidden_dim, multiplier)

        def nonzero(x):
            return torch.sign(torch.abs(x))

        for output_index in range(multiplier):
            for j in range(input_dim):
                for k in range(input_dim):
                    x = Variable(torch.randn(1, input_dim))
                    epsilon_vector = torch.zeros(1, input_dim)
                    epsilon_vector[0, j] = self.epsilon
                    delta = (arn(x + Variable(epsilon_vector)) - arn(x)) / self.epsilon
                    jacobian[j, k] = float(delta[0, k + output_index * input_dim].data.cpu().numpy()[0])

            permutation = arn.get_permutation()
            permuted_jacobian = jacobian.clone()
            for j in range(input_dim):
                for k in range(input_dim):
                    permuted_jacobian[j, k] = jacobian[permutation[j], permutation[k]]

            lower_sum = torch.sum(torch.tril(nonzero(permuted_jacobian), diagonal=0))
            self.assertTrue(lower_sum == float(0.0))
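The assertion checks the autoregressive property: after undoing the permutation, output i may depend only on inputs that come strictly earlier, so every entry of the permuted Jacobian on or below the diagonal must vanish. (The IAF test further below uses diagonal=-1 instead, because an IAF's outputs do depend on their own inputs.) A toy illustration of the tril check, with made-up values:

import torch

J = torch.Tensor([[0.0, 1.3, 0.2],
                  [0.0, 0.0, 0.7],
                  [0.0, 0.0, 0.0]])                # strictly upper-triangular Jacobian
nonzero = torch.sign(torch.abs(J))                 # 1 where J is nonzero, else 0
print(torch.sum(torch.tril(nonzero, diagonal=0)))  # -> 0, so the check passes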
Project: FewShotLearning    Author: gitabcworld
def preProc2(x):
    # Access the global variables
    global P, expP, negExpP
    P = P.type_as(x)
    expP = expP.type_as(x)
    negExpP = negExpP.type_as(x)

    # z will hold the second coordinate of the piecewise preprocessing (built from zeros)
    z = Variable(torch.zeros(x.size())).type_as(x)
    absX = torch.abs(x)
    cond1 = torch.gt(absX, negExpP)
    cond2 = torch.le(absX, negExpP)
    if (torch.sum(cond1) > 0).data.all():
        x1 = torch.sign(x[cond1])
        z[cond1] = x1
    if (torch.sum(cond2) > 0).data.all():
        x2 = x[cond2]*expP
        z[cond2] = x2
    return z
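This is the second coordinate of the gradient preprocessing from "Learning to learn by gradient descent by gradient descent" (Andrychowicz et al., 2016): inputs with magnitude above e^-p are replaced by their sign, smaller ones are scaled up by e^p. A sketch of how the globals might be initialized before calling it (an assumption; the repo sets them up elsewhere, and p = 10 is the paper's value):

import torch
from torch.autograd import Variable

p = 10.0
P = Variable(torch.FloatTensor([p]))
expP = Variable(torch.exp(P))        # e**p, scales small-magnitude inputs
negExpP = Variable(torch.exp(-P))    # e**-p, the magnitude threshold

x = Variable(torch.FloatTensor([-3.0, 1e-6, 2.0]))
print(preProc2(x))   # -> [-1.0, ~0.022 (= 1e-6 * e**10), 1.0]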
Project: pytorch-playground    Author: aaron-xichen
def min_max_quantize(input, bits):
    assert bits >= 1, bits
    if bits == 1:
        return torch.sign(input) - 1
    min_val, max_val = input.min(), input.max()

    if isinstance(min_val, Variable):
        max_val = float(max_val.data.cpu().numpy()[0])
        min_val = float(min_val.data.cpu().numpy()[0])

    input_rescale = (input - min_val) / (max_val - min_val)

    n = math.pow(2.0, bits) - 1
    v = torch.floor(input_rescale * n + 0.5) / n

    v = v * (max_val - min_val) + min_val
    return v
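An illustrative call, under the PyTorch 0.x semantics the function assumes (where .min() on a plain tensor returns a Python float): with bits=2 the range [min, max] is covered by 2**2 = 4 evenly spaced levels and each value snaps to the nearest one.

import torch

t = torch.FloatTensor([-1.0, -0.3, 0.2, 1.0])
print(min_max_quantize(t, 2))   # -> [-1.0000, -0.3333, 0.3333, 1.0000]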
Project: pyro    Author: uber
def _test_jacobian(self, input_dim, hidden_dim):
        jacobian = torch.zeros(input_dim, input_dim)
        iaf = InverseAutoregressiveFlow(input_dim, hidden_dim, sigmoid_bias=0.5)

        def nonzero(x):
            return torch.sign(torch.abs(x))

        x = Variable(torch.randn(1, input_dim))
        iaf_x = iaf(x)
        for j in range(input_dim):
            for k in range(input_dim):
                epsilon_vector = torch.zeros(1, input_dim)
                epsilon_vector[0, j] = self.epsilon
                iaf_x_eps = iaf(x + Variable(epsilon_vector))
                delta = (iaf_x_eps - iaf_x) / self.epsilon
                jacobian[j, k] = float(delta[0, k].data.cpu().numpy()[0])

        permutation = iaf.get_arn().get_permutation()
        permuted_jacobian = jacobian.clone()
        for j in range(input_dim):
            for k in range(input_dim):
                permuted_jacobian[j, k] = jacobian[permutation[j], permutation[k]]

        analytic_ldt = iaf.log_det_jacobian(iaf_x).data.cpu().numpy()[0]
        numeric_ldt = torch.sum(torch.log(torch.diag(permuted_jacobian)))
        ldt_discrepancy = np.fabs(analytic_ldt - numeric_ldt)

        diag_sum = torch.sum(torch.diag(nonzero(permuted_jacobian)))
        lower_sum = torch.sum(torch.tril(nonzero(permuted_jacobian), diagonal=-1))

        self.assertTrue(ldt_discrepancy < self.epsilon)
        self.assertTrue(diag_sum == float(input_dim))
        self.assertTrue(lower_sum == float(0.0))
Project: rl    Author: Shmuma
def forward(self, input):
        torch.randn(self.epsilon_input.size(), out=self.epsilon_input)
        torch.randn(self.epsilon_output.size(), out=self.epsilon_output)

        func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x))
        eps_in = func(self.epsilon_input)
        eps_out = func(self.epsilon_output)

        bias = self.bias
        if bias is not None:
            bias = bias + self.sigma_bias * Variable(eps_out.t())
        noise_v = Variable(torch.mul(eps_in, eps_out))
        return F.linear(input, self.weight + self.sigma_weight * noise_v, bias)
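The lambda is the f(x) = sign(x) * sqrt(|x|) transform from NoisyNet (Fortunato et al., 2017), used to build factorised Gaussian noise: one noise vector per input, one per output, combined into a rank-1 matrix. Isolated as a standalone sketch (the sizes 4 and 3 are arbitrary):

import torch

def f(x):
    return torch.sign(x) * torch.sqrt(torch.abs(x))

eps_in = f(torch.randn(1, 4))       # shape (1, in_features)
eps_out = f(torch.randn(3, 1))      # shape (out_features, 1)
noise = torch.mul(eps_in, eps_out)  # broadcasts to (3, 4): rank-1 factorised noise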
Project: gpytorch    Author: jrg365
def train_data(cuda=False):
    train_x = Variable(torch.linspace(0, 1, 10))
    train_y = Variable(torch.sign(torch.cos(train_x.data * (4 * math.pi))))
    if cuda:
        return train_x.cuda(), train_y.cuda()
    else:
        return train_x, train_y
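Here torch.sign turns the cosine into a ±1 square wave (two full periods on [0, 1]), a convenient toy target for classification tests.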
Project: gpytorch    Author: jrg365
def erf_approx(self, x):
        exp = -x * x * (4 / math.pi + self.a_for_erf * x * x) / (1 + self.a_for_erf * x * x)
        return torch.sign(x) * torch.sqrt(1 - torch.exp(exp))
Project: gpytorch    Author: jrg365
def erfinv_approx(self, x):
        b = -2 / (math.pi * self.a_for_erf) - torch.log(1 - x * x) / 2
        return torch.sign(x) * torch.sqrt(b + torch.sqrt(b * b - torch.log(1 - x * x) / self.a_for_erf))
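These two methods are Winitzki-style closed-form approximations of erf and its inverse, so they should roughly undo each other. A self-contained round-trip check, inlining them as free functions (assuming a_for_erf is the standard constant 8*(pi - 3) / (3*pi*(4 - pi))):

import math
import torch

a = 8.0 * (math.pi - 3.0) / (3.0 * math.pi * (4.0 - math.pi))

def erf_approx(x):
    exp = -x * x * (4 / math.pi + a * x * x) / (1 + a * x * x)
    return torch.sign(x) * torch.sqrt(1 - torch.exp(exp))

def erfinv_approx(x):
    b = -2 / (math.pi * a) - torch.log(1 - x * x) / 2
    return torch.sign(x) * torch.sqrt(b + torch.sqrt(b * b - torch.log(1 - x * x) / a))

x = torch.linspace(-0.9, 0.9, 6)
print(torch.max(torch.abs(erfinv_approx(erf_approx(x)) - x)))  # small residual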
Project: URNN-PyTorch    Author: jingli9111
def _modReLU(self, h, bias):
        """
        sign(z)*relu(z)
        """
        batch_size = h.size(0)
        sign = torch.sign(h)
        bias_batch = (bias.unsqueeze(0)
                      .expand(batch_size, *bias.size()))
        return sign * functional.relu(torch.abs(h) + bias_batch)
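modReLU (from the unitary-RNN literature, Arjovsky et al., 2016) applies a ReLU to the magnitude while the sign, or in the complex case the phase, is preserved. A standalone numerical check with illustrative values:

import torch
import torch.nn.functional as functional

h = torch.Tensor([[-2.0, 0.3, 1.5]])
bias = torch.Tensor([-1.0, -1.0, -1.0])    # negative bias shrinks magnitudes
out = torch.sign(h) * functional.relu(torch.abs(h) + bias.unsqueeze(0))
print(out)   # -> [[-1.0, 0.0, 0.5]]: magnitudes reduced by 1, signs kept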
Project: pytorch-playground    Author: aaron-xichen
def linear_quantize(input, sf, bits):
    assert bits >= 1, bits
    if bits == 1:
        return torch.sign(input) - 1
    delta = math.pow(2.0, -sf)
    bound = math.pow(2.0, bits-1)
    min_val = - bound
    max_val = bound - 1
    rounded = torch.floor(input / delta + 0.5)

    clipped_value = torch.clamp(rounded, min_val, max_val) * delta
    return clipped_value
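An illustrative call (reading sf as the number of fractional bits, so the step size is 2**-sf and values clamp to the representable signed range; that reading is an assumption):

import torch

t = torch.FloatTensor([0.13, -0.7, 2.4])
print(linear_quantize(t, sf=2, bits=4))   # step 0.25 -> [0.25, -0.75, 1.75]; 2.4 clips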
Project: pytorch-playground    Author: aaron-xichen
def log_minmax_quantize(input, bits):
    assert bits >= 1, bits
    if bits == 1:
        return torch.sign(input)  # 1-bit case: keep only the sign, matching the tensor return below

    s = torch.sign(input)
    input0 = torch.log(torch.abs(input) + 1e-20)
    v = min_max_quantize(input0, bits)
    v = torch.exp(v) * s
    return v
Project: pytorch-playground    Author: aaron-xichen
def log_linear_quantize(input, sf, bits):
    assert bits >= 1, bits
    if bits == 1:
        return torch.sign(input)  # 1-bit case: keep only the sign, matching the tensor return below

    s = torch.sign(input)
    input0 = torch.log(torch.abs(input) + 1e-20)
    v = linear_quantize(input0, sf, bits)
    v = torch.exp(v) * s
    return v
Project: pytorch-playground    Author: aaron-xichen
def tanh_quantize(input, bits):
    assert bits >= 1, bits
    if bits == 1:
        return torch.sign(input)
    input = torch.tanh(input) # [-1, 1]
    input_rescale = (input + 1.0) / 2 #[0, 1]
    n = math.pow(2.0, bits) - 1
    v = torch.floor(input_rescale * n + 0.5) / n
    v = 2 * v - 1 # [-1, 1]

    v = 0.5 * torch.log((1 + v) / (1 - v)) # arctanh
    return v
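tanh_quantize squashes the input with tanh, quantizes uniformly in (-1, 1), then maps back through arctanh, so resolution concentrates near zero; note that any value whose tanh rounds to exactly ±1 would come back as ±inf. A quick check:

import torch

t = torch.FloatTensor([0.3, 1.0])
print(tanh_quantize(t, 3))   # -> ~[0.4581, 0.8959]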
Project: ktorch    Author: farizrahman4u
def sign(x):
    y = get_op(lambda x: torch.sign(x))(x)
    return y
Project: paysage    Author: drckf
def erf(x: T.FloatTensor) -> T.FloatTensor:
    """
    Elementwise error function of a tensor.

    Args:
        x: A tensor.

    Returns:
        tensor: Elementwise error function
    """
    a = 8.0/(3.0*pi)*(pi-3.0)/(4.0-pi)
    x_sq = x*x
    return torch.sign(x)*torch.sqrt(1-torch.exp(-x_sq*(4/pi+a*x_sq)/(1+a*x_sq)))
Project: paysage    Author: drckf
def erfinv(x: T.FloatTensor) -> T.FloatTensor:
    """
    Elementwise inverse error function of a tensor.

    Args:
        x: A tensor.

    Returns:
        tensor: Elementwise inverse error function
    """
    a = 8.0/(3.0*pi)*(pi-3.0)/(4.0-pi)
    x_sq = x*x
    b = -2/(pi*a)-torch.log(1-x_sq)/2
    return torch.sign(x)*torch.sqrt(b+torch.sqrt(b*b-torch.log(1-x_sq)/a))
Project: pytorch-neural-search-optimizer    Author: daviddao
def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(weight_decay, p.data)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = d_p.clone()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                # Update rule: g * exp(sign(g) * sign(m)); assumes momentum != 0 so buf exists
                d_p = d_p.mul(torch.exp(torch.sign(d_p)*torch.sign(buf)))

                p.data.add_(-group['lr'], d_p)

        return loss
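The update rule matches one of the rules discovered by Neural Optimizer Search (Bello et al., 2017): each coordinate's gradient is scaled by e when gradient and momentum agree in sign and by 1/e when they disagree. A two-coordinate illustration:

import torch

g = torch.Tensor([0.2, -0.1])   # gradient
m = torch.Tensor([0.5, 0.3])    # momentum buffer
print(g * torch.exp(torch.sign(g) * torch.sign(m)))   # -> [0.2 * e, -0.1 / e]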
Project: pytorch-nips2017-attack-example    Author: rwightman
def run(self, model, input, target, batch_idx=0):
        input_var = autograd.Variable(input, requires_grad=True)
        target_var = autograd.Variable(target)
        eps = self.eps
        step_alpha = self.step_alpha

        step = 0
        while step < self.num_steps:
            zero_gradients(input_var)
            output = model(input_var)
            if not self.targeted and not step:
                # for non-targeted, we'll move away from most likely
                target_var.data = output.data.max(1)[1]
            loss = self.loss_fn(output, target_var)
            loss.backward()

            # normalize and scale gradient
            if self.norm == 2:
                normed_grad = step_alpha * input_var.grad.data / l2_norm(input_var.grad.data)
            elif self.norm == 1:
                normed_grad = step_alpha * input_var.grad.data / l1_norm(input_var.grad.data)
            else:
                # infinity-norm
                normed_grad = step_alpha * torch.sign(input_var.grad.data)

            # perturb current input image by normalized and scaled gradient
            if self.targeted:
                step_adv = input_var.data - normed_grad
            else:
                step_adv = input_var.data + normed_grad

            # calculate total adversarial perturbation from original image and clip to epsilon constraints
            total_adv = step_adv - input
            if self.norm == 2:
                # total_adv = eps * total_adv / l2_norm(total_adv)
                total_adv = torch.clamp(total_adv, -eps, eps)
            elif self.norm == 1:
                # total_adv = eps * total_adv / l1_norm(total_adv)
                total_adv = torch.clamp(total_adv, -eps, eps)
            else:
                # infinity-norm
                total_adv = torch.clamp(total_adv, -eps, eps)

            if self.debug:
                print('batch:', batch_idx, 'step:', step, total_adv.mean(), total_adv.min(), total_adv.max())
                sys.stdout.flush()

            # apply total adversarial perturbation to original image and clip to valid pixel range
            input_adv = input + total_adv
            input_adv = torch.clamp(input_adv, -1.0, 1.0)
            input_var.data = input_adv
            step += 1

        return input_adv.permute(0, 2, 3, 1).cpu().numpy()
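With the infinity norm, this loop is the basic iterative FGSM attack (Kurakin et al., 2017): each step moves the image by step_alpha along the per-pixel sign of the loss gradient, and the accumulated perturbation is clipped to the eps ball and the valid pixel range. The core step, reduced to a hypothetical helper:

import torch

def fgsm_step(x_adv, x_orig, grad, alpha, eps):
    # a targeted attack would subtract the step instead of adding it
    step = alpha * torch.sign(grad)                        # scaled gradient sign
    total = torch.clamp(x_adv + step - x_orig, -eps, eps)  # stay inside the eps ball
    return torch.clamp(x_orig + total, -1.0, 1.0)          # stay in valid pixel range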