Python torch.nn.functional module: softplus() usage examples

The following 15 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.functional.softplus().
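
Before diving in: softplus(x) = log(1 + exp(x)) is a smooth, everywhere-differentiable approximation of ReLU whose output is strictly positive, which is why most examples below use it to produce variances, scales, and other quantities that must stay positive. A minimal standalone sketch of the call (beta and threshold are part of PyTorch's documented signature):

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, 0.0, 2.0])
y = F.softplus(x)                # log(1 + exp(x)) -> tensor([0.1269, 0.6931, 2.1269])
y2 = F.softplus(x, beta=2)       # (1/beta) * log(1 + exp(beta * x)), a sharper ReLU approximation
# inputs above `threshold` (default 20) are passed through linearly for numerical stability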

Project: pytorch-avitm    Author: hyqneuron
def forward(self, input, compute_loss=False, avg_loss=True):
        # compute posterior
        en1 = F.softplus(self.en1_fc(input))                            # en1_fc   output
        en2 = F.softplus(self.en2_fc(en1))                              # encoder2 output
        en2 = self.en2_drop(en2)
        posterior_mean   = self.mean_bn  (self.mean_fc  (en2))          # posterior mean
        posterior_logvar = self.logvar_bn(self.logvar_fc(en2))          # posterior log variance
        posterior_var    = posterior_logvar.exp()
        # take sample
        eps = Variable(input.data.new().resize_as_(posterior_mean.data).normal_()) # noise
        z = posterior_mean + posterior_var.sqrt() * eps                 # reparameterization
        p = F.softmax(z, dim=1)                                         # mixture probability
        p = self.p_drop(p)
        # do reconstruction
        recon = F.softmax(self.decoder_bn(self.decoder(p)), dim=1)      # reconstructed distribution over vocabulary

        if compute_loss:
            return recon, self.loss(input, recon, posterior_mean, posterior_logvar, posterior_var, avg_loss)
        else:
            return recon
Project: sourceseparation_misc    Author: ycemsubakan
def forward(self, inp):
        #if inp.dim() > 2:
        #    inp = inp.permute(0, 2, 1)
        #inp = inp.contiguous().view(-1, self.L1)
        if not isinstance(inp, Variable):
            inp = Variable(inp[0])

        if self.arguments.tr_method in ['adversarial', 'adversarial_wasserstein', 'ML']:
            h = F.softplus(self.l1(inp))    # same nonnegative activation for all training methods
        else:
            raise ValueError('Unknown training method: {}'.format(self.arguments.tr_method))
        output = F.softplus(self.l2(h))

        if self.smooth_output:
            output = output.view(-1, 1, int(np.sqrt(self.L2)), int(np.sqrt(self.L2)))
            output = F.softplus(self.sml(output))
            output = output.view(-1, self.L2)
        return output
Project: pyro    Author: uber
def forward(self, x):
        a = self.mlp(x)
        return a[:, 0:self.z_size], softplus(a[:, self.z_size:])  # mean, softplus-constrained (positive) scale


# Takes a latent code, z_what, to pixel intensities.
Project: pyro    Author: uber
def forward(self, h):
        out = self.mlp(h)
        z_pres_p = sigmoid(out[:, 0:self.z_pres_size])
        z_where_mu = out[:, self.z_pres_size:self.z_pres_size + self.z_where_size]
        z_where_sigma = softplus(out[:, (self.z_pres_size + self.z_where_size):])
        return z_pres_p, z_where_mu, z_where_sigma
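
In both pyro snippets above, softplus turns an unconstrained MLP output into a valid positive scale for a Gaussian. A hedged sketch of the same pattern in plain PyTorch (the split into mu_raw/sigma_raw mirrors the slicing above, but the names and shapes are illustrative, not from the project):

import torch
import torch.nn.functional as F
from torch.distributions import Normal

out = torch.randn(4, 2)                    # stand-in for an MLP output: [mu_raw, sigma_raw]
mu, sigma_raw = out[:, 0], out[:, 1]
dist = Normal(mu, F.softplus(sigma_raw))   # softplus guarantees scale > 0
z = dist.rsample()                         # reparameterized sample: mu + sigma * eps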
Project: chinese_generation    Author: polaroidz
def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)

        x = self.max1(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x)

        x = self.max2(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = F.relu(x)

        x = self.avg1(x)

        x = x.view(x.size(0), -1)

        mu = self.fc_mu(x)

        sigma = self.fc_sig(x)
        sigma = F.softplus(sigma) + 1e-6

        return mu, sigma
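
The trailing + 1e-6 is the notable detail here: softplus tends to 0 as its input goes to negative infinity, so without the epsilon sigma could collapse to (numerically) zero and break any downstream log-density or division; the constant keeps the standard deviation strictly positive.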
Project: pyprob    Author: probprog
def forward(self, x, samples):
        mean = self.mean_drop(x)
        mean = self.mean_lin1(mean)
        mean = F.relu(mean)
        mean = self.mean_drop(mean)
        mean = self.mean_lin2(mean)

        variances = self.vars_drop(x)
        variances = self.vars_lin1(variances)
        variances = F.relu(variances)
        variances = self.vars_drop(variances)
        variances = self.vars_lin2(variances)
        variances = F.softplus(variances) * self.softplus_boost

        return True, torch.cat([mean, variances], dim=1) # TODO: Transform mean and variances in the same fashion as in ProposalNormal
Project: pytorch-REINFORCE    Author: JamesChuanggg
def select_action(self, state):
        mu, sigma_sq = self.model(Variable(state).cuda())
        sigma_sq = F.softplus(sigma_sq)

        eps = torch.randn(mu.size())
        # calculate the probability
        action = (mu + sigma_sq.sqrt()*Variable(eps).cuda()).data
        prob = normal(action, mu, sigma_sq)
        entropy = 0.5*((sigma_sq*2*pi.expand_as(sigma_sq)).log()+1)  # Gaussian differential entropy: 0.5*log(2*pi*e*sigma_sq)

        log_prob = prob.log()
        return action, log_prob, entropy
Project: Efficient-Dynamic-Batching    Author: jsuarez5341
def H(self, x):
      Wg = self.Wg(x)
      noise = Variable(t.randn(*Wg.size()))
      if Wg.is_cuda:
         noise = noise.cuda()
      Wn = F.softplus(self.Wn(x))
      return Wg + noise*Wn

   #Take top k from H
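
Here softplus keeps the learned noise scale Wn(x) nonnegative, so H(x) is the gating logits Wg(x) plus Gaussian noise whose standard deviation depends on the input; together with the top-k selection noted in the trailing comment, this is the noisy top-k gating pattern used in sparsely-gated mixture-of-experts layers.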
Project: sourceseparation_misc    Author: ycemsubakan
def decode(self, z):
        return F.softplus(self.fc3(z))
Project: sourceseparation_misc    Author: ycemsubakan
def forward(self, inp):
        if not isinstance(inp, Variable):
            inp = Variable(inp[0])

        output = F.softplus(self.l1(inp))

        return output
Project: sourceseparation_misc    Author: ycemsubakan
def forward(self, inp):
        if not isinstance(inp, Variable):
            inp = Variable(inp[0])

        output = torch.mm(inp, F.softplus(self.l1))
        #output = output + self.b1
        return output
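
Note that softplus is applied to the weight matrix self.l1 itself rather than to an activation: torch.mm(inp, F.softplus(self.l1)) keeps the learned basis entrywise nonnegative while the underlying parameter stays unconstrained, presumably an NMF-style nonnegativity constraint that fits the source-separation setting.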
Project: sourceseparation_misc    Author: ycemsubakan
def forward(self, inp):
        #if inp.dim() > 2:
        #    inp = inp.permute(0, 2, 1)
        #inp = inp.contiguous().view(-1, self.L1)
        if not isinstance(inp, Variable):
            inp = Variable(inp[0])

        h = F.relu(self.l1(inp))
        output = self.l2(h)

        if self.smooth_output:
            output = output.view(-1, 1, int(np.sqrt(self.L2)), int(np.sqrt(self.L2)))
            output = F.softplus(self.sml(output))
            output = output.view(-1, self.L2)
        return output
Project: pytorch-dnc    Author: jingweiz
def forward(self, hidden_vb, memory_vb):
        # outputs for computing addressing for heads
        # NOTE: to be consistent w/ the dnc paper, we use
        # NOTE: sigmoid to constrain to [0, 1]
        # NOTE: oneplus to constrain to [1, +inf]
        self.key_vb   = F.tanh(self.hid_2_key(hidden_vb)).view(-1, self.num_heads, self.mem_wid)    # TODO: relu to bias the memory to store positive values ??? check again
        self.beta_vb  = F.softplus(self.hid_2_beta(hidden_vb)).view(-1, self.num_heads, 1)          # beta > 0 via softplus: https://github.com/deepmind/dnc/issues/9
        self.gate_vb  = F.sigmoid(self.hid_2_gate(hidden_vb)).view(-1, self.num_heads, 1)           # gate in (0, 1): interpolation gate, blends wl_{t-1} & wc
        self.shift_vb = F.softmax(self.hid_2_shift(hidden_vb).view(-1, self.num_heads, self.num_allowed_shifts).transpose(0, 2)).transpose(0, 2)    # shift weights sum to 1
        self.gamma_vb = (1. + F.softplus(self.hid_2_gamma(hidden_vb))).view(-1, self.num_heads, 1)  # gamma >= 1: sharpen the final weights

        # now we compute the addressing mechanism
        self._content_focus(memory_vb)
        self._location_focus()
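
The (1. + F.softplus(...)) applied to gamma implements the DNC paper's oneplus(x) = 1 + log(1 + exp(x)), mapping any real input into [1, +inf) so the sharpening exponent can never smooth the weights below identity. If you want to name the idiom (oneplus is the paper's notation, not a helper defined in this project):

def oneplus(x):
    return 1.0 + F.softplus(x)   # range [1, +inf), per the DNC paper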
Project: pytorch-dnc    Author: jingweiz
def forward(self, hidden_vb, memory_vb):
        # outputs for computing addressing for heads
        # NOTE: to be consistent w/ the dnc paper, we use
        # NOTE: sigmoid to constrain to [0, 1]
        # NOTE: oneplus to constrain to [1, +inf]
        self.key_vb   = F.tanh(self.hid_2_key(hidden_vb)).view(-1, self.num_heads, self.mem_wid)    # TODO: relu to bias the memory to store positive values ??? check again
        self.beta_vb  = F.softplus(self.hid_2_beta(hidden_vb)).view(-1, self.num_heads, 1)          # beta > 0 via softplus: https://github.com/deepmind/dnc/issues/9

        # now we compute the addressing mechanism
        self._content_focus(memory_vb)
Project: ktorch    Author: farizrahman4u
def softplus(x):
    def _softplus(x):
        return F.softplus(x)

    return get_op(_softplus)(x)   # wrap F.softplus as a lazily-evaluated ktorch graph op