Python torch module: sigmoid() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use torch.sigmoid().
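As a quick reference, torch.sigmoid applies the logistic function 1 / (1 + exp(-x)) elementwise, mapping raw scores to values in (0, 1):

import torch

x = torch.tensor([-2.0, 0.0, 2.0])
probs = torch.sigmoid(x)  # tensor([0.1192, 0.5000, 0.8808])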

Project: DistanceGAN    Author: sagiebenaim    | Project source | File source
def forward(self, input):
        conv1 = self.conv1(input)
        relu1 = self.relu1(conv1)

        conv2 = self.conv2(relu1)
        bn2 = self.bn2(conv2)
        relu2 = self.relu2(bn2)

        conv3 = self.conv3(relu2)
        bn3 = self.bn3(conv3)
        relu3 = self.relu3(bn3)

        conv4 = self.conv4(relu3)
        bn4 = self.bn4(conv4)
        relu4 = self.relu4(bn4)

        conv5 = self.conv5(relu4)

        return torch.sigmoid(conv5), [relu2, relu3, relu4]
Project: PyTorchText    Author: chenyuntc    | Project source | File source
def forward(self, char, word):
        self.models.eval()
        outs = []
        for ii, model in enumerate(self.models):
            if model.opt.type_ == 'char':
                out = t.sigmoid(model(*char))
            else:
                out = t.sigmoid(model(*word))
            outs.append(out.detach())
        for ii, model in enumerate(self.new_model):
            if model.opt.type_ == 'char':
                out = t.sigmoid(model(*char))
            else:
                out = t.sigmoid(model(*word))
            outs.append(out)
        return sum(outs) / len(outs)
Project: yolov2    Author: zhangkaij    | Project source | File source
def _generate_pred_bbox(self, bbox_delta, anchors):
        """Get predicted boxes from bbox_delta and anchors.

        Args:
            bbox_delta: (dcx, dcy, dw, dh)
                shape: (H*W*num_anchor, 4)
            anchors: (cx, cy, w, h)
                shape: (H*W*num_anchor, 4)
        Output:
            output: (x_min, y_min, x_max, y_max)

        """
        assert bbox_delta.dim() == anchors.dim(), "dim is not equal"

        pred_xy = torch.sigmoid(bbox_delta[:, :2]) + anchors[:, :2]
        pred_wh = torch.exp(bbox_delta[:, 2:]) * anchors[:, 2:]
        pred_bbox = torch.cat((pred_xy, pred_wh), dim=1).contiguous()

        # change (cx, cy, w, h) to (x_min, y_min, x_max, y_max)
        pred_bbox[:, 0:2] = pred_bbox[:, 0:2] - pred_bbox[:, 2:4] / 2
        pred_bbox[:, 2:4] = pred_bbox[:, 0:2] + pred_bbox[:, 2:4]
        pred_bbox[:, 0::2] = pred_bbox[:, 0::2] / self.W
        pred_bbox[:, 1::2] = pred_bbox[:, 1::2] / self.H

        return pred_bbox
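To make the decode above concrete, here is a small worked sketch with invented values; the grid size (W = H = 13) is an assumption for illustration, and the anchors are laid out as (cx, cy, w, h) to match how columns 2:4 are used:

import torch

W = H = 13                                          # assumed feature-grid size
bbox_delta = torch.tensor([[0.0, 0.0, 0.0, 0.0]])   # (dcx, dcy, dw, dh)
anchors = torch.tensor([[6.0, 6.0, 2.0, 3.0]])      # (cx, cy, w, h) in grid cells

pred_xy = torch.sigmoid(bbox_delta[:, :2]) + anchors[:, :2]  # (6.5, 6.5)
pred_wh = torch.exp(bbox_delta[:, 2:]) * anchors[:, 2:]      # (2.0, 3.0)
box = torch.cat((pred_xy, pred_wh), dim=1)
box[:, 0:2] -= box[:, 2:4] / 2   # center -> top-left corner: (5.5, 5.0)
box[:, 2:4] += box[:, 0:2]       # size -> bottom-right corner: (7.5, 8.0)
box[:, 0::2] /= W                # normalize x coordinates to [0, 1]
box[:, 1::2] /= H                # normalize y coordinates to [0, 1]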
Project: seqmod    Author: emanjavacas    | Project source | File source
def _step(self, H_t, T_t, C_t, h0, h_mask, t_mask, c_mask):
        s_lm1, rnns = h0, [self.rnn_h, self.rnn_t, self.rnn_c]
        for l, (rnn_h, rnn_t, rnn_c) in enumerate(zip(*rnns)):
            s_lm1_H = h_mask.expand_as(s_lm1) * s_lm1
            s_lm1_T = t_mask.expand_as(s_lm1) * s_lm1
            s_lm1_C = c_mask.expand_as(s_lm1) * s_lm1
            if l == 0:
                H_t = F.tanh(H_t + rnn_h(s_lm1_H))
                T_t = F.sigmoid(T_t + rnn_t(s_lm1_T))
                # the carry gate should use rnn_c; the original applied rnn_t twice
                C_t = F.sigmoid(C_t + rnn_c(s_lm1_C))
            else:
                H_t = F.tanh(rnn_h(s_lm1_H))
                T_t = F.sigmoid(rnn_t(s_lm1_T))
                C_t = F.sigmoid(rnn_c(s_lm1_C))
            s_l = H_t * T_t + s_lm1 * C_t
            s_lm1 = s_l

        return s_l
Project: seqmod    Author: emanjavacas    | Project source | File source
def forward(self, inputs):
        current_input = inputs

        for i in range(0, len(self.layers), 2):
            layer, activation = self.layers[i], self.layers[i+1]
            proj, linear = layer(current_input), current_input
            proj = F.dropout(proj, p=self.dropout, training=self.training)
            nonlinear = activation(proj[:, 0:self.input_dim])
            gate = F.sigmoid(proj[:, self.input_dim:(2 * self.input_dim)])

            # apply gate
            current_input = gate * linear + (1 - gate) * nonlinear

        return current_input
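The update current_input = gate * linear + (1 - gate) * nonlinear is the standard highway coupling: the gate interpolates elementwise between the untransformed input and the nonlinear projection. A one-unit numeric illustration:

# gate = 1 keeps the raw input; gate = 0 takes the nonlinearity
gate, linear, nonlinear = 0.25, 0.8, -0.3
out = gate * linear + (1 - gate) * nonlinear  # 0.25*0.8 + 0.75*(-0.3) = -0.025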

# gracefully taken from:
# https://discuss.pytorch.org/t/solved-reverse-gradients-in-backward-pass/3589/4
Project: DCN    Author: alexnowakvila    | Project source | File source
def forward(self, e, input, mask, scale=0):
        hidden = Variable(torch.randn(self.batch_size, self.n,
                                      self.hidden_size)).type(dtype)
        if scale == 0:
            e = Variable(torch.zeros(self.batch_size, self.n)).type(dtype)
        Phi = self.build_Phi(e, mask)
        N = torch.sum(Phi, 2).squeeze()
        N += (N == 0).type(dtype)  # avoid division by zero
        Nh = N.unsqueeze(2).expand(self.batch_size, self.n,
                                   self.hidden_size + self.input_size)
        # Normalize inputs, important part!
        mask_inp = mask.unsqueeze(2).expand_as(input)
        input_n = self.Normalize_inputs(Phi, input) * mask_inp
        # input_n = input * mask_inp
        for i, layer in enumerate(self.layers):
            hidden = layer(input_n, hidden, Phi, Nh)
        hidden_p = hidden.view(self.batch_size * self.n, self.hidden_size)
        scores = self.linear_b(hidden_p)
        probs = torch.sigmoid(scores).view(self.batch_size, self.n) * mask
        # probs has shape (batch_size, n)
        return scores, probs, input_n, Phi
Project: ladder    Author: abhiskk    | Project source | File source
def g(self, tilde_z_l, u_l):
        if self.use_cuda:
            ones = Parameter(torch.ones(tilde_z_l.size()[0], 1).cuda())
        else:
            ones = Parameter(torch.ones(tilde_z_l.size()[0], 1))

        b_a1 = ones.mm(self.a1)
        b_a2 = ones.mm(self.a2)
        b_a3 = ones.mm(self.a3)
        b_a4 = ones.mm(self.a4)
        b_a5 = ones.mm(self.a5)

        b_a6 = ones.mm(self.a6)
        b_a7 = ones.mm(self.a7)
        b_a8 = ones.mm(self.a8)
        b_a9 = ones.mm(self.a9)
        b_a10 = ones.mm(self.a10)

        mu_l = torch.mul(b_a1, torch.sigmoid(torch.mul(b_a2, u_l) + b_a3)) + \
               torch.mul(b_a4, u_l) + \
               b_a5

        v_l = torch.mul(b_a6, torch.sigmoid(torch.mul(b_a7, u_l) + b_a8)) + \
              torch.mul(b_a9, u_l) + \
              b_a10

        hat_z_l = torch.mul(tilde_z_l - mu_l, v_l) + mu_l

        return hat_z_l
Project: pytorch-caffe-darknet-convert    Author: marvis    | Project source | File source
def sigmoid(x):
    return 1.0 / (math.exp(-x) + 1.0)
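Note that math.exp(-x) overflows for large negative x (beyond roughly x < -709 in double precision). A numerically safer variant, shown here as a sketch rather than part of the original project, branches on the sign so exp only ever sees a non-positive argument:

import math

def stable_sigmoid(x):
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    z = math.exp(x)  # x < 0, so this cannot overflow
    return z / (1.0 + z)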
Project: arc-pytorch    Author: sanyam5    | Project source | File source
def forward(self, image_pairs: Variable) -> Variable:
        arc_out = self.arc(image_pairs)

        d1 = F.elu(self.dense1(arc_out))
        decision = torch.sigmoid(self.dense2(d1))

        return decision
Project: FewShotLearning    Author: gitabcworld    | Project source | File source
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
        Returns:
            h_1, c_1: Tensors containing the next hidden and cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0)
                      .expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        f, i, o, g = torch.split(wh_b + wi,
                                 split_size=self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
        h_1 = torch.sigmoid(o) * torch.tanh(c_1)
        return h_1, c_1
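A usage sketch for the cell above. The class name LSTMCell and its constructor signature are hypothetical, and torch.split(..., split_size=...) dates the snippet to the pre-0.4 PyTorch API, hence the Variable wrappers:

import torch
from torch.autograd import Variable

batch, input_size, hidden_size = 4, 10, 20
cell = LSTMCell(input_size, hidden_size)  # hypothetical constructor
x = Variable(torch.randn(batch, input_size))
h0 = Variable(torch.zeros(batch, hidden_size))
c0 = Variable(torch.zeros(batch, hidden_size))
h1, c1 = cell(x, (h0, c0))  # each of shape (batch, hidden_size)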
Project: FewShotLearning    Author: gitabcworld    | Project source | File source
def forward(self, input_, hx, time):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
            time: The current timestep value, which is used to
                get appropriate running statistics.
        Returns:
            h_1, c_1: Tensors containing the next hidden and cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0)
                      .expand(batch_size, *self.bias.size()))
        wh = torch.mm(h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        bn_wh = self.bn_hh(wh, time=time)
        bn_wi = self.bn_ih(wi, time=time)
        f, i, o, g = torch.split(bn_wh + bn_wi + bias_batch,
                                 split_size=self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
        h_1 = torch.sigmoid(o) * torch.tanh(self.bn_c(c_1, time=time))
        return h_1, c_1
Project: pytorch    Author: tylergenter    | Project source | File source
def forward(self, input):
        return torch.sigmoid(input)
Project: pytorch    Author: tylergenter    | Project source | File source
def forward(self, input, target):
        return F.binary_cross_entropy(torch.sigmoid(input), target,
                                      self.weight, self.size_average)
Project: benchmark    Author: pytorch    | Project source | File source
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).

        Returns:
            h_1, c_1: Tensors containing the next hidden and cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0)
                      .expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        f, i, o, g = torch.split(wh_b + wi,
                                 split_size=self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
        h_1 = torch.sigmoid(o) * torch.tanh(c_1)
        return h_1, c_1
Project: benchmark    Author: pytorch    | Project source | File source
def forward(self, input_, hx, time):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
            time: The current timestep value, which is used to
                get appropriate running statistics.

        Returns:
            h_1, c_1: Tensors containing the next hidden and cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0)
                      .expand(batch_size, *self.bias.size()))
        wh = torch.mm(h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        bn_wh = self.bn_hh(wh, time=time)
        bn_wi = self.bn_ih(wi, time=time)
        f, i, o, g = torch.split(bn_wh + bn_wi + bias_batch,
                                 split_size=self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
        h_1 = torch.sigmoid(o) * torch.tanh(self.bn_c(c_1, time=time))
        return h_1, c_1
Project: benchmark    Author: pytorch    | Project source | File source
def forward(self, u, x, bias, init=None, mask_h=None):
        bidir = 2 if self.bidirectional else 1
        length = x.size(0) if x.dim() == 3 else 1
        batch = x.size(-2)
        d = self.d_out
        k = u.size(-1) // d
        k_ = k//2 if self.bidirectional else k

        u = u.view(length, batch, d, k_)

        cur = x.new(batch, d).zero_() if init is None else init
        size = (length, batch, d*bidir) if x.dim() == 3 else (batch, d*bidir)
        bias1, bias2 = bias.split(self.d_out)
        u_ = [u.select(-1, i) for i in range(0, k_)]
        h = []
        x_ = x if k_ == 3 else u_[3]
        for i in range(0, length):
            u0i, u1i, u2i = u_[0][i], u_[1][i], u_[2][i]
            g1 = torch.sigmoid(u1i + bias1)
            g2 = torch.sigmoid(u2i + bias2)
            cur = (cur - u0i)*g1 + u0i
            if self.activation_type == 1:
                val = torch.tanh(cur)
            elif self.activation_type == 2:
                val = torch.relu(cur)
            if mask_h is not None:
                val = val*mask_h
            xi = x_[i]
            h.append((val - xi)*g2 + xi)

        if self.bidirectional:
            # the bidirectional pass is not implemented in this snippet
            assert False
        else:
            last_hidden = cur
        h = torch.stack(h)
        return h, last_hidden
Project: PyTorchText    Author: chenyuntc    | Project source | File source
def forward(self, char, word):
        weights = t.nn.functional.softmax(self.weights)
        outs = []
        for ii, model in enumerate(self.models):
            if model.opt.type_ == 'char':
                out = t.sigmoid(model(*char))
            else:
                out = t.sigmoid(model(*word))

            out = out * (weights[:, ii].contiguous().view(1, -1).expand_as(out))
            outs.append(out)
            # outs = [t.sigmoid(model(title,content))*weight  for model in  self.models]

        # outs = [model(title,content)*weight.view(1,1).expand(title.size(0),self.opt.num_classes).mm(self.label_weight) for model,weight in zip(self.models,self.weight)]
        return sum(outs)
Project: PyTorchText    Author: chenyuntc    | Project source | File source
def forward(self, title, content):
        weights = t.nn.functional.softmax(self.weights)
        outs = []
        for ii, model in enumerate(self.models):
            out = t.sigmoid(model(title, content))
            out = out * (weights[:, ii].contiguous().view(1, -1).expand_as(out))
            outs.append(out)
            # outs = [t.sigmoid(model(title,content))*weight  for model in  self.models]

        # outs = [model(title,content)*weight.view(1,1).expand(title.size(0),self.opt.num_classes).mm(self.label_weight) for model,weight in zip(self.models,self.weight)]
        return sum(outs)
Project: PyTorchText    Author: chenyuntc    | Project source | File source
def forward(self, char, word):
        weights = t.nn.functional.softmax(self.weights)
        outs = []
        for ii, model in enumerate(self.models):
            if model.opt.type_ == 'char':
                out = t.sigmoid(model(*char))
            else:
                out = t.sigmoid(model(*word))

            out = out * (weights[:, ii].contiguous().view(1, -1).expand_as(out))
            outs.append(out)
            # outs = [t.sigmoid(model(title,content))*weight  for model in  self.models]

        # outs = [model(title,content)*weight.view(1,1).expand(title.size(0),self.opt.num_classes).mm(self.label_weight) for model,weight in zip(self.models,self.weight)]
        return sum(outs)
Project: PyTorchText    Author: chenyuntc    | Project source | File source
def forward(self, char, word):
        weights = t.nn.functional.softmax(self.weights)
        outs = []
        for ii, model in enumerate(self.models):
            if model.opt.type_ == 'char':
                out = t.sigmoid(model(*char))
            else:
                out = t.sigmoid(model(*word))
            if self.opt.static:
                out = out.detach()
            out = out * (weights[:, ii].contiguous().view(1, -1).expand_as(out))
            outs.append(out)
            # outs = [t.sigmoid(model(title,content))*weight  for model in  self.models]

        # outs = [model(title,content)*weight.view(1,1).expand(title.size(0),self.opt.num_classes).mm(self.label_weight) for model,weight in zip(self.models,self.weight)]
        return sum(outs)
Project: PyTorchText    Author: chenyuntc    | Project source | File source
def forward(self, char, word):
        weights = t.nn.functional.softmax(self.weights)
        outs = []
        for ii, model in enumerate(self.models):
            if model.opt.type_ == 'char':
                out = t.sigmoid(model(*char))
            else:
                out = t.sigmoid(model(*word))

            out = out * (weights[:, ii].contiguous().view(1, -1).expand_as(out))
            outs.append(out)

        return sum(outs)
Project: PyTorchText    Author: chenyuntc    | Project source | File source
def __init__(self, data_root, labels_file):
        self.data_files_path = glob(data_root + "*val.pth")
        self.model_num = len(self.data_files_path)
        self.label_file_path = labels_file
        self.data = t.zeros(100, 1999 * self.model_num)
        for i in range(self.model_num):
            self.data[:, i * 1999:i * 1999 + 1999] = t.sigmoid(t.load(self.data_files_path[i]).float()[:100])
        print(self.data.size())
Project: sk-torch    Author: mattHawthorn    | Project source | File source
def forward(self, in_out_pairs):
        # input is batch_size*2 int Variable
        i = self.input_embeddings(in_out_pairs[:, 0])
        o = self.output_embeddings(in_out_pairs[:, 1])
        # raw activations, NCE_Loss handles the sigmoid (we need to know classes to know the sign to apply)
        return (i * o).sum(1).squeeze()
Project: sk-torch    Author: mattHawthorn    | Project source | File source
def forward(self, activations, targets):
        # targets are -1.0 or 1.0, 1-d Variable
        # likelihood assigned by the model to pos and neg samples is given by the sigmoid, with the sign
        # determined by the class.
        # negative log likelihood
        return log(sigmoid(activations * targets)).sum() * -1.0
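A worked numeric instance of this loss with invented values; positive pairs (target +1) are pushed toward high sigmoid scores and negative samples (target -1) toward low ones:

import torch

activations = torch.tensor([2.0, -1.5])  # raw dot products from the model
targets = torch.tensor([1.0, -1.0])      # +1 observed pair, -1 negative sample
loss = -torch.log(torch.sigmoid(activations * targets)).sum()
# sigmoid(2.0) ≈ 0.881 and sigmoid(1.5) ≈ 0.818, so loss ≈ 0.328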
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def forward(self, input):
        return torch.sigmoid(input)
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def forward(self, input, target):
        return F.binary_cross_entropy(torch.sigmoid(input), target,
                                      self.weight, self.size_average)
Project: yolov2    Author: zhangkaij    | Project source | File source
def forward(self, x, targets=None, num_iter=0):

        conv1s = self.conv1s(x)
        conv2 = self.conv2(conv1s)
        conv3 = self.conv3(conv2)

        conv1s_reorg = self.conv_reorg(conv1s)
        conv1s_reorg = self.reorg(conv1s_reorg)

        cat_1_3 = torch.cat([conv1s_reorg, conv3], 1)
        conv4 = self.conv4(cat_1_3)
        output = self.conv5(conv4)
        batchsize, _, self.H, self.W = output.size()
        # output shape: (batchsize, H*W*num_anchor, (num_class+num_loc))
        output = output.permute(0, 2, 3, 1).contiguous().view(batchsize, -1, (self.num_class+self.num_loc))

        bbox_delta = output[:, :, :4].contiguous()
        iou_pred = F.sigmoid(output[:, :, 4]).contiguous()
        class_pred = output[:, :, 5:].contiguous()
        prob_pred = F.softmax(class_pred.view(-1, self.num_class)).view_as(class_pred)
        pred = (bbox_delta, iou_pred, prob_pred)

        self.anchors_cfg[:, 0::2] = self.anchors_cfg[:, 0::2] / self.W
        self.anchors_cfg[:, 1::2] = self.anchors_cfg[:, 1::2] / self.H

        if self.phase == 'train':
            self._calc_loss(pred, targets, num_iter)
        else:
            assert batchsize == 1, "now only support batchsize=1"

            anchors = self._generate_anchors()
            bbox_pred = self._generate_pred_bbox(bbox_delta[0], anchors)
            output = self.detect(bbox_pred, iou_pred.view(-1), prob_pred.view(-1, self.num_class))
            return output
Project: spotlight    Author: maciejkula    | Project source | File source
def predict(self, user_ids, item_ids=None):
        """
        Make predictions: given a user id, compute the recommendation
        scores for items.

        Parameters
        ----------

        user_ids: int or array
           If int, will predict the recommendation scores for this
           user for all items in item_ids. If an array, will predict
           scores for all (user, item) pairs defined by user_ids and
           item_ids.
        item_ids: array, optional
            Array containing the item ids for which prediction scores
            are desired. If not supplied, predictions for all items
            will be computed.

        Returns
        -------

        predictions: np.array
            Predicted scores for all items in item_ids.
        """

        self._check_input(user_ids, item_ids, allow_items_none=True)
        self._net.train(False)

        user_ids, item_ids = _predict_process_ids(user_ids, item_ids,
                                                  self._num_items,
                                                  self._use_cuda)

        out = self._net(user_ids, item_ids)

        if self._loss == 'poisson':
            out = torch.exp(out)
        elif self._loss == 'logistic':
            out = torch.sigmoid(out)

        return cpu(out.data).numpy().flatten()
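A usage sketch, assuming model is an already-fitted spotlight model trained with loss='logistic' (the variable name is hypothetical):

import numpy as np

scores = model.predict(user_ids=3)    # scores for user 3 over all items, in (0, 1)
top_items = np.argsort(-scores)[:10]  # ten highest-scoring item ids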
Project: pytorch    Author: ezyang    | Project source | File source
def forward(self, input):
        return torch.sigmoid(input)
Project: pytorch    Author: ezyang    | Project source | File source
def sigmoid(input):
    return _autograd_functions.Sigmoid.apply(input)


# etc.
Project: pytorch    Author: ezyang    | Project source | File source
def binary_cross_entropy(input, target, weight=None, size_average=True):
    r"""Function that measures the Binary Cross Entropy
    between the target and the output.

    See :class:`~torch.nn.BCELoss` for details.

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        weight (Variable, optional): a manual rescaling weight
                if provided it's repeated to match input tensor shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if the field
                sizeAverage is set to False, the losses are instead summed
                for each minibatch. Default: True

    Examples::

        >>> input = autograd.Variable(torch.randn(3), requires_grad=True)
        >>> target = autograd.Variable(torch.LongTensor(3).random_(2))
        >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
        >>> loss.backward()
    """
    if not (target.size() == input.size()):
        warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
                      "Please ensure they have the same size.".format(target.size(), input.size()))
    if input.nelement() != target.nelement():
        raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
                         "!= input nelement ({})".format(target.nelement(), input.nelement()))

    if weight is not None:
        new_size = _infer_size(target.size(), weight.size())
        weight = weight.expand(new_size)

    return _functions.thnn.BCELoss.apply(input, target, weight, size_average)
Project: pytorch    Author: ezyang    | Project source | File source
def multilabel_soft_margin_loss(input, target, weight=None, size_average=True):
    input = torch.sigmoid(input)
    return binary_cross_entropy(input, target, weight, size_average)
Project: pytorch    Author: ezyang    | Project source | File source
def test_simple(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        def f(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        trace, z = torch.jit.trace(f, (x, y), nderivs=0)

        torch._C._jit_pass_lint(trace)
        torch._C._jit_pass_onnx(trace)
        torch._C._jit_pass_lint(trace)

        self.assertExpected(str(trace))
Project: pytorch    Author: ezyang    | Project source | File source
def test_verify(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        @torch.jit.compile(verify=True, optimize=False)
        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        z = doit(x, y)
        z2 = doit(x, y)
        self.assertEqual(z, torch.sigmoid(torch.tanh(x * (x + y))))
        self.assertEqual(z, z2)
Project: pytorch    Author: ezyang    | Project source | File source
def test_traced_function(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        @torch.jit.compile
        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        z = doit(x, y)
        z2 = doit(x, y)
        self.assertEqual(z, torch.sigmoid(torch.tanh(x * (x + y))))
        self.assertEqual(z, z2)
Project: pytorch    Author: ezyang    | Project source | File source
def test_disabled_traced_function(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        @torch.jit.compile(enabled=False)
        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        z = doit(x, y)
        z2 = doit(x, y)
        self.assertEqual(z, torch.sigmoid(torch.tanh(x * (x + y))))
        self.assertEqual(z, z2)
Project: pytorch    Author: ezyang    | Project source | File source
def test_autograd_closure(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        trace = torch._C._tracer_enter((x, y), 1)

        z = torch.sigmoid(x * (x + y))
        w = torch.abs(x * x * x + y) + Variable(torch.ones(1))

        torch._C._tracer_exit((z, w))
        torch._C._jit_pass_lint(trace)

        (z * w).backward()
        torch._C._jit_pass_dce(trace)
        torch._C._jit_pass_lint(trace)

        x_grad = x.grad.data.clone()
        x.grad.data.zero_()

        function = torch._C._jit_createAutogradClosure(trace)
        torch._C._jit_pass_lint(trace)
        z2, w2 = function()(x, y)
        (z2 * w2).backward()
        self.assertEqual(z, z2)
        self.assertEqual(w, w2)
        self.assertEqual(x.grad.data, x_grad)
Project: pytorch    Author: ezyang    | Project source | File source
def test_python_ir(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        traced, _ = torch.jit.trace(doit, (x, y))
        g = torch._C._jit_get_graph(traced)
        g2 = torch._C.Graph()
        g_to_g2 = {}
        for node in g.inputs():
            g_to_g2[node] = g2.addInput()
        for node in g.nodes():
            if node.kind() == "PythonOp":
                n_ = g2.create(node.pyname(),
                               [g_to_g2[i] for i in node.inputs()]) \
                    .setType(node.typeOption()) \
                    .s_("note", "from_pyop") \
                    .i_("some_value", len(node.scalar_args()))
                assert(n_.i("some_value") == len(node.scalar_args()))
            else:
                n_ = g2.createClone(node, lambda x: g_to_g2[x])
                assert(n_.kindOf("Offset") == "i")

            g_to_g2[node] = g2.appendNode(n_)

        for node in g.outputs():
            g2.registerOutput(g_to_g2[node])

        t_node = g2.create("TensorTest").t_("a", torch.ones([2, 2]))
        assert(t_node.attributeNames() == ["a"])
        g2.appendNode(t_node)
        assert(torch.equal(torch.ones([2, 2]), t_node.t("a")))
        self.assertExpected(str(g2))
Project: pytorch    Author: ezyang    | Project source | File source
def test_params(self):
        x = Variable(torch.Tensor([[1, 2], [3, 4]]), requires_grad=True)
        y = Variable(torch.Tensor([[1, 2], [3, 4]]), requires_grad=True)
        trace, _ = torch.jit.trace(lambda x, y: -torch.sigmoid(torch.tanh(x * (x + y))), (x, y))
        initializers = [x.data]
        torch._C._jit_pass_onnx(trace)
        self.assertONNXExpected(trace.export(initializers))
Project: PyTorch-Encoding    Author: zhanghang1989    | Project source | File source
def forward(self, input):
        if isinstance(input, Variable):
            return torch.sigmoid(input)
        elif isinstance(input, tuple) or isinstance(input, list):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')
Project: Semi-supervised_Neural_Network    Author: jibancanyang    | Project source | File source
def g(self, tilde_z_l, u_l):
        if self.use_cuda:
            ones = Parameter(torch.ones(tilde_z_l.size()[0], 1).cuda())
        else:
            ones = Parameter(torch.ones(tilde_z_l.size()[0], 1))

        b_a1 = ones.mm(self.a1)
        b_a2 = ones.mm(self.a2)
        b_a3 = ones.mm(self.a3)
        b_a4 = ones.mm(self.a4)
        b_a5 = ones.mm(self.a5)

        b_a6 = ones.mm(self.a6)
        b_a7 = ones.mm(self.a7)
        b_a8 = ones.mm(self.a8)
        b_a9 = ones.mm(self.a9)
        b_a10 = ones.mm(self.a10)

        mu_l = torch.mul(b_a1, torch.sigmoid(torch.mul(b_a2, u_l) + b_a3)) + \
               torch.mul(b_a4, u_l) + \
               b_a5

        v_l = torch.mul(b_a6, torch.sigmoid(torch.mul(b_a7, u_l) + b_a8)) + \
              torch.mul(b_a9, u_l) + \
              b_a10

        hat_z_l = torch.mul(tilde_z_l - mu_l, v_l) + mu_l

        return hat_z_l
Project: pytorch-yolo2    Author: marvis    | Project source | File source
def sigmoid(x):
    return 1.0 / (math.exp(-x) + 1.0)
Project: seqmod    Author: emanjavacas    | Project source | File source
def _step(self, H_t, T_t, h0, h_mask, t_mask):
        s_lm1 = h0
        for l, (rnn_h, rnn_t) in enumerate(zip(self.rnn_h, self.rnn_t)):
            s_lm1_H = h_mask.expand_as(s_lm1) * s_lm1
            s_lm1_T = t_mask.expand_as(s_lm1) * s_lm1
            if l == 0:
                H_t = F.tanh(H_t + rnn_h(s_lm1_H))
                T_t = F.sigmoid(T_t + rnn_t(s_lm1_T))
            else:
                H_t = F.tanh(rnn_h(s_lm1_H))
                T_t = F.sigmoid(rnn_t(s_lm1_T))
            s_l = (H_t - s_lm1) * T_t + s_lm1
            s_lm1 = s_l

        return s_l
Project: pytorch    Author: pytorch    | Project source | File source
def forward(self, input):
        return torch.sigmoid(input)
Project: pytorch    Author: pytorch    | Project source | File source
def sigmoid(input):
    r"""sigmoid(input) -> Variable

    Applies the element-wise function :math:`f(x) = 1 / ( 1 + exp(-x))`

    See :class:`~torch.nn.Sigmoid` for more details.
    """
    return input.sigmoid()


# etc.
Project: pytorch    Author: pytorch    | Project source | File source
def binary_cross_entropy(input, target, weight=None, size_average=True, reduce=True):
    r"""Function that measures the Binary Cross Entropy
    between the target and the output.

    See :class:`~torch.nn.BCELoss` for details.

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        weight (Variable, optional): a manual rescaling weight
                if provided it's repeated to match input tensor shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if the field
                sizeAverage is set to False, the losses are instead summed
                for each minibatch. Default: ``True``
        reduce (bool, optional): By default, the losses are averaged or summed over
                observations for each minibatch depending on size_average. When reduce
                is False, returns a loss per batch element instead and ignores
                size_average. Default: True

    Examples::

        >>> input = autograd.Variable(torch.randn(3), requires_grad=True)
        >>> target = autograd.Variable(torch.LongTensor(3).random_(2))
        >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
        >>> loss.backward()
    """
    if not (target.size() == input.size()):
        warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
                      "Please ensure they have the same size.".format(target.size(), input.size()))
    if input.nelement() != target.nelement():
        raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
                         "!= input nelement ({})".format(target.nelement(), input.nelement()))

    if weight is not None:
        new_size = _infer_size(target.size(), weight.size())
        weight = weight.expand(new_size)
        if torch.is_tensor(weight):
            weight = Variable(weight)

    return torch._C._nn.binary_cross_entropy(input, target, weight, size_average, reduce)
Project: pytorch    Author: pytorch    | Project source | File source
def multilabel_soft_margin_loss(input, target, weight=None, size_average=True):
    """multilabel_soft_margin_loss(input, target, weight=None, size_average=True) -> Variable

    See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details.
    """
    input = torch.sigmoid(input)
    return binary_cross_entropy(input, target, weight, size_average)
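Because the sigmoid here feeds directly into binary_cross_entropy, the two steps can be fused for better numerical stability; a sketch comparing the pair against the fused functional, which computes the same value:

import torch
import torch.nn.functional as F

logits = torch.randn(2, 5)
target = torch.empty(2, 5).random_(2)  # 0/1 float targets

separate = F.binary_cross_entropy(torch.sigmoid(logits), target)
fused = F.binary_cross_entropy_with_logits(logits, target)
assert torch.allclose(separate, fused, atol=1e-6)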
Project: pytorch    Author: pytorch    | Project source | File source
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    hx, cx = hidden
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)

    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    ingate = F.sigmoid(ingate)
    forgetgate = F.sigmoid(forgetgate)
    cellgate = F.tanh(cellgate)
    outgate = F.sigmoid(outgate)

    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * F.tanh(cy)
    return hy, cy