Python torch module: add() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.add().
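Before the project snippets, a quick self-contained refresher (a minimal sketch assuming PyTorch >= 1.0, where the scaling factor is passed as the alpha= keyword; many of the older snippets below instead use the pre-1.0 positional form torch.add(x, value, y), which computes x + value * y):

import torch

x = torch.ones(2, 3)
y = torch.full((2, 3), 2.0)

print(torch.add(x, 5))           # tensor + scalar: every element becomes 6.0
print(torch.add(x, y))           # element-wise tensor + tensor: every element becomes 3.0
print(torch.add(x, y, alpha=3))  # scaled add, x + 3 * y: every element becomes 7.0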

Project: RetinaNet    Author: c0nn3r
def __init__(self, mode, anchors=9, classes=80, depth=4,
                 base_activation=F.relu,
                 output_activation=F.sigmoid):
        super(SubNet, self).__init__()
        self.anchors = anchors
        self.classes = classes
        self.depth = depth
        self.base_activation = base_activation
        self.output_activation = output_activation

        self.subnet_base = nn.ModuleList([conv3x3(256, 256, padding=1)
                                          for _ in range(depth)])

        if mode == 'boxes':
            self.subnet_output = conv3x3(256, 4 * self.anchors, padding=1)
        elif mode == 'classes':
            # add an extra dim for confidence
            self.subnet_output = conv3x3(256, (1 + self.classes) * self.anchors, padding=1)

        self._output_layer_init(self.subnet_output.bias.data)
Project: pytorch-dist    Author: apaszke
def test_csub(self):
        # with a tensor
        a = torch.randn(100, 90)
        b = a.clone().normal_()

        res_add = torch.add(a, -1, b)
        res_csub = a.clone()
        res_csub.sub_(b)
        self.assertEqual(res_add, res_csub)

        # with a scalar
        a = torch.randn(100, 100)

        scalar = 123.5
        res_add = torch.add(a, -scalar)
        res_csub = a.clone()
        res_csub.sub_(scalar)
        self.assertEqual(res_add, res_csub)
Project: pytorch-dist    Author: apaszke
def assertIsOrdered(self, order, x, mxx, ixx, task):
        SIZE = 4
        if order == 'descending':
            check_order = lambda a, b: a >= b
        elif order == 'ascending':
            check_order = lambda a, b: a <= b
        else:
            raise ValueError('unknown order "{}", must be "ascending" or "descending"'.format(order))

        are_ordered = True
        for j, k in product(range(SIZE), range(1, SIZE)):
            self.assertTrue(check_order(mxx[j][k-1], mxx[j][k]),
                    'torch.sort ({}) values unordered for {}'.format(order, task))

        seen = set()
        indicesCorrect = True
        size = x.size(x.dim()-1)
        for k in range(size):
            seen.clear()
            for j in range(size):
                self.assertEqual(x[k][ixx[k][j]], mxx[k][j],
                        'torch.sort ({}) indices wrong for {}'.format(order, task))
                seen.add(ixx[k][j])
            self.assertEqual(len(seen), size)
Project: pytorch-dist    Author: apaszke
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            self.assertEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
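The switch tensor in test_abs is itself built with add() in method form; a step-by-step restatement (a small sketch, not part of the test suite):

import torch

u = torch.rand(5)         # uniform samples in [0, 1)
b = u.mul(2).floor()      # 0.0 or 1.0, each with probability 1/2
s = b.mul(2).add(-1)      # .add(-1) maps {0, 2} down to {-1, 1}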
Project: FreezeOut    Author: ajbrock
def forward(self, x):

        if not self.active:
            self.eval()

        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        out = torch.add(x if self.equalInOut else self.convShortcut(x), out)
        if self.active:
            return out
        else:
            return out.detach()

# note: we call it DenseNet for simple compatibility with the training code.
# similarly, we call it growthRate instead of widen_factor
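The torch.add(shortcut, out) call above is the usual residual connection. A minimal self-contained sketch of the same pattern (a hypothetical TinyResidualBlock, not the FreezeOut code):

import torch
import torch.nn as nn

class TinyResidualBlock(nn.Module):
    def __init__(self, channels):
        super(TinyResidualBlock, self).__init__()
        self.conv = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        out = torch.relu(self.conv(x))
        return torch.add(x, out)  # identity shortcut: y = x + F(x)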
Project: pytorch    Author: tylergenter
def _test_spadd_shape(self, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x, _, _ = self._gen_sparse(len(shape_i), 10, shape)
        y = self.randn(*shape)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * x.to_dense()

        self.assertEqual(res, expected)

        # Non contiguous dense tensor
        s = list(shape)
        s[0] = shape[-1]
        s[-1] = shape[0]
        y = self.randn(*s)
        y.transpose_(0, len(s) - 1)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * x.to_dense()

        self.assertEqual(res, expected)
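A standalone version of the dense-plus-scaled-sparse addition exercised above (a sketch assuming a modern PyTorch with torch.sparse_coo_tensor and the alpha= keyword):

import torch

indices = torch.tensor([[0, 2], [1, 0]])  # non-zeros at (0, 1) and (2, 0)
values = torch.tensor([10.0, 20.0])
sp = torch.sparse_coo_tensor(indices, values, (3, 3))

dense = torch.randn(3, 3)
res = torch.add(dense, sp, alpha=0.5)     # dense + 0.5 * sparse; the result is dense
assert torch.allclose(res, dense + 0.5 * sp.to_dense())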
Project: pytorch    Author: tylergenter
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            # abs is used in assertEqual so we use the slow version instead
            self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
Project: SuperResolution    Author: bguisard
def forward(self, x):
        upblock = True
        # Downsizing layer - Large Kernel ensures large receptive field on the residual blocks
        h = F.relu(self.b2(self.c1(x)))

        # Residual Layers
        for r in self.rs:
            h = r(h)  # will go through all residual blocks in this loop

        if upblock:
            # Upsampling Layers - improvement suggested by [2] to remove "checkerboard pattern"
            for u in self.up:
                h = u(h)  # will go through all upsampling blocks in this loop
        else:
            # As recommended by [1]
            h = F.relu(self.bc2(self.dc2(h)))
            h = F.relu(self.bc3(self.dc3(h)))

        # Last layer and scaled tanh activation - Scaled from 0 to 1 instead of 0 - 255
        h = F.tanh(self.c3(h))
        h = torch.add(h, 1.)
        h = torch.mul(h, 0.5)
        return h
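The final add/mul pair is a common rescaling trick, mapping tanh output from [-1, 1] to [0, 1]; in isolation:

import torch

h = torch.tanh(torch.randn(4))            # values in [-1, 1]
h01 = torch.mul(torch.add(h, 1.0), 0.5)   # (h + 1) / 2, values now in [0, 1]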
Project: pytorch-coriander    Author: hughperkins
def _test_spadd_shape(self, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x, _, _ = self._gen_sparse(len(shape_i), 10, shape)
        y = self.randn(*shape)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * x.to_dense()

        self.assertEqual(res, expected)

        # Non contiguous dense tensor
        s = list(shape)
        s[0] = shape[-1]
        s[-1] = shape[0]
        y = self.randn(*s)
        y.transpose_(0, len(s) - 1)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * x.to_dense()

        self.assertEqual(res, expected)
Project: pytorch-coriander    Author: hughperkins
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            # abs is used in assertEqual so we use the slow version instead
            self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
Project: pytorch    Author: ezyang
def _test_spadd_shape(self, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x, _, _ = self._gen_sparse(len(shape_i), 10, shape)
        y = self.randn(*shape)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * x.to_dense()

        self.assertEqual(res, expected)

        # Non contiguous dense tensor
        s = list(shape)
        s[0] = shape[-1]
        s[-1] = shape[0]
        y = self.randn(*s)
        y.transpose_(0, len(s) - 1)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * x.to_dense()

        self.assertEqual(res, expected)
Project: pytorch    Author: ezyang
def _test_neg(self, cast):
        float_types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor']
        int_types = ['torch.IntTensor', 'torch.ShortTensor']

        for t in float_types + int_types:
            if t in float_types:
                a = cast(torch.randn(100, 90).type(t))
            else:
                a = cast(torch.Tensor(100, 90).type(t).random_())
            zeros = cast(torch.Tensor().type(t)).resize_as_(a).zero_()

            res_add = torch.add(zeros, -1, a)
            res_neg = a.clone()
            res_neg.neg_()
            self.assertEqual(res_neg, res_add)

            # test out of place as well
            res_neg_out_place = a.clone().neg()
            self.assertEqual(res_neg_out_place, res_add)

            # test via __neg__ operator
            res_neg_op = -a.clone()
            self.assertEqual(res_neg_op, res_add)
Project: pytorch    Author: ezyang
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            # abs is used in assertEqual so we use the slow version instead
            self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
Project: MemNN    Author: berlino
def train(epoch):
    for e_ in range(epoch):
        if (e_ + 1) % 10 == 0:
            adjust_learning_rate(optimizer, e_)
        cnt = 0
        loss = Variable(torch.Tensor([0]))
        for i_q, i_k, i_v, i_cand, i_a in zip(train_q, train_key, train_value, train_cand, train_a):
            cnt += 1
            i_q = i_q.unsqueeze(0)  # add a batch dimension
            probs = model.forward(i_q, i_k, i_v, i_cand)
            i_a = Variable(i_a)
            curr_loss = loss_function(probs, i_a)
            loss = torch.add(loss, torch.div(curr_loss, config.batch_size))

            # naive batch implementation: the loss is divided by the batch size,
            # which effectively divides the lr by the batch size
            if cnt % config.batch_size == 0:
                print("Training loss", loss.data.sum())
                loss.backward()
                optimizer.step()
                loss = Variable(torch.Tensor([0]))
                model.zero_grad()
            if cnt % config.valid_every == 0:
                print("Accuracy:", eval())
Project: MemNN    Author: berlino
def forward(self, qu, w, cand):
        qu = Variable(qu)
        w = Variable(w)
        cand = Variable(cand)
        embed_q = self.embed_B(qu)
        embed_w1 = self.embed_A(w)
        embed_w2 = self.embed_C(w)
        embed_c = self.embed_C(cand)

        #pdb.set_trace()
        q_state = torch.sum(embed_q, 1).squeeze(1)
        w1_state = torch.sum(embed_w1, 1).squeeze(1)
        w2_state = torch.sum(embed_w2, 1).squeeze(1)

        for _ in range(self.config.hop):
            sent_dot = torch.mm(q_state, torch.transpose(w1_state, 0, 1))
            sent_att = F.softmax(sent_dot)

            a_dot = torch.mm(sent_att, w2_state)
            a_dot = self.H(a_dot)
            q_state = torch.add(a_dot, q_state)

        f_feat = torch.mm(q_state, torch.transpose(embed_c, 0, 1))
        score = F.log_softmax(f_feat)
        return score
Project: MemNN    Author: berlino
def train(epoch):
    for e_ in range(epoch):
        if (e_ + 1) % 10 == 0:
            adjust_learning_rate(optimizer, e_)
        cnt = 0
        loss = Variable(torch.Tensor([0]))
        for i_q, i_w, i_e_p, i_a in zip(train_q, train_w, train_e_p, train_a):
            cnt += 1
            i_q = i_q.unsqueeze(0)  # add a batch dimension
            probs = model.forward(i_q, i_w, i_e_p)
            i_a = Variable(i_a)
            curr_loss = loss_function(probs, i_a)
            loss = torch.add(loss, torch.div(curr_loss, config.batch_size))

            # naive batch implementation: the loss is divided by the batch size,
            # which effectively divides the lr by the batch size
            if cnt % config.batch_size == 0:
                print("Training loss", loss.data.sum())
                loss.backward()
                optimizer.step()
                loss = Variable(torch.Tensor([0]))
                model.zero_grad()
            if cnt % config.valid_every == 0:
                print("Accuracy:", eval())
Project: pytorch    Author: pytorch
def _test_spadd_shape(self, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x, _, _ = self._gen_sparse(len(shape_i), 10, shape)
        y = self.randn(*shape)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * self.safeToDense(x)

        self.assertEqual(res, expected)

        # Non contiguous dense tensor
        s = list(shape)
        s[0] = shape[-1]
        s[-1] = shape[0]
        y = self.randn(*s)
        y.transpose_(0, len(s) - 1)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * self.safeToDense(x)

        self.assertEqual(res, expected)
Project: pytorch    Author: pytorch
def _test_neg(self, cast):
        float_types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor']
        int_types = ['torch.IntTensor', 'torch.ShortTensor', 'torch.ByteTensor',
                     'torch.CharTensor']

        for t in float_types + int_types:
            if t in float_types:
                a = cast(torch.randn(100, 90).type(t))
            else:
                a = cast(torch.Tensor(100, 90).type(t).random_())
            zeros = cast(torch.Tensor().type(t)).resize_as_(a).zero_()

            res_add = torch.add(zeros, -1, a)
            res_neg = a.clone()
            res_neg.neg_()
            self.assertEqual(res_neg, res_add)

            # test out of place as well
            res_neg_out_place = a.clone().neg()
            self.assertEqual(res_neg_out_place, res_add)

            # test via __neg__ operator
            res_neg_op = -a.clone()
            self.assertEqual(res_neg_op, res_add)
Project: pytorch    Author: pytorch
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            # abs is used in assertEqual so we use the slow version instead
            self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
Project: paysage    Author: drckf
def mix(w: T.FloatingPoint,
        x: T.FloatTensor,
        y: T.FloatTensor) -> T.FloatTensor:
    """
    Compute a weighted average of two matrices (x and y) and return the result.
    Multilinear interpolation.

    Note:
        Returns a new tensor; neither x nor y is modified in place.

    Args:
        w: The mixing coefficient (float or tensor) between 0 and 1.
        x: A tensor.
        y: A tensor.

    Returns:
        tensor = w * x + (1-w) * y

    """
    return torch.add(torch.mul(x, w), torch.mul(1-w, y))
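A quick usage check of the function above (plain tensors standing in for the T type aliases):

import torch

x = torch.zeros(2, 2)
y = torch.ones(2, 2)
# w * x + (1 - w) * y with w = 0.25 gives 0.75 everywhere
print(torch.add(torch.mul(x, 0.25), torch.mul(1 - 0.25, y)))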
Project: paysage    Author: drckf
def pdist(x: T.FloatTensor, y: T.FloatTensor) -> T.FloatTensor:
    """
    Compute the pairwise distance matrix between the rows of x and y.

    Args:
        x (tensor (num_samples_1, num_units))
        y (tensor (num_samples_2, num_units))

    Returns:
        tensor (num_samples_1, num_samples_2)

    """
    inner = dot(x, transpose(y))
    x_mag = norm(x, axis=1) ** 2
    y_mag = norm(y, axis=1) ** 2
    squared = add(unsqueeze(y_mag, axis=0), add(unsqueeze(x_mag, axis=1), -2*inner))
    return torch.sqrt(clip(squared, a_min=0))
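The same ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 x_i.y_j expansion written with plain torch calls, plus a spot check (a sketch; dot, norm, unsqueeze, and clip above are paysage backend aliases for these):

import torch

def pdist_check(x, y):
    inner = x @ y.t()
    x_mag = (x.norm(dim=1) ** 2).unsqueeze(1)   # shape (n1, 1)
    y_mag = (y.norm(dim=1) ** 2).unsqueeze(0)   # shape (1, n2)
    squared = torch.add(y_mag, torch.add(x_mag, -2 * inner))
    return torch.sqrt(torch.clamp(squared, min=0))

x, y = torch.randn(5, 3), torch.randn(7, 3)
d = pdist_check(x, y)
assert torch.allclose(d[0, 0], (x[0] - y[0]).norm(), atol=1e-5)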
Project: RetinaNet    Author: c0nn3r
def forward(self, x):

        # don't need resnet_feature_2 as it is too large
        _, resnet_feature_3, resnet_feature_4, resnet_feature_5 = self.resnet(x)

        pyramid_feature_6 = self.pyramid_transformation_6(resnet_feature_5)
        pyramid_feature_7 = self.pyramid_transformation_7(F.relu(pyramid_feature_6))

        pyramid_feature_5 = self.pyramid_transformation_5(resnet_feature_5)
        pyramid_feature_4 = self.pyramid_transformation_4(resnet_feature_4)
        upsampled_feature_5 = self._upsample(pyramid_feature_5, pyramid_feature_4)

        pyramid_feature_4 = self.upsample_transform_1(
            torch.add(upsampled_feature_5, pyramid_feature_4)
        )

        pyramid_feature_3 = self.pyramid_transformation_3(resnet_feature_3)
        upsampled_feature_4 = self._upsample(pyramid_feature_4, pyramid_feature_3)

        pyramid_feature_3 = self.upsample_transform_2(
            torch.add(upsampled_feature_4, pyramid_feature_3)
        )

        return (pyramid_feature_3,
                pyramid_feature_4,
                pyramid_feature_5,
                pyramid_feature_6,
                pyramid_feature_7)
Project: pytorch-dist    Author: apaszke
def updateOutput(self, input, target):
         # - log(input) * target - log(1 - input) * (1 - target)
        if input.nelement() != target.nelement():
            raise RuntimeError("input and target size mismatch")

        self.buffer = self.buffer or input.new()

        buffer = self.buffer
        weights = self.weights

        buffer.resize_as_(input)

        if weights is not None and target.dim() != 1:
            weights = self.weights.view(1, target.size(1)).expand_as(target)

        # log(input) * target
        torch.add(buffer, input, self.eps).log_()
        if weights is not None:
            buffer.mul_(weights)

        output = torch.dot(target, buffer)

        # log(1 - input) * (1 - target)
        torch.mul(buffer, input, -1).add_(1+self.eps).log_()
        if weights is not None:
            buffer.mul_(weights)

        output = output + torch.sum(buffer)
        output = output - torch.dot(target, buffer)

        if self.sizeAverage:
            output = output / input.nelement()

        self.output = - output

        return self.output
Project: pytorch-dist    Author: apaszke
def updateGradInput(self, input, target):
        # - (target - input) / ( input (1 - input) )
        # The gradient is slightly incorrect:
        # It should have been divided by (input + self.eps) (1 - input + self.eps)
        # but it is divided by input (1 - input + self.eps) + self.eps
        # This modification requires less memory to be computed.
        if input.nelement() != target.nelement():
            raise RuntimeError("input and target size mismatch")

        self.buffer = self.buffer or input.new()

        buffer = self.buffer
        weights = self.weights
        gradInput = self.gradInput

        if weights is not None and target.dim() != 1:
            weights = self.weights.view(1, target.size(1)).expand_as(target)

        buffer.resize_as_(input)
        # - x ( 1 + self.eps - x ) + self.eps
        torch.add(buffer, input, -1).add_(-self.eps).mul_(input).add_(-self.eps)

        gradInput.resize_as_(input)
        # y - x
        torch.add(gradInput, target, -1, input)
        # - (y - x) / ( x ( 1 + self.eps - x ) + self.eps )
        gradInput.div_(buffer)

        if weights is not None:
            gradInput.mul_(weights)

        if self.sizeAverage:
            gradInput.div_(target.nelement())

        return gradInput
Project: pytorch-dist    Author: apaszke
def updateOutput(self, input):
        self.output.resize_(1)
        assert input[0].dim() == 2

        self.diff = self.diff or input[0].new()

        torch.add(self.diff, input[0], -1, input[1]).abs_()

        self.output.resize_(input[0].size(0))
        self.output.zero_()
        self.output.add_(self.diff.pow_(self.norm).sum(1))
        self.output.pow_(1./self.norm)

        return self.output
Project: pytorch-dist    Author: apaszke
def test_clamp(self):
        m1 = torch.rand(100).mul(5).add(-2.5)  # uniform in [-2.5, 2.5]
        # just in case we're extremely lucky.
        min_val = -1
        max_val = 1
        m1[1] = min_val
        m1[2] = max_val

        res1 = m1.clone()
        res1.clamp_(min_val, max_val)
        res2 = m1.clone()
        for i in iter_indices(res2):
            res2[i] = max(min_val, min(max_val, res2[i]))
        self.assertEqual(res1, res2)
Project: pytorch-dist    Author: apaszke
def test_xcorr3_xcorr2_eq(self):
        def reference(x, k, o3, o32):
            for i in range(o3.size(1)):
                for j in range(k.size(1)):
                    o32[i].add_(torch.xcorr2(x[i + j], k[j]))
        self._test_conv_corr_eq(lambda x, k: torch.xcorr3(x, k), reference)
Project: pytorch-dist    Author: apaszke
def test_xcorr3_xcorr2_eq_full(self):
        def reference(x, k, o3, o32):
            for i in range(x.size(1)):
                for j in range(k.size(1)):
                    o32[i].add_(torch.xcorr2(x[i], k[k.size(1) - j - 1], 'F'))
        self._test_conv_corr_eq(lambda x, k: torch.xcorr3(x, k, 'F'), reference)
Project: pytorch-dist    Author: apaszke
def test_conv3_conv2_eq(self):
        def reference(x, k, o3, o32):
            for i in range(o3.size(1)):
                for j in range(k.size(1)):
                    o32[i].add_(torch.conv2(x[i + j], k[k.size(1) - j - 1]))
        self._test_conv_corr_eq(lambda x, k: torch.conv3(x, k), reference)
Project: pytorch-dist    Author: apaszke
def test_pstrf(self):
        def checkPsdCholesky(a, uplo, inplace):
            if inplace:
                u = torch.Tensor(a.size())
                piv = torch.IntTensor(a.size(0))
                args = [u, piv, a]
            else:
                args = [a]

            if uplo is not None:
                args += [uplo]

            u, piv = torch.pstrf(*args)

            if uplo is False:
                a_reconstructed = torch.mm(u, u.t())
            else:
                a_reconstructed = torch.mm(u.t(), u)

            piv = piv.long()
            a_permuted = a.index_select(0, piv).index_select(1, piv)
            self.assertEqual(a_permuted, a_reconstructed, 1e-14)

        dimensions = ((5, 1), (5, 3), (5, 5), (10, 10))
        for dim in dimensions:
            m = torch.Tensor(*dim).uniform_()
            a = torch.mm(m, m.t())
            # add a small number to the diagonal to make the matrix numerically positive semidefinite
            for i in range(m.size(0)):
                a[i][i] = a[i][i] + 1e-7
            for inplace in (True, False):
                for uplo in (None, True, False):
                    checkPsdCholesky(a, uplo, inplace)
Project: MIL.pytorch    Author: gujiuxiang
def forward(self, x):
        x0 = self.conv.forward(x.float())
        x = self.pool_mil(x0)
        x = x.squeeze(2).squeeze(2)
        x1 = torch.add(torch.mul(x0.view(x.size(0), 1000, -1), -1), 1)
        cumprod = torch.cumprod(x1, 2)
        out = torch.max(x, torch.add(torch.mul(cumprod[:, :, -1], -1), 1))
        #out = F.softmax(out)
        return out
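The mul/cumprod/add chain implements noisy-OR pooling over the spatial locations: P(class) = 1 - prod_i (1 - p_i). A toy check of that identity (a sketch, not the MIL code):

import torch

p = torch.tensor([[0.1, 0.2, 0.5]])         # per-location class probabilities
one_minus = torch.add(torch.mul(p, -1), 1)  # 1 - p, written with torch.add as above
noisy_or = torch.add(torch.mul(torch.cumprod(one_minus, 1)[:, -1], -1), 1)
print(noisy_or)                             # 1 - 0.9 * 0.8 * 0.5 = 0.64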
Project: MIL.pytorch    Author: gujiuxiang
def forward(self, img, att_size=14):
        x0 = self.conv(img)
        x = self.pool_mil(x0)
        x = x.squeeze(2).squeeze(2)
        x = self.l1(x)
        x1 = torch.add(torch.mul(x.view(x.size(0), 1000, -1), -1), 1)
        cumprod = torch.cumprod(x1, 2)
        out = torch.max(x, torch.add(torch.mul(cumprod[:, :, -1], -1), 1))
        return out
Project: Tacotron_pytorch    Author: root20
def forward(self, input_enc, input_attW_enc, input_dec, lengths_enc, hidden_att=None, hidden_dec1=None, hidden_dec2=None):
        N = input_dec.size(0)

        out_att = self.prenet(input_dec).unsqueeze(1)                                   # N x O_dec -> N x 1 x H
        out_att, hidden_att = self.gru_att(out_att, hidden_att)                         # N x 1 x 2H
        in_attW_dec = self.linear_dec(out_att.squeeze(1)).unsqueeze(1).expand_as(input_enc)
        in_attW_dec = rnn.pack_padded_sequence(in_attW_dec, lengths_enc, True)          # N*T_enc x 2H

        self.attn_weights = torch.add(input_attW_enc, in_attW_dec.data).tanh()          # N x T_enc x 2H
        self.attn_weights = self.attn(self.attn_weights).exp()                          # N*T_enc x 1
        self.attn_weights = rnn.PackedSequence(self.attn_weights, in_attW_dec.batch_sizes)
        self.attn_weights, _ = rnn.pad_packed_sequence(self.attn_weights, True)
        self.attn_weights = F.normalize(self.attn_weights, 1, 1)                        # N x T_enc x 1

        attn_applied = torch.bmm(self.attn_weights.transpose(1,2), input_enc)           # N x 1 x 2H

        out_dec = torch.cat((attn_applied, out_att), 2)                                 # N x 1 x 4H
        residual = self.short_cut(out_dec.squeeze(1)).unsqueeze(1)                      # N x 1 x 2H

        out_dec, hidden_dec1 = self.gru_dec1(out_dec, hidden_dec1)
        residual = residual + out_dec

        out_dec, hidden_dec2 = self.gru_dec2(residual, hidden_dec2)
        residual = residual + out_dec

        output = self.out(residual.squeeze(1)).view(N, self.r_factor, -1)
        return output, hidden_att, hidden_dec1, hidden_dec2
Project: pytorch-vdsr    Author: twtygqyy
def forward(self, x):
        residual = x
        out = self.relu(self.input(x))
        out = self.residual_layer(out)
        out = self.output(out)
        out = torch.add(out, residual)
        return out
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        # print("source x {} ".format(x.size()))
        x = self.embed(x)  # (N,W,D)
        x = self.dropout(x)
        x = x.unsqueeze(1)  # (N,Ci,W,D)
        if self.args.batch_normalizations is True:
            x = [self.convs1_bn(F.tanh(conv(x))).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        else:
            # x = [self.dropout(F.relu(conv(x)).squeeze(3)) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            # x = [F.tanh(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            # x = [conv(x).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
        x = torch.cat(x, 1)
        # x = self.dropout(x)  # (N,len(Ks)*Co)
        if self.args.batch_normalizations is True:
            x = self.fc1_bn(self.fc1(x))
            fc = self.fc2_bn(self.fc2(F.tanh(x)))
        else:
            fc = self.fc1(x)
            # fc = self.fc2(F.relu(x))

        # print("xxx {} ".format(x.size()))

        gate_layer = F.sigmoid(self.gate_layer(x))

        # calculate highway layer values
        # print(" fc_size {} gate_layer_size {}".format(fc.size(), gate_layer.size()))
        gate_fc_layer = torch.mul(fc, gate_layer)
        # print("gate_layer {} ".format(gate_layer))
        # print("1 - gate_layer size {} ".format((1 - gate_layer).size()))
        # written this way it runs, but it does not match the Highway Networks formula
        # gate_input = torch.mul((1 - gate_layer), fc)
        gate_input = torch.mul((1 - gate_layer), x)
        highway_output = torch.add(gate_fc_layer, gate_input)

        logit = self.logit_layer(highway_output)

        return logit
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def forward(self, x):
        x = self.embed(x)
        x = self.dropout(x)
        # x = x.view(len(x), x.size(1), -1)
        # x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)

        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        # bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2))
        bilstm_out = bilstm_out.squeeze(2)

        hidden2lable = self.hidden2label1(F.tanh(bilstm_out))

        gate_layer = F.sigmoid(self.gate_layer(bilstm_out))
        # calculate highway layer values
        gate_hidden_layer = torch.mul(hidden2lable, gate_layer)
        # written this way it runs, but it does not match the Highway Networks formula
        # gate_input = torch.mul((1 - gate_layer), hidden2lable)
        gate_input = torch.mul((1 - gate_layer), bilstm_out)
        highway_output = torch.add(gate_hidden_layer, gate_input)

        logit = self.logit_layer(highway_output)

        return logit
Project: vnet.pytorch    Author: mattmacy
def forward(self, x):
        # do we want a PRELU here as well?
        out = self.bn1(self.conv1(x))
        # split input into 16 channels
        x16 = torch.cat((x, x, x, x, x, x, x, x,
                         x, x, x, x, x, x, x, x), 0)
        out = self.relu1(torch.add(out, x16))
        return out
Project: vnet.pytorch    Author: mattmacy
def forward(self, x):
        down = self.relu1(self.bn1(self.down_conv(x)))
        out = self.do1(down)
        out = self.ops(out)
        out = self.relu2(torch.add(out, down))
        return out
Project: vnet.pytorch    Author: mattmacy
def forward(self, x, skipx):
        out = self.do1(x)
        skipxdo = self.do2(skipx)
        out = self.relu1(self.bn1(self.up_conv(out)))
        xcat = torch.cat((out, skipxdo), 1)
        out = self.ops(xcat)
        out = self.relu2(torch.add(out, xcat))
        return out
Project: odin-pytorch    Author: ShiyuLiang
def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.conv1(self.equalInOut and out or x)
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(self.relu2(self.bn2(out)))
        return torch.add((not self.equalInOut) and self.convShortcut(x) or x, out)
Project: BiDAF-PyTorch    Author: kelayamatoz
def exp_mask(logits, mask):
    return torch.add(logits.data, (torch.ones(mask.size()) - mask.type(dtype)) * VERY_NEGATIVE_NUMBER)
Project: BiDAF-PyTorch    Author: kelayamatoz
def forward(self, x):
        trans = self.nonlin(self.lin(x))
        gate = self.gate_nonlin(self.gate_lin(x))
        return torch.add(torch.mul(gate, trans), torch.mul((1 - gate), x))
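The same gate combination as a free function, with stand-ins for the learned layers (a hedged sketch, not the BiDAF module):

import torch

def highway(x, trans, gate):
    # gate * trans + (1 - gate) * x
    return torch.add(torch.mul(gate, trans), torch.mul(1 - gate, x))

x = torch.randn(4, 8)
out = highway(x, torch.tanh(x), torch.sigmoid(x))  # tanh/sigmoid stand in for self.lin / self.gate_lin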
Project: pytorch    Author: tylergenter
def updateOutput(self, input, target):
        # - log(input) * target - log(1 - input) * (1 - target)
        if input.nelement() != target.nelement():
            raise RuntimeError("input and target size mismatch")

        if self.buffer is None:
            self.buffer = input.new()

        buffer = self.buffer
        weights = self.weights

        buffer.resize_as_(input)

        if weights is not None and target.dim() != 1:
            weights = self.weights.view(1, target.size(1)).expand_as(target)

        # log(input) * target
        torch.add(input, self.eps, out=buffer).log_()
        if weights is not None:
            buffer.mul_(weights)

        output = torch.dot(target, buffer)

        # log(1 - input) * (1 - target)
        torch.mul(input, -1, out=buffer).add_(1 + self.eps).log_()
        if weights is not None:
            buffer.mul_(weights)

        output = output + torch.sum(buffer)
        output = output - torch.dot(target, buffer)

        if self.sizeAverage:
            output = output / input.nelement()

        self.output = - output

        return self.output
Project: pytorch    Author: tylergenter
def updateGradInput(self, input, target):
        # - (target - input) / ( input (1 - input) )
        # The gradient is slightly incorrect:
        # It should have been divided by (input + self.eps) (1 - input + self.eps)
        # but it is divided by input (1 - input + self.eps) + self.eps
        # This modification requires less memory to be computed.
        if input.nelement() != target.nelement():
            raise RuntimeError("input and target size mismatch")

        if self.buffer is None:
            self.buffer = input.new()

        buffer = self.buffer
        weights = self.weights
        gradInput = self.gradInput

        if weights is not None and target.dim() != 1:
            weights = self.weights.view(1, target.size(1)).expand_as(target)

        buffer.resize_as_(input)
        # - x ( 1 + self.eps -x ) + self.eps
        torch.add(input, -1, out=buffer).add_(-self.eps).mul_(input).add_(-self.eps)

        gradInput.resize_as_(input)
        # y - x
        torch.add(target, -1, input, out=gradInput)
        # - (y - x) / ( x ( 1 + self.eps -x ) + self.eps )
        gradInput.div_(buffer)

        if weights is not None:
            gradInput.mul_(weights)

        if self.sizeAverage:
            gradInput.div_(target.nelement())

        return gradInput
Project: pytorch    Author: tylergenter
def updateOutput(self, input):
        self.output.resize_(1)
        assert input[0].dim() == 2

        if self.diff is None:
            self.diff = input[0].new()

        torch.add(input[0], -1, input[1], out=self.diff).abs_()

        self.output.resize_(input[0].size(0))
        self.output.zero_()
        self.output.add_(self.diff.pow_(self.norm).sum(1))
        self.output.pow_(1. / self.norm)

        return self.output
Project: pytorch    Author: tylergenter
def test_neg(self):
        a = torch.randn(100, 90)
        zeros = torch.Tensor().resize_as_(a).zero_()

        res_add = torch.add(zeros, -1, a)
        res_neg = a.clone()
        res_neg.neg_()
        self.assertEqual(res_neg, res_add)
Project: pytorch    Author: tylergenter
def test_clamp(self):
        m1 = torch.rand(100).mul(5).add(-2.5)  # uniform in [-2.5, 2.5]
        # just in case we're extremely lucky.
        min_val = -1
        max_val = 1
        m1[1] = min_val
        m1[2] = max_val

        res1 = m1.clone()
        res1.clamp_(min_val, max_val)
        res2 = m1.clone()
        for i in iter_indices(res2):
            res2[i] = max(min_val, min(max_val, res2[i]))
        self.assertEqual(res1, res2)

        res1 = torch.clamp(m1, min=min_val)
        res2 = m1.clone()
        for i in iter_indices(res2):
            res2[i] = max(min_val, res2[i])
        self.assertEqual(res1, res2)

        res1 = torch.clamp(m1, max=max_val)
        res2 = m1.clone()
        for i in iter_indices(res2):
            res2[i] = min(max_val, res2[i])
        self.assertEqual(res1, res2)
Project: pytorch    Author: tylergenter
def assertIsOrdered(self, order, x, mxx, ixx, task):
        SIZE = 4
        if order == 'descending':
            def check_order(a, b):
                return a >= b
        elif order == 'ascending':
            def check_order(a, b):
                return a <= b
        else:
            raise ValueError('unknown order "{}", must be "ascending" or "descending"'.format(order))

        are_ordered = True
        for j, k in product(range(SIZE), range(1, SIZE)):
            self.assertTrue(check_order(mxx[j][k - 1], mxx[j][k]),
                            'torch.sort ({}) values unordered for {}'.format(order, task))

        seen = set()
        indicesCorrect = True
        size = x.size(x.dim() - 1)
        for k in range(size):
            seen.clear()
            for j in range(size):
                self.assertEqual(x[k][ixx[k][j]], mxx[k][j],
                                 'torch.sort ({}) indices wrong for {}'.format(order, task))
                seen.add(ixx[k][j])
            self.assertEqual(len(seen), size)
Project: pytorch    Author: tylergenter
def test_xcorr3_xcorr2_eq(self):
        def reference(x, k, o3, o32):
            for i in range(o3.size(1)):
                for j in range(k.size(1)):
                    o32[i].add_(torch.xcorr2(x[i + j], k[j]))
        self._test_conv_corr_eq(lambda x, k: torch.xcorr3(x, k), reference)
Project: pytorch    Author: tylergenter
def test_xcorr3_xcorr2_eq_full(self):
        def reference(x, k, o3, o32):
            for i in range(x.size(1)):
                for j in range(k.size(1)):
                    o32[i].add_(torch.xcorr2(x[i], k[k.size(1) - j - 1], 'F'))
        self._test_conv_corr_eq(lambda x, k: torch.xcorr3(x, k, 'F'), reference)